ngram (list lengths: 0 to 67.8k)

Each row of the ngram column holds the overlapping word n-grams extracted from one Python source file; the source files behind the rows shown here are reproduced below.
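As a rough illustration of how each row's list could have been produced, here is a minimal sketch; the 12-word window size and the plain whitespace tokenization are assumptions, not taken from this dataset.

def word_ngrams(text, n=12):
    # Split on whitespace and emit every contiguous window of n tokens.
    tokens = text.split()
    return [" ".join(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]

# Example: build the n-gram list for one source file.
with open("example.py") as source:
    ngrams = word_ngrams(source.read())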
[ "temperature_file = open(location) # Reading the file... content = temperature_file.read() # Closing file", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier) # sauvegarde(temperature,", "extract_temperature_from_content (content) : # We don't car about the first line, temperature is", "the file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the temperature in", "in degree return (float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location): file = open(file_location,", "# time.sleep(60) else : print(\"Sensor not found. Please check your setup.\") #if len(routes_capteurs)", "about the first line, temperature is given on the second line of the", "file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\" +", "file = open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n') file.close() #", "print(\"Sensor not found. Please check your setup.\") #if len(routes_capteurs) > 0 : #while", "Closing file after reading temperature_file.close() return content def extract_temperature_from_content (content) : # We", "\" : \" + str(temperature)) c += 1 # time.sleep(60) else : print(\"Sensor", "temperature #\" + str(c) + \" : \" + str(temperature)) c += 1", "# Return the temperature in degree return (float(temperature) / 1000) def save_temperature_into_file(temperature, date,", "your setup.\") #if len(routes_capteurs) > 0 : #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d", "Please check your setup.\") #if len(routes_capteurs) > 0 : #while True: # date", ": print(\"Sensor not found. 
Please check your setup.\") #if len(routes_capteurs) > 0 :", "+ \" : \" + str(temperature)) c += 1 # time.sleep(60) else :", "setup.\") #if len(routes_capteurs) > 0 : #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')", ": \" + str(temperature)) c += 1 # time.sleep(60) else : print(\"Sensor not", "# We don't car about the first line, temperature is given on the", "save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature) +", "# Opens the file containing the temperature temperature_file = open(location) # Reading the", "(location) : # Opens the file containing the temperature temperature_file = open(location) #", "file.write(str(temperature) + '\\r\\n') file.close() # We retrieve all the temperature sensors plugged and", "def extract_temperature_from_content (content) : # We don't car about the first line, temperature", "import glob import time import datetime def read_temperature_file (location) : # Opens the", "+ sensor + \"] Sensor's temperature #\" + str(c) + \" : \"", "for sensor in routes_sensors : file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\"", "on the second line of the file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1]", "= temperature_file.read() # Closing file after reading temperature_file.close() return content def extract_temperature_from_content (content)", "str(c) + \" : \" + str(temperature)) c += 1 # time.sleep(60) else", "+ str(c) + \" : \" + str(temperature)) c += 1 # time.sleep(60)", "content = temperature_file.read() # Closing file after reading temperature_file.close() return content def extract_temperature_from_content", "the second line of the file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] #", "\" \") file.write(str(temperature) + '\\r\\n') file.close() # We retrieve all the temperature sensors", "\" + str(temperature)) c += 1 # time.sleep(60) else : print(\"Sensor not found.", "temperature = second_line.split(\"t=\")[1] # Return the temperature in degree return (float(temperature) / 1000)", "the file... content = temperature_file.read() # Closing file after reading temperature_file.close() return content", "the first line, temperature is given on the second line of the file", "sensor + \"] Sensor's temperature #\" + str(c) + \" : \" +", "str(temperature)) c += 1 # time.sleep(60) else : print(\"Sensor not found. 
Please check", "# We retrieve all the temperature sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\")", "def read_temperature_file (location) : # Opens the file containing the temperature temperature_file =", "file after reading temperature_file.close() return content def extract_temperature_from_content (content) : # We don't", "the temperature sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0", "glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')", "extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\" + sensor + \"] Sensor's temperature", "print (\"[\" + str(date) + \"/\" + sensor + \"] Sensor's temperature #\"", "/ 1000) def save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\") file.write(str(date) + \"", "routes_sensors : file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date) +", "time import datetime def read_temperature_file (location) : # Opens the file containing the", "all the temperature sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) >", "\"/\" + sensor + \"] Sensor's temperature #\" + str(c) + \" :", ": #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) #", "file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the temperature in degree", "= extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\" + sensor + \"] Sensor's", "len(routes_capteurs) > 0 : #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier", "True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature =", "temperature sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 :", "We retrieve all the temperature sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if", ": c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors :", "temperature_file.close() return content def extract_temperature_from_content (content) : # We don't car about the", "+ str(temperature)) c += 1 # time.sleep(60) else : print(\"Sensor not found. Please", "the temperature temperature_file = open(location) # Reading the file... content = temperature_file.read() #", "detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c = 1 date", "+ \"] Sensor's temperature #\" + str(c) + \" : \" + str(temperature))", "1000) def save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\") file.write(str(date) + \" \")", "given on the second line of the file second_line = content.split(\"\\n\")[1] temperature =", "+ '\\r\\n') file.close() # We retrieve all the temperature sensors plugged and detected", "not found. 
Please check your setup.\") #if len(routes_capteurs) > 0 : #while True:", "# contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier) # sauvegarde(temperature, date, \"Temperature.txt\") #", "read_temperature_file (location) : # Opens the file containing the temperature temperature_file = open(location)", "import datetime def read_temperature_file (location) : # Opens the file containing the temperature", "# Closing file after reading temperature_file.close() return content def extract_temperature_from_content (content) : #", "return content def extract_temperature_from_content (content) : # We don't car about the first", "sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c", "= open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n') file.close() # We", "+= 1 # time.sleep(60) else : print(\"Sensor not found. Please check your setup.\")", "check your setup.\") #if len(routes_capteurs) > 0 : #while True: # date =", "(\"[\" + str(date) + \"/\" + sensor + \"] Sensor's temperature #\" +", "of the file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the temperature", "#if len(routes_capteurs) > 0 : #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') #", "open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n') file.close() # We retrieve", "= open(location) # Reading the file... content = temperature_file.read() # Closing file after", "found. Please check your setup.\") #if len(routes_capteurs) > 0 : #while True: #", "# date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier)", "temperature_file.read() # Closing file after reading temperature_file.close() return content def extract_temperature_from_content (content) :", "the temperature in degree return (float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location): file", "= second_line.split(\"t=\")[1] # Return the temperature in degree return (float(temperature) / 1000) def", "and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c = 1", "return (float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\") file.write(str(date)", "0 : #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0])", "file... content = temperature_file.read() # Closing file after reading temperature_file.close() return content def", "else : print(\"Sensor not found. Please check your setup.\") #if len(routes_capteurs) > 0", "sensor in routes_sensors : file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" +", "c += 1 # time.sleep(60) else : print(\"Sensor not found. 
Please check your", "contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier) # sauvegarde(temperature, date, \"Temperature.txt\") # time.sleep(60)", "reading temperature_file.close() return content def extract_temperature_from_content (content) : # We don't car about", "content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the temperature in degree return (float(temperature) /", "+ str(date) + \"/\" + sensor + \"] Sensor's temperature #\" + str(c)", "(float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\") file.write(str(date) +", "content def extract_temperature_from_content (content) : # We don't car about the first line,", "file.close() # We retrieve all the temperature sensors plugged and detected routes_sensors =", "\"] Sensor's temperature #\" + str(c) + \" : \" + str(temperature)) c", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors : file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content)", "second line of the file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return", "= content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the temperature in degree return (float(temperature)", "if len(routes_sensors) > 0 : c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for", ": file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\"", "#while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature", "Opens the file containing the temperature temperature_file = open(location) # Reading the file...", "= glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d", "plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c =", "date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier) #", ": # We don't car about the first line, temperature is given on", "second_line.split(\"t=\")[1] # Return the temperature in degree return (float(temperature) / 1000) def save_temperature_into_file(temperature,", "1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors : file_content = read_temperature_file(sensor)", "import time import datetime def read_temperature_file (location) : # Opens the file containing", "= 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors : file_content =", "retrieve all the temperature sensors plugged and detected routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors)", "don't car about the first line, temperature is given on the second line", "Sensor's temperature #\" + str(c) + \" : \" + str(temperature)) c +=", ": # Opens the file containing the temperature temperature_file = open(location) # Reading", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors : file_content = read_temperature_file(sensor) temperature =", "file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n') file.close() # We retrieve all the", "> 0 : c = 1 date = 
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in", "\"a\") file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n') file.close() # We retrieve all", "datetime def read_temperature_file (location) : # Opens the file containing the temperature temperature_file", "(content) : # We don't car about the first line, temperature is given", "len(routes_sensors) > 0 : c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor", "> 0 : #while True: # date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier =", "\") file.write(str(temperature) + '\\r\\n') file.close() # We retrieve all the temperature sensors plugged", "after reading temperature_file.close() return content def extract_temperature_from_content (content) : # We don't car", "temperature is given on the second line of the file second_line = content.split(\"\\n\")[1]", "1 # time.sleep(60) else : print(\"Sensor not found. Please check your setup.\") #if", "containing the temperature temperature_file = open(location) # Reading the file... content = temperature_file.read()", "str(date) + \"/\" + sensor + \"] Sensor's temperature #\" + str(c) +", "date, file_location): file = open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n')", "line of the file second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the", "0 : c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors", "c = 1 date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors : file_content", "+ \" \") file.write(str(temperature) + '\\r\\n') file.close() # We retrieve all the temperature", "time.sleep(60) else : print(\"Sensor not found. Please check your setup.\") #if len(routes_capteurs) >", "Reading the file... content = temperature_file.read() # Closing file after reading temperature_file.close() return", "read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\" + sensor +", "+ \"/\" + sensor + \"] Sensor's temperature #\" + str(c) + \"", "%H:%M:%S') for sensor in routes_sensors : file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print", "is given on the second line of the file second_line = content.split(\"\\n\")[1] temperature", "file_location): file = open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature) + '\\r\\n') file.close()", "first line, temperature is given on the second line of the file second_line", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier) # sauvegarde(temperature, date,", "degree return (float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\")", "= read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\" + sensor", "%H:%M:%S') # contenu_fichier = lire_fichier(routes_capteurs[0]) # temperature = extraire_temperature(contenu_fichier) # sauvegarde(temperature, date, \"Temperature.txt\")", "open(location) # Reading the file... 
content = temperature_file.read() # Closing file after reading", "second_line = content.split(\"\\n\")[1] temperature = second_line.split(\"t=\")[1] # Return the temperature in degree return", "'\\r\\n') file.close() # We retrieve all the temperature sensors plugged and detected routes_sensors", "temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date) + \"/\" + sensor + \"]", "file containing the temperature temperature_file = open(location) # Reading the file... content =", "def save_temperature_into_file(temperature, date, file_location): file = open(file_location, \"a\") file.write(str(date) + \" \") file.write(str(temperature)", "date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') for sensor in routes_sensors : file_content = read_temperature_file(sensor) temperature", "in routes_sensors : file_content = read_temperature_file(sensor) temperature = extract_temperature_from_content(file_content) print (\"[\" + str(date)", "glob import time import datetime def read_temperature_file (location) : # Opens the file", "Return the temperature in degree return (float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location):", "temperature temperature_file = open(location) # Reading the file... content = temperature_file.read() # Closing", "# Reading the file... content = temperature_file.read() # Closing file after reading temperature_file.close()", "routes_sensors = glob.glob(\"/sys/bus/w1/devices/28*/w1_slave\") if len(routes_sensors) > 0 : c = 1 date =", "car about the first line, temperature is given on the second line of", "the file containing the temperature temperature_file = open(location) # Reading the file... content", "We don't car about the first line, temperature is given on the second", "temperature in degree return (float(temperature) / 1000) def save_temperature_into_file(temperature, date, file_location): file =", "line, temperature is given on the second line of the file second_line =", "#\" + str(c) + \" : \" + str(temperature)) c += 1 #" ]
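save_temperature_into_file is defined above but only ever referenced from the commented-out block. A hedged sketch of how the logging loop might look if re-enabled; the 60-second interval and the Temperature.txt path come from the commented-out lines, while the per-sensor loop is an assumption.

# Hypothetical logging loop, modeled on the commented-out block above.
while True:
    date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    for sensor in routes_sensors:
        temperature = extract_temperature_from_content(read_temperature_file(sensor))
        save_temperature_into_file(temperature, date, "Temperature.txt")
    time.sleep(60)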
[ "import simplejson def prepare_for_emit(obj): \"\"\" Prepare the object for emit() by Tornadio2's (too", "the object for emit() by Tornadio2's (too simple) JSON renderer - render to", "- convert back to _simple_ Python object using Django's simplejson \"\"\" json =", "renderer - render to JSON using Django REST Framework 2's JSON renderer -", "simplejson def prepare_for_emit(obj): \"\"\" Prepare the object for emit() by Tornadio2's (too simple)", "django.utils import simplejson def prepare_for_emit(obj): \"\"\" Prepare the object for emit() by Tornadio2's", "simple) JSON renderer - render to JSON using Django REST Framework 2's JSON", "using Django REST Framework 2's JSON renderer - convert back to _simple_ Python", "- render to JSON using Django REST Framework 2's JSON renderer - convert", "REST Framework 2's JSON renderer - convert back to _simple_ Python object using", "2's JSON renderer - convert back to _simple_ Python object using Django's simplejson", "(too simple) JSON renderer - render to JSON using Django REST Framework 2's", "prepare_for_emit(obj): \"\"\" Prepare the object for emit() by Tornadio2's (too simple) JSON renderer", "from rest_framework.renderers import JSONRenderer from django.utils import simplejson def prepare_for_emit(obj): \"\"\" Prepare the", "to JSON using Django REST Framework 2's JSON renderer - convert back to", "back to _simple_ Python object using Django's simplejson \"\"\" json = JSONRenderer().render(obj) return", "convert back to _simple_ Python object using Django's simplejson \"\"\" json = JSONRenderer().render(obj)", "JSON renderer - convert back to _simple_ Python object using Django's simplejson \"\"\"", "\"\"\" Prepare the object for emit() by Tornadio2's (too simple) JSON renderer -", "rest_framework.renderers import JSONRenderer from django.utils import simplejson def prepare_for_emit(obj): \"\"\" Prepare the object", "Prepare the object for emit() by Tornadio2's (too simple) JSON renderer - render", "for emit() by Tornadio2's (too simple) JSON renderer - render to JSON using", "Framework 2's JSON renderer - convert back to _simple_ Python object using Django's", "import JSONRenderer from django.utils import simplejson def prepare_for_emit(obj): \"\"\" Prepare the object for", "JSON renderer - render to JSON using Django REST Framework 2's JSON renderer", "emit() by Tornadio2's (too simple) JSON renderer - render to JSON using Django", "JSONRenderer from django.utils import simplejson def prepare_for_emit(obj): \"\"\" Prepare the object for emit()", "renderer - convert back to _simple_ Python object using Django's simplejson \"\"\" json", "Tornadio2's (too simple) JSON renderer - render to JSON using Django REST Framework", "from django.utils import simplejson def prepare_for_emit(obj): \"\"\" Prepare the object for emit() by", "to _simple_ Python object using Django's simplejson \"\"\" json = JSONRenderer().render(obj) return simplejson.loads(json)", "def prepare_for_emit(obj): \"\"\" Prepare the object for emit() by Tornadio2's (too simple) JSON", "render to JSON using Django REST Framework 2's JSON renderer - convert back", "JSON using Django REST Framework 2's JSON renderer - convert back to _simple_", "by Tornadio2's (too simple) JSON renderer - render to JSON using Django REST", "Django REST Framework 2's JSON renderer - convert back to _simple_ Python object", "object for emit() by Tornadio2's (too simple) JSON renderer - render to JSON" ]
[ "if it were inserted in order. You may assume no duplicates in the", "nums[hi] > target: return midpoint(low, hi) elif nums[hi] < target: return hi +", "return hi + 1 else: return midpoint(low, hi) + 1 if candidate ==", "return midpoint(low, hi) + 1 if candidate == target: return midpoint(low, hi) elif", "hi): print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low, hi)] if abs(hi - low)", "\"\"\" return binary_search(nums, target, 0, len(nums) - 1) def binary_search(nums, target, low, hi):", "it were inserted in order. You may assume no duplicates in the array.", "target, low, midpoint(low, hi)) def midpoint(low, hi): return (low + hi) / 2", "1 else: return midpoint(low, hi) + 1 if candidate == target: return midpoint(low,", "it would be if it were inserted in order. You may assume no", "a target value, return the index if the target is found. If not,", "binary_search(nums, target, low, hi): print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low, hi)] if", "else: return midpoint(low, hi) + 1 if candidate == target: return midpoint(low, hi)", "return binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low, hi): return (low + hi)", "low) <= 1: if nums[low] > target: return low elif nums[hi] > target:", "if the target is found. If not, return the index where it would", "in order. You may assume no duplicates in the array. \"\"\" return binary_search(nums,", "Given a sorted array and a target value, return the index if the", "Solution: def searchInsert(self, nums, target): \"\"\" Given a sorted array and a target", "target, low, hi): print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low, hi)] if abs(hi", "duplicates in the array. \"\"\" return binary_search(nums, target, 0, len(nums) - 1) def", "binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low, hi): return (low + hi) /", "target): \"\"\" Given a sorted array and a target value, return the index", "in the array. \"\"\" return binary_search(nums, target, 0, len(nums) - 1) def binary_search(nums,", "hi) elif nums[hi] < target: return hi + 1 else: return midpoint(low, hi)", "hi) elif candidate < target: return binary_search(nums, target, midpoint(low, hi), hi) elif candidate", "return the index if the target is found. If not, return the index", "target: return midpoint(low, hi) elif nums[hi] < target: return hi + 1 else:", "elif nums[hi] > target: return midpoint(low, hi) elif nums[hi] < target: return hi", "if abs(hi - low) <= 1: if nums[low] > target: return low elif", "hi + 1 else: return midpoint(low, hi) + 1 if candidate == target:", "may assume no duplicates in the array. \"\"\" return binary_search(nums, target, 0, len(nums)", "low elif nums[hi] > target: return midpoint(low, hi) elif nums[hi] < target: return", "array. \"\"\" return binary_search(nums, target, 0, len(nums) - 1) def binary_search(nums, target, low,", "sorted array and a target value, return the index if the target is", "and a target value, return the index if the target is found. 
If", "nums[hi] < target: return hi + 1 else: return midpoint(low, hi) + 1", "nums, target): \"\"\" Given a sorted array and a target value, return the", "- 1) def binary_search(nums, target, low, hi): print(\"low\", low, \"high\", hi) candidate =", "0, len(nums) - 1) def binary_search(nums, target, low, hi): print(\"low\", low, \"high\", hi)", "searchInsert(self, nums, target): \"\"\" Given a sorted array and a target value, return", "candidate < target: return binary_search(nums, target, midpoint(low, hi), hi) elif candidate > target:", "> target: return midpoint(low, hi) elif nums[hi] < target: return hi + 1", "index if the target is found. If not, return the index where it", "hi)] if abs(hi - low) <= 1: if nums[low] > target: return low", "midpoint(low, hi) elif candidate < target: return binary_search(nums, target, midpoint(low, hi), hi) elif", "nums[low] > target: return low elif nums[hi] > target: return midpoint(low, hi) elif", "return low elif nums[hi] > target: return midpoint(low, hi) elif nums[hi] < target:", "a sorted array and a target value, return the index if the target", "return midpoint(low, hi) elif candidate < target: return binary_search(nums, target, midpoint(low, hi), hi)", "\"\"\" Given a sorted array and a target value, return the index if", "be if it were inserted in order. You may assume no duplicates in", "target: return low elif nums[hi] > target: return midpoint(low, hi) elif nums[hi] <", "return binary_search(nums, target, midpoint(low, hi), hi) elif candidate > target: return binary_search(nums, target,", "no duplicates in the array. \"\"\" return binary_search(nums, target, 0, len(nums) - 1)", "were inserted in order. You may assume no duplicates in the array. \"\"\"", "if nums[low] > target: return low elif nums[hi] > target: return midpoint(low, hi)", "\"high\", hi) candidate = nums[midpoint(low, hi)] if abs(hi - low) <= 1: if", "target, 0, len(nums) - 1) def binary_search(nums, target, low, hi): print(\"low\", low, \"high\",", "the array. \"\"\" return binary_search(nums, target, 0, len(nums) - 1) def binary_search(nums, target,", "If not, return the index where it would be if it were inserted", "target: return binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low, hi): return (low +", "midpoint(low, hi) elif nums[hi] < target: return hi + 1 else: return midpoint(low,", "elif candidate > target: return binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low, hi):", "1 if candidate == target: return midpoint(low, hi) elif candidate < target: return", "target is found. If not, return the index where it would be if", "target: return binary_search(nums, target, midpoint(low, hi), hi) elif candidate > target: return binary_search(nums,", "midpoint(low, hi), hi) elif candidate > target: return binary_search(nums, target, low, midpoint(low, hi))", "where it would be if it were inserted in order. You may assume", "1) def binary_search(nums, target, low, hi): print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low,", "array and a target value, return the index if the target is found.", "candidate == target: return midpoint(low, hi) elif candidate < target: return binary_search(nums, target,", "<= 1: if nums[low] > target: return low elif nums[hi] > target: return", "value, return the index if the target is found. 
If not, return the", "hi), hi) elif candidate > target: return binary_search(nums, target, low, midpoint(low, hi)) def", "class Solution: def searchInsert(self, nums, target): \"\"\" Given a sorted array and a", "return binary_search(nums, target, 0, len(nums) - 1) def binary_search(nums, target, low, hi): print(\"low\",", "midpoint(low, hi) + 1 if candidate == target: return midpoint(low, hi) elif candidate", "would be if it were inserted in order. You may assume no duplicates", "== target: return midpoint(low, hi) elif candidate < target: return binary_search(nums, target, midpoint(low,", "assume no duplicates in the array. \"\"\" return binary_search(nums, target, 0, len(nums) -", "inserted in order. You may assume no duplicates in the array. \"\"\" return", "elif candidate < target: return binary_search(nums, target, midpoint(low, hi), hi) elif candidate >", "def searchInsert(self, nums, target): \"\"\" Given a sorted array and a target value,", "found. If not, return the index where it would be if it were", "< target: return hi + 1 else: return midpoint(low, hi) + 1 if", "target: return midpoint(low, hi) elif candidate < target: return binary_search(nums, target, midpoint(low, hi),", "the index if the target is found. If not, return the index where", "< target: return binary_search(nums, target, midpoint(low, hi), hi) elif candidate > target: return", "def binary_search(nums, target, low, hi): print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low, hi)]", "hi) + 1 if candidate == target: return midpoint(low, hi) elif candidate <", "= nums[midpoint(low, hi)] if abs(hi - low) <= 1: if nums[low] > target:", "low, hi): print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low, hi)] if abs(hi -", "candidate = nums[midpoint(low, hi)] if abs(hi - low) <= 1: if nums[low] >", "binary_search(nums, target, 0, len(nums) - 1) def binary_search(nums, target, low, hi): print(\"low\", low,", "return midpoint(low, hi) elif nums[hi] < target: return hi + 1 else: return", "> target: return binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low, hi): return (low", "not, return the index where it would be if it were inserted in", "1: if nums[low] > target: return low elif nums[hi] > target: return midpoint(low,", "You may assume no duplicates in the array. \"\"\" return binary_search(nums, target, 0,", "+ 1 if candidate == target: return midpoint(low, hi) elif candidate < target:", "is found. If not, return the index where it would be if it", "the index where it would be if it were inserted in order. 
You", "- low) <= 1: if nums[low] > target: return low elif nums[hi] >", "+ 1 else: return midpoint(low, hi) + 1 if candidate == target: return", "hi) elif candidate > target: return binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low,", "candidate > target: return binary_search(nums, target, low, midpoint(low, hi)) def midpoint(low, hi): return", "nums[midpoint(low, hi)] if abs(hi - low) <= 1: if nums[low] > target: return", "low, \"high\", hi) candidate = nums[midpoint(low, hi)] if abs(hi - low) <= 1:", "if candidate == target: return midpoint(low, hi) elif candidate < target: return binary_search(nums,", "binary_search(nums, target, midpoint(low, hi), hi) elif candidate > target: return binary_search(nums, target, low,", "elif nums[hi] < target: return hi + 1 else: return midpoint(low, hi) +", "abs(hi - low) <= 1: if nums[low] > target: return low elif nums[hi]", "> target: return low elif nums[hi] > target: return midpoint(low, hi) elif nums[hi]", "target: return hi + 1 else: return midpoint(low, hi) + 1 if candidate", "return the index where it would be if it were inserted in order.", "the target is found. If not, return the index where it would be", "len(nums) - 1) def binary_search(nums, target, low, hi): print(\"low\", low, \"high\", hi) candidate", "index where it would be if it were inserted in order. You may", "print(\"low\", low, \"high\", hi) candidate = nums[midpoint(low, hi)] if abs(hi - low) <=", "order. You may assume no duplicates in the array. \"\"\" return binary_search(nums, target,", "target value, return the index if the target is found. If not, return", "target, midpoint(low, hi), hi) elif candidate > target: return binary_search(nums, target, low, midpoint(low,", "hi) candidate = nums[midpoint(low, hi)] if abs(hi - low) <= 1: if nums[low]" ]
[ "= [8,4,7,6,2,3,5,-2,-1,0,1,-6,-8,5,0,-9] print(mylist) for i in range(len(mylist)): if mylist[i]>0: mylist[i]=1 elif mylist[i]<0: mylist[i]=-1", "[8,4,7,6,2,3,5,-2,-1,0,1,-6,-8,5,0,-9] print(mylist) for i in range(len(mylist)): if mylist[i]>0: mylist[i]=1 elif mylist[i]<0: mylist[i]=-1 else:", "mylist = [8,4,7,6,2,3,5,-2,-1,0,1,-6,-8,5,0,-9] print(mylist) for i in range(len(mylist)): if mylist[i]>0: mylist[i]=1 elif mylist[i]<0:", "<reponame>gptakhil/Python_Practice_Beginner mylist = [8,4,7,6,2,3,5,-2,-1,0,1,-6,-8,5,0,-9] print(mylist) for i in range(len(mylist)): if mylist[i]>0: mylist[i]=1 elif", "for i in range(len(mylist)): if mylist[i]>0: mylist[i]=1 elif mylist[i]<0: mylist[i]=-1 else: mylist[i]=0 print(mylist)", "print(mylist) for i in range(len(mylist)): if mylist[i]>0: mylist[i]=1 elif mylist[i]<0: mylist[i]=-1 else: mylist[i]=0" ]
[ "primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='', max_length=200)), ('year', models.DateField(default=datetime.date.today)), ], ),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ]", "Generated by Django 2.0.6 on 2019-02-21 17:52 import datetime from django.db import migrations,", "'0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "2019-02-21 17:52 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "2.0.6 on 2019-02-21 17:52 import datetime from django.db import migrations, models class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car',", "on 2019-02-21 17:52 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies", "by Django 2.0.6 on 2019-02-21 17:52 import datetime from django.db import migrations, models", "Django 2.0.6 on 2019-02-21 17:52 import datetime from django.db import migrations, models class", "[ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model',", "migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='',", "dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car', fields=[ ('id',", "operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='',", "Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car', fields=[", "migrations, models class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations = [", "[ ('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations =", "datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'),", "('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='', max_length=200)),", "17:52 import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "= [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='', max_length=200)), ('year',", "# Generated by Django 2.0.6 on 2019-02-21 17:52 import datetime from django.db import", "models class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel(", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='', max_length=200)), ('year', models.DateField(default=datetime.date.today)),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='', max_length=200)), ('year', models.DateField(default=datetime.date.today)), ],", "] operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('make',", "serialize=False, verbose_name='ID')), ('make', models.CharField(default='', max_length=200)), ('model', models.CharField(default='', max_length=200)), ('year', models.DateField(default=datetime.date.today)), ], ), ]", "= [ ('cwApp', '0002_auto_20190221_1735'), ] operations = [ migrations.CreateModel( name='Car', fields=[ ('id', models.AutoField(auto_created=True,", "<filename>cwProject/cwApp/migrations/0003_car.py<gh_stars>0 # Generated by Django 2.0.6 on 2019-02-21 17:52 import datetime from django.db", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cwApp', '0002_auto_20190221_1735'), ] operations", "import datetime from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cwApp'," ]
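The migration above corresponds to a model definition along these lines (a hedged sketch; the project's actual models.py is not part of this excerpt):

# cwApp/models.py (assumed)
import datetime

from django.db import models


class Car(models.Model):
    make = models.CharField(default='', max_length=200)
    model = models.CharField(default='', max_length=200)
    year = models.DateField(default=datetime.date.today)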
[ "dash_rq_demo.core import conn if __name__ == \"__main__\": with app.server.app_context(): with Connection(conn): w =", "rq import Connection, Worker from dash_rq_demo import app, queue from dash_rq_demo.core import conn", "from dash_rq_demo import app, queue from dash_rq_demo.core import conn if __name__ == \"__main__\":", "import conn if __name__ == \"__main__\": with app.server.app_context(): with Connection(conn): w = Worker([queue])", "dash_rq_demo import app, queue from dash_rq_demo.core import conn if __name__ == \"__main__\": with", "import app, queue from dash_rq_demo.core import conn if __name__ == \"__main__\": with app.server.app_context():", "from rq import Connection, Worker from dash_rq_demo import app, queue from dash_rq_demo.core import", "<reponame>mdylan2/propertyfinderscraper from rq import Connection, Worker from dash_rq_demo import app, queue from dash_rq_demo.core", "import Connection, Worker from dash_rq_demo import app, queue from dash_rq_demo.core import conn if", "queue from dash_rq_demo.core import conn if __name__ == \"__main__\": with app.server.app_context(): with Connection(conn):", "conn if __name__ == \"__main__\": with app.server.app_context(): with Connection(conn): w = Worker([queue]) w.work()", "app, queue from dash_rq_demo.core import conn if __name__ == \"__main__\": with app.server.app_context(): with", "from dash_rq_demo.core import conn if __name__ == \"__main__\": with app.server.app_context(): with Connection(conn): w", "Connection, Worker from dash_rq_demo import app, queue from dash_rq_demo.core import conn if __name__", "Worker from dash_rq_demo import app, queue from dash_rq_demo.core import conn if __name__ ==" ]
[ "# Convert to tf example. example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': _bytes_list_feature(image_data), 'image/filename': _bytes_list_feature(image_file_name), 'image/format':", "= \"tfrecord\", train_pct: float = 1.0, ): \"\"\" Creates TFRecord files corresponding to", "files corresponding to the images \"\"\" with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx = args[\"shard_id\"]", "ID (i.e. the label's line number). So a labels file like so: cat", "annotation = image_annotations[image_file_name] # get the image's dimensions width, height, _ = image_dimensions(os.path.join(images_dir,", "args[\"base_name\"], args[\"train_pct\"], ) else: raise ValueError(f\"Unsupported output format: {args['out_format']}\") else: raise ValueError(f\"Unsupported input", "region's mask polygon x_coords = shape_attributes[\"all_points_x\"] y_coords = shape_attributes[\"all_points_y\"] coords = zip(x_coords, y_coords)", "file names and subsets of file IDs if train_pct < 1.0: # get", "required=False, type=str, choices=[\"png\", \"tfrecord\"], help=\"format of output annotations/masks\", ) args_parser.add_argument( \"--classes\", required=False, type=str,", ":param tfrecord_dir: directory where the output TFRecord files will be written :param num_shards:", "used as an input)\", ) args_parser.add_argument( \"--tfrecords\", required=False, type=str, help=\"path to directory where", "when converting to TFRecord format\", ) args_parser.add_argument( \"--train_pct\", required=False, default=1.0, type=float, help=\"percentage of", "requested if combine_into_one: # write the mask file mask_file_name = f\"{file_id}_segmentation.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name),", "mask array for each mask region if not combine_into_one: # allocate memory for", "args_parser.add_argument( \"--combine\", default=False, action='store_true', help=\"combine all regions/classes into a single mask file\", )", "tool :param masks_dir: directory where PNG mask files will be written :param class_labels_file:", "the mask file mask_file_name = f\"{file_id}_segmentation_{i}.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask) # write a combined", "------------------------------------------------------------------------------ def vgg_to_masks( images_dir: str, annotations_file: str, masks_dir: str, class_labels_file: str, combine_into_one: bool", "training subset \" \"(validation subset will equal 1.0 - train_pct), if 1.0 then", "as the key image_annotations[data[\"filename\"]] = data # get a dictionary of class labels", "# write the mask file mask_file_name = f\"{file_id}_segmentation.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask) _logger.info(\"Done\") #", "class_labels_file: str, combine_into_one: bool = False, ): \"\"\" Creates mask files from annotations", "into a single mask file\", ) args_parser.add_argument( \"--shards\", required=False, default=1, type=int, help=\"number of", "region[\"region_attributes\"] # find the class ID corresponding to the region's class attribute class_label", "to all file IDs if \"\" == dataset_base_name: tfrecord_file_prefix = \"tfrecord\" else: tfrecord_file_prefix", "= region[\"region_attributes\"] # find the class ID corresponding to the region's class attribute", "= {} # loop over the file ID and annotations themselves (values) for", "image {i + 1}/{len(args[\"file_ids\"])} \"' f'shard {args[\"shard_id\"]}') # read the image image_file_name =", "number of samples in each split section 
_logger.info(f\"TFRecord dataset contains {len(file_ids[:split_index])} training samples\")", "= \".jpg\" file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext)) random.shuffle(file_ids) # create a mapping", "f'shard {args[\"shard_id\"]}') # read the image image_file_name = args[\"file_ids\"][i] + \".jpg\" image_path =", "an 80% training and 20% validation split: $ python mask.py --images /data/lesions/images \\", "with an 80% training and 20% validation split: $ python mask.py --images /data/lesions/images", "tool. :param images_dir: directory containing JPG image files :param annotations_file : annotation file", "if not combine_into_one: # write the mask file mask_file_name = f\"{file_id}_segmentation_{i}.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name),", "the VIA tool) and initialize the annotations dictionary annotations = json.loads(open(annotations_file).read()) image_annotations =", "format\", ) args_parser.add_argument( \"--train_pct\", required=False, default=1.0, type=float, help=\"percentage of images/masks to use for", "TFRecord files # based on the presence of a specified file base name", "per shard num_images: total number of images in dataset file_ids: file IDs for", "the TFRecords in parallel with concurrent.futures.ProcessPoolExecutor() as executor: # map the TFRecord creation", "\"output_path\": output_filename, \"shard_id\": shard_id, \"num_per_shard\": num_per_shard, \"num_images\": num_images, \"file_ids\": file_ids, \"images_dir\": images_dir, \"masks_dir\":", "creating masks from VIA annotations: $ python mask.py --in_format vgg \\ --images /data/images", "args[\"num_per_shard\"] end_idx = min((args[\"shard_id\"] + 1) * args[\"num_per_shard\"], args[\"num_images\"]) for i in range(start_idx,", "to directory containing input image files\", ) args_parser.add_argument( \"--masks\", required=False, type=str, help=\"path to", "mask_file_name), region_mask) _logger.info(\"Done\") # ------------------------------------------------------------------------------ def main(): # parse the command line arguments", "split_names_to_ids = { tfrecord_file_prefix_train: file_ids[:split_index], tfrecord_file_prefix_valid: file_ids[split_index:], } # report the number of", "# write a combined mask file, if requested if combine_into_one: # write the", "mask file # then we'll only need to allocate the mask array once", "train/valid split)\") # create an iterable of arguments that will be mapped to", "to concurrent future processes args_iterable = [] for base_name, file_ids in split_names_to_ids.items(): num_images", "the set of file IDs split_names_to_ids = { tfrecord_file_prefix: file_ids, } # report", "Creates TFRecord files corresponding to a dataset of JPG images with corresponding set", "output annotations/masks\", ) args_parser.add_argument( \"--classes\", required=False, type=str, help=\"path of the class labels file", "int = 1, dataset_base_name: str = \"tfrecord\", train_pct: float = 1.0, ): \"\"\"", "for training, with (1.0 minus this value as the validation percentage), if this", "TFRecord files\", ) args = vars(args_parser.parse_args()) if args[\"in_format\"] == \"vgg\": if args[\"out_format\"] ==", "_bytes_list_feature(image_file_name), 'image/format': _bytes_list_feature('jpeg'), 'image/height': _int64_list_feature(height), 'image/width': _int64_list_feature(width), 'image/channels': _int64_list_feature(3), 'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)), 'image/segmentation/class/format': _bytes_list_feature('png'),", "then 
no split will occur \"\"\" masks_ext = \".png\" images_ext = \".jpg\" file_ids", "of input annotations\", ) args_parser.add_argument( \"--out_format\", required=False, type=str, choices=[\"png\", \"tfrecord\"], help=\"format of output", "mask region if not combine_into_one: # allocate memory for the region mask region_mask", "if requested if combine_into_one: # write the mask file mask_file_name = f\"{file_id}_segmentation.png\" cv2.imwrite(os.path.join(masks_dir,", "if not combining all regions into a single mask file then # we'll", "the training subset \" \"(validation subset will equal 1.0 - train_pct), if 1.0", "np.zeros((height, width, 3), dtype=\"uint8\") # grab the shape and region attributes shape_attributes =", "= shape_attributes[\"all_points_x\"] y_coords = shape_attributes[\"all_points_y\"] coords = zip(x_coords, y_coords) poly_coords = [[x, y]", "the VGG Image Annotator (VIA) tool. :param images_dir: directory containing JPG image files", "args[\"masks\"], args[\"classes\"], args[\"combine\"], ) elif args[\"in_format\"] == \"png\": if args[\"out_format\"] == \"tfrecord\": masked_dataset_to_tfrecords(", "width, 3), dtype=\"uint8\") # loop over each of the annotated regions for (i,", "raise RuntimeError('Shape mismatched between image and mask.') # Convert to tf example. example", "files :param annotations_file : annotation file containing segmentation (mask) regions, expected to be", "dog panda will result in a dictionary like so: { \"cat\": 1, \"dog\":", "with corresponding set PNG masks. :param images_dir: directory containing image files :param masks_dir:", "the path of the TFRecord file to be written shard_id: shard ID (for", "collections.Iterable): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) # ------------------------------------------------------------------------------ def _bytes_list_feature( values: str, )", "regions for (i, region) in enumerate(annotation[\"regions\"]): # if not combining all regions into", "TFRecords in parallel with concurrent.futures.ProcessPoolExecutor() as executor: # map the TFRecord creation function", "args_parser.add_argument( \"--train_pct\", required=False, default=1.0, type=float, help=\"percentage of images/masks to use for the training", "num_images, \"file_ids\": file_ids, \"images_dir\": images_dir, \"masks_dir\": masks_dir, } args_iterable.append(tfrecord_writing_args) # use a ProcessPoolExecutor", "_logger which will write to the console logging.basicConfig( level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%Y-%m-%d", "{args['out_format']}\") else: raise ValueError(f\"Unsupported input format: {args['in_format']}\") # ------------------------------------------------------------------------------ if __name__ == \"__main__\":", "= os.path.join(args[\"masks_dir\"], args[\"file_ids\"][i] + \".png\") seg_data = tf.io.gfile.GFile(mask_path, 'rb').read() seg_width, seg_height, _ =", "containing image files masks_dir: directory containing mask files corresponding to the images \"\"\"", "np.zeros((height, width, 3), dtype=\"uint8\") # loop over each of the annotated regions for", "image image_file_name = args[\"file_ids\"][i] + \".jpg\" image_path = os.path.join(args[\"images_dir\"], image_file_name) image_data = tf.io.gfile.GFile(image_path,", "corresponding to a dataset of JPG images with corresponding set PNG masks. 
:param", "+ \"_\" + dataset_base_name tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + \"_\" + dataset_base_name # get", "region_attributes = region[\"region_attributes\"] # find the class ID corresponding to the region's class", "file IDs if train_pct < 1.0: # get the correct file name prefix", "mask value cv2.fillPoly(region_mask, [pts], color=[class_id]*3) # if not combining all masks into a", "file_id = os.path.splitext(image_file_name)[0] # grab the image info and then grab the annotation", "files to use when converting to TFRecord format\", ) args_parser.add_argument( \"--train_pct\", required=False, default=1.0,", "# then write this mask into its own file if not combine_into_one: #", "file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext)) random.shuffle(file_ids) # create a mapping of base", "= \".png\" images_ext = \".jpg\" file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext)) random.shuffle(file_ids) #", ") -> tf.train.Feature: \"\"\" Returns a TF-Feature of bytes. :param values a string", "in the region attributes -- label: {class_label}\", ) else: class_id = class_labels[class_label] #", "1.0 - train_pct), if 1.0 then \" \"no splitting will occur\", ) args_parser.add_argument(", "contains {len(file_ids[:split_index])} training samples\") _logger.info(f\"TFRecord dataset contains {len(file_ids[split_index:])} validation samples\") else: # we'll", "the class ID corresponding to the region's class attribute class_label = region_attributes[\"class\"] if", "images :param tfrecord_dir: directory where the output TFRecord files will be written :param", "# get the image's dimensions width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) # if", "name of the TFRecord files to be produced :param train_pct: the percentage of", ") args_parser.add_argument( \"--shards\", required=False, default=1, type=int, help=\"number of shard files to use when", "num_per_shard, \"num_images\": num_images, \"file_ids\": file_ids, \"images_dir\": images_dir, \"masks_dir\": masks_dir, } args_iterable.append(tfrecord_writing_args) # use", "annotations = json.loads(open(annotations_file).read()) image_annotations = {} # loop over the file ID and", "containing mask files corresponding to the images \"\"\" with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx", "in class_labels_file: class_labels[class_label.strip()] = class_id class_id += 1 return class_labels # ------------------------------------------------------------------------------ def", "int(len(file_ids) * train_pct) # map the file prefixes to the sets of file", "six.PY3 else value return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) # ------------------------------------------------------------------------------ def _build_write_tfrecord( args: Dict, ): \"\"\"", "base_name, file_ids in split_names_to_ids.items(): num_images = len(file_ids) num_per_shard = int(math.ceil(num_images / num_shards)) for", "# ------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature of", "/data/masks For creating TFRecords from a masked dataset with an 80% training and", "be written \" \"(or found if used as an input)\", ) args_parser.add_argument( \"--tfrecords\",", "_logger.info(f\"Building TFRecords in directory {tfrecord_dir} \") executor.map(_build_write_tfrecord, args_iterable) # 
------------------------------------------------------------------------------ def vgg_to_masks( images_dir:", ":param args: dictionary containing the following function arguments: output_path: the path of the", "str, ) -> Dict: \"\"\" Reads a text file, which is assumed to", "tqdm from cvdata.utils import image_dimensions, matching_ids # ------------------------------------------------------------------------------ # set up a basic,", "if args[\"out_format\"] == \"tfrecord\": masked_dataset_to_tfrecords( args[\"images\"], args[\"masks\"], args[\"tfrecords\"], args[\"shards\"], args[\"base_name\"], args[\"train_pct\"], ) else:", "the points to (<# of coordinates>, 1, 2) pts = pts.reshape((-1, 1, 2))", "used in a segmentation dataset, with one class label per line :return: dictionary", "# report the number of samples _logger.info(f\"TFRecord dataset contains {len(file_ids)} samples (no train/valid", "png --out_format tfrecord \\ --tfrecords /data/lesions/tfrecords \\ --shards 12 -- train_pct 0.8 \"\"\"", "splitting will occur\", ) args_parser.add_argument( \"--base_name\", required=False, type=str, default=\"\", help=\"base name of the", "num_shards)) for shard_id in range(num_shards): output_filename = os.path.join( tfrecord_dir, f'{base_name}-{str(shard_id).zfill(5)}-of-{str(num_shards).zfill(5)}.tfrecord', ) tfrecord_writing_args =", "json import logging import math import os import random from typing import Dict", "files :param masks_dir: directory containing mask files corresponding to the images :param tfrecord_dir:", "in the JSON format created by the VGG Image Annotator tool :param masks_dir:", ") -> Dict: \"\"\" Reads a text file, which is assumed to contain", "args_parser.add_argument( \"--masks\", required=False, type=str, help=\"path to directory where mask files will be written", "image_dimensions(os.path.join(images_dir, image_file_name)) # if combining all regions into a single mask file #", "coords] pts = np.array(poly_coords, np.int32) # reshape the points to (<# of coordinates>,", "line :param combine_into_one: if True then combine all mask regions for an image", "is assumed to contain one class label per line, and returns a dictionary", "shard num_images: total number of images in dataset file_ids: file IDs for image/mask", "x_coords = shape_attributes[\"all_points_x\"] y_coords = shape_attributes[\"all_points_y\"] coords = zip(x_coords, y_coords) poly_coords = [[x,", "required=False, type=str, help=\"path to annotation file\", ) args_parser.add_argument( \"--in_format\", required=False, type=str, choices=[\"coco\", \"openimages\",", "datasets) num_per_shard: number of images/masks per shard num_images: total number of images in", "matching_ids # ------------------------------------------------------------------------------ # set up a basic, global _logger which will write", "Builds and writes a TFRecord with image and segmentation (mask) features. 
:param args:", "the filename as the key image_annotations[data[\"filename\"]] = data # get a dictionary of", "directory containing mask files corresponding to the images :param tfrecord_dir: directory where the", "choices=[\"png\", \"tfrecord\"], help=\"format of output annotations/masks\", ) args_parser.add_argument( \"--classes\", required=False, type=str, help=\"path of", "of images in dataset file_ids: file IDs for image/mask files images_dir: directory containing", "in tqdm(os.listdir(images_dir)): # skip any files without a *.jpg extension if not image_file_name.endswith(\".jpg\"):", "= pts.reshape((-1, 1, 2)) # draw the polygon mask, using the class ID", "# loop over each of the annotated regions for (i, region) in enumerate(annotation[\"regions\"]):", "images_dir: directory containing JPG image files :param annotations_file : annotation file containing segmentation", "in parallel with concurrent.futures.ProcessPoolExecutor() as executor: # map the TFRecord creation function to", "_bytes_list_feature('jpeg'), 'image/height': _int64_list_feature(height), 'image/width': _int64_list_feature(width), 'image/channels': _int64_list_feature(3), 'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)), 'image/segmentation/class/format': _bytes_list_feature('png'), })) tfrecord_writer.write(example.SerializeToString())", "path: {annotations_file}\") # make the masks directory if it doesn't already exist os.makedirs(masks_dir,", "get the image's dimensions width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) # if combining", "mask files corresponding to the images \"\"\" with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx =", "# using the VIA tool) and initialize the annotations dictionary annotations = json.loads(open(annotations_file).read())", "annotations/masks\", ) args_parser.add_argument( \"--classes\", required=False, type=str, help=\"path of the class labels file listing", "into train/valid sets split_index = int(len(file_ids) * train_pct) # map the file prefixes", "cat dog panda will result in a dictionary like so: { \"cat\": 1,", "open(labels_path, \"r\") as class_labels_file: class_id = 1 for class_label in class_labels_file: class_labels[class_label.strip()] =", "= tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': _bytes_list_feature(image_data), 'image/filename': _bytes_list_feature(image_file_name), 'image/format': _bytes_list_feature('jpeg'), 'image/height': _int64_list_feature(height), 'image/width': _int64_list_feature(width), 'image/channels':", "arguments _logger.info(f\"Building TFRecords in directory {tfrecord_dir} \") executor.map(_build_write_tfrecord, args_iterable) # ------------------------------------------------------------------------------ def vgg_to_masks(", "without a *.jpg extension if not image_file_name.endswith(\".jpg\"): continue file_id = os.path.splitext(image_file_name)[0] # grab", "the mask array for each mask region if not combine_into_one: # allocate memory", "into a single mask file then # we'll need to reallocate the mask", "# read the semantic segmentation annotation (mask) mask_path = os.path.join(args[\"masks_dir\"], args[\"file_ids\"][i] + \".png\")", "label per line, and returns a dictionary with class labels as keys mapped", "and region attributes shape_attributes = region[\"shape_attributes\"] region_attributes = region[\"region_attributes\"] # find the class", "= zip(x_coords, y_coords) poly_coords = [[x, y] for x, y in coords] pts", "if not 
os.path.exists(images_dir): raise ValueError(f\"Invalid images directory path: {images_dir}\") elif not os.path.exists(annotations_file): raise", "annotations file path: {annotations_file}\") # make the masks directory if it doesn't already", "_ = image_dimensions(image_path) # read the semantic segmentation annotation (mask) mask_path = os.path.join(args[\"masks_dir\"],", "/data/via_annotations.json \\ --masks /data/masks For creating TFRecords from a masked dataset with an", "mask_file_name), region_mask) # write a combined mask file, if requested if combine_into_one: #", "from cvdata.utils import image_dimensions, matching_ids # ------------------------------------------------------------------------------ # set up a basic, global", "of file IDs split_names_to_ids = { tfrecord_file_prefix: file_ids, } # report the number", "class labels file listing one class per line\", ) args_parser.add_argument( \"--combine\", default=False, action='store_true',", "a ProcessPoolExecutor to facilitate creating the TFRecords in parallel with concurrent.futures.ProcessPoolExecutor() as executor:", "(x, y)-coordinates for the region's mask polygon x_coords = shape_attributes[\"all_points_x\"] y_coords = shape_attributes[\"all_points_y\"]", "regions into a single mask file # then we'll only need to allocate", ":param values a string :return TF-Feature of bytes \"\"\" def norm2bytes(value): return value.encode()", "will be written :param num_shards: number of shards :param dataset_base_name: base name of", "need to allocate the mask array once if combine_into_one: # allocate memory for", "files...\") for image_file_name in tqdm(os.listdir(images_dir)): # skip any files without a *.jpg extension", "/data/lesions/masks \\ --in_format png --out_format tfrecord \\ --tfrecords /data/lesions/tfrecords \\ --shards 12 --", "(created # using the VIA tool) and initialize the annotations dictionary annotations =", "Dict, ): \"\"\" Builds and writes a TFRecord with image and segmentation (mask)", "a TF-Feature of bytes. 
:param values a string :return TF-Feature of bytes \"\"\"", "name mapped to all file IDs if \"\" == dataset_base_name: tfrecord_file_prefix = \"tfrecord\"", "masks_dir: str, tfrecord_dir: str, num_shards: int = 1, dataset_base_name: str = \"tfrecord\", train_pct:", "\"png\", \"vgg\"], help=\"format of input annotations\", ) args_parser.add_argument( \"--out_format\", required=False, type=str, choices=[\"png\", \"tfrecord\"],", "Reads a text file, which is assumed to contain one class label per", "if this value is 1.0 then no split will occur \"\"\" masks_ext =", "------------------------------------------------------------------------------ def _build_write_tfrecord( args: Dict, ): \"\"\" Builds and writes a TFRecord with", ") args_parser.add_argument( \"--annotations\", required=False, type=str, help=\"path to annotation file\", ) args_parser.add_argument( \"--in_format\", required=False,", "grab the shape and region attributes shape_attributes = region[\"shape_attributes\"] region_attributes = region[\"region_attributes\"] #", "if __name__ == \"__main__\": \"\"\" Usage: For creating masks from VIA annotations: $", "= region_attributes[\"class\"] if class_label not in class_labels: raise ValueError( \"No corresponding class ID", "number of shards :param dataset_base_name: base name of the TFRecord files to be", "to the console logging.basicConfig( level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\", ) _logger =", "validation split: $ python mask.py --images /data/lesions/images \\ --masks /data/lesions/masks \\ --in_format png", "vgg_to_masks( images_dir: str, annotations_file: str, masks_dir: str, class_labels_file: str, combine_into_one: bool = False,", "contents of the annotation JSON file (created # using the VIA tool) and", "class labels to ID values \"\"\" class_labels = {} with open(labels_path, \"r\") as", "output_filename, \"shard_id\": shard_id, \"num_per_shard\": num_per_shard, \"num_images\": num_images, \"file_ids\": file_ids, \"images_dir\": images_dir, \"masks_dir\": masks_dir,", "# ------------------------------------------------------------------------------ def masked_dataset_to_tfrecords( images_dir: str, masks_dir: str, tfrecord_dir: str, num_shards: int =", "train_pct: the percentage of images/masks to use for training, with (1.0 minus this", "math import os import random from typing import Dict import cv2 import numpy", "the region's class attribute class_label = region_attributes[\"class\"] if class_label not in class_labels: raise", "python mask.py --images /data/lesions/images \\ --masks /data/lesions/masks \\ --in_format png --out_format tfrecord \\", "values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) # ------------------------------------------------------------------------------ def _bytes_list_feature( values: str, ) ->", "region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # grab the shape and region attributes", "return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) # ------------------------------------------------------------------------------ def _build_write_tfrecord( args: Dict, ): \"\"\" Builds and writes", "and annotations themselves (values) for data in annotations.values(): # store the data in", "already exist os.makedirs(masks_dir, exist_ok=True) # load the contents of the annotation JSON file", "if 1.0 then \" \"no splitting will occur\", ) args_parser.add_argument( \"--base_name\", required=False, type=str,", "of samples in 
each split section _logger.info(f\"TFRecord dataset contains {len(file_ids[:split_index])} training samples\") _logger.info(f\"TFRecord", "class_id = 1 for class_label in class_labels_file: class_labels[class_label.strip()] = class_id class_id += 1", "1.0, ): \"\"\" Creates TFRecord files corresponding to a dataset of JPG images", "directory if it doesn't already exist os.makedirs(masks_dir, exist_ok=True) # load the contents of", "train/valid sets split_index = int(len(file_ids) * train_pct) # map the file prefixes to", "%(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\", ) _logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------ def _class_labels_to_ids( labels_path: str,", "IDs if \"\" == dataset_base_name: tfrecord_file_prefix = \"tfrecord\" else: tfrecord_file_prefix = dataset_base_name #", "} :param labels_path: path to a file containing class labels used in a", "tensorflow as tf from tensorflow.compat.v1.python_io import TFRecordWriter from tqdm import tqdm from cvdata.utils", "type=str, help=\"path of the class labels file listing one class per line\", )", "--in_format vgg \\ --images /data/images \\ --annotations /data/via_annotations.json \\ --masks /data/masks For creating", "a dictionary of class labels to class IDs class_labels = _class_labels_to_ids(class_labels_file) _logger.info(\"Generating mask", "masks from VIA annotations: $ python mask.py --in_format vgg \\ --images /data/images \\", "\"dog\": 2, \"panda\": 3, } :param labels_path: path to a file containing class", "number of samples _logger.info(f\"TFRecord dataset contains {len(file_ids)} samples (no train/valid split)\") # create", "so: { \"cat\": 1, \"dog\": 2, \"panda\": 3, } :param labels_path: path to", "= f\"{file_id}_segmentation.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask) _logger.info(\"Done\") # ------------------------------------------------------------------------------ def main(): # parse the", "the dictionary using the filename as the key image_annotations[data[\"filename\"]] = data # get", "ValueError(f\"Unsupported output format: {args['out_format']}\") else: raise ValueError(f\"Unsupported input format: {args['in_format']}\") # ------------------------------------------------------------------------------ if", "annotations: $ python mask.py --in_format vgg \\ --images /data/images \\ --annotations /data/via_annotations.json \\", "containing mask files corresponding to the images :param tfrecord_dir: directory where the output", "tqdm import tqdm from cvdata.utils import image_dimensions, matching_ids # ------------------------------------------------------------------------------ # set up", "class_labels = _class_labels_to_ids(class_labels_file) _logger.info(\"Generating mask files...\") for image_file_name in tqdm(os.listdir(images_dir)): # skip any", "for i in range(start_idx, end_idx): print(f'\\r>> Converting image {i + 1}/{len(args[\"file_ids\"])} \"' f'shard", "VGG Image Annotator tool :param masks_dir: directory where PNG mask files will be", "as the validation percentage), if this value is 1.0 then no split will", "arguments args_parser = argparse.ArgumentParser() args_parser.add_argument( \"--images\", required=True, type=str, help=\"path to directory containing input", "base file names and subsets of file IDs if train_pct < 1.0: #", "prefixes to the sets of file IDs for the split sections split_names_to_ids =", "returns a dictionary with class labels as keys mapped to the class ID", "segmentation (mask) 
features. :param args: dictionary containing the following function arguments: output_path: the", "zip(x_coords, y_coords) poly_coords = [[x, y] for x, y in coords] pts =", "required=False, type=str, help=\"path to directory where TFRecord output files will be written\", )", "labels_path: path to a file containing class labels used in a segmentation dataset,", "value is 1.0 then no split will occur \"\"\" masks_ext = \".png\" images_ext", "if not combining all masks into a single file # then write this", "# map the TFRecord creation function to the iterable of arguments _logger.info(f\"Building TFRecords", "image_data = tf.io.gfile.GFile(image_path, 'rb').read() width, height, _ = image_dimensions(image_path) # read the semantic", "combine all mask regions for an image into a single mask file \"\"\"", "label: {class_label}\", ) else: class_id = class_labels[class_label] # get the array of (x,", "= f\"{file_id}_segmentation_{i}.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask) # write a combined mask file, if requested", "as class_labels_file: class_id = 1 for class_label in class_labels_file: class_labels[class_label.strip()] = class_id class_id", "per line, and returns a dictionary with class labels as keys mapped to", "image_file_name) image_data = tf.io.gfile.GFile(image_path, 'rb').read() width, height, _ = image_dimensions(image_path) # read the", "Dict import cv2 import numpy as np import six import tensorflow as tf", "main(): # parse the command line arguments args_parser = argparse.ArgumentParser() args_parser.add_argument( \"--images\", required=True,", "be mapped to concurrent future processes args_iterable = [] for base_name, file_ids in", "\"\"\" masks_ext = \".png\" images_ext = \".jpg\" file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext))", "images in dataset file_ids: file IDs for image/mask files images_dir: directory containing image", "bytes. 
:param values a string :return TF-Feature of bytes \"\"\" def norm2bytes(value): return", "split_names_to_ids.items(): num_images = len(file_ids) num_per_shard = int(math.ceil(num_images / num_shards)) for shard_id in range(num_shards):", "-> Dict: \"\"\" Reads a text file, which is assumed to contain one", "dataset_base_name: tfrecord_file_prefix = \"tfrecord\" else: tfrecord_file_prefix = dataset_base_name # map the file prefixes", "executor: # map the TFRecord creation function to the iterable of arguments _logger.info(f\"Building", "argparse.ArgumentParser() args_parser.add_argument( \"--images\", required=True, type=str, help=\"path to directory containing input image files\", )", "required=False, type=str, choices=[\"coco\", \"openimages\", \"png\", \"vgg\"], help=\"format of input annotations\", ) args_parser.add_argument( \"--out_format\",", "else: class_id = class_labels[class_label] # get the array of (x, y)-coordinates for the", "facilitate creating the TFRecords in parallel with concurrent.futures.ProcessPoolExecutor() as executor: # map the", "\"png\": if args[\"out_format\"] == \"tfrecord\": masked_dataset_to_tfrecords( args[\"images\"], args[\"masks\"], args[\"tfrecords\"], args[\"shards\"], args[\"base_name\"], args[\"train_pct\"], )", "a specified file base name tfrecord_file_prefix_train = \"train\" tfrecord_file_prefix_valid = \"valid\" if dataset_base_name", "mask array once if combine_into_one: # allocate memory for the region mask region_mask", "for class_label in class_labels_file: class_labels[class_label.strip()] = class_id class_id += 1 return class_labels #", "TFRecord files corresponding to a dataset of JPG images with corresponding set PNG", "labels as keys mapped to the class ID (i.e. the label's line number).", "{ \"cat\": 1, \"dog\": 2, \"panda\": 3, } :param labels_path: path to a", "masks. :param images_dir: directory containing image files :param masks_dir: directory containing mask files", "found for the class label \" f\"found in the region attributes -- label:", "labels to class IDs class_labels = _class_labels_to_ids(class_labels_file) _logger.info(\"Generating mask files...\") for image_file_name in", "'image/width': _int64_list_feature(width), 'image/channels': _int64_list_feature(3), 'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)), 'image/segmentation/class/format': _bytes_list_feature('png'), })) tfrecord_writer.write(example.SerializeToString()) # ------------------------------------------------------------------------------ def", "args[\"masks\"], args[\"tfrecords\"], args[\"shards\"], args[\"base_name\"], args[\"train_pct\"], ) else: raise ValueError(f\"Unsupported output format: {args['out_format']}\") else:", "and returns a dictionary with class labels as keys mapped to the class", "+= 1 return class_labels # ------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\"", "logging import math import os import random from typing import Dict import cv2", "with image and segmentation (mask) features. 
:param args: dictionary containing the following function", "elif not os.path.exists(annotations_file): raise ValueError(f\"Invalid annotations file path: {annotations_file}\") # make the masks", ":param train_pct: the percentage of images/masks to use for training, with (1.0 minus", "will occur\", ) args_parser.add_argument( \"--base_name\", required=False, type=str, default=\"\", help=\"base name of the TFRecord", "random.shuffle(file_ids) # create a mapping of base file names and subsets of file", "shard ID (for multi-shard TFRecord datasets) num_per_shard: number of images/masks per shard num_images:", "= { \"output_path\": output_filename, \"shard_id\": shard_id, \"num_per_shard\": num_per_shard, \"num_images\": num_images, \"file_ids\": file_ids, \"images_dir\":", "of arguments that will be mapped to concurrent future processes args_iterable = []", "= json.loads(open(annotations_file).read()) image_annotations = {} # loop over the file ID and annotations", "class_label not in class_labels: raise ValueError( \"No corresponding class ID found for the", "the file prefixes to the sets of file IDs for the split sections", "the image's dimensions width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) # if combining all", "image files :param annotations_file : annotation file containing segmentation (mask) regions, expected to", "regions for an image into a single mask file \"\"\" # arguments validation", "{images_dir}\") elif not os.path.exists(annotations_file): raise ValueError(f\"Invalid annotations file path: {annotations_file}\") # make the", "tfrecord_file_prefix_train = tfrecord_file_prefix_train + \"_\" + dataset_base_name tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + \"_\" +", "class label per line :return: dictionary mapping class labels to ID values \"\"\"", "\"\"\" with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx = args[\"shard_id\"] * args[\"num_per_shard\"] end_idx = min((args[\"shard_id\"]", "samples\") _logger.info(f\"TFRecord dataset contains {len(file_ids[split_index:])} validation samples\") else: # we'll just have one", "str, combine_into_one: bool = False, ): \"\"\" Creates mask files from annotations specified", "------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature of int64_list.", "index to use for splitting into train/valid sets split_index = int(len(file_ids) * train_pct)", "files to be produced :param train_pct: the percentage of images/masks to use for", "array of (x, y)-coordinates for the region's mask polygon x_coords = shape_attributes[\"all_points_x\"] y_coords", "directory containing JPG image files :param annotations_file : annotation file containing segmentation (mask)", "mask file then # we'll need to reallocate the mask array for each", "i in range(start_idx, end_idx): print(f'\\r>> Converting image {i + 1}/{len(args[\"file_ids\"])} \"' f'shard {args[\"shard_id\"]}')", "image files :param masks_dir: directory containing mask files corresponding to the images :param", "just have one base file name mapped to all file IDs if \"\"", "VIA annotations: $ python mask.py --in_format vgg \\ --images /data/images \\ --annotations /data/via_annotations.json", "_bytes_list_feature(image_data), 'image/filename': _bytes_list_feature(image_file_name), 'image/format': _bytes_list_feature('jpeg'), 'image/height': _int64_list_feature(height), 'image/width': _int64_list_feature(width), 'image/channels': 
_int64_list_feature(3), 'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)),", "y in coords] pts = np.array(poly_coords, np.int32) # reshape the points to (<#", "# we'll need to reallocate the mask array for each mask region if", "\"\"\" Creates mask files from annotations specified in a JSON file exported from", "train_pct: float = 1.0, ): \"\"\" Creates TFRecord files corresponding to a dataset", "tensorflow.compat.v1.python_io import TFRecordWriter from tqdm import tqdm from cvdata.utils import image_dimensions, matching_ids #", "# arguments validation if not os.path.exists(images_dir): raise ValueError(f\"Invalid images directory path: {images_dir}\") elif", "== \"__main__\": \"\"\" Usage: For creating masks from VIA annotations: $ python mask.py", "RuntimeError('Shape mismatched between image and mask.') # Convert to tf example. example =", "dataset contains {len(file_ids[split_index:])} validation samples\") else: # we'll just have one base file", "values: str, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature of bytes. :param values", "number of images/masks per shard num_images: total number of images in dataset file_ids:", "name of the TFRecord files\", ) args = vars(args_parser.parse_args()) if args[\"in_format\"] == \"vgg\":", "tfrecord_file_prefix_train: file_ids[:split_index], tfrecord_file_prefix_valid: file_ids[split_index:], } # report the number of samples in each", "the array of (x, y)-coordinates for the region's mask polygon x_coords = shape_attributes[\"all_points_x\"]", "if not isinstance(values, collections.Iterable): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) # ------------------------------------------------------------------------------ def _bytes_list_feature(", "to directory where mask files will be written \" \"(or found if used", ") args_parser.add_argument( \"--out_format\", required=False, type=str, choices=[\"png\", \"tfrecord\"], help=\"format of output annotations/masks\", ) args_parser.add_argument(", "containing the following function arguments: output_path: the path of the TFRecord file to", "the TFRecord file to be written shard_id: shard ID (for multi-shard TFRecord datasets)", "expected to be in the JSON format created by the VGG Image Annotator", "a dictionary like so: { \"cat\": 1, \"dog\": 2, \"panda\": 3, } :param", "mapping of base file names and subsets of file IDs if train_pct <", "input format: {args['in_format']}\") # ------------------------------------------------------------------------------ if __name__ == \"__main__\": \"\"\" Usage: For creating", "annotations_file : annotation file containing segmentation (mask) regions, expected to be in the", "\"(or found if used as an input)\", ) args_parser.add_argument( \"--tfrecords\", required=False, type=str, help=\"path", "image_file_name.endswith(\".jpg\"): continue file_id = os.path.splitext(image_file_name)[0] # grab the image info and then grab", "1, 2) pts = pts.reshape((-1, 1, 2)) # draw the polygon mask, using", "import TFRecordWriter from tqdm import tqdm from cvdata.utils import image_dimensions, matching_ids # ------------------------------------------------------------------------------", "[] for base_name, file_ids in split_names_to_ids.items(): num_images = len(file_ids) num_per_shard = int(math.ceil(num_images /", "if not image_file_name.endswith(\".jpg\"): continue file_id = os.path.splitext(image_file_name)[0] # grab the image info and", "splitting into train/valid sets split_index = int(len(file_ids) * train_pct) # 
map the file", "ID as the mask value cv2.fillPoly(region_mask, [pts], color=[class_id]*3) # if not combining all", "Convert to tf example. example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': _bytes_list_feature(image_data), 'image/filename': _bytes_list_feature(image_file_name), 'image/format': _bytes_list_feature('jpeg'),", "seg_width, seg_height, _ = image_dimensions(mask_path) if height != seg_height or width != seg_width:", "each split section _logger.info(f\"TFRecord dataset contains {len(file_ids[:split_index])} training samples\") _logger.info(f\"TFRecord dataset contains {len(file_ids[split_index:])}", "def masked_dataset_to_tfrecords( images_dir: str, masks_dir: str, tfrecord_dir: str, num_shards: int = 1, dataset_base_name:", "class_label in class_labels_file: class_labels[class_label.strip()] = class_id class_id += 1 return class_labels # ------------------------------------------------------------------------------", "= np.zeros((height, width, 3), dtype=\"uint8\") # grab the shape and region attributes shape_attributes", "directory where mask files will be written \" \"(or found if used as", "subset will equal 1.0 - train_pct), if 1.0 then \" \"no splitting will", "def _build_write_tfrecord( args: Dict, ): \"\"\" Builds and writes a TFRecord with image", "\\ --masks /data/masks For creating TFRecords from a masked dataset with an 80%", "written shard_id: shard ID (for multi-shard TFRecord datasets) num_per_shard: number of images/masks per", "tfrecord_writing_args = { \"output_path\": output_filename, \"shard_id\": shard_id, \"num_per_shard\": num_per_shard, \"num_images\": num_images, \"file_ids\": file_ids,", "images_ext)) random.shuffle(file_ids) # create a mapping of base file names and subsets of", "mask_file_name = f\"{file_id}_segmentation_{i}.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask) # write a combined mask file, if", "function arguments: output_path: the path of the TFRecord file to be written shard_id:", "file name prefix for the TFRecord files # based on the presence of", "not combining all regions into a single mask file then # we'll need", "where mask files will be written \" \"(or found if used as an", "to TFRecord format\", ) args_parser.add_argument( \"--train_pct\", required=False, default=1.0, type=float, help=\"percentage of images/masks to", "creation function to the iterable of arguments _logger.info(f\"Building TFRecords in directory {tfrecord_dir} \")", "dataset contains {len(file_ids)} samples (no train/valid split)\") # create an iterable of arguments", "containing class labels used in a segmentation dataset, with one class label per", "masks_dir: str, class_labels_file: str, combine_into_one: bool = False, ): \"\"\" Creates mask files", "of the annotated regions for (i, region) in enumerate(annotation[\"regions\"]): # if not combining", "load the contents of the annotation JSON file (created # using the VIA", "help=\"percentage of images/masks to use for the training subset \" \"(validation subset will", "dataset_base_name: str = \"tfrecord\", train_pct: float = 1.0, ): \"\"\" Creates TFRecord files", "if True then combine all mask regions for an image into a single", "unique image ID annotation = image_annotations[image_file_name] # get the image's dimensions width, height,", "of bytes \"\"\" def norm2bytes(value): return value.encode() if isinstance(value, str) and six.PY3 else", "1 for class_label in class_labels_file: class_labels[class_label.strip()] = class_id class_id += 1 return 
class_labels", "the images \"\"\" with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx = args[\"shard_id\"] * args[\"num_per_shard\"] end_idx", "if dataset_base_name != \"\": tfrecord_file_prefix_train = tfrecord_file_prefix_train + \"_\" + dataset_base_name tfrecord_file_prefix_valid =", "image's dimensions width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) # if combining all regions", "to the images :param tfrecord_dir: directory where the output TFRecord files will be", "then write this mask into its own file if not combine_into_one: # write", "\"tfrecord\", train_pct: float = 1.0, ): \"\"\" Creates TFRecord files corresponding to a", ") tfrecord_writing_args = { \"output_path\": output_filename, \"shard_id\": shard_id, \"num_per_shard\": num_per_shard, \"num_images\": num_images, \"file_ids\":", "creating TFRecords from a masked dataset with an 80% training and 20% validation", "JSON file exported from the VGG Image Annotator (VIA) tool. :param images_dir: directory", "+ \"_\" + dataset_base_name # get the split index to use for splitting", "print(f'\\r>> Converting image {i + 1}/{len(args[\"file_ids\"])} \"' f'shard {args[\"shard_id\"]}') # read the image", "Annotator tool :param masks_dir: directory where PNG mask files will be written :param", "region[\"shape_attributes\"] region_attributes = region[\"region_attributes\"] # find the class ID corresponding to the region's", "# read the image image_file_name = args[\"file_ids\"][i] + \".jpg\" image_path = os.path.join(args[\"images_dir\"], image_file_name)", "float = 1.0, ): \"\"\" Creates TFRecord files corresponding to a dataset of", ":return: \"\"\" if not isinstance(values, collections.Iterable): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) # ------------------------------------------------------------------------------", "all regions/classes into a single mask file\", ) args_parser.add_argument( \"--shards\", required=False, default=1, type=int,", "the image image_file_name = args[\"file_ids\"][i] + \".jpg\" image_path = os.path.join(args[\"images_dir\"], image_file_name) image_data =", "class labels as keys mapped to the class ID (i.e. the label's line", "file_ids, \"images_dir\": images_dir, \"masks_dir\": masks_dir, } args_iterable.append(tfrecord_writing_args) # use a ProcessPoolExecutor to facilitate", "width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) # if combining all regions into a", "args_parser.add_argument( \"--classes\", required=False, type=str, help=\"path of the class labels file listing one class", "features. 
:param args: dictionary containing the following function arguments: output_path: the path of", "dataset_base_name != \"\": tfrecord_file_prefix_train = tfrecord_file_prefix_train + \"_\" + dataset_base_name tfrecord_file_prefix_valid = tfrecord_file_prefix_valid", "the image info and then grab the annotation data for # the current", "or width != seg_width: raise RuntimeError('Shape mismatched between image and mask.') # Convert", "_ = image_dimensions(mask_path) if height != seg_height or width != seg_width: raise RuntimeError('Shape", "the mask value cv2.fillPoly(region_mask, [pts], color=[class_id]*3) # if not combining all masks into", "(1.0 minus this value as the validation percentage), if this value is 1.0", "\"cat\": 1, \"dog\": 2, \"panda\": 3, } :param labels_path: path to a file", "of class labels to class IDs class_labels = _class_labels_to_ids(class_labels_file) _logger.info(\"Generating mask files...\") for", ":param combine_into_one: if True then combine all mask regions for an image into", "region attributes -- label: {class_label}\", ) else: class_id = class_labels[class_label] # get the", "semantic segmentation annotation (mask) mask_path = os.path.join(args[\"masks_dir\"], args[\"file_ids\"][i] + \".png\") seg_data = tf.io.gfile.GFile(mask_path,", "labels used in a segmentation dataset, with one class label per line :return:", "memory for the region mask region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # grab", "region mask region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # grab the shape and", "train_pct) # map the file prefixes to the sets of file IDs for", "# the current image based on the unique image ID annotation = image_annotations[image_file_name]", "\") executor.map(_build_write_tfrecord, args_iterable) # ------------------------------------------------------------------------------ def vgg_to_masks( images_dir: str, annotations_file: str, masks_dir: str,", "class_labels # ------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature", "equal 1.0 - train_pct), if 1.0 then \" \"no splitting will occur\", )", "output_filename = os.path.join( tfrecord_dir, f'{base_name}-{str(shard_id).zfill(5)}-of-{str(num_shards).zfill(5)}.tfrecord', ) tfrecord_writing_args = { \"output_path\": output_filename, \"shard_id\": shard_id,", "file \"\"\" # arguments validation if not os.path.exists(images_dir): raise ValueError(f\"Invalid images directory path:", "1.0 then \" \"no splitting will occur\", ) args_parser.add_argument( \"--base_name\", required=False, type=str, default=\"\",", "of the class labels file listing one class per line\", ) args_parser.add_argument( \"--combine\",", "string :return TF-Feature of bytes \"\"\" def norm2bytes(value): return value.encode() if isinstance(value, str)", "_int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature of int64_list. 
:param values:", "'image/filename': _bytes_list_feature(image_file_name), 'image/format': _bytes_list_feature('jpeg'), 'image/height': _int64_list_feature(height), 'image/width': _int64_list_feature(width), 'image/channels': _int64_list_feature(3), 'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)), 'image/segmentation/class/format':", "------------------------------------------------------------------------------ def main(): # parse the command line arguments args_parser = argparse.ArgumentParser() args_parser.add_argument(", "class_labels = {} with open(labels_path, \"r\") as class_labels_file: class_id = 1 for class_label", "prefix for the TFRecord files # based on the presence of a specified", "file # then we'll only need to allocate the mask array once if", "annotation file\", ) args_parser.add_argument( \"--in_format\", required=False, type=str, choices=[\"coco\", \"openimages\", \"png\", \"vgg\"], help=\"format of", "= args[\"shard_id\"] * args[\"num_per_shard\"] end_idx = min((args[\"shard_id\"] + 1) * args[\"num_per_shard\"], args[\"num_images\"]) for", "IDs class_labels = _class_labels_to_ids(class_labels_file) _logger.info(\"Generating mask files...\") for image_file_name in tqdm(os.listdir(images_dir)): # skip", "args[\"combine\"], ) elif args[\"in_format\"] == \"png\": if args[\"out_format\"] == \"tfrecord\": masked_dataset_to_tfrecords( args[\"images\"], args[\"masks\"],", "draw the polygon mask, using the class ID as the mask value cv2.fillPoly(region_mask,", "\"--images\", required=True, type=str, help=\"path to directory containing input image files\", ) args_parser.add_argument( \"--masks\",", "written \" \"(or found if used as an input)\", ) args_parser.add_argument( \"--tfrecords\", required=False,", "masks_dir, } args_iterable.append(tfrecord_writing_args) # use a ProcessPoolExecutor to facilitate creating the TFRecords in", "use for splitting into train/valid sets split_index = int(len(file_ids) * train_pct) # map", "the validation percentage), if this value is 1.0 then no split will occur", "points to (<# of coordinates>, 1, 2) pts = pts.reshape((-1, 1, 2)) #", "\" f\"found in the region attributes -- label: {class_label}\", ) else: class_id =", "class label per line, and returns a dictionary with class labels as keys", "+ dataset_base_name # get the split index to use for splitting into train/valid", "into its own file if not combine_into_one: # write the mask file mask_file_name", "for image/mask files images_dir: directory containing image files masks_dir: directory containing mask files", "\"masks_dir\": masks_dir, } args_iterable.append(tfrecord_writing_args) # use a ProcessPoolExecutor to facilitate creating the TFRecords", "class_labels: raise ValueError( \"No corresponding class ID found for the class label \"", "'image/encoded': _bytes_list_feature(image_data), 'image/filename': _bytes_list_feature(image_file_name), 'image/format': _bytes_list_feature('jpeg'), 'image/height': _int64_list_feature(height), 'image/width': _int64_list_feature(width), 'image/channels': _int64_list_feature(3), 'image/segmentation/class/encoded':", "/data/images \\ --annotations /data/via_annotations.json \\ --masks /data/masks For creating TFRecords from a masked", "writes a TFRecord with image and segmentation (mask) features. 
:param args: dictionary containing", "__name__ == \"__main__\": \"\"\" Usage: For creating masks from VIA annotations: $ python", "the annotation JSON file (created # using the VIA tool) and initialize the", "mask region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # loop over each of the", "import math import os import random from typing import Dict import cv2 import", "one class per line\", ) args_parser.add_argument( \"--combine\", default=False, action='store_true', help=\"combine all regions/classes into", "for splitting into train/valid sets split_index = int(len(file_ids) * train_pct) # map the", "required=False, type=str, default=\"\", help=\"base name of the TFRecord files\", ) args = vars(args_parser.parse_args())", "if args[\"out_format\"] == \"png\": vgg_to_masks( args[\"images\"], args[\"annotations\"], args[\"masks\"], args[\"classes\"], args[\"combine\"], ) elif args[\"in_format\"]", "# loop over the file ID and annotations themselves (values) for data in", "single mask file then # we'll need to reallocate the mask array for", "region mask region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # loop over each of", "y_coords) poly_coords = [[x, y] for x, y in coords] pts = np.array(poly_coords,", "_bytes_list_feature( values: str, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature of bytes. :param", "command line arguments args_parser = argparse.ArgumentParser() args_parser.add_argument( \"--images\", required=True, type=str, help=\"path to directory", "as tf from tensorflow.compat.v1.python_io import TFRecordWriter from tqdm import tqdm from cvdata.utils import", "Returns a TF-Feature of int64_list. :param values: :return: \"\"\" if not isinstance(values, collections.Iterable):", "# load the contents of the annotation JSON file (created # using the", "# if not combining all masks into a single file # then write", "import json import logging import math import os import random from typing import", "write a combined mask file, if requested if combine_into_one: # write the mask", "def main(): # parse the command line arguments args_parser = argparse.ArgumentParser() args_parser.add_argument( \"--images\",", "images/masks per shard num_images: total number of images in dataset file_ids: file IDs", "so: cat dog panda will result in a dictionary like so: { \"cat\":", "import six import tensorflow as tf from tensorflow.compat.v1.python_io import TFRecordWriter from tqdm import", "mask.') # Convert to tf example. example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': _bytes_list_feature(image_data), 'image/filename': _bytes_list_feature(image_file_name),", "= vars(args_parser.parse_args()) if args[\"in_format\"] == \"vgg\": if args[\"out_format\"] == \"png\": vgg_to_masks( args[\"images\"], args[\"annotations\"],", "split will occur \"\"\" masks_ext = \".png\" images_ext = \".jpg\" file_ids = list(matching_ids(masks_dir,", "image_annotations[image_file_name] # get the image's dimensions width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) #", "for the region mask region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # loop over", "TFRecord with image and segmentation (mask) features. 
:param args: dictionary containing the following", "if combine_into_one: # allocate memory for the region mask region_mask = np.zeros((height, width,", "output TFRecord files will be written :param num_shards: number of shards :param dataset_base_name:", "random from typing import Dict import cv2 import numpy as np import six", "str, masks_dir: str, tfrecord_dir: str, num_shards: int = 1, dataset_base_name: str = \"tfrecord\",", "== \"png\": if args[\"out_format\"] == \"tfrecord\": masked_dataset_to_tfrecords( args[\"images\"], args[\"masks\"], args[\"tfrecords\"], args[\"shards\"], args[\"base_name\"], args[\"train_pct\"],", "3), dtype=\"uint8\") # loop over each of the annotated regions for (i, region)", "a text file, which is assumed to contain one class label per line,", "its own file if not combine_into_one: # write the mask file mask_file_name =", "specified file base name tfrecord_file_prefix_train = \"train\" tfrecord_file_prefix_valid = \"valid\" if dataset_base_name !=", "collections import concurrent.futures import json import logging import math import os import random", "\".jpg\" file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext)) random.shuffle(file_ids) # create a mapping of", "combined mask file, if requested if combine_into_one: # write the mask file mask_file_name", "args: Dict, ): \"\"\" Builds and writes a TFRecord with image and segmentation", "will occur \"\"\" masks_ext = \".png\" images_ext = \".jpg\" file_ids = list(matching_ids(masks_dir, images_dir,", "to the class ID (i.e. the label's line number). So a labels file", "polygon mask, using the class ID as the mask value cv2.fillPoly(region_mask, [pts], color=[class_id]*3)", "loop over the file ID and annotations themselves (values) for data in annotations.values():", "with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx = args[\"shard_id\"] * args[\"num_per_shard\"] end_idx = min((args[\"shard_id\"] +", "to use for the training subset \" \"(validation subset will equal 1.0 -", "the class label \" f\"found in the region attributes -- label: {class_label}\", )", "be written :param num_shards: number of shards :param dataset_base_name: base name of the", "to the images \"\"\" with TFRecordWriter(args[\"output_path\"]) as tfrecord_writer: start_idx = args[\"shard_id\"] * args[\"num_per_shard\"]", "else: raise ValueError(f\"Unsupported input format: {args['in_format']}\") # ------------------------------------------------------------------------------ if __name__ == \"__main__\": \"\"\"", "else: raise ValueError(f\"Unsupported output format: {args['out_format']}\") else: raise ValueError(f\"Unsupported input format: {args['in_format']}\") #", "{tfrecord_dir} \") executor.map(_build_write_tfrecord, args_iterable) # ------------------------------------------------------------------------------ def vgg_to_masks( images_dir: str, annotations_file: str, masks_dir:", "= image_dimensions(image_path) # read the semantic segmentation annotation (mask) mask_path = os.path.join(args[\"masks_dir\"], args[\"file_ids\"][i]", "{i + 1}/{len(args[\"file_ids\"])} \"' f'shard {args[\"shard_id\"]}') # read the image image_file_name = args[\"file_ids\"][i]", "files will be written\", ) args_parser.add_argument( \"--annotations\", required=False, type=str, help=\"path to annotation file\",", "be written :param class_labels_file: text file containing one class label per line :param", "in a dictionary like so: { \"cat\": 1, \"dog\": 2, \"panda\": 3, }", "masks_dir: directory containing mask files 
corresponding to the images \"\"\" with TFRecordWriter(args[\"output_path\"]) as", "# grab the shape and region attributes shape_attributes = region[\"shape_attributes\"] region_attributes = region[\"region_attributes\"]", "the presence of a specified file base name tfrecord_file_prefix_train = \"train\" tfrecord_file_prefix_valid =", "str, masks_dir: str, class_labels_file: str, combine_into_one: bool = False, ): \"\"\" Creates mask", "six import tensorflow as tf from tensorflow.compat.v1.python_io import TFRecordWriter from tqdm import tqdm", "file listing one class per line\", ) args_parser.add_argument( \"--combine\", default=False, action='store_true', help=\"combine all", "_int64_list_feature(3), 'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)), 'image/segmentation/class/format': _bytes_list_feature('png'), })) tfrecord_writer.write(example.SerializeToString()) # ------------------------------------------------------------------------------ def masked_dataset_to_tfrecords( images_dir: str,", "to (<# of coordinates>, 1, 2) pts = pts.reshape((-1, 1, 2)) # draw", "TFRecord datasets) num_per_shard: number of images/masks per shard num_images: total number of images", "str, annotations_file: str, masks_dir: str, class_labels_file: str, combine_into_one: bool = False, ): \"\"\"", "subsets of file IDs if train_pct < 1.0: # get the correct file", "{len(file_ids[:split_index])} training samples\") _logger.info(f\"TFRecord dataset contains {len(file_ids[split_index:])} validation samples\") else: # we'll just", "TFRecords in directory {tfrecord_dir} \") executor.map(_build_write_tfrecord, args_iterable) # ------------------------------------------------------------------------------ def vgg_to_masks( images_dir: str,", "annotation (mask) mask_path = os.path.join(args[\"masks_dir\"], args[\"file_ids\"][i] + \".png\") seg_data = tf.io.gfile.GFile(mask_path, 'rb').read() seg_width,", "seg_width: raise RuntimeError('Shape mismatched between image and mask.') # Convert to tf example.", "# get the correct file name prefix for the TFRecord files # based", "file prefixes to the set of file IDs split_names_to_ids = { tfrecord_file_prefix: file_ids,", "file IDs split_names_to_ids = { tfrecord_file_prefix: file_ids, } # report the number of", "shape_attributes[\"all_points_y\"] coords = zip(x_coords, y_coords) poly_coords = [[x, y] for x, y in", "help=\"combine all regions/classes into a single mask file\", ) args_parser.add_argument( \"--shards\", required=False, default=1,", "seg_data = tf.io.gfile.GFile(mask_path, 'rb').read() seg_width, seg_height, _ = image_dimensions(mask_path) if height != seg_height", "if isinstance(value, str) and six.PY3 else value return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) # ------------------------------------------------------------------------------ def _build_write_tfrecord(", "a combined mask file, if requested if combine_into_one: # write the mask file", "# map the file prefixes to the set of file IDs split_names_to_ids =", "annotations dictionary annotations = json.loads(open(annotations_file).read()) image_annotations = {} # loop over the file", "typing import Dict import cv2 import numpy as np import six import tensorflow", "file like so: cat dog panda will result in a dictionary like so:", "end_idx = min((args[\"shard_id\"] + 1) * args[\"num_per_shard\"], args[\"num_images\"]) for i in range(start_idx, end_idx):", "the region mask region_mask = np.zeros((height, width, 3), dtype=\"uint8\") # grab the shape", 
"file mask_file_name = f\"{file_id}_segmentation_{i}.png\" cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask) # write a combined mask file,", "corresponding to the images :param tfrecord_dir: directory where the output TFRecord files will", "an image into a single mask file \"\"\" # arguments validation if not", "= class_labels[class_label] # get the array of (x, y)-coordinates for the region's mask", "\"\"\" Usage: For creating masks from VIA annotations: $ python mask.py --in_format vgg", "value return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)])) # ------------------------------------------------------------------------------ def _build_write_tfrecord( args: Dict, ): \"\"\" Builds and", "any files without a *.jpg extension if not image_file_name.endswith(\".jpg\"): continue file_id = os.path.splitext(image_file_name)[0]", "\"' f'shard {args[\"shard_id\"]}') # read the image image_file_name = args[\"file_ids\"][i] + \".jpg\" image_path", "dictionary annotations = json.loads(open(annotations_file).read()) image_annotations = {} # loop over the file ID", "class per line\", ) args_parser.add_argument( \"--combine\", default=False, action='store_true', help=\"combine all regions/classes into a", "console logging.basicConfig( level=logging.INFO, format=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\", ) _logger = logging.getLogger(__name__) #", "# if not combining all regions into a single mask file then #", "action='store_true', help=\"combine all regions/classes into a single mask file\", ) args_parser.add_argument( \"--shards\", required=False,", "create a mapping of base file names and subsets of file IDs if", "will result in a dictionary like so: { \"cat\": 1, \"dog\": 2, \"panda\":", "loop over each of the annotated regions for (i, region) in enumerate(annotation[\"regions\"]): #", "_ = image_dimensions(os.path.join(images_dir, image_file_name)) # if combining all regions into a single mask", "\"r\") as class_labels_file: class_id = 1 for class_label in class_labels_file: class_labels[class_label.strip()] = class_id", "find the class ID corresponding to the region's class attribute class_label = region_attributes[\"class\"]", "of images/masks to use for training, with (1.0 minus this value as the", "class_labels_file: text file containing one class label per line :param combine_into_one: if True", "not combine_into_one: # allocate memory for the region mask region_mask = np.zeros((height, width,", "array for each mask region if not combine_into_one: # allocate memory for the", "+ \".jpg\" image_path = os.path.join(args[\"images_dir\"], image_file_name) image_data = tf.io.gfile.GFile(image_path, 'rb').read() width, height, _", "dataset, with one class label per line :return: dictionary mapping class labels to", "required=False, default=1, type=int, help=\"number of shard files to use when converting to TFRecord", "the class ID (i.e. the label's line number). So a labels file like", "a TF-Feature of int64_list. 
import argparse
import collections.abc
import concurrent.futures
import json
import logging
import math
import os
import random
from typing import Dict

import cv2
import numpy as np
import six
import tensorflow as tf
from tensorflow.compat.v1.python_io import TFRecordWriter
from tqdm import tqdm

from cvdata.utils import image_dimensions, matching_ids

# ------------------------------------------------------------------------------
# set up a basic, global _logger which will write to the console
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
_logger = logging.getLogger(__name__)


# ------------------------------------------------------------------------------
def _class_labels_to_ids(labels_path: str) -> Dict:
    """
    Reads a text file, which is assumed to contain one class label per line,
    and returns a dictionary with class labels as keys mapped to the class ID
    (i.e. the label's line number). So a labels file like so:

        cat
        dog
        panda

    will result in a dictionary like so:

        {"cat": 1, "dog": 2, "panda": 3}

    :param labels_path: path to a file containing class labels used in a
        segmentation dataset, with one class label per line
    :return: dictionary mapping class labels to ID values
    """

    class_labels = {}
    with open(labels_path, "r") as class_labels_file:
        class_id = 1
        for class_label in class_labels_file:
            class_labels[class_label.strip()] = class_id
            class_id += 1

    return class_labels


# ------------------------------------------------------------------------------
def _int64_list_feature(values) -> tf.train.Feature:
    """
    Returns a TF-Feature of int64_list.

    :param values: a scalar or an iterable of integer values
    :return: TF-Feature of int64_list
    """

    if not isinstance(values, collections.abc.Iterable):
        values = [values]

    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))


# ------------------------------------------------------------------------------
def _bytes_list_feature(values: str) -> tf.train.Feature:
    """
    Returns a TF-Feature of bytes.

    :param values: a string
    :return: TF-Feature of bytes
    """

    def norm2bytes(value):
        return value.encode() if isinstance(value, str) and six.PY3 else value

    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))


# ------------------------------------------------------------------------------
def _build_write_tfrecord(args: Dict):
    """
    Builds and writes a TFRecord with image and segmentation (mask) features.

    :param args: dictionary containing the following function arguments:
        output_path: path of the TFRecord file to be written
        shard_id: shard ID (for multi-shard TFRecord datasets)
        num_per_shard: number of images/masks per shard
        num_images: total number of images in the dataset
        file_ids: file IDs for the image/mask files
        images_dir: directory containing the image files
        masks_dir: directory containing the mask files corresponding to the images
    """

    with TFRecordWriter(args["output_path"]) as tfrecord_writer:
        start_idx = args["shard_id"] * args["num_per_shard"]
        end_idx = min((args["shard_id"] + 1) * args["num_per_shard"], args["num_images"])
        for i in range(start_idx, end_idx):
            print(f'\r>> Converting image {i + 1}/{len(args["file_ids"])} '
                  f'shard {args["shard_id"]}')

            # read the image
            image_file_name = args["file_ids"][i] + ".jpg"
            image_path = os.path.join(args["images_dir"], image_file_name)
            image_data = tf.io.gfile.GFile(image_path, 'rb').read()
            width, height, _ = image_dimensions(image_path)

            # read the semantic segmentation annotation (mask)
            mask_path = os.path.join(args["masks_dir"], args["file_ids"][i] + ".png")
            seg_data = tf.io.gfile.GFile(mask_path, 'rb').read()
            seg_width, seg_height, _ = image_dimensions(mask_path)
            if height != seg_height or width != seg_width:
                raise RuntimeError('Shape mismatched between image and mask.')

            # convert to a TF Example
            example = tf.train.Example(features=tf.train.Features(feature={
                'image/encoded': _bytes_list_feature(image_data),
                'image/filename': _bytes_list_feature(image_file_name),
                'image/format': _bytes_list_feature('jpeg'),
                'image/height': _int64_list_feature(height),
                'image/width': _int64_list_feature(width),
                'image/channels': _int64_list_feature(3),
                'image/segmentation/class/encoded': _bytes_list_feature(seg_data),
                'image/segmentation/class/format': _bytes_list_feature('png'),
            }))
            tfrecord_writer.write(example.SerializeToString())


# ------------------------------------------------------------------------------
def masked_dataset_to_tfrecords(
        images_dir: str,
        masks_dir: str,
        tfrecord_dir: str,
        num_shards: int = 1,
        dataset_base_name: str = "tfrecord",
        train_pct: float = 1.0,
):
    """
    Creates TFRecord files corresponding to a dataset of JPG images with a
    corresponding set of PNG masks.

    :param images_dir: directory containing image files
    :param masks_dir: directory containing mask files corresponding to the images
    :param tfrecord_dir: directory where the output TFRecord files will be written
    :param num_shards: number of shards
    :param dataset_base_name: base name of the TFRecord files to be produced
    :param train_pct: the percentage of images/masks to use for training, with
        (1.0 minus this value) as the validation percentage; if this value is
        1.0 then no split will occur
    """

    masks_ext = ".png"
    images_ext = ".jpg"
    file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext))
    random.shuffle(file_ids)

    # create a mapping of base file names and subsets of file IDs
    if train_pct < 1.0:

        # get the correct file name prefix for the TFRecord files
        # based on the presence of a specified file base name
        tfrecord_file_prefix_train = "train"
        tfrecord_file_prefix_valid = "valid"
        if dataset_base_name != "":
            tfrecord_file_prefix_train = tfrecord_file_prefix_train + "_" + dataset_base_name
            tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + "_" + dataset_base_name

        # get the split index to use for splitting into train/valid sets
        split_index = int(len(file_ids) * train_pct)

        # map the file prefixes to the sets of file IDs for the split sections
        split_names_to_ids = {
            tfrecord_file_prefix_train: file_ids[:split_index],
            tfrecord_file_prefix_valid: file_ids[split_index:],
        }

        # report the number of samples in each split section
        _logger.info(f"TFRecord dataset contains {len(file_ids[:split_index])} training samples")
        _logger.info(f"TFRecord dataset contains {len(file_ids[split_index:])} validation samples")

    else:
        # we'll just have one base file name mapped to all file IDs
        if "" == dataset_base_name:
            tfrecord_file_prefix = "tfrecord"
        else:
            tfrecord_file_prefix = dataset_base_name

        # map the file prefix to the set of file IDs
        split_names_to_ids = {
            tfrecord_file_prefix: file_ids,
        }

        # report the number of samples
        _logger.info(f"TFRecord dataset contains {len(file_ids)} samples (no train/valid split)")

    # create an iterable of arguments that will be mapped to concurrent future processes
    args_iterable = []
    for base_name, file_ids in split_names_to_ids.items():
        num_images = len(file_ids)
        num_per_shard = int(math.ceil(num_images / num_shards))
        for shard_id in range(num_shards):
            output_filename = os.path.join(
                tfrecord_dir,
                f'{base_name}-{str(shard_id).zfill(5)}-of-{str(num_shards).zfill(5)}.tfrecord',
            )
            tfrecord_writing_args = {
                "output_path": output_filename,
                "shard_id": shard_id,
                "num_per_shard": num_per_shard,
                "num_images": num_images,
                "file_ids": file_ids,
                "images_dir": images_dir,
                "masks_dir": masks_dir,
            }
            args_iterable.append(tfrecord_writing_args)

    # use a ProcessPoolExecutor to facilitate creating the TFRecords in parallel
    with concurrent.futures.ProcessPoolExecutor() as executor:

        # map the TFRecord creation function to the iterable of arguments
        _logger.info(f"Building TFRecords in directory {tfrecord_dir}")
        executor.map(_build_write_tfrecord, args_iterable)


# ------------------------------------------------------------------------------
def vgg_to_masks(
        images_dir: str,
        annotations_file: str,
        masks_dir: str,
        class_labels_file: str,
        combine_into_one: bool = False,
):
    """
    Creates mask files from annotations specified in a JSON file exported from
    the VGG Image Annotator (VIA) tool.

    :param images_dir: directory containing JPG image files
    :param annotations_file: annotation file containing segmentation (mask)
        regions, expected to be in the JSON format created by the VGG Image
        Annotator tool
    :param masks_dir: directory where PNG mask files will be written
    :param class_labels_file: text file containing one class label per line
    :param combine_into_one: if True then combine all mask regions for an image
        into a single mask file
    """

    # arguments validation
    if not os.path.exists(images_dir):
        raise ValueError(f"Invalid images directory path: {images_dir}")
    elif not os.path.exists(annotations_file):
        raise ValueError(f"Invalid annotations file path: {annotations_file}")

    # make the masks directory if it doesn't already exist
    os.makedirs(masks_dir, exist_ok=True)

    # load the contents of the annotation JSON file (created
    # using the VIA tool) and initialize the annotations dictionary
    annotations = json.loads(open(annotations_file).read())
    image_annotations = {}

    # loop over the file IDs and the annotations themselves (values)
    for data in annotations.values():
        # store the data in the dictionary using the filename as the key
        image_annotations[data["filename"]] = data

    # get a dictionary of class labels to class IDs
    class_labels = _class_labels_to_ids(class_labels_file)

    _logger.info("Generating mask files...")
    for image_file_name in tqdm(os.listdir(images_dir)):

        # skip any files without a *.jpg extension
        if not image_file_name.endswith(".jpg"):
            continue

        file_id = os.path.splitext(image_file_name)[0]

        # grab the image info and then grab the annotation data for
        # the current image based on the unique image ID
        annotation = image_annotations[image_file_name]

        # get the image's dimensions
        width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name))

        # if combining all regions into a single mask file
        # then we'll only need to allocate the mask array once
        if combine_into_one:
            # allocate memory for the region mask
            region_mask = np.zeros((height, width, 3), dtype="uint8")

        # loop over each of the annotated regions
        for (i, region) in enumerate(annotation["regions"]):

            # if not combining all regions into a single mask file then
            # we'll need to reallocate the mask array for each mask region
            if not combine_into_one:
                # allocate memory for the region mask
                region_mask = np.zeros((height, width, 3), dtype="uint8")

            # grab the shape and region attributes
            shape_attributes = region["shape_attributes"]
            region_attributes = region["region_attributes"]

            # find the class ID corresponding to the region's class attribute
            class_label = region_attributes["class"]
            if class_label not in class_labels:
                raise ValueError(
                    "No corresponding class ID found for the class label "
                    f"found in the region attributes -- label: {class_label}",
                )
            else:
                class_id = class_labels[class_label]

            # get the array of (x, y)-coordinates for the region's mask polygon
            x_coords = shape_attributes["all_points_x"]
            y_coords = shape_attributes["all_points_y"]
            coords = zip(x_coords, y_coords)
            poly_coords = [[x, y] for x, y in coords]
            pts = np.array(poly_coords, np.int32)

            # reshape the points to (<# of coordinates>, 1, 2)
            pts = pts.reshape((-1, 1, 2))

            # draw the polygon mask, using the class ID as the mask value
            cv2.fillPoly(region_mask, [pts], color=[class_id] * 3)

            # if not combining all masks into a single file
            # then write this mask into its own file
            if not combine_into_one:
                # write the mask file
                mask_file_name = f"{file_id}_segmentation_{i}.png"
                cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)

        # write a combined mask file, if requested
        if combine_into_one:
            # write the mask file
            mask_file_name = f"{file_id}_segmentation.png"
            cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)

    _logger.info("Done")


# ------------------------------------------------------------------------------
def main():

    # parse the command line arguments
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        "--images", required=True, type=str,
        help="path to directory containing input image files",
    )
    args_parser.add_argument(
        "--masks", required=False, type=str,
        help="path to directory where mask files will be written "
             "(or found if used as an input)",
    )
    args_parser.add_argument(
        "--tfrecords", required=False, type=str,
        help="path to directory where TFRecord output files will be written",
    )
    args_parser.add_argument(
        "--annotations", required=False, type=str,
        help="path to annotation file",
    )
    args_parser.add_argument(
        "--in_format", required=False, type=str,
        choices=["coco", "openimages", "png", "vgg"],
        help="format of input annotations",
    )
    args_parser.add_argument(
        "--out_format", required=False, type=str,
        choices=["png", "tfrecord"],
        help="format of output annotations/masks",
    )
    args_parser.add_argument(
        "--classes", required=False, type=str,
        help="path of the class labels file listing one class per line",
    )
    args_parser.add_argument(
        "--combine", default=False, action='store_true',
        help="combine all regions/classes into a single mask file",
    )
    args_parser.add_argument(
        "--shards", required=False, default=1, type=int,
        help="number of shard files to use when converting to TFRecord format",
    )
    args_parser.add_argument(
        "--train_pct", required=False, default=1.0, type=float,
        help="percentage of images/masks to use for the training subset "
             "(validation subset will equal 1.0 - train_pct), if 1.0 then "
             "no splitting will occur",
    )
    args_parser.add_argument(
        "--base_name", required=False, type=str, default="",
        help="base name of the TFRecord files",
    )
    args = vars(args_parser.parse_args())

    if args["in_format"] == "vgg":
        if args["out_format"] == "png":
            vgg_to_masks(
                args["images"],
                args["annotations"],
                args["masks"],
                args["classes"],
                args["combine"],
            )
    elif args["in_format"] == "png":
        if args["out_format"] == "tfrecord":
            masked_dataset_to_tfrecords(
                args["images"],
                args["masks"],
                args["tfrecords"],
                args["shards"],
                args["base_name"],
                args["train_pct"],
            )
        else:
            raise ValueError(f"Unsupported output format: {args['out_format']}")
    else:
        raise ValueError(f"Unsupported input format: {args['in_format']}")


# ------------------------------------------------------------------------------
if __name__ == "__main__":
    """
    Usage:

    For creating masks from VIA annotations:

    $ python mask.py --in_format vgg \
        --images /data/images \
        --annotations /data/via_annotations.json \
        --masks /data/masks

    For creating TFRecords from a masked dataset with an 80% training and
    20% validation split:

    $ python mask.py --images /data/lesions/images \
        --masks /data/lesions/masks \
        --in_format png --out_format tfrecord \
        --tfrecords /data/lesions/tfrecords \
        --shards 12 --train_pct 0.8
    """

    # run this module's main function
    main()
------------------------------------------------------------------------------ #", "extension if not image_file_name.endswith(\".jpg\"): continue file_id = os.path.splitext(image_file_name)[0] # grab the image info", "--tfrecords /data/lesions/tfrecords \\ --shards 12 -- train_pct 0.8 \"\"\" # run this module's", "os import random from typing import Dict import cv2 import numpy as np", "validation percentage), if this value is 1.0 then no split will occur \"\"\"", "arguments that will be mapped to concurrent future processes args_iterable = [] for", "argparse import collections import concurrent.futures import json import logging import math import os", "return class_labels # ------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns a", "1 return class_labels # ------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns", "\"\"\" # arguments validation if not os.path.exists(images_dir): raise ValueError(f\"Invalid images directory path: {images_dir}\")", "f\"found in the region attributes -- label: {class_label}\", ) else: class_id = class_labels[class_label]", "{ tfrecord_file_prefix: file_ids, } # report the number of samples _logger.info(f\"TFRecord dataset contains", "of output annotations/masks\", ) args_parser.add_argument( \"--classes\", required=False, type=str, help=\"path of the class labels", "\" \"(validation subset will equal 1.0 - train_pct), if 1.0 then \" \"no", "if class_label not in class_labels: raise ValueError( \"No corresponding class ID found for", ":param masks_dir: directory containing mask files corresponding to the images :param tfrecord_dir: directory", "of the TFRecord files to be produced :param train_pct: the percentage of images/masks", "label's line number). 


# ------------------------------------------------------------------------------
def _build_write_tfrecord(
        args: Dict,
):
    """
    Builds and writes a TFRecord with image and segmentation (mask) features.

    :param args: dictionary containing the following function arguments:
        output_path: the path of the TFRecord file to be written
        shard_id: shard ID (for multi-shard TFRecord datasets)
        num_per_shard: number of images/masks per shard
        num_images: total number of images in dataset
        file_ids: file IDs for image/mask files
        images_dir: directory containing image files
        masks_dir: directory containing mask files corresponding to the images
    """

    with TFRecordWriter(args["output_path"]) as tfrecord_writer:

        start_idx = args["shard_id"] * args["num_per_shard"]
        end_idx = min((args["shard_id"] + 1) * args["num_per_shard"], args["num_images"])
        for i in range(start_idx, end_idx):

            print(
                f'\r>> Converting image {i + 1}/{len(args["file_ids"])} '
                f'shard {args["shard_id"]}'
            )

            # read the image
            image_file_name = args["file_ids"][i] + ".jpg"
            image_path = os.path.join(args["images_dir"], image_file_name)
            image_data = tf.io.gfile.GFile(image_path, 'rb').read()
            width, height, _ = image_dimensions(image_path)

            # read the semantic segmentation annotation (mask)
            mask_path = os.path.join(args["masks_dir"], args["file_ids"][i] + ".png")
            seg_data = tf.io.gfile.GFile(mask_path, 'rb').read()
            seg_width, seg_height, _ = image_dimensions(mask_path)
            if height != seg_height or width != seg_width:
                raise RuntimeError("Shape mismatch between image and mask.")

            # convert to a TF Example
            example = tf.train.Example(features=tf.train.Features(feature={
                'image/encoded': _bytes_list_feature(image_data),
                'image/filename': _bytes_list_feature(image_file_name),
                'image/format': _bytes_list_feature('jpeg'),
                'image/height': _int64_list_feature(height),
                'image/width': _int64_list_feature(width),
                'image/channels': _int64_list_feature(3),
                'image/segmentation/class/encoded': _bytes_list_feature(seg_data),
                'image/segmentation/class/format': _bytes_list_feature('png'),
            }))
            tfrecord_writer.write(example.SerializeToString())
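

# ------------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# the dictionary below shows the shape of the arguments that
# _build_write_tfrecord() expects for a single shard. All paths, counts, and
# file IDs are hypothetical examples.
#
#     example_shard_args = {
#         "output_path": "/data/tfrecords/train-00000-of-00004.tfrecord",
#         "shard_id": 0,
#         "num_per_shard": 250,
#         "num_images": 1000,
#         "file_ids": ["img_0001", "img_0002"],  # IDs shared by .jpg/.png pairs
#         "images_dir": "/data/images",
#         "masks_dir": "/data/masks",
#     }
#     _build_write_tfrecord(example_shard_args)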


# ------------------------------------------------------------------------------
def masked_dataset_to_tfrecords(
        images_dir: str,
        masks_dir: str,
        tfrecord_dir: str,
        num_shards: int = 1,
        dataset_base_name: str = "tfrecord",
        train_pct: float = 1.0,
):
    """
    Creates TFRecord files corresponding to a dataset of JPG images with a
    corresponding set of PNG masks.

    :param images_dir: directory containing image files
    :param masks_dir: directory containing mask files corresponding to the images
    :param tfrecord_dir: directory where the output TFRecord files will be written
    :param num_shards: number of shards
    :param dataset_base_name: base name of the TFRecord files to be produced
    :param train_pct: the percentage of images/masks to use for training, with
        (1.0 minus this value) as the validation percentage; if this value is
        1.0 then no split will occur
    """

    masks_ext = ".png"
    images_ext = ".jpg"
    file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext))
    random.shuffle(file_ids)

    # create a mapping of base file names and subsets of file IDs
    if train_pct < 1.0:

        # get the correct file name prefix for the TFRecord files
        # based on the presence of a specified file base name
        tfrecord_file_prefix_train = "train"
        tfrecord_file_prefix_valid = "valid"
        if dataset_base_name != "":
            tfrecord_file_prefix_train = tfrecord_file_prefix_train + "_" + dataset_base_name
            tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + "_" + dataset_base_name

        # get the split index to use for splitting into train/valid sets
        split_index = int(len(file_ids) * train_pct)

        # map the file prefixes to the sets of file IDs for the split sections
        split_names_to_ids = {
            tfrecord_file_prefix_train: file_ids[:split_index],
            tfrecord_file_prefix_valid: file_ids[split_index:],
        }

        # report the number of samples in each split section
        _logger.info(f"TFRecord dataset contains {len(file_ids[:split_index])} training samples")
        _logger.info(f"TFRecord dataset contains {len(file_ids[split_index:])} validation samples")

    else:
        # we'll just have one base file name mapped to all file IDs
        if "" == dataset_base_name:
            tfrecord_file_prefix = "tfrecord"
        else:
            tfrecord_file_prefix = dataset_base_name

        # map the file prefix to the set of file IDs
        split_names_to_ids = {
            tfrecord_file_prefix: file_ids,
        }

        # report the number of samples
        _logger.info(f"TFRecord dataset contains {len(file_ids)} samples (no train/valid split)")

    # create an iterable of arguments that will be mapped to concurrent future processes
    args_iterable = []
    for base_name, file_ids in split_names_to_ids.items():
        num_images = len(file_ids)
        num_per_shard = int(math.ceil(num_images / num_shards))
        for shard_id in range(num_shards):
            output_filename = os.path.join(
                tfrecord_dir,
                f'{base_name}-{str(shard_id).zfill(5)}-of-{str(num_shards).zfill(5)}.tfrecord',
            )
            tfrecord_writing_args = {
                "output_path": output_filename,
                "shard_id": shard_id,
                "num_per_shard": num_per_shard,
                "num_images": num_images,
                "file_ids": file_ids,
                "images_dir": images_dir,
                "masks_dir": masks_dir,
            }
            args_iterable.append(tfrecord_writing_args)

    # use a ProcessPoolExecutor to facilitate creating the TFRecords in parallel
    with concurrent.futures.ProcessPoolExecutor() as executor:

        # map the TFRecord creation function to the iterable of arguments
        _logger.info(f"Building TFRecords in directory {tfrecord_dir}")
        executor.map(_build_write_tfrecord, args_iterable)
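

# ------------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# calling masked_dataset_to_tfrecords() directly instead of through the CLI.
# The directories, shard count, base name, and split fraction below are
# hypothetical; with train_pct=0.8 the function writes "train_lesions-*" and
# "valid_lesions-*" TFRecord shards into the target directory.
#
#     masked_dataset_to_tfrecords(
#         images_dir="/data/lesions/images",
#         masks_dir="/data/lesions/masks",
#         tfrecord_dir="/data/lesions/tfrecords",
#         num_shards=4,
#         dataset_base_name="lesions",
#         train_pct=0.8,
#     )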


# ------------------------------------------------------------------------------
def vgg_to_masks(
        images_dir: str,
        annotations_file: str,
        masks_dir: str,
        class_labels_file: str,
        combine_into_one: bool = False,
):
    """
    Creates mask files from annotations specified in a JSON file exported from
    the VGG Image Annotator (VIA) tool.

    :param images_dir: directory containing JPG image files
    :param annotations_file: annotation file containing segmentation (mask)
        regions, expected to be in the JSON format created by the VGG Image
        Annotator tool
    :param masks_dir: directory where PNG mask files will be written
    :param class_labels_file: text file containing one class label per line
    :param combine_into_one: if True then combine all mask regions for an image
        into a single mask file
    """

    # arguments validation
    if not os.path.exists(images_dir):
        raise ValueError(f"Invalid images directory path: {images_dir}")
    elif not os.path.exists(annotations_file):
        raise ValueError(f"Invalid annotations file path: {annotations_file}")

    # make the masks directory if it doesn't already exist
    os.makedirs(masks_dir, exist_ok=True)

    # load the contents of the annotation JSON file (created
    # using the VIA tool) and initialize the annotations dictionary
    annotations = json.loads(open(annotations_file).read())
    image_annotations = {}

    # loop over the annotations themselves (the values); the file ID keys aren't needed
    for data in annotations.values():

        # store the data in the dictionary using the filename as the key
        image_annotations[data["filename"]] = data

    # get a dictionary of class labels to class IDs
    class_labels = _class_labels_to_ids(class_labels_file)

    _logger.info("Generating mask files...")
    for image_file_name in tqdm(os.listdir(images_dir)):

        # skip any files without a *.jpg extension
        if not image_file_name.endswith(".jpg"):
            continue

        file_id = os.path.splitext(image_file_name)[0]

        # grab the image info and then grab the annotation data for
        # the current image based on the unique image ID
        annotation = image_annotations[image_file_name]

        # get the image's dimensions
        width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name))

        # if combining all regions into a single mask file
        # then we'll only need to allocate the mask array once
        if combine_into_one:
            # allocate memory for the region mask
            region_mask = np.zeros((height, width, 3), dtype="uint8")

        # loop over each of the annotated regions
        for (i, region) in enumerate(annotation["regions"]):

            # if not combining all regions into a single mask file then
            # we'll need to reallocate the mask array for each mask region
            if not combine_into_one:
                # allocate memory for the region mask
                region_mask = np.zeros((height, width, 3), dtype="uint8")

            # grab the shape and region attributes
            shape_attributes = region["shape_attributes"]
            region_attributes = region["region_attributes"]

            # find the class ID corresponding to the region's class attribute
            class_label = region_attributes["class"]
            if class_label not in class_labels:
                raise ValueError(
                    "No corresponding class ID found for the class label "
                    f"found in the region attributes -- label: {class_label}",
                )
            else:
                class_id = class_labels[class_label]

            # get the array of (x, y)-coordinates for the region's mask polygon
            x_coords = shape_attributes["all_points_x"]
            y_coords = shape_attributes["all_points_y"]
            coords = zip(x_coords, y_coords)
            poly_coords = [[x, y] for x, y in coords]
            pts = np.array(poly_coords, np.int32)

            # reshape the points to (<# of coordinates>, 1, 2)
            pts = pts.reshape((-1, 1, 2))

            # draw the polygon mask, using the class ID as the mask value
            cv2.fillPoly(region_mask, [pts], color=[class_id]*3)

            # if not combining all masks into a single file
            # then write this mask into its own file
            if not combine_into_one:
                # write the mask file
                mask_file_name = f"{file_id}_segmentation_{i}.png"
                cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)

        # write a combined mask file, if requested
        if combine_into_one:
            # write the mask file
            mask_file_name = f"{file_id}_segmentation.png"
            cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)

    _logger.info("Done")
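

# ------------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# vgg_to_masks() only reads the fields shown below from the VIA JSON export.
# The top-level key format, file name, polygon coordinates, and class label
# are hypothetical; the "class" region attribute must match a label listed in
# the class labels file.
#
#     {
#       "image_0001.jpg123456": {
#         "filename": "image_0001.jpg",
#         "regions": [
#           {
#             "shape_attributes": {
#               "name": "polygon",
#               "all_points_x": [10, 40, 40, 10],
#               "all_points_y": [10, 10, 40, 40]
#             },
#             "region_attributes": {"class": "lesion"}
#           }
#         ]
#       }
#     }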
class_labels_file: class_id = 1 for class_label in class_labels_file: class_labels[class_label.strip()]", "using the class ID as the mask value cv2.fillPoly(region_mask, [pts], color=[class_id]*3) # if", "default=1, type=int, help=\"number of shard files to use when converting to TFRecord format\",", "cvdata.utils import image_dimensions, matching_ids # ------------------------------------------------------------------------------ # set up a basic, global _logger", "mask, using the class ID as the mask value cv2.fillPoly(region_mask, [pts], color=[class_id]*3) #", "--out_format tfrecord \\ --tfrecords /data/lesions/tfrecords \\ --shards 12 -- train_pct 0.8 \"\"\" #", "concurrent future processes args_iterable = [] for base_name, file_ids in split_names_to_ids.items(): num_images =", "occur \"\"\" masks_ext = \".png\" images_ext = \".jpg\" file_ids = list(matching_ids(masks_dir, images_dir, masks_ext,", "annotated regions for (i, region) in enumerate(annotation[\"regions\"]): # if not combining all regions", "1) * args[\"num_per_shard\"], args[\"num_images\"]) for i in range(start_idx, end_idx): print(f'\\r>> Converting image {i", "= region[\"shape_attributes\"] region_attributes = region[\"region_attributes\"] # find the class ID corresponding to the", "to facilitate creating the TFRecords in parallel with concurrent.futures.ProcessPoolExecutor() as executor: # map", "IDs split_names_to_ids = { tfrecord_file_prefix: file_ids, } # report the number of samples", "width, height, _ = image_dimensions(image_path) # read the semantic segmentation annotation (mask) mask_path", "= { tfrecord_file_prefix: file_ids, } # report the number of samples _logger.info(f\"TFRecord dataset", "images_dir: str, annotations_file: str, masks_dir: str, class_labels_file: str, combine_into_one: bool = False, ):", "tfrecord_file_prefix = dataset_base_name # map the file prefixes to the set of file", "file_ids, } # report the number of samples _logger.info(f\"TFRecord dataset contains {len(file_ids)} samples", "written :param num_shards: number of shards :param dataset_base_name: base name of the TFRecord", "\"--base_name\", required=False, type=str, default=\"\", help=\"base name of the TFRecord files\", ) args =", "file_ids in split_names_to_ids.items(): num_images = len(file_ids) num_per_shard = int(math.ceil(num_images / num_shards)) for shard_id", "raise ValueError( \"No corresponding class ID found for the class label \" f\"found", "width != seg_width: raise RuntimeError('Shape mismatched between image and mask.') # Convert to", "IDs for image/mask files images_dir: directory containing image files masks_dir: directory containing mask", "set of file IDs split_names_to_ids = { tfrecord_file_prefix: file_ids, } # report the", "this mask into its own file if not combine_into_one: # write the mask", "and initialize the annotations dictionary annotations = json.loads(open(annotations_file).read()) image_annotations = {} # loop", "and subsets of file IDs if train_pct < 1.0: # get the correct", "with concurrent.futures.ProcessPoolExecutor() as executor: # map the TFRecord creation function to the iterable", "file, if requested if combine_into_one: # write the mask file mask_file_name = f\"{file_id}_segmentation.png\"", "regions/classes into a single mask file\", ) args_parser.add_argument( \"--shards\", required=False, default=1, type=int, help=\"number", "be written\", ) args_parser.add_argument( \"--annotations\", required=False, type=str, help=\"path to annotation file\", ) args_parser.add_argument(", 
"cv2.fillPoly(region_mask, [pts], color=[class_id]*3) # if not combining all masks into a single file", "\"images_dir\": images_dir, \"masks_dir\": masks_dir, } args_iterable.append(tfrecord_writing_args) # use a ProcessPoolExecutor to facilitate creating", "labels to ID values \"\"\" class_labels = {} with open(labels_path, \"r\") as class_labels_file:", "a dataset of JPG images with corresponding set PNG masks. :param images_dir: directory", "not in class_labels: raise ValueError( \"No corresponding class ID found for the class", "line :return: dictionary mapping class labels to ID values \"\"\" class_labels = {}", "data for # the current image based on the unique image ID annotation", "np import six import tensorflow as tf from tensorflow.compat.v1.python_io import TFRecordWriter from tqdm", "train_pct < 1.0: # get the correct file name prefix for the TFRecord", "listing one class per line\", ) args_parser.add_argument( \"--combine\", default=False, action='store_true', help=\"combine all regions/classes", "class_id += 1 return class_labels # ------------------------------------------------------------------------------ def _int64_list_feature( values, ) -> tf.train.Feature:", "* train_pct) # map the file prefixes to the sets of file IDs", "array once if combine_into_one: # allocate memory for the region mask region_mask =", "as keys mapped to the class ID (i.e. the label's line number). So", "def _int64_list_feature( values, ) -> tf.train.Feature: \"\"\" Returns a TF-Feature of int64_list. :param", "shape_attributes = region[\"shape_attributes\"] region_attributes = region[\"region_attributes\"] # find the class ID corresponding to", "for the TFRecord files # based on the presence of a specified file", "tfrecord_file_prefix = \"tfrecord\" else: tfrecord_file_prefix = dataset_base_name # map the file prefixes to", "will be mapped to concurrent future processes args_iterable = [] for base_name, file_ids", "dimensions width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name)) # if combining all regions into", "= tfrecord_file_prefix_train + \"_\" + dataset_base_name tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + \"_\" + dataset_base_name", "no split will occur \"\"\" masks_ext = \".png\" images_ext = \".jpg\" file_ids =", "tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + \"_\" + dataset_base_name # get the split index to", "PNG masks. 
The two converters then build on these helpers, and a small main() wires them to the command line:

# ------------------------------------------------------------------------------
def masked_dataset_to_tfrecords(
        images_dir: str,
        masks_dir: str,
        tfrecord_dir: str,  # output directory (parameter name assumed)
        num_shards: int,
        dataset_base_name: str,
        train_pct: float = 1.0,
):
    """
    Creates TFRecord files corresponding to a dataset of JPG images with a
    corresponding set of PNG masks.

    :param images_dir: directory containing image files
    :param masks_dir: directory containing mask files corresponding to the images
    :param tfrecord_dir: directory where TFRecord files will be written
    :param num_shards: number of shards
    :param dataset_base_name: base name of the TFRecord files
    :param train_pct: fraction of the dataset to use for training; if 1.0 then
        no split will occur
    """
    masks_ext = ".png"
    images_ext = ".jpg"
    file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext))

    # get the file name prefixes and subsets of file IDs
    if train_pct < 1.0:
        # get the correct file name prefixes for the TFRecord files
        # based on the presence of a specified file base name
        # ...
        if dataset_base_name != "":
            tfrecord_file_prefix_train = tfrecord_file_prefix_train + "_" + dataset_base_name
            tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + "_" + dataset_base_name

        # get the split index to use when slicing the list of file IDs
        split_index = int(len(file_ids) * train_pct)

        # map the file prefixes to the sets of file IDs for the split sections
        split_names_to_ids = {
            tfrecord_file_prefix_train: file_ids[:split_index],
            tfrecord_file_prefix_valid: file_ids[split_index:],
        }
        # report the number of samples
        # ...
    else:
        # get the correct file name prefix for the TFRecord files
        # based on the presence of a specified file base name
        if dataset_base_name == "":
            tfrecord_file_prefix = "tfrecord"
        else:
            tfrecord_file_prefix = dataset_base_name

        # map the file prefix to the set of file IDs
        split_names_to_ids = {
            tfrecord_file_prefix: file_ids,
        }
        # report the number of samples
        _logger.info(f"TFRecord dataset contains {len(file_ids)} samples (no train/valid split)")

    # create an iterable of arguments that will be mapped to concurrent future processes
    args_iterable = []
    for base_name, file_ids in split_names_to_ids.items():
        num_images = len(file_ids)
        num_per_shard = int(math.ceil(num_images / num_shards))
        for shard_id in range(num_shards):
            tfrecord_writing_args = {
                # ... (shard index, per-shard count, output location, etc.)
                "file_ids": file_ids,
                "images_dir": images_dir,
                "masks_dir": masks_dir,
            }
            args_iterable.append(tfrecord_writing_args)

    # use a ProcessPoolExecutor to facilitate creating the TFRecords in parallel
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # map the TFRecord creation function to the iterable of arguments
        _logger.info("Building TFRecords in parallel...")
        executor.map(_write_tfrecord_shard, args_iterable)


# ------------------------------------------------------------------------------
def vgg_to_masks(
        images_dir: str,
        annotations_file: str,
        masks_dir: str,
        class_labels_file: str,
        combine_into_one: bool = False,
):
    """
    Creates mask files from annotations specified in a JSON file holding the
    semantic segmentation (mask) regions, expected to be in the JSON format
    created by the VGG Image Annotator tool.

    :param masks_dir: directory where PNG mask files will be written
    :param combine_into_one: whether or not to combine all mask regions for an
        image into a single mask file
    """
    # make the masks directory if it doesn't already exist
    os.makedirs(masks_dir, exist_ok=True)

    # load the contents of the annotation JSON file (created
    # using the VIA tool) and initialize the annotations dictionary
    annotations = json.loads(open(annotations_file).read())
    image_annotations = {}

    # loop over the annotation entries themselves (values), storing each one so
    # the data for the current image can later be found via the unique image ID
    for data in annotations.values():
        # ...
        image_annotations[image_id] = data

    # get a dictionary of class labels to class IDs
    class_labels = _class_labels_to_ids(class_labels_file)

    _logger.info("Generating mask files...")
    for image_file_name in os.listdir(images_dir):

        # skip any files without a *.jpg extension
        if not image_file_name.endswith(".jpg"):
            continue

        file_id = os.path.splitext(image_file_name)[0]
        annotation = image_annotations[file_id]

        # get the image dimensions
        width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name))

        # if combining all regions into a single mask file
        # then we'll only allocate the mask array once
        if combine_into_one:
            # allocate memory for the region mask
            region_mask = np.zeros((height, width, 3), dtype=np.uint8)

        # loop over the annotated regions
        for (i, region) in enumerate(annotation["regions"]):

            # if not combining all regions into a single file,
            # allocate a fresh mask array for this region
            if not combine_into_one:
                region_mask = np.zeros((height, width, 3), dtype=np.uint8)

            shape_attributes = region["shape_attributes"]
            region_attributes = region["region_attributes"]

            # find the class ID corresponding to the region's class label
            # ...
            if class_label not in class_labels:
                raise ValueError(
                    "No corresponding class ID found for the class label "
                    # ...
                )
            else:
                class_id = class_labels[class_label]

            # get the array of (x, y)-coordinates for the region's polygon
            # ...

            # fill the polygon into the mask, using the class ID as the mask value
            cv2.fillPoly(region_mask, [pts], color=[class_id]*3)

            # if not combining all masks into a single file
            # then write this mask into its own file
            if not combine_into_one:
                # ...
                cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)

        # write the combined mask file, if requested
        if combine_into_one:
            # write the mask file
            mask_file_name = f"{file_id}_segmentation.png"
            cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)

    _logger.info("Done")


# ------------------------------------------------------------------------------
def main():

    # parse the command line arguments
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        "--images", type=str,
        help="path to directory containing input image files",
    )
    args_parser.add_argument(
        "--annotations", required=False, type=str,
        help="path to annotation file",
    )
    args_parser.add_argument(
        "--masks", required=False, type=str,
        help="path to directory where mask files will be written",
    )
    args_parser.add_argument(
        "--classes", type=str,
        help="path to a class labels file listing one class per line",
    )
    args_parser.add_argument(
        "--combine", default=False, action='store_true',
        help="combine all regions/classes into a single mask file",
    )
    args_parser.add_argument(
        "--shards", required=False, default=1, type=int,
        help="number of shard files to use when converting to TFRecord format",
    )
    args_parser.add_argument(
        "--train_pct", required=False, default=1.0, type=float,
        help="fraction of the dataset to use for training; if not specified then "
             "no splitting will occur",
    )
    args_parser.add_argument(
        "--base_name", required=False, type=str, default="",
        help="base name of the TFRecord files",
    )
    # ... (--in_format, --out_format, --tfrecords, etc.)
    args = vars(args_parser.parse_args())

    if args["in_format"] == "vgg":
        if args["out_format"] == "png":
            vgg_to_masks(
                args["images"],
                args["annotations"],
                args["masks"],
                args["classes"],
                args["combine"],
            )
        # ... (other output formats)
    elif args["in_format"] == "png":
        if args["out_format"] == "tfrecord":
            masked_dataset_to_tfrecords(
                args["images"],
                args["masks"],
                args["tfrecords"],
                args["shards"],
                args["base_name"],
                # ...
            )
        # ...
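Both converters rely on matching_ids from cvdata.utils, which is imported but not defined here. A plausible stand-in, offered purely as an assumption about its behavior, pairs masks and images by intersecting the base names found in the two directories:

import os

def matching_ids(masks_dir: str, images_dir: str, masks_ext: str, images_ext: str):
    # hypothetical stand-in: keep only the file IDs present in both directories
    mask_ids = {f[:-len(masks_ext)] for f in os.listdir(masks_dir) if f.endswith(masks_ext)}
    image_ids = {f[:-len(images_ext)] for f in os.listdir(images_dir) if f.endswith(images_ext)}
    return mask_ids & image_ids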
[ "folderpath: str, filename: str, resolution: int, thread: int = 0) -> List[str]: \"\"\"", "30\", \"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\",", "f\"-i {folderpath}/{filename}\", \"-vn\", \"-b:a 192k\", \"-aac_coder twoloop\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\",", "= True self.encode_worker += 1 # ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0: self.encoder_available[\"software\"] =", "\"\"\" エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker = 0 # vaapi のテスト command = self.vaapi_encode_command(", "\"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/{resolution}p.m3u8\" ] return command def", "filemanager.write_playlist(playlist_path, \"audio\") audio_done_path = f\"{folderpath}/audio.done\" # 空のaudio.doneを作成 with open(audio_done_path, \"w\"): pass return True", "{ 1080: 4.3, 720: 2.3, 480: 1.2, 360: 0.65, 240: 0.24, 160: 0.24", "def encode_audio( self, folderpath: str, filename: str, force: bool = False): # audio.m3u8がファイルが存在していた場合", "nvenc_swエンコード elif use_encoder == \"nvenc_sw_decode\": command = self.nvenc_sw_decode_encode_command( folderpath, filename, resolution) result =", "= f\"{folderpath}/audio.done\" # 空のaudio.doneを作成 with open(audio_done_path, \"w\"): pass return True async def encode(", "self.encode_worker += 1 # nvenc(SW) のテスト command = self.nvenc_sw_decode_encode_command( self.sample_dir, self.sample_video, 1080) result", "\".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_sw_decode\"] = True self.encode_worker += 1 #", "0.24 } # 利用可能なエンコーダ self.encoder_available = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False,", "class encode_command_class: def __init__(self, encoder, command): self.encoder: str = encoder self.command: List[str] =", "\"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\",", "self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。", "hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def thumbnail_command( self, folderpath: str, filename: str, resolution:", "command = self.nvenc_sw_decode_encode_command( folderpath, filename, resolution) result = self.encode_command_class(use_encoder, command) return result async", "folderpath: str, filename: str): command = self.thumbnail_command(folderpath, filename, 360) await command_run(\" \".join(command), \"./\")", "\"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ] return command @dataclass class video_info_class: \"\"\"Class for keeping", "import asyncio import os from typing import List from dataclasses import dataclass class", "= encoder break else: # 利用可能なエンコーダーがないときは待つ await asyncio.sleep(10) continue # breakされていたらもう一度break break #", "# audio.m3u8がファイルが存在していた場合 audio_path = f\"{folderpath}/audio.m3u8\" if os.path.isfile(audio_path) or force: return True # 空のaudio.m3u8を作成", "\"\"\"Class for keeping track of an item in inventory.\"\"\" is_video: bool = False", "0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker += 1 # nvenc(SW) のテスト command = self.nvenc_sw_decode_encode_command(", "0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def thumbnail_command( self, folderpath: str, filename:", "logger import json import 
asyncio import os from typing import List from dataclasses", "f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v", "\".join(command), \"./\") try: result = json.loads(result.stdout) except ValueError: result = {} obj =", "\".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"vaapi\"] = True self.encode_worker += 1 #", "return False async def encode_test(self): \"\"\" エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker = 0 #", "False } # 同時エンコード数 self.encode_worker = 0 # 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\":", "= self.nvenc_sw_decode_encode_command( folderpath, filename, resolution) result = self.encode_command_class(use_encoder, command) return result async def", "エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel", "1080) result = await command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] =", "f\"-ss {s}\", \"-vframes 1\", \"-f image2\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ] return command async", "folderpath, filename, resolution) # vaapiエンコード elif use_encoder == \"vaapi\": command = self.vaapi_encode_command( folderpath,", "twoloop\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/audio.m3u8\" ] return command", "# ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0: self.encoder_available[\"software\"] = True self.encode_worker = 1 logger.info(\"エンコードテスト完了!!\")", "use_encoder == \"software\": command = self.software_encode_command( folderpath, filename, resolution) # vaapiエンコード elif use_encoder", "= \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\",", "\"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\",", "self.encode_worker += 1 # ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0: self.encoder_available[\"software\"] = True self.encode_worker", "vaapi\", \"-hwaccel_device intel\", \"-filter_hw_device intel\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-vcodec h264_vaapi\",", "os from typing import List from dataclasses import dataclass class encoder_class: def __init__(self):", "= True obj.width = stream[\"width\"] obj.height = stream[\"height\"] return obj class encode_command_class: def", "__init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate =", "force: return True # 空のaudio.m3u8を作成 with open(audio_path, \"w\"): pass # audioのエンコード command =", "= await self.get_encode_command(folderpath, filename, resolution) logger.info(f\"エンコーダ{encoder.encoder}を使用\") # エンコード実行 result = await command_run(\" \".join(encoder.command),", "str, filename: str, resolution: int, thread: int = 0) -> List[str]: \"\"\" ソフトウエアエンコード時のコマンド。", "\"-an\", f\"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'\", \"-profile high\", \"-compression_level 0\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\",", "\"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False } # 同時エンコード数 self.encode_worker = 0 #", "await self.get_encode_command(folderpath, filename, resolution) logger.info(f\"エンコーダ{encoder.encoder}を使用\") # エンコード実行 result = await command_run(\" \".join(encoder.command), 
\"./\")", "現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False }", "if self.encoder_available[encoder] and \\ not self.encoder_used_status[encoder]: # エンコーダーを利用状態にする self.encoder_used_status[encoder] = True use_encoder =", "str, filename: str): command = self.thumbnail_command(folderpath, filename, 360) await command_run(\" \".join(command), \"./\") command", "filename) result = await command_run(\" \".join(command), \"./\") try: result = json.loads(result.stdout) except ValueError:", "のテスト command = self.vaapi_encode_command( self.sample_dir, self.sample_video, 1080) result = await command_run(\" \".join(command), \"./\")", "= stream[\"height\"] return obj class encode_command_class: def __init__(self, encoder, command): self.encoder: str =", "audio_path = f\"{folderpath}/audio.m3u8\" if os.path.isfile(audio_path) or force: return True # 空のaudio.m3u8を作成 with open(audio_path,", "{ \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False } def get_bitrate(quality: str", "f\"{folderpath}/{resolution}p.m3u8\" ] return command def vaapi_encode_command( self, folderpath: str, filename: str, resolution: int,", "\"./\") try: result = json.loads(result.stdout) except ValueError: result = {} obj = self.video_info_class()", "def __init__(self, encoder, command): self.encoder: str = encoder self.command: List[str] = command async", "\"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def nvenc_sw_decode_encode_command( self, folderpath: str,", "\"-aac_coder twoloop\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/audio.m3u8\" ] return", "4.3, 720: 2.3, 480: 1.2, 360: 0.65, 240: 0.24, 160: 0.24 } #", "\"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\",", "\"-vsync 1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", f\"-threads {thread}\", \"-vcodec", "= \"./sample\" self.sample_video = \"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate = { 1080: 4.3, 720:", "for stream in result[\"streams\"]: if \"codec_type\" in stream: if \"audio\" == stream[\"codec_type\"]: obj.is_audio", "self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate = { 1080: 4.3,", "\"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", f\"-threads", "in result[\"streams\"]: if \"codec_type\" in stream: if \"audio\" == stream[\"codec_type\"]: obj.is_audio = True", "f\"{folderpath}/playlist.m3u8\" await filemanager.write_playlist(playlist_path, \"audio\") audio_done_path = f\"{folderpath}/audio.done\" # 空のaudio.doneを作成 with open(audio_done_path, \"w\"): pass", "\"-hwaccel cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v", "self.software_encode_command( folderpath, filename, resolution) # vaapiエンコード elif use_encoder == \"vaapi\": command = self.vaapi_encode_command(", "return command def software_encode_command( self, folderpath: str, filename: str, resolution: int, thread: int", "ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0: self.encoder_available[\"software\"] = True self.encode_worker = 1 logger.info(\"エンコードテスト完了!!\") logger.info(f\"{self.encoder_available}\")", "\"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf 
hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f", "360) await command_run(\" \".join(command), \"./\") command = self.thumbnail_command(folderpath, filename, 720) await command_run(\" \".join(command),", "{self.bitrate[resolution]*6}M\", \"-an\", \"-preset medium\", \"-profile:v high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf", "\"./\") playlist_path = f\"{folderpath}/playlist.m3u8\" await filemanager.write_playlist(playlist_path, \"audio\") audio_done_path = f\"{folderpath}/audio.done\" # 空のaudio.doneを作成 with", "filename: str, resolution: int, thread: int = 0) -> List[str]: \"\"\" ソフトウエアエンコード時のコマンド。 遅い。", "class encoder_class: def __init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\" #", "解像度:ビットレート(Mbps) self.bitrate = { 1080: 4.3, 720: 2.3, 480: 1.2, 360: 0.65, 240:", "= [ \"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ] return command @dataclass", "thread: int = 0) -> List[str]: \"\"\" ソフトウエアエンコード時のコマンド。 遅い。 \"\"\" command = [", "folderpath: str, filename: str, resolution: int, s: int = 5) -> List[str]: \"\"\"", "logger.error(result.stderr) return False async def encode_test(self): \"\"\" エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker = 0", "0 # 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\":", "f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def", "+= 1 # nvenc(SW) のテスト command = self.nvenc_sw_decode_encode_command( self.sample_dir, self.sample_video, 1080) result =", "self.encoder: str = encoder self.command: List[str] = command async def get_encode_command( self, folderpath:", "self.get_video_info(folderpath, filename) if input_video_info.is_audio: await self.encode_audio(folderpath, filename) encoder = await self.get_encode_command(folderpath, filename, resolution)", "def nvenc_hw_decode_encode_command( self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。", "typing import List from dataclasses import dataclass class encoder_class: def __init__(self): # サンプル動画", "result async def encode_audio( self, folderpath: str, filename: str, force: bool = False):", "vaapi のテスト command = self.vaapi_encode_command( self.sample_dir, self.sample_video, 1080) result = await command_run(\" \".join(command),", "\"./\") command = self.thumbnail_command(folderpath, filename, 720) await command_run(\" \".join(command), \"./\") pass def video_info_command(self,", "pass def video_info_command(self, folderpath: str, filename: str): command = [ \"ffprobe\", \"-loglevel quiet\",", "def video_info_command(self, folderpath: str, filename: str): command = [ \"ffprobe\", \"-loglevel quiet\", \"-show_streams\",", "\"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", f\"-ss {s}\", \"-vframes 1\", \"-f image2\", f\"-vf scale=-2:{resolution}\",", "if self.encode_worker == 0: await self.encode_test() # 利用可能なエンコーダーの探索 use_encoder = None while True:", "video_info_class: \"\"\"Class for keeping track of an item in inventory.\"\"\" is_video: bool =", "4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\",", "filename, resolution) # vaapiエンコード elif use_encoder == \"vaapi\": command = self.vaapi_encode_command( 
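Each builder returns a list of "flag value" strings that the class later joins with spaces and hands to command_run, which is imported from the surrounding project and not defined in this module. A minimal stand-in, assuming it simply runs the joined command line in a working directory and returns the completed process, might look like this sketch:

import asyncio
import subprocess

async def command_run(command: str, cwd: str) -> subprocess.CompletedProcess:
    # hypothetical stand-in: run the joined ffmpeg/ffprobe command line in the
    # given working directory on a worker thread, capturing stdout/stderr as text
    return await asyncio.to_thread(
        subprocess.run,
        command,
        shell=True,
        cwd=cwd,
        capture_output=True,
        text=True,
    )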
Continuing encoder_class, the hardware-accelerated builders follow the same pattern, together with the thumbnail and ffprobe helpers:

    def vaapi_encode_command(
            self, folderpath: str, filename: str, resolution: int,
            vaapi_device: str = "/dev/dri/renderD128") -> List[str]:
        """
        Command for VAAPI (Intel) encoding. Encodes with VBR.
        """
        command = [
            "ffmpeg", "-hide_banner", "-y", "-vsync 1",
            f"-init_hw_device vaapi=intel:{vaapi_device}",
            "-hwaccel vaapi", "-hwaccel_output_format vaapi",
            "-hwaccel_device intel", "-filter_hw_device intel",
            f"-i {folderpath}/{filename}",
            "-r 30", "-g 180",
            "-vcodec h264_vaapi", "-rc_mode VBR", "-bf 8",
            f"-b:v {self.bitrate[resolution]}M",
            f"-bufsize {self.bitrate[resolution]*6}M",
            "-an",
            f"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'",
            "-profile high", "-compression_level 0",
            "-start_number 0", "-hls_time 6", "-hls_list_size 0", "-f hls",
            f"{folderpath}/{resolution}p.m3u8"]
        return command

    def nvenc_hw_decode_encode_command(
            self, folderpath: str, filename: str, resolution: int,) -> List[str]:
        """
        Command for NVENC encoding; the video is decoded in hardware.
        Encodes with VBR. As a workaround for errors, the actual output
        resolution is reduced by 1.
        """
        command = [
            "/opt/bin/ffmpeg", "-hide_banner", "-y", "-vsync 1",
            "-init_hw_device cuda", "-hwaccel cuda", "-hwaccel_output_format cuda",
            f"-i {folderpath}/{filename}",
            "-r 30", "-g 180",
            "-c:v h264_nvenc",
            f"-b:v {self.bitrate[resolution]}M",
            f"-bufsize {self.bitrate[resolution]*6}M",
            "-an",
            "-preset medium", "-profile:v high",
            "-bf 4", "-b_ref_mode 2", "-temporal-aq 1",
            f"-vf scale_cuda=-2:{resolution-1}",
            "-hls_time 6", "-hls_list_size 0", "-f hls",
            f"{folderpath}/{resolution}p.m3u8",
        ]
        return command

    def nvenc_sw_decode_encode_command(
            self, folderpath: str, filename: str, resolution: int,) -> List[str]:
        """
        Command for NVENC encoding; the video is decoded in software.
        Encodes with VBR. As a workaround for errors, the actual output
        resolution is reduced by 1.
        """
        command = [
            "/opt/bin/ffmpeg", "-hide_banner", "-y", "-vsync 1",
            "-init_hw_device cuda", "-hwaccel_output_format cuda",
            f"-i {folderpath}/{filename}",
            "-r 30", "-g 180",
            "-c:v h264_nvenc",
            f"-b:v {self.bitrate[resolution]}M",
            f"-bufsize {self.bitrate[resolution]*6}M",
            "-an",
            "-preset medium", "-profile:v high",
            "-bf 4", "-b_ref_mode 2", "-temporal-aq 1",
            f"-vf hwupload,scale_cuda=-2:{resolution-1}",
            "-hls_time 6", "-hls_list_size 0", "-f hls",
            f"{folderpath}/{resolution}p.m3u8",
        ]
        return command

    def thumbnail_command(
            self, folderpath: str, filename: str, resolution: int,
            s: int = 5) -> List[str]:
        """
        Command for generating a thumbnail.
        The argument s is the position in the video to capture from.
        """
        command = [
            "ffmpeg", "-hide_banner", "-y",
            f"-i {folderpath}/{filename}",
            f"-ss {s}", "-vframes 1", "-f image2",
            f"-vf scale=-2:{resolution}",
            f"{folderpath}/thumbnail_{resolution}.jpg"
        ]
        return command

    async def thumbnail(self, folderpath: str, filename: str):
        command = self.thumbnail_command(folderpath, filename, 360)
        await command_run(" ".join(command), "./")
        command = self.thumbnail_command(folderpath, filename, 720)
        await command_run(" ".join(command), "./")
        pass

    def video_info_command(self, folderpath: str, filename: str):
        command = [
            "ffprobe", "-loglevel quiet",
            "-show_streams", "-print_format json",
            f"{folderpath}/{filename}",
        ]
        return command

    @dataclass
    class video_info_class:
        """Holds the stream information of an input file."""
        is_video: bool = False
        is_audio: bool = False
        width: int = 0
        height: int = 0
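Before the probing and scheduling methods, a quick standalone illustration of how such a list becomes the shell string handed to command_run. The folder /videos/demo and file input.mp4 are placeholders, and the command is a trimmed version of the software builder above rather than the exact one it produces:

bitrate = {1080: 4.3, 720: 2.3, 480: 1.2, 360: 0.65, 240: 0.24, 160: 0.24}

def software_command_string(folderpath: str, filename: str, resolution: int) -> str:
    command = [
        "ffmpeg", "-hide_banner", "-y",
        f"-i {folderpath}/{filename}",
        "-vcodec libx264",
        f"-b:v {bitrate[resolution]}M",
        f"-bufsize {bitrate[resolution] * 6}M",
        f"-vf scale=-2:{resolution}",
        f"{folderpath}/{resolution}p.m3u8",
    ]
    # each element already carries its own "flag value" spacing, so a plain
    # join with spaces yields a complete shell command line
    return " ".join(command)

print(software_command_string("/videos/demo", "input.mp4", 720))
# prints something like:
# ffmpeg -hide_banner -y -i /videos/demo/input.mp4 -vcodec libx264 -b:v 2.3M
#   -bufsize 13.799999999999999M -vf scale=-2:720 /videos/demo/720p.m3u8
# (the -bufsize value shows float noise because 2.3 * 6 is not exact in binary)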
The probing and scheduling layer of encoder_class sits on top of these builders:

    async def get_video_info(
            self, folderpath: str, filename: str) -> video_info_class:
        command = self.video_info_command(folderpath, filename)
        result = await command_run(" ".join(command), "./")
        try:
            result = json.loads(result.stdout)
        except ValueError:
            result = {}
        obj = self.video_info_class()
        if "streams" not in result:
            return obj
        for stream in result["streams"]:
            if "codec_type" in stream:
                if "audio" == stream["codec_type"]:
                    obj.is_audio = True
                elif "video" == stream["codec_type"]:
                    obj.is_video = True
                    obj.width = stream["width"]
                    obj.height = stream["height"]
        return obj

    class encode_command_class:
        def __init__(self, encoder, command):
            self.encoder: str = encoder
            self.command: List[str] = command

    async def get_encode_command(
            self, folderpath: str, filename: str, resolution: int,) -> encode_command_class:
        if self.encode_worker == 0:
            await self.encode_test()
        # look for an available encoder
        use_encoder = None
        while True:
            for encoder in self.encoder_available:
                # available and not currently in use
                if self.encoder_available[encoder] and \
                        not self.encoder_used_status[encoder]:
                    # mark the encoder as in use
                    self.encoder_used_status[encoder] = True
                    use_encoder = encoder
                    break
            else:
                # no encoder is free: wait and try again
                await asyncio.sleep(10)
                continue
            # the for loop was left via break, so leave the while loop as well
            break
        # software encode
        if use_encoder == "software":
            command = self.software_encode_command(
                folderpath, filename, resolution)
        # vaapi encode
        elif use_encoder == "vaapi":
            command = self.vaapi_encode_command(
                folderpath, filename, resolution)
        # nvenc (HW decode) encode
        elif use_encoder == "nvenc_hw_decode":
            command = self.nvenc_hw_decode_encode_command(
                folderpath, filename, resolution)
        # nvenc (SW decode) encode
        elif use_encoder == "nvenc_sw_decode":
            command = self.nvenc_sw_decode_encode_command(
                folderpath, filename, resolution)
        result = self.encode_command_class(use_encoder, command)
        return result
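get_encode_command leans on Python's for/else: the else branch runs only when the for loop finishes without hitting break, which here means "no encoder was free on this pass, so sleep and retry". A tiny standalone example of the same pattern, with made-up encoder states:

available = {"vaapi": False, "software": True}
in_use = {"vaapi": False, "software": False}

for name in available:
    if available[name] and not in_use[name]:
        print(f"picked {name}")
        break
else:
    # runs only if the for loop completed without break
    print("nothing free, would sleep and retry")
# -> picked software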
\"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel cuda\", \"-hwaccel_output_format cuda\", f\"-i", "folderpath: str, filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\",", "return obj for stream in result[\"streams\"]: if \"codec_type\" in stream: if \"audio\" ==", "2\", \"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ]", "import json import asyncio import os from typing import List from dataclasses import", "if result.returncode == 0: return True else: logger.error(f\"encoder error {folderpath}\") logger.error(\" \".join(encoder.command)) logger.error(result.stdout)", "return command @dataclass class video_info_class: \"\"\"Class for keeping track of an item in", "command async def thumbnail(self, folderpath: str, filename: str): command = self.thumbnail_command(folderpath, filename, 360)", "self, folderpath: str, filename: str, resolution: int,) -> encode_command_class: if self.encode_worker == 0:", "result = await command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_sw_decode\"] = True", "while True: for encoder in self.encoder_available: # 利用可能でかつ、利用されていない場合 if self.encoder_available[encoder] and \\ not", "{folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\",", "resolution: int, vaapi_device: str = \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command", "6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def nvenc_sw_decode_encode_command( self, folderpath:", "2\", \"-temporal-aq 1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ]", "__init__(self, encoder, command): self.encoder: str = encoder self.command: List[str] = command async def", ".filemanager import filemanager from ..command_run import command_run from ..logger import logger import json", "VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\",", "command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker += 1", "720: 2.3, 480: 1.2, 360: 0.65, 240: 0.24, 160: 0.24 } # 利用可能なエンコーダ", "1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command", "await command_run(\" \".join(encoder.command), \"./\") logger.info(\"エンコード完了\") # エンコーダーを開放 self.encoder_used_status[encoder.encoder] = False if result.returncode ==", "self.encoder_available[\"nvenc_sw_decode\"] = True self.encode_worker += 1 # ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0: self.encoder_available[\"software\"]", "folderpath: str, filename: str, resolution: int,) -> encode_command_class: if self.encode_worker == 0: await", "\"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-init_hw_device", "0: self.encoder_available[\"vaapi\"] = True self.encode_worker += 1 # nvenc(HW) のテスト command = self.nvenc_hw_decode_encode_command(", "# 利用可能なエンコーダーがないときは待つ await asyncio.sleep(10) continue # breakされていたらもう一度break break # ソフトウエアエンコード if use_encoder ==", "height: 
int = 0 async def get_video_info( self, folderpath: str, filename: str) ->", "\"-hwaccel_device intel\", \"-filter_hw_device intel\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode", "{self.bitrate[resolution]*6}M\", \"-an\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/{resolution}p.m3u8\"", "self.video_info_command(folderpath, filename) result = await command_run(\" \".join(command), \"./\") try: result = json.loads(result.stdout) except", "0: return True else: logger.error(f\"encoder error {folderpath}\") logger.error(\" \".join(encoder.command)) logger.error(result.stdout) logger.error(result.stderr) return False", "\"video\" == stream[\"codec_type\"]: obj.is_video = True obj.width = stream[\"width\"] obj.height = stream[\"height\"] return", "await command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_sw_decode\"] = True self.encode_worker +=", "True: for encoder in self.encoder_available: # 利用可能でかつ、利用されていない場合 if self.encoder_available[encoder] and \\ not self.encoder_used_status[encoder]:", "False): # audio.m3u8がファイルが存在していた場合 audio_path = f\"{folderpath}/audio.m3u8\" if os.path.isfile(audio_path) or force: return True #", "return result async def encode_audio( self, folderpath: str, filename: str, force: bool =", "is_video: bool = False is_audio: bool = False width: int = 0 height:", "f\"-init_hw_device vaapi=intel:{vaapi_device}\", \"-hwaccel vaapi\", \"-hwaccel_output_format vaapi\", \"-hwaccel_device intel\", \"-filter_hw_device intel\", f\"-i {folderpath}/{filename}\", \"-r", "item in inventory.\"\"\" is_video: bool = False is_audio: bool = False width: int", "obj.height = stream[\"height\"] return obj class encode_command_class: def __init__(self, encoder, command): self.encoder: str", "エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format", "= None while True: for encoder in self.encoder_available: # 利用可能でかつ、利用されていない場合 if self.encoder_available[encoder] and", "\"-f hls\", f\"{folderpath}/audio.m3u8\" ] return command def software_encode_command( self, folderpath: str, filename: str,", "-> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync", "command): self.encoder: str = encoder self.command: List[str] = command async def get_encode_command( self,", "get_bitrate(quality: str = \"high\"): pass return def audio_encode_command( self, folderpath: str, filename: str,):", "= [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", \"-vn\", \"-b:a 192k\", \"-aac_coder twoloop\", \"-start_number", "f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\",", "30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-preset medium\", \"-profile:v", "self.encode_worker == 0: self.encoder_available[\"software\"] = True self.encode_worker = 1 logger.info(\"エンコードテスト完了!!\") logger.info(f\"{self.encoder_available}\") return self.encoder_available", "command def nvenc_sw_decode_encode_command( self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\"", "video_info_command(self, folderpath: str, filename: str): command = [ \"ffprobe\", 
\"-loglevel quiet\", \"-show_streams\", \"-print_format", "== \"nvenc_sw_decode\": command = self.nvenc_sw_decode_encode_command( folderpath, filename, resolution) result = self.encode_command_class(use_encoder, command) return", "elif use_encoder == \"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution) # nvenc_hwエンコード elif", "command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_sw_decode\"] = True self.encode_worker += 1", "192k\", \"-aac_coder twoloop\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/audio.m3u8\" ]", "filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\",", "obj class encode_command_class: def __init__(self, encoder, command): self.encoder: str = encoder self.command: List[str]", "\"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def nvenc_sw_decode_encode_command( self,", "同時エンコード数 self.encode_worker = 0 # 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False,", "if \"streams\" not in result: return obj for stream in result[\"streams\"]: if \"codec_type\"", "h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", f\"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'\", \"-profile", "self.encoder_available = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False } #", "str, resolution: int, vaapi_device: str = \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\"", "== 0: self.encoder_available[\"software\"] = True self.encode_worker = 1 logger.info(\"エンコードテスト完了!!\") logger.info(f\"{self.encoder_available}\") return self.encoder_available encoder", "command_run(\" \".join(command), \"./\") playlist_path = f\"{folderpath}/playlist.m3u8\" await filemanager.write_playlist(playlist_path, \"audio\") audio_done_path = f\"{folderpath}/audio.done\" #", "async def get_video_info( self, folderpath: str, filename: str) -> video_info_class: command = self.video_info_command(folderpath,", "-> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\",", "{} obj = self.video_info_class() if \"streams\" not in result: return obj for stream", "import dataclass class encoder_class: def __init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video =", "VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", f\"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'\", \"-profile high\", \"-compression_level", "in inventory.\"\"\" is_video: bool = False is_audio: bool = False width: int =", "f\"-threads {thread}\", \"-vcodec libx264\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-start_number 0\",", "command = self.video_info_command(folderpath, filename) result = await command_run(\" \".join(command), \"./\") try: result =", "async def encode_audio( self, folderpath: str, filename: str, force: bool = False): #", "エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker = 0 # vaapi のテスト command = self.vaapi_encode_command( self.sample_dir,", "1\", \"-init_hw_device cuda\", \"-hwaccel_output_format 
cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v h264_nvenc\",", "True else: logger.error(f\"encoder error {folderpath}\") logger.error(\" \".join(encoder.command)) logger.error(result.stdout) logger.error(result.stderr) return False async def", "int, vaapi_device: str = \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command =", "== \"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution) # nvenc_hwエンコード elif use_encoder ==", "= self.audio_encode_command(folderpath, filename) await command_run(\" \".join(command), \"./\") playlist_path = f\"{folderpath}/playlist.m3u8\" await filemanager.write_playlist(playlist_path, \"audio\")", "0: self.encoder_available[\"nvenc_sw_decode\"] = True self.encode_worker += 1 # ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0:", "from typing import List from dataclasses import dataclass class encoder_class: def __init__(self): #", "self.video_info_class() if \"streams\" not in result: return obj for stream in result[\"streams\"]: if", "filename, resolution) # nvenc_hwエンコード elif use_encoder == \"nvenc_hw_decode\": command = self.nvenc_hw_decode_encode_command( folderpath, filename,", "int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\",", "int,): logger.info(\"エンコード開始\") input_video_info = await self.get_video_info(folderpath, filename) if input_video_info.is_audio: await self.encode_audio(folderpath, filename) encoder", "f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\",", "self.encoder_available: # 利用可能でかつ、利用されていない場合 if self.encoder_available[encoder] and \\ not self.encoder_used_status[encoder]: # エンコーダーを利用状態にする self.encoder_used_status[encoder] =", "folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\"", "import command_run from ..logger import logger import json import asyncio import os from", "VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-init_hw_device vaapi=intel:{vaapi_device}\", \"-hwaccel", "# エンコード実行 result = await command_run(\" \".join(encoder.command), \"./\") logger.info(\"エンコード完了\") # エンコーダーを開放 self.encoder_used_status[encoder.encoder] =", "else: logger.error(f\"encoder error {folderpath}\") logger.error(\" \".join(encoder.command)) logger.error(result.stdout) logger.error(result.stderr) return False async def encode_test(self):", "command_run(\" \".join(command), \"./\") pass def video_info_command(self, folderpath: str, filename: str): command = [", "f\"{folderpath}/audio.m3u8\" ] return command def software_encode_command( self, folderpath: str, filename: str, resolution: int,", "logger.error(f\"encoder error {folderpath}\") logger.error(\" \".join(encoder.command)) logger.error(result.stdout) logger.error(result.stderr) return False async def encode_test(self): \"\"\"", "= self.thumbnail_command(folderpath, filename, 360) await command_run(\" \".join(command), \"./\") command = self.thumbnail_command(folderpath, filename, 720)", "\"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def thumbnail_command( self, folderpath: str, filename: str,", "elif use_encoder == \"nvenc_hw_decode\": command = self.nvenc_hw_decode_encode_command( folderpath, filename, 
resolution) # nvenc_swエンコード elif", "command_run(\" \".join(command), \"./\") command = self.thumbnail_command(folderpath, filename, 720) await command_run(\" \".join(command), \"./\") pass", "\"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", \"-vn\", \"-b:a 192k\", \"-aac_coder twoloop\", \"-start_number 0\", \"-hls_time", "self.sample_video, 1080) result = await command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"]", "\"./\") if result.returncode == 0: self.encoder_available[\"vaapi\"] = True self.encode_worker += 1 # nvenc(HW)", "resolution) # vaapiエンコード elif use_encoder == \"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution)", "await self.encode_test() # 利用可能なエンコーダーの探索 use_encoder = None while True: for encoder in self.encoder_available:", "False, \"nvenc_sw_decode\": False, \"software\": False } # 同時エンコード数 self.encode_worker = 0 # 現在利用中のエンコーダ", "await command_run(\" \".join(command), \"./\") pass def video_info_command(self, folderpath: str, filename: str): command =", "filename, resolution) # nvenc_swエンコード elif use_encoder == \"nvenc_sw_decode\": command = self.nvenc_sw_decode_encode_command( folderpath, filename,", "self.encoder_used_status[encoder]: # エンコーダーを利用状態にする self.encoder_used_status[encoder] = True use_encoder = encoder break else: # 利用可能なエンコーダーがないときは待つ", "f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", f\"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'\", \"-profile high\", \"-compression_level 0\", \"-start_number 0\", \"-hls_time 6\",", "resolution: int,) -> encode_command_class: if self.encode_worker == 0: await self.encode_test() # 利用可能なエンコーダーの探索 use_encoder", "\"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return", "'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'\", \"-profile high\", \"-compression_level 0\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\",", "\"-profile:v high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size", "not in result: return obj for stream in result[\"streams\"]: if \"codec_type\" in stream:", "str): command = [ \"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ] return", "\"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", f\"-ss {s}\", \"-vframes 1\", \"-f image2\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\"", "\"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g", "True self.encode_worker += 1 # nvenc(HW) のテスト command = self.nvenc_hw_decode_encode_command( self.sample_dir, self.sample_video, 1080)", "return command def vaapi_encode_command( self, folderpath: str, filename: str, resolution: int, vaapi_device: str", "result.returncode == 0: self.encoder_available[\"nvenc_sw_decode\"] = True self.encode_worker += 1 # ソフトウエアエンコードしか使えない場合 if self.encode_worker", "\"-vframes 1\", \"-f image2\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ] return command async def thumbnail(self,", "1\", \"-f image2\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ] return command async def thumbnail(self, folderpath:", "self.nvenc_hw_decode_encode_command( 
folderpath, filename, resolution) # nvenc_swエンコード elif use_encoder == \"nvenc_sw_decode\": command = self.nvenc_sw_decode_encode_command(", "resolution) # nvenc_swエンコード elif use_encoder == \"nvenc_sw_decode\": command = self.nvenc_sw_decode_encode_command( folderpath, filename, resolution)", "command_run(\" \".join(command), \"./\") try: result = json.loads(result.stdout) except ValueError: result = {} obj", "4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\",", "result = {} obj = self.video_info_class() if \"streams\" not in result: return obj", "False is_audio: bool = False width: int = 0 height: int = 0", "0.24, 160: 0.24 } # 利用可能なエンコーダ self.encoder_available = { \"vaapi\": False, \"nvenc_hw_decode\": False,", "int = 0 height: int = 0 async def get_video_info( self, folderpath: str,", "# audioのエンコード command = self.audio_encode_command(folderpath, filename) await command_run(\" \".join(command), \"./\") playlist_path = f\"{folderpath}/playlist.m3u8\"", "0: self.encoder_available[\"software\"] = True self.encode_worker = 1 logger.info(\"エンコードテスト完了!!\") logger.info(f\"{self.encoder_available}\") return self.encoder_available encoder =", "use_encoder == \"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution) # nvenc_hwエンコード elif use_encoder", "if result.returncode == 0: self.encoder_available[\"vaapi\"] = True self.encode_worker += 1 # nvenc(HW) のテスト", "] return command def thumbnail_command( self, folderpath: str, filename: str, resolution: int, s:", "self.thumbnail_command(folderpath, filename, 360) await command_run(\" \".join(command), \"./\") command = self.thumbnail_command(folderpath, filename, 720) await", "stream: if \"audio\" == stream[\"codec_type\"]: obj.is_audio = True elif \"video\" == stream[\"codec_type\"]: obj.is_video", "def encode_test(self): \"\"\" エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker = 0 # vaapi のテスト command", "if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker += 1 # nvenc(SW) のテスト", "\"-f hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def nvenc_sw_decode_encode_command( self, folderpath: str, filename: str,", "int = 0 async def get_video_info( self, folderpath: str, filename: str) -> video_info_class:", "stream[\"width\"] obj.height = stream[\"height\"] return obj class encode_command_class: def __init__(self, encoder, command): self.encoder:", "\"w\"): pass # audioのエンコード command = self.audio_encode_command(folderpath, filename) await command_run(\" \".join(command), \"./\") playlist_path", "\"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\",", "str, resolution: int,) -> encode_command_class: if self.encode_worker == 0: await self.encode_test() # 利用可能なエンコーダーの探索", "1 # ソフトウエアエンコードしか使えない場合 if self.encode_worker == 0: self.encoder_available[\"software\"] = True self.encode_worker = 1", "0\", \"-f hls\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/{resolution}p.m3u8\" ] return command def vaapi_encode_command( self, folderpath:", "break else: # 利用可能なエンコーダーがないときは待つ await asyncio.sleep(10) continue # breakされていたらもう一度break break # ソフトウエアエンコード if", "\"\"\" ソフトウエアエンコード時のコマンド。 遅い。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads", "return command def thumbnail_command( self, folderpath: str, filename: str, resolution: int, s: int", "] 
return command @dataclass class video_info_class: \"\"\"Class for keeping track of an item", "stream[\"codec_type\"]: obj.is_audio = True elif \"video\" == stream[\"codec_type\"]: obj.is_video = True obj.width =", "self.encoder_used_status[encoder.encoder] = False if result.returncode == 0: return True else: logger.error(f\"encoder error {folderpath}\")", "ソフトウエアエンコード時のコマンド。 遅い。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\",", "\"-compression_level 0\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\"] return command", "# サンプル動画 self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate = {", "str = \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\",", "str, resolution: int,): logger.info(\"エンコード開始\") input_video_info = await self.get_video_info(folderpath, filename) if input_video_info.is_audio: await self.encode_audio(folderpath,", "{folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-preset", "result = await command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"vaapi\"] = True", "\"-r 30\", \"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize", "エンコード実行 result = await command_run(\" \".join(encoder.command), \"./\") logger.info(\"エンコード完了\") # エンコーダーを開放 self.encoder_used_status[encoder.encoder] = False", "} # 利用可能なエンコーダ self.encoder_available = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\":", "self.bitrate = { 1080: 4.3, 720: 2.3, 480: 1.2, 360: 0.65, 240: 0.24,", "0 async def get_video_info( self, folderpath: str, filename: str) -> video_info_class: command =", "break # ソフトウエアエンコード if use_encoder == \"software\": command = self.software_encode_command( folderpath, filename, resolution)", "import filemanager from ..command_run import command_run from ..logger import logger import json import", "self, folderpath: str, filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command = [ \"ffmpeg\", \"-hide_banner\",", "True elif \"video\" == stream[\"codec_type\"]: obj.is_video = True obj.width = stream[\"width\"] obj.height =", "\"-vsync 1\", f\"-init_hw_device vaapi=intel:{vaapi_device}\", \"-hwaccel vaapi\", \"-hwaccel_output_format vaapi\", \"-hwaccel_device intel\", \"-filter_hw_device intel\", f\"-i", "\"audio\") audio_done_path = f\"{folderpath}/audio.done\" # 空のaudio.doneを作成 with open(audio_done_path, \"w\"): pass return True async", "= 0 # 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False,", "folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\"", "== stream[\"codec_type\"]: obj.is_audio = True elif \"video\" == stream[\"codec_type\"]: obj.is_video = True obj.width", "asyncio import os from typing import List from dataclasses import dataclass class encoder_class:", "= True self.encode_worker += 1 # nvenc(SW) のテスト command = self.nvenc_sw_decode_encode_command( self.sample_dir, self.sample_video,", "dataclass class encoder_class: def __init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\"", "filename: str): command = [ 
\"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ]", "str) -> video_info_class: command = self.video_info_command(folderpath, filename) result = await command_run(\" \".join(command), \"./\")", "except ValueError: result = {} obj = self.video_info_class() if \"streams\" not in result:", "open(audio_path, \"w\"): pass # audioのエンコード command = self.audio_encode_command(folderpath, filename) await command_run(\" \".join(command), \"./\")", "f\"{folderpath}/audio.m3u8\" if os.path.isfile(audio_path) or force: return True # 空のaudio.m3u8を作成 with open(audio_path, \"w\"): pass", "\"-y\", f\"-i {folderpath}/{filename}\", f\"-ss {s}\", \"-vframes 1\", \"-f image2\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ]", "str, filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i", "List[str]: \"\"\" サムネイル生成のコマンド。 引数sは切り出し時点の動画の場所。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\",", "command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"vaapi\"] = True self.encode_worker += 1", "command def software_encode_command( self, folderpath: str, filename: str, resolution: int, thread: int =", "encoder_class: def __init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\" # 解像度:ビットレート(Mbps)", "vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-init_hw_device vaapi=intel:{vaapi_device}\",", "from ..command_run import command_run from ..logger import logger import json import asyncio import", "空のaudio.m3u8を作成 with open(audio_path, \"w\"): pass # audioのエンコード command = self.audio_encode_command(folderpath, filename) await command_run(\"", "encoder break else: # 利用可能なエンコーダーがないときは待つ await asyncio.sleep(10) continue # breakされていたらもう一度break break # ソフトウエアエンコード", "filename: str, force: bool = False): # audio.m3u8がファイルが存在していた場合 audio_path = f\"{folderpath}/audio.m3u8\" if os.path.isfile(audio_path)", "filename) await command_run(\" \".join(command), \"./\") playlist_path = f\"{folderpath}/playlist.m3u8\" await filemanager.write_playlist(playlist_path, \"audio\") audio_done_path =", "scale=-2:{resolution}\", f\"{folderpath}/{resolution}p.m3u8\" ] return command def vaapi_encode_command( self, folderpath: str, filename: str, resolution:", "filename, 360) await command_run(\" \".join(command), \"./\") command = self.thumbnail_command(folderpath, filename, 720) await command_run(\"", "str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command", "= self.thumbnail_command(folderpath, filename, 720) await command_run(\" \".join(command), \"./\") pass def video_info_command(self, folderpath: str,", "-> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\",", "\"-g 180\", f\"-threads {thread}\", \"-vcodec libx264\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\",", "def software_encode_command( self, folderpath: str, filename: str, resolution: int, thread: int = 0)", "open(audio_done_path, \"w\"): pass return True async def encode( self, folderpath: str, filename: str,", "} # 同時エンコード数 self.encode_worker = 0 # 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": 
False,", "= await self.get_video_info(folderpath, filename) if input_video_info.is_audio: await self.encode_audio(folderpath, filename) encoder = await self.get_encode_command(folderpath,", "\"-an\", \"-preset medium\", \"-profile:v high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\",", "\"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\",", "pass return def audio_encode_command( self, folderpath: str, filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command", "nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device", "\"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", f\"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'\", \"-profile high\",", "] return command async def thumbnail(self, folderpath: str, filename: str): command = self.thumbnail_command(folderpath,", "= await command_run(\" \".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker", "6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/audio.m3u8\" ] return command def software_encode_command( self, folderpath:", "f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"-vf scale=-2:{resolution}\",", "..logger import logger import json import asyncio import os from typing import List", "[ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-init_hw_device vaapi=intel:{vaapi_device}\", \"-hwaccel vaapi\", \"-hwaccel_output_format vaapi\", \"-hwaccel_device", "str, filename: str): command = [ \"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\",", "stream in result[\"streams\"]: if \"codec_type\" in stream: if \"audio\" == stream[\"codec_type\"]: obj.is_audio =", "hls\", f\"{folderpath}/audio.m3u8\" ] return command def software_encode_command( self, folderpath: str, filename: str, resolution:", "vaapiエンコード elif use_encoder == \"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution) # nvenc_hwエンコード", "self, folderpath: str, filename: str) -> video_info_class: command = self.video_info_command(folderpath, filename) result =", "folderpath: str, filename: str): command = [ \"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\",", "str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [", "[ \"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ] return command @dataclass class", "image2\", f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ] return command async def thumbnail(self, folderpath: str, filename:", "obj = self.video_info_class() if \"streams\" not in result: return obj for stream in", "if input_video_info.is_audio: await self.encode_audio(folderpath, filename) encoder = await self.get_encode_command(folderpath, filename, resolution) logger.info(f\"エンコーダ{encoder.encoder}を使用\") #", "await command_run(\" \".join(command), \"./\") playlist_path = f\"{folderpath}/playlist.m3u8\" await filemanager.write_playlist(playlist_path, \"audio\") 
audio_done_path = f\"{folderpath}/audio.done\"", "# 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False", "\"-profile:v high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size", "\"software\": command = self.software_encode_command( folderpath, filename, resolution) # vaapiエンコード elif use_encoder == \"vaapi\":", "\"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ] return command @dataclass class video_info_class: \"\"\"Class", "True use_encoder = encoder break else: # 利用可能なエンコーダーがないときは待つ await asyncio.sleep(10) continue # breakされていたらもう一度break", "def encode( self, folderpath: str, filename: str, resolution: int,): logger.info(\"エンコード開始\") input_video_info = await", "use_encoder = None while True: for encoder in self.encoder_available: # 利用可能でかつ、利用されていない場合 if self.encoder_available[encoder]", "f\"-vf scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ] return command async def thumbnail(self, folderpath: str, filename: str):", "input_video_info = await self.get_video_info(folderpath, filename) if input_video_info.is_audio: await self.encode_audio(folderpath, filename) encoder = await", "\"-preset medium\", \"-profile:v high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time", "result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker += 1 # nvenc(SW) のテスト command", "0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\"] return command def nvenc_hw_decode_encode_command( self, folderpath: str, filename: str,", "self.vaapi_encode_command( folderpath, filename, resolution) # nvenc_hwエンコード elif use_encoder == \"nvenc_hw_decode\": command = self.nvenc_hw_decode_encode_command(", "an item in inventory.\"\"\" is_video: bool = False is_audio: bool = False width:", "scale=-2:{resolution}\", f\"{folderpath}/thumbnail_{resolution}.jpg\" ] return command async def thumbnail(self, folderpath: str, filename: str): command", "\"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\",", "folderpath, filename, resolution) result = self.encode_command_class(use_encoder, command) return result async def encode_audio( self,", "return command def nvenc_hw_decode_encode_command( self, folderpath: str, filename: str, resolution: int,) -> List[str]:", "\"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\",", "encoder, command): self.encoder: str = encoder self.command: List[str] = command async def get_encode_command(", "= { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False } def get_bitrate(quality:", "\"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf hwupload,scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\",", "= self.nvenc_hw_decode_encode_command( folderpath, filename, resolution) # nvenc_swエンコード elif use_encoder == \"nvenc_sw_decode\": command =", "command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", f\"-ss {s}\", \"-vframes 1\", \"-f", "\"./\") logger.info(\"エンコード完了\") # エンコーダーを開放 self.encoder_used_status[encoder.encoder] = False if result.returncode == 0: 
return True", "encode_audio( self, folderpath: str, filename: str, force: bool = False): # audio.m3u8がファイルが存在していた場合 audio_path", "self.encode_worker == 0: await self.encode_test() # 利用可能なエンコーダーの探索 use_encoder = None while True: for", "= [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\",", "self.sample_dir, self.sample_video, 1080) result = await command_run(\" \".join(command), \"./\") if result.returncode == 0:", "intel\", \"-filter_hw_device intel\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\",", "self.get_encode_command(folderpath, filename, resolution) logger.info(f\"エンコーダ{encoder.encoder}を使用\") # エンコード実行 result = await command_run(\" \".join(encoder.command), \"./\") logger.info(\"エンコード完了\")", "encode_command_class: def __init__(self, encoder, command): self.encoder: str = encoder self.command: List[str] = command", "\"-init_hw_device cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v", "\"-vn\", \"-b:a 192k\", \"-aac_coder twoloop\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\",", "self.nvenc_hw_decode_encode_command( self.sample_dir, self.sample_video, 1080) result = await command_run(\" \".join(command), \"./\") if result.returncode ==", "if \"audio\" == stream[\"codec_type\"]: obj.is_audio = True elif \"video\" == stream[\"codec_type\"]: obj.is_video =", "command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", \"-vn\", \"-b:a 192k\", \"-aac_coder twoloop\",", "command def thumbnail_command( self, folderpath: str, filename: str, resolution: int, s: int =", "= self.software_encode_command( folderpath, filename, resolution) # vaapiエンコード elif use_encoder == \"vaapi\": command =", "class video_info_class: \"\"\"Class for keeping track of an item in inventory.\"\"\" is_video: bool", "\"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate = { 1080: 4.3, 720: 2.3, 480: 1.2, 360:", "elif use_encoder == \"nvenc_sw_decode\": command = self.nvenc_sw_decode_encode_command( folderpath, filename, resolution) result = self.encode_command_class(use_encoder,", "filename: str, resolution: int,): logger.info(\"エンコード開始\") input_video_info = await self.get_video_info(folderpath, filename) if input_video_info.is_audio: await", "str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command", "high\", \"-compression_level 0\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\"] return", "dataclasses import dataclass class encoder_class: def __init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video", "command_run from ..logger import logger import json import asyncio import os from typing", "def __init__(self): # サンプル動画 self.sample_dir = \"./sample\" self.sample_video = \"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate", "def get_bitrate(quality: str = \"high\"): pass return def audio_encode_command( self, folderpath: str, filename:", "1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", f\"-threads {thread}\", \"-vcodec libx264\",", "cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\",", "6\", \"-hls_list_size 0\", \"-f hls\", 
f\"{folderpath}/{resolution}p.m3u8\"] return command def nvenc_hw_decode_encode_command( self, folderpath: str,", "1.2, 360: 0.65, 240: 0.24, 160: 0.24 } # 利用可能なエンコーダ self.encoder_available = {", "str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i {folderpath}/{filename}\", \"-vn\",", "use_encoder == \"nvenc_hw_decode\": command = self.nvenc_hw_decode_encode_command( folderpath, filename, resolution) # nvenc_swエンコード elif use_encoder", "command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\", \"-r", "async def thumbnail(self, folderpath: str, filename: str): command = self.thumbnail_command(folderpath, filename, 360) await", "command = self.software_encode_command( folderpath, filename, resolution) # vaapiエンコード elif use_encoder == \"vaapi\": command", "hls\", f\"{folderpath}/{resolution}p.m3u8\", ] return command def nvenc_sw_decode_encode_command( self, folderpath: str, filename: str, resolution:", "continue # breakされていたらもう一度break break # ソフトウエアエンコード if use_encoder == \"software\": command = self.software_encode_command(", "\"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution) # nvenc_hwエンコード elif use_encoder == \"nvenc_hw_decode\":", "\"high\"): pass return def audio_encode_command( self, folderpath: str, filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\"", "\"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format cuda\",", "from dataclasses import dataclass class encoder_class: def __init__(self): # サンプル動画 self.sample_dir = \"./sample\"", "@dataclass class video_info_class: \"\"\"Class for keeping track of an item in inventory.\"\"\" is_video:", "\"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\"] return command def nvenc_hw_decode_encode_command( self, folderpath: str, filename:", "\"./\") if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker += 1 # nvenc(SW)", "of an item in inventory.\"\"\" is_video: bool = False is_audio: bool = False", "{thread}\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", f\"-threads {thread}\", \"-vcodec libx264\", \"-bf 8\",", "f\"-vf scale=-2:{resolution}\", f\"{folderpath}/{resolution}p.m3u8\" ] return command def vaapi_encode_command( self, folderpath: str, filename: str,", "int = 0) -> List[str]: \"\"\" ソフトウエアエンコード時のコマンド。 遅い。 \"\"\" command = [ \"ffmpeg\",", "nvenc(HW) のテスト command = self.nvenc_hw_decode_encode_command( self.sample_dir, self.sample_video, 1080) result = await command_run(\" \".join(command),", "str, filename: str, resolution: int, vaapi_device: str = \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。", "= \"video.mp4\" # 解像度:ビットレート(Mbps) self.bitrate = { 1080: 4.3, 720: 2.3, 480: 1.2,", "\"-g 180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\",", "-> List[str]: \"\"\" サムネイル生成のコマンド。 引数sは切り出し時点の動画の場所。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", f\"-i", "\"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\",", "nvenc_hw_decode_encode_command( self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。", "\"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel cuda\", \"-hwaccel_output_format cuda\", 
f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g", "[ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g", "= True use_encoder = encoder break else: # 利用可能なエンコーダーがないときは待つ await asyncio.sleep(10) continue #", "int, s: int = 5) -> List[str]: \"\"\" サムネイル生成のコマンド。 引数sは切り出し時点の動画の場所。 \"\"\" command =", "{self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"-vf", "result = await command_run(\" \".join(command), \"./\") try: result = json.loads(result.stdout) except ValueError: result", "ValueError: result = {} obj = self.video_info_class() if \"streams\" not in result: return", "filename: str, resolution: int, vaapi_device: str = \"/dev/dri/renderD128\") -> List[str]: \"\"\" vaapi(intel)エンコード時のコマンド。 VBRでのエンコードを行う。", "= self.encode_command_class(use_encoder, command) return result async def encode_audio( self, folderpath: str, filename: str,", "self.encode_worker = 0 # 現在利用中のエンコーダ self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\":", "def nvenc_sw_decode_encode_command( self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。", "\"-profile high\", \"-compression_level 0\", \"-start_number 0\", \"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/{resolution}p.m3u8\"]", "0 height: int = 0 async def get_video_info( self, folderpath: str, filename: str)", "遅い。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", f\"-threads {thread}\", f\"-i", "\"-hls_time 6\", \"-hls_list_size 0\", \"-f hls\", f\"{folderpath}/audio.m3u8\" ] return command def software_encode_command( self,", "\"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False } def get_bitrate(quality: str = \"high\"): pass", "\"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v", "int,) -> encode_command_class: if self.encode_worker == 0: await self.encode_test() # 利用可能なエンコーダーの探索 use_encoder =", "\"-vcodec libx264\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-start_number 0\", \"-hls_time 6\",", "high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time 6\", \"-hls_list_size 0\",", "result = await command_run(\" \".join(encoder.command), \"./\") logger.info(\"エンコード完了\") # エンコーダーを開放 self.encoder_used_status[encoder.encoder] = False if", "self.encoder_used_status = { \"vaapi\": False, \"nvenc_hw_decode\": False, \"nvenc_sw_decode\": False, \"software\": False } def", "360: 0.65, 240: 0.24, 160: 0.24 } # 利用可能なエンコーダ self.encoder_available = { \"vaapi\":", "\"-preset medium\", \"-profile:v high\", \"-bf 4\", \"-b_ref_mode 2\", \"-temporal-aq 1\", f\"-vf scale_cuda=-2:{resolution-1}\", \"-hls_time", "filename, resolution) logger.info(f\"エンコーダ{encoder.encoder}を使用\") # エンコード実行 result = await command_run(\" \".join(encoder.command), \"./\") logger.info(\"エンコード完了\") #", "\"-r 30\", \"-g 180\", \"-c:v h264_nvenc\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", \"-preset medium\",", "5) -> List[str]: \"\"\" サムネイル生成のコマンド。 引数sは切り出し時点の動画の場所。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\", \"-y\",", "logger.info(\"エンコードテスト開始\") self.encode_worker = 0 # vaapi のテスト command = 
    def thumbnail_command(
            self, folderpath: str, filename: str,
            resolution: int, s: int = 5) -> List[str]:
        """
        Command that generates a thumbnail.
        The argument s is the position in the video (seconds) to grab the frame from.
        """
        command = [
            "ffmpeg",
            "-hide_banner", "-y",
            f"-i {folderpath}/{filename}",
            f"-ss {s}",
            "-vframes 1",
            "-f image2",
            f"-vf scale=-2:{resolution}",
            f"{folderpath}/thumbnail_{resolution}.jpg"
        ]
        return command

    async def thumbnail(self, folderpath: str, filename: str):
        command = self.thumbnail_command(folderpath, filename, 360)
        await command_run(" ".join(command), "./")
        command = self.thumbnail_command(folderpath, filename, 720)
        await command_run(" ".join(command), "./")

    def video_info_command(self, folderpath: str, filename: str):
        command = [
            "ffprobe",
            "-loglevel quiet",
            "-show_streams",
            "-print_format json",
            f"{folderpath}/{filename}",
        ]
        return command

    @dataclass
    class video_info_class:
        """Stream information extracted from the ffprobe output."""
        is_video: bool = False
        is_audio: bool = False
        width: int = 0
        height: int = 0

    async def get_video_info(self, folderpath: str, filename: str) -> video_info_class:
        command = self.video_info_command(folderpath, filename)
        result = await command_run(" ".join(command), "./")
        try:
            result = json.loads(result.stdout)
        except ValueError:
            result = {}
        obj = self.video_info_class()
        if "streams" not in result:
            return obj
        for stream in result["streams"]:
            if "codec_type" in stream:
                if "audio" == stream["codec_type"]:
                    obj.is_audio = True
                elif "video" == stream["codec_type"]:
                    obj.is_video = True
                    obj.width = stream["width"]
                    obj.height = stream["height"]
        return obj

    class encode_command_class:
        def __init__(self, encoder, command):
            self.encoder: str = encoder
            self.command: List[str] = command

    async def get_encode_command(
            self, folderpath: str, filename: str,
            resolution: int) -> encode_command_class:
        if self.encode_worker == 0:
            await self.encode_test()
        # Look for an available encoder
        use_encoder = None
        while True:
            for encoder in self.encoder_available:
                # Available and not currently in use
                if self.encoder_available[encoder] and \
                        not self.encoder_used_status[encoder]:
                    # Mark the encoder as in use
                    self.encoder_used_status[encoder] = True
                    use_encoder = encoder
                    break
            else:
                # No encoder is available right now, so wait
                await asyncio.sleep(10)
                continue
            # The inner loop was broken out of, so break here as well
            break
        # Software encoding
        if use_encoder == "software":
            command = self.software_encode_command(folderpath, filename, resolution)
        # vaapi encoding
        elif use_encoder == "vaapi":
            command = self.vaapi_encode_command(folderpath, filename, resolution)
        # nvenc (HW decode) encoding
        elif use_encoder == "nvenc_hw_decode":
            command = self.nvenc_hw_decode_encode_command(folderpath, filename, resolution)
        # nvenc (SW decode) encoding
        elif use_encoder == "nvenc_sw_decode":
            command = self.nvenc_sw_decode_encode_command(folderpath, filename, resolution)
        result = self.encode_command_class(use_encoder, command)
        return result
    async def encode_audio(self, folderpath: str, filename: str, force: bool = False):
        # If audio.m3u8 already exists, there is nothing to do
        audio_path = f"{folderpath}/audio.m3u8"
        if os.path.isfile(audio_path) or force:
            return True
        # Create an empty audio.m3u8
        with open(audio_path, "w"):
            pass
        # Encode the audio
        command = self.audio_encode_command(folderpath, filename)
        await command_run(" ".join(command), "./")
        playlist_path = f"{folderpath}/playlist.m3u8"
        await filemanager.write_playlist(playlist_path, "audio")
        audio_done_path = f"{folderpath}/audio.done"
        # Create an empty audio.done
        with open(audio_done_path, "w"):
            pass
        return True

    async def encode(self, folderpath: str, filename: str, resolution: int):
        logger.info("Starting encode")
        input_video_info = await self.get_video_info(folderpath, filename)
        if input_video_info.is_audio:
            await self.encode_audio(folderpath, filename)
        encoder = await self.get_encode_command(folderpath, filename, resolution)
        logger.info(f"Using encoder {encoder.encoder}")
        # Run the encode
        result = await command_run(" ".join(encoder.command), "./")
        logger.info("Encode finished")
        # Release the encoder
        self.encoder_used_status[encoder.encoder] = False
        if result.returncode == 0:
            return True
        else:
            logger.error(f"encoder error {folderpath}")
            logger.error(" ".join(encoder.command))
            logger.error(result.stdout)
            logger.error(result.stderr)
            return False

    async def encode_test(self):
        """
        Test which encoders actually work on this machine.
        """
        logger.info("Starting encoder test")
        self.encode_worker = 0
        # Test vaapi
        command = self.vaapi_encode_command(self.sample_dir, self.sample_video, 1080)
        result = await command_run(" ".join(command), "./")
        if result.returncode == 0:
            self.encoder_available["vaapi"] = True
            self.encode_worker += 1
        # Test nvenc (HW decode)
        command = self.nvenc_hw_decode_encode_command(self.sample_dir, self.sample_video, 1080)
        result = await command_run(" ".join(command), "./")
        if result.returncode == 0:
            self.encoder_available["nvenc_hw_decode"] = True
            self.encode_worker += 1
        # Test nvenc (SW decode)
        command = self.nvenc_sw_decode_encode_command(self.sample_dir, self.sample_video, 1080)
        result = await command_run(" ".join(command), "./")
        if result.returncode == 0:
            self.encoder_available["nvenc_sw_decode"] = True
            self.encode_worker += 1
        # If only software encoding can be used
        if self.encode_worker == 0:
            self.encoder_available["software"] = True
            self.encode_worker = 1
        logger.info("Encoder test finished!!")
        logger.info(f"{self.encoder_available}")
        return self.encoder_available


encoder = encoder_class()
os from typing import", "= 0) -> List[str]: \"\"\" ソフトウエアエンコード時のコマンド。 遅い。 \"\"\" command = [ \"ffmpeg\", \"-hide_banner\",", "self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。", "encode_command_class: if self.encode_worker == 0: await self.encode_test() # 利用可能なエンコーダーの探索 use_encoder = None while", "# vaapiエンコード elif use_encoder == \"vaapi\": command = self.vaapi_encode_command( folderpath, filename, resolution) #", "audioのエンコード command = self.audio_encode_command(folderpath, filename) await command_run(\" \".join(command), \"./\") playlist_path = f\"{folderpath}/playlist.m3u8\" await", "return True async def encode( self, folderpath: str, filename: str, resolution: int,): logger.info(\"エンコード開始\")", "\".join(encoder.command)) logger.error(result.stdout) logger.error(result.stderr) return False async def encode_test(self): \"\"\" エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker", "\"\"\" nvencエンコード時のコマンド。動画のデコードにはHWが利用される。 VBRでのエンコードを行う。 エラー対策のため、実際に出力される動画の解像度は-1されている。 \"\"\" command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\",", "audio_encode_command( self, folderpath: str, filename: str,): \"\"\" オーディオ切り出しのコマンド \"\"\" command = [ \"ffmpeg\",", "\"ffprobe\", \"-loglevel quiet\", \"-show_streams\", \"-print_format json\", f\"{folderpath}/{filename}\", ] return command @dataclass class video_info_class:", "command) return result async def encode_audio( self, folderpath: str, filename: str, force: bool", "resolution: int,): logger.info(\"エンコード開始\") input_video_info = await self.get_video_info(folderpath, filename) if input_video_info.is_audio: await self.encode_audio(folderpath, filename)", "\".join(command), \"./\") if result.returncode == 0: self.encoder_available[\"nvenc_hw_decode\"] = True self.encode_worker += 1 #", "180\", \"-vcodec h264_vaapi\", \"-rc_mode VBR\", \"-bf 8\", f\"-b:v {self.bitrate[resolution]}M\", f\"-bufsize {self.bitrate[resolution]*6}M\", \"-an\", f\"-vf", "if os.path.isfile(audio_path) or force: return True # 空のaudio.m3u8を作成 with open(audio_path, \"w\"): pass #", "command = [ \"/opt/bin/ffmpeg\", \"-hide_banner\", \"-y\", \"-vsync 1\", \"-init_hw_device cuda\", \"-hwaccel_output_format cuda\", f\"-i", "folderpath, filename, resolution) # nvenc_hwエンコード elif use_encoder == \"nvenc_hw_decode\": command = self.nvenc_hw_decode_encode_command( folderpath,", "240: 0.24, 160: 0.24 } # 利用可能なエンコーダ self.encoder_available = { \"vaapi\": False, \"nvenc_hw_decode\":", "= 0 height: int = 0 async def get_video_info( self, folderpath: str, filename:", "\"-init_hw_device cuda\", \"-hwaccel cuda\", \"-hwaccel_output_format cuda\", f\"-i {folderpath}/{filename}\", \"-r 30\", \"-g 180\", \"-c:v", "json.loads(result.stdout) except ValueError: result = {} obj = self.video_info_class() if \"streams\" not in", "async def encode_test(self): \"\"\" エンコードのテスト \"\"\" logger.info(\"エンコードテスト開始\") self.encode_worker = 0 # vaapi のテスト", "nvenc_sw_decode_encode_command( self, folderpath: str, filename: str, resolution: int,) -> List[str]: \"\"\" nvencエンコード時のコマンド。動画のデコードにはSWが利用される。 VBRでのエンコードを行う。" ]
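Every method above only assembles an argument list; running it is delegated to an awaitable command_run helper (and playlist bookkeeping to a filemanager module) that the fragments do not include. Below is a minimal sketch of what such a helper could look like, assuming it simply shells out through asyncio and hands back the completed process. The CommandResult name is invented for this sketch; only the call shape command_run(" ".join(command), "./") and the returncode/stdout/stderr attributes are taken from the call sites above.

import asyncio
from dataclasses import dataclass


@dataclass
class CommandResult:
    # invented container; mirrors the attributes the encoder class reads
    returncode: int
    stdout: str
    stderr: str


async def command_run(command: str, cwd: str) -> CommandResult:
    # run the joined ffmpeg/ffprobe command line in a shell
    proc = await asyncio.create_subprocess_shell(
        command,
        cwd=cwd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    return CommandResult(proc.returncode, stdout.decode(), stderr.decode())

With a helper of this shape, the object returned by command_run has a stdout that can be fed straight into json.loads in get_video_info and a returncode that drives the encoder tests. The fragments that follow come from a different module: the GhidraGdb class, which wires a Ghidra project to a live gdb session via pwntools.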
[ "part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode ==", "proc mappings\") proc_maps = [] #get and format the memory mappings which are", "in parts: if parse: try: number = int(part) except: pass if \"Breakpoint\" in", "#print(\"breakpoint hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp", "= True bp.number = number print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\"))", "gdb session :param exc: String value containing the gdb command :param strip: Boolean,", "str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking", "import Path from threading import Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The", "if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra project - this", "self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it is inserted", "= self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process, False print(\"Found proc offset: \"", "str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line) if", "of the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the", "String args: - Arguments to start the executable with \"\"\" #connect reader thread", "GDB BP SETUP\") for bp in self.client.breakpoints: skip = False for line in", ": offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def run(self, cmd, interactive=True,", "\"Breakpoint\" in part: parse = True bp.number = number print(\"return from setup: \"", "to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set logging file", "print(\"Error in GDB execution of:\" + str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self):", "from threading import Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main class", "runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start()", "pathlib import Path from threading import Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb:", "GhidraCommandClient class GhidraGdb: \"\"\"The main class which encapsulates the whole GhidraGdb framework \"\"\"", "self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp =", "2: arr.pop(0) proc_maps.append(arr) ## get the lowest Start Address offset = 0 procStartAddresss", "time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo): \"\"\"read the", "Initial GDB Commands which are executed before the program starts :param String args:", "gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set logging", "mappings\") proc_maps = [] #get and format the memory mappings which are mapping", "Address offset = 0 procStartAddresss = 0 for i, map in enumerate(proc_maps): if", "hits breakpoint ...) 
def checkThreadRunning(self): \"\"\"check if the current GDB Thread is running", "#self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the offset between Ghidra", "breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function executes a", "\"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode ==", "Fifo which is used to read the data comming from the gdb :param", "mapping here (Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset", "from gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps = [] #get and format", "None break except: continue if not finBp: continue finBp.hit() #todo - this has", "Thread is running :return: Boolean - True if the Thread is running \"\"\"", "cases) - default: True :return: String value containing the gdb response unparsed \"\"\"", "startCommands=\"\", args=\"\"): \"\"\"This is the entry function that spawns a new process and", "Deprecated - attaches the gdb to an existing program instance instead of spawning", "time.sleep(0.05) print(\"getting proc mapping\") #get the proc mappings from gdb procMappings = self.excAndGet(\"i", "= self.currRet + line + \"\\n\" if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\"", "during code execution: \" + str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line)", "function runs arbitrary code in either python or GDB everytime a breakpoint is", "thread is running ... (if gdb hits breakpoint ...) def checkThreadRunning(self): \"\"\"check if", "a particular mapping :param procName: String value containing the Name of the mapping", "Boolean - True if the Thread is running \"\"\" #Todo -- check this", "None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc Offset of a particular", "to be in parallel for line in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try:", "+ startCommands, api=True) self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need", "for part in line.split(\" \"): if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found", "line2 in line: skip = True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0)", "- default: True :return: String value containing the gdb response unparsed \"\"\" self.currRet", "which are mapping the main executable for line in procMappings.split(\"\\n\"): if procName in", "+ startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra", "part in parts: if parse: try: number = int(part) except: pass if \"Breakpoint\"", "continue finBp.hit() #todo - this has to be in parallel for line in", "if len(line) > 1: try: finBp.exec_(line) except Exception as e: print(\"Exception during code", "create issues in some cases) - default: True :return: String value containing the", "else: return self.currRet def readFifo(self, fifo): \"\"\"read the ouput of the gdbPipe te", "program starts :param String args: - Arguments to start the executable with \"\"\"", "filename where the fifo will be created :return: None \"\"\" print(\"setting up fifo", "def 
setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the gdb to an existing", "code execution: \" + str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line) >", "def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking mode :param FIFO: The", "+ str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp in self.client.breakpoints: skip = False", "Default: True :param startCommands: Sting - Initial GDB Commands which are executed before", "breakpoint ...) def checkThreadRunning(self): \"\"\"check if the current GDB Thread is running :return:", "file /tmp/gdbPipe set logging on starti''' + str(args) + \"\\n\" + startCommands, api=True)", "can interact with GDB as usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def", "e: print(\"Exception during code execution: \" + str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"):", "on ''' + startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze", "part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet =", "String value containing the Name of the mapping :return: The start Address of", "= self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process, False print(\"Found", "Arguments to start the executable with \"\"\" #connect reader thread to read gdb", "\"\"\"Analyze the Ghidra project - this command will create all the functions, breakpoints", "self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo which is used to", "inserted :param pattern: the pattern to identify the breakpoint :return: None \"\"\" self.removals.append(pattern)", "\"\"\" Deprecated - attaches the gdb to an existing program instance instead of", "the debugger to it :param String cmd: value containing the path to your", "open(FIFO, 'r') as fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo):", "self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e: print(e) if not \"File", "breakpoint before it is inserted :param pattern: the pattern to identify the breakpoint", "= True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we", "for part in parts: if parse: try: number = int(part) except: pass if", "\"continue\" in line: finBp.deactivate() except Exception as e: print(\"Error in GDB execution of:\"", "the gdb session :param exc: String value containing the gdb command :param strip:", "\" + str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try:", "= ret.split(\" \") parse = False number = 0 for part in parts:", "number of the breakpoint (in gdb) parts = ret.split(\" \") parse = False", "to an existing program instance instead of spawning the program :param interactive: interactive:", "procName in line: ln = line.replace(\"\\t\", \" \") #turn multiple whitespaces into single", "\"\\n\" if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the", "if the Thread is running \"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread))", 
"print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line) if line[0]", "return self.process, False print(\"Found proc offset: \" + str(procOffset)) #calculate final dynamic offset", "FIFO: The filename where the fifo will be created :return: None \"\"\" Thread(target=self.setupFifo,", "bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2 in line: skip = True if", "in GDB execution of:\" + str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run", "#connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set", "None self.breakpointAddr = None self.currRet = None self.removals = [] def removeBpByPattern(self, pattern):", "ret = self.excAndGet(str(bp.setup)) #we parse the number of the breakpoint (in gdb) parts", "as fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the", "value containing the gdb response unparsed \"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\"", "interactive shell(the user can interact with GDB as usual) - Non-blocking :return: None", "#turn multiple whitespaces into single whitespaces while \" \" in ln: ln =", "it :param String cmd: value containing the path to your executable :param Boolean", "to start the executable with \"\"\" #connect reader thread to read gdb pipe", "Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is running", "to read from :return: None \"\"\" while True: #time.sleep(0.05) line = fifo.readline() if", "== 0: return self.process, False print(\"Found proc offset: \" + str(procOffset)) #calculate final", "line: ln = line.replace(\"\\t\", \" \") #turn multiple whitespaces into single whitespaces while", "a breakpoint is hit :return: None \"\"\" #the first breakpoint has to install", "process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e: print(e) if not", "code in either python or GDB everytime a breakpoint is hit :return: None", "#self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches", "in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is", "self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\"", "Exception as e: print(e) if not \"File exists\" in str(e): print(\"sys.exit\") return self.client", "single whitespaces while \" \" in ln: ln = ln.replace(\" \", \" \")", "the entry function that spawns a new process and connects the debugger to", "setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"):", "te receive the data :param fifo: the fifo object to read from :return:", "the gdb :param FIFO: The filename where the fifo will be created :return:", "print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode", "cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry function that spawns a new", "and format the memory mappings which are mapping the main executable for line", "the current GDB Thread is running :return: Boolean - True if the Thread", "\"File exists\" in 
str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr", "offset = 0 procStartAddresss = 0 for i, map in enumerate(proc_maps): if i", "self.breakpointAddr = None self.currRet = None self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes", "created :return: None \"\"\" print(\"setting up fifo now: \" + str(FIFO)) with open(FIFO,", "strip: Boolean, optional - remove the EOF delimiter automatically(this might create issues in", "try: os.mkfifo(self.FIFO) except Exception as e: print(e) if not \"File exists\" in str(e):", "mapping :return: The start Address of the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05)", "\"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process,", "\") #create an array, containing the different columns arr = ln.split(\" \") if", "pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set logging on", "gdb :param FIFO: The filename where the fifo will be created :return: None", "line2 in self.removals: if line2 in line: skip = True if skip: continue", "execution: \" + str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line) > 0:", "return self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None self.currRet = None", "finBp = bp self.breakpointAddr = None break except: continue if not finBp: continue", "set logging file /tmp/gdbPipe set logging on ''' + startCommands, api=True) if interactive:", "columns arr = ln.split(\" \") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get", "if line2 in line: skip = True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset)", "self.removals: if line2 in line: skip = True if skip: continue print(\"ADDING BP\")", "self.breakpointAddr = None break except: continue if not finBp: continue finBp.hit() #todo -", "new process and connects the debugger to it :param String cmd: value containing", "#Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return", "the gdb response unparsed \"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print", "before it is inserted :param pattern: the pattern to identify the breakpoint :return:", "containing the gdb response unparsed \"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0])", "finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line) if line[0] == \"c\" or \"continue\"", "- True if the Thread is running \"\"\" #Todo -- check this try:", "as e: print(\"Exception during code execution: \" + str(line)) print(str(e)) for line in", "\"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running():", "pattern: the pattern to identify the breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self,", "response 
unparsed \"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while", "the process mapping here (Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name)", "gdb to an existing program instance instead of spawning the program :param interactive:", "offset between Ghidra and the process mapping here (Because of ...) imageBase =", "print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp in self.client.breakpoints:", "None self.currRet = None self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint", "user can interact with. Default: True :param startCommands: Sting - Initial GDB Commands", "bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the number of the breakpoint (in", "== \"WAITBP\": if \"Breakpoint\" in line: for part in line.split(\" \"): if \"0x\"", "where the fifo will be created :return: None \"\"\" print(\"setting up fifo now:", "optional - open a regular GDB Window which the user can interact with.", "attaches the gdb to an existing program instance instead of spawning the program", "self.runtimeAnalysisNonBlock() #we need to calculate the offset between Ghidra and the process mapping", "None self.process = process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e:", "ret.split(\" \") parse = False number = 0 for part in parts: if", "> int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def run(self,", "procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry function that", "here (Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset ==", "True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse", "read from :return: None \"\"\" while True: #time.sleep(0.05) line = fifo.readline() if len(line)", "create all the functions, breakpoints and classes from the Ghidra Code/Comments :param funcs:", "\" + str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo = fifo print(\"fiifo opened\")", "open a regular GDB Window which the user can interact with. Default: True", "calculate the offset between Ghidra and the process mapping here (Because of ...)", "self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\":", "self.process = process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e: print(e)", "the gdb command :param strip: Boolean, optional - remove the EOF delimiter automatically(this", "spawning the program :param interactive: interactive: Boolean, optional - open a regular GDB", "the user can interact with. 
Default: True :param startCommands: Sting - Initial GDB", "self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set logging on", "fifo: the fifo object to read from :return: None \"\"\" while True: #time.sleep(0.05)", "are executed before the program starts :return: None \"\"\" #connect reader thread to", "__init__(self, process=None): self.fifo = None self.process = process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO)", "executable with \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process =", "self.parserMode == \"GETDAT\": self.currRet = self.currRet + line + \"\\n\" if \"ggdb__EOF\" in", "the different columns arr = ln.split(\" \") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr)", ":param funcs: A list of functions which are to be analyzed :return: None", "excAndGet(self, exc, strip=True): \"\"\"This function executes a command within the gdb session :param", "be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession", "def __init__(self, process=None): self.fifo = None self.process = process self.FIFO = \"/tmp/gdbPipe\" try:", ":return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function executes a command", "fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking", "start Address of the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\")", "it is inserted :param pattern: the pattern to identify the breakpoint :return: None", "to install the other breakpoints - then continue ... 
while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5)", "+ line + \"\\n\" if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def setupFifo(self,", "True :param String startCommands: optional - Initial GDB Commands which are executed before", "def runtimeAnalysis(self): \"\"\"This function runs arbitrary code in either python or GDB everytime", "= line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\" in line: for part", "= \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if", "mapping the main executable for line in procMappings.split(\"\\n\"): if procName in line: ln", "array, containing the different columns arr = ln.split(\" \") if len(arr[0]) < 2:", "import GhidraCommandClient class GhidraGdb: \"\"\"The main class which encapsulates the whole GhidraGdb framework", "in line.split(\" \"): if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address:", "which are executed before the program starts :param String args: - Arguments to", "= False for line in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2 in", "self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return", "+ str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode :return: None", "\"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, '''", "= number print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True", "= fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in", "Name of the mapping :return: The start Address of the mapped space \"\"\"", "+ str(args) + \"\\n\" + startCommands, api=True) self.gdb = self.process.gdb #self if interactive:", "offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp in self.client.breakpoints: skip", "some cases) - default: True :return: String value containing the gdb response unparsed", "strip: return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo): \"\"\"read the ouput of", "if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False except Exception as e: return True", "\" + self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet = self.currRet + line +", "analyze(self, funcs): \"\"\"Analyze the Ghidra project - this command will create all the", "now: \" + str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo = fifo print(\"fiifo", "exc: String value containing the gdb command :param strip: Boolean, optional - remove", "function \"setupFifo\" in None-blocking mode :param FIFO: The filename where the fifo will", "into single whitespaces while \" \" in ln: ln = ln.replace(\" \", \"", "your executable :param Boolean interactive: optional - open a regular GDB Window which", "mappings from gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps = [] #get and", "import * import sys import os from pathlib import Path from threading import", "...) 
imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process,", "user can interact with GDB as usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start()", "(Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0:", "before the program starts :param String args: - Arguments to start the executable", "offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def", "threading import Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main class which", "the function \"setupFifo\" in None-blocking mode :param FIFO: The filename where the fifo", ":param String args: - Arguments to start the executable with \"\"\" #connect reader", "interactive: Boolean, optional - open a regular GDB Window which the user can", "self.gdb = gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set logging on ''' +", "mapping\") #get the proc mappings from gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps", "of spawning the program :param interactive: interactive: Boolean, optional - open a regular", "String startCommands: optional - Initial GDB Commands which are executed before the program", ":param String startCommands: optional - Initial GDB Commands which are executed before the", "GDB Window which the user can interact with. Default: True :param startCommands: Sting", "None \"\"\" print(\"setting up fifo now: \" + str(FIFO)) with open(FIFO, 'r') as", "from pwn import * import sys import os from pathlib import Path from", "gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set logging on ''' + startCommands, api=True)", "check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else:", "\"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e: print(e) if not \"File exists\" in", "False number = 0 for part in parts: if parse: try: number =", "can interact with. 
Default: True :param startCommands: Sting - Initial GDB Commands which", "False print(\"Found proc offset: \" + str(procOffset)) #calculate final dynamic offset self.procOffset =", "within the gdb session :param exc: String value containing the gdb command :param", "a breakpoint before it is inserted :param pattern: the pattern to identify the", "self.excAndGet(str(bp.setup)) #we parse the number of the breakpoint (in gdb) parts = ret.split(\"", "\"\"\"Get the Proc Offset of a particular mapping :param procName: String value containing", "\" \" in ln: ln = ln.replace(\" \", \" \") #create an array,", "for bp in self.client.breakpoints: skip = False for line in bp.pyExc.split(\"\\n\"): for line2", "number = int(part) except: pass if \"Breakpoint\" in part: parse = True bp.number", "try: number = int(part) except: pass if \"Breakpoint\" in part: parse = True", "\" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\"", "to identify the breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This", "True :param startCommands: Sting - Initial GDB Commands which are executed before the", "except Exception as e: print(e) if not \"File exists\" in str(e): print(\"sys.exit\") return", "mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the proc mappings", "is hit :return: None \"\"\" #the first breakpoint has to install the other", ":return: None \"\"\" #the first breakpoint has to install the other breakpoints -", "= 0 procStartAddresss = 0 for i, map in enumerate(proc_maps): if i ==", "object to read from :return: None \"\"\" while True: #time.sleep(0.05) line = fifo.readline()", "startCommands, api=True) self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to", "arr = ln.split(\" \") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get the", "or \"continue\" in line: finBp.deactivate() except Exception as e: print(\"Error in GDB execution", "of a particular mapping :param procName: String value containing the Name of the", "bp.number = number print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process,", "args: - Arguments to start the executable with \"\"\" #connect reader thread to", "number = 0 for part in parts: if parse: try: number = int(part)", "number print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def", "the ouput of the gdbPipe te receive the data :param fifo: the fifo", "if len(line) > 2: line = line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if", "fifo now: \" + str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo = fifo", "for i, map in enumerate(proc_maps): if i == 0 or offset > int(map[3].split(\"x\")[1],16)", "between Ghidra and the process mapping here (Because of ...) 
imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\")", "+ str(procOffset)) #calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset:", "cmd: value containing the path to your executable :param Boolean interactive: optional -", "to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging file /tmp/gdbPipe", "the data :param fifo: the fifo object to read from :return: None \"\"\"", "= self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the offset", "- Arguments to start the executable with \"\"\" #connect reader thread to read", "- int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp", "comming from the gdb :param FIFO: The filename where the fifo will be", "the whole GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo = None self.process =", "breakpoint is hit :return: None \"\"\" #the first breakpoint has to install the", "ln.replace(\" \", \" \") #create an array, containing the different columns arr =", "#print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False except", "self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the proc mappings from gdb procMappings =", "while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the proc mappings from gdb procMappings", "reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set", "\" in ln: ln = ln.replace(\" \", \" \") #create an array, containing", "line.split(\" \"): if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \"", "\"\"\"This function runs arbitrary code in either python or GDB everytime a breakpoint", "setupFifo(self, FIFO): \"\"\"Create the Fifo which is used to read the data comming", "executable :param Boolean interactive: optional - open a regular GDB Window which the", "- this has to be in parallel for line in finBp.pyExc.split(\"\\n\"): if len(line)", "pwn import * import sys import os from pathlib import Path from threading", "encapsulates the whole GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo = None self.process", "#get and format the memory mappings which are mapping the main executable for", "bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr = None", "Ghidra project - this command will create all the functions, breakpoints and classes", "read the data comming from the gdb :param FIFO: The filename where the", "in None-blocking mode :param FIFO: The filename where the fifo will be created", "try: self.gdb.execute(line) if line[0] == \"c\" or \"continue\" in line: finBp.deactivate() except Exception", "pattern to identify the breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True):", "unparsed \"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while 
self.parserMode", "#print(\"found Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet = self.currRet", "self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set logging on starti'''", "import os from pathlib import Path from threading import Thread from clients.GhidraCommandClient import", "Code/Comments :param funcs: A list of functions which are to be analyzed :return:", "which are executed before the program starts :return: None \"\"\" #connect reader thread", "data :param fifo: the fifo object to read from :return: None \"\"\" while", "if i == 0 or offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss", "str(args) + \"\\n\" + startCommands, api=True) self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive()", "str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for", "= [] #get and format the memory mappings which are mapping the main", "the lowest Start Address offset = 0 procStartAddresss = 0 for i, map", "finBp.deactivate() except Exception as e: print(\"Error in GDB execution of:\" + str(line)) print(\"Exception:", "BP SETUP\") for bp in self.client.breakpoints: skip = False for line in bp.pyExc.split(\"\\n\"):", "gdb command :param strip: Boolean, optional - remove the EOF delimiter automatically(this might", "the Name of the mapping :return: The start Address of the mapped space", "mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is running ...", "#the first breakpoint has to install the other breakpoints - then continue ...", "\" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp in self.client.breakpoints: skip =", "\"\"\" #the first breakpoint has to install the other breakpoints - then continue", "None \"\"\" while True: #time.sleep(0.05) line = fifo.readline() if len(line) > 2: line", "line[0] == \"c\" or \"continue\" in line: finBp.deactivate() except Exception as e: print(\"Error", "procName: String value containing the Name of the mapping :return: The start Address", "\"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check", "\"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\" in line: for part in line.split(\"", "the memory mappings which are mapping the main executable for line in procMappings.split(\"\\n\"):", "self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the offset between Ghidra and the process", "None self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it is", "#print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False except Exception", "process mapping here (Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if", "len(line) > 2: line = line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\"", "break except: continue if not finBp: continue finBp.hit() #todo - this has to", "gdb hits breakpoint ...) 
def checkThreadRunning(self): \"\"\"check if the current GDB Thread is", "def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis,", "bp self.breakpointAddr = None break except: continue if not finBp: continue finBp.hit() #todo", "print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None", "mapping :param procName: String value containing the Name of the mapping :return: The", "time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if self.breakpointAddr: #print(\"breakpoint hit\") for", "e: print(\"Error in GDB execution of:\" + str(line)) print(\"Exception: \" + str(e)) def", "True :return: String value containing the gdb response unparsed \"\"\" self.currRet = \"\"", "'runtimeAnalysis' in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread", "\"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr) elif", "== \"GETDAT\": self.currRet = self.currRet + line + \"\\n\" if \"ggdb__EOF\" in line:", "Ghidra and the process mapping here (Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset", "startCommands: Sting - Initial GDB Commands which are executed before the program starts", "to your executable :param Boolean interactive: optional - open a regular GDB Window", "thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set logging", "where the fifo will be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def", "the Proc Offset of a particular mapping :param procName: String value containing the", "... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while", "install the other breakpoints - then continue ... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\")", "except Exception as e: print(\"Exception during code execution: \" + str(line)) print(str(e)) for", "which are to be analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function", "except: continue if not finBp: continue finBp.hit() #todo - this has to be", "GDB everytime a breakpoint is hit :return: None \"\"\" #the first breakpoint has", "or offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss", "print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self,", "None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is running ... 
(if gdb", "used to read the data comming from the gdb :param FIFO: The filename", "#print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False except Exception as", "= int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"):", "if self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp", "len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get the lowest Start Address offset =", "\"): if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \" +", "self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP", "self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr = None break except:", "delimiter automatically(this might create issues in some cases) - default: True :return: String", "\" \") #create an array, containing the different columns arr = ln.split(\" \")", "the pattern to identify the breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc,", "in some cases) - default: True :return: String value containing the gdb response", "ln.split(\" \") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get the lowest Start", "= False number = 0 for part in parts: if parse: try: number", "continue if not finBp: continue finBp.hit() #todo - this has to be in", "procStartAddresss = 0 for i, map in enumerate(proc_maps): if i == 0 or", "proc_maps.append(arr) ## get the lowest Start Address offset = 0 procStartAddresss = 0", "the user can interact with. Default: True :param String startCommands: optional - Initial", "self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the offset between", "int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp in", "be analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary code", "... (if gdb hits breakpoint ...) def checkThreadRunning(self): \"\"\"check if the current GDB", "the fifo will be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self):", "of ...) 
imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0: return", "has to be in parallel for line in finBp.pyExc.split(\"\\n\"): if len(line) > 1:", "containing the Name of the mapping :return: The start Address of the mapped", "0 procStartAddresss = 0 for i, map in enumerate(proc_maps): if i == 0", "not \"File exists\" in str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode = None", "while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if self.breakpointAddr: #print(\"breakpoint", "len(line) > 1: try: finBp.exec_(line) except Exception as e: print(\"Exception during code execution:", "self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False except Exception as e: return", "= int(part) except: pass if \"Breakpoint\" in part: parse = True bp.number =", "funcs: A list of functions which are to be analyzed :return: None \"\"\"", "self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None self.currRet = None self.removals", "finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line) except Exception as e: print(\"Exception during", "\"\"\"The main class which encapsulates the whole GhidraGdb framework \"\"\" def __init__(self, process=None):", "breakpoints - then continue ... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\"", "an array, containing the different columns arr = ln.split(\" \") if len(arr[0]) <", "as e: print(e) if not \"File exists\" in str(e): print(\"sys.exit\") return self.client =", "1: try: finBp.exec_(line) except Exception as e: print(\"Exception during code execution: \" +", "startCommands=\"\"): \"\"\" Deprecated - attaches the gdb to an existing program instance instead", ":param fifo: the fifo object to read from :return: None \"\"\" while True:", "runtimeAnalysis(self): \"\"\"This function runs arbitrary code in either python or GDB everytime a", ":param pattern: the pattern to identify the breakpoint :return: None \"\"\" self.removals.append(pattern) def", "Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking mode :param FIFO: The filename where", "FIFO: The filename where the fifo will be created :return: None \"\"\" print(\"setting", "int(part) except: pass if \"Breakpoint\" in part: parse = True bp.number = number", "Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc Offset of", "return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo): \"\"\"read the ouput of the", "parse: try: number = int(part) except: pass if \"Breakpoint\" in part: parse =", "gdbPipe te receive the data :param fifo: the fifo object to read from", "executable for line in procMappings.split(\"\\n\"): if procName in line: ln = line.replace(\"\\t\", \"", "procStartAddresss = map[0] return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is", "continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the number of", "= self.excAndGet(str(bp.setup)) #we parse the number of the breakpoint (in gdb) parts =", "\"GETDAT\" 
self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0]", "time.sleep(0.05) finBp = None try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints:", "+ self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet = self.currRet + line + \"\\n\"", "#todo - this has to be in parallel for line in finBp.pyExc.split(\"\\n\"): if", "int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def run(self, cmd,", "Path from threading import Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main", "self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the", "String cmd: value containing the path to your executable :param Boolean interactive: optional", "which the user can interact with. Default: True :param startCommands: Sting - Initial", "receive the data :param fifo: the fifo object to read from :return: None", "logging on starti''' + str(args) + \"\\n\" + startCommands, api=True) self.gdb = self.process.gdb", "arbitrary code in either python or GDB everytime a breakpoint is hit :return:", "ln = line.replace(\"\\t\", \" \") #turn multiple whitespaces into single whitespaces while \"", "\"Breakpoint\" in line: for part in line.split(\" \"): if \"0x\" in part: self.breakpointAddr", "proc_maps = [] #get and format the memory mappings which are mapping the", "Address of the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get", "containing the different columns arr = ln.split(\" \") if len(arr[0]) < 2: arr.pop(0)", "in self.breakpointAddr: finBp = bp self.breakpointAddr = None break except: continue if not", "if not finBp: continue finBp.hit() #todo - this has to be in parallel", "GDB as usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get", "str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def", "#time.sleep(0.05) line = fifo.readline() if len(line) > 2: line = line.replace(\"\\n\", \"\") if", "the fifo object to read from :return: None \"\"\" while True: #time.sleep(0.05) line", "gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set logging on starti''' + str(args) +", "functions which are to be analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This", "\"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if self.breakpointAddr:", "\", \" \") #create an array, containing the different columns arr = ln.split(\"", "the mapping :return: The start Address of the mapped space \"\"\" while self.checkThreadRunning():", "True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if self.breakpointAddr: #print(\"breakpoint hit\")", "Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet = self.currRet +", "as an interactive shell(the user can interact with GDB as usual) - Non-blocking", "running :return: Boolean - True if the Thread is running \"\"\" #Todo --", "None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the 
GdbSession as an interactive", "process and connects the debugger to it :param String cmd: value containing the", "def excAndGet(self, exc, strip=True): \"\"\"This function executes a command within the gdb session", "= part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet", "interact with GDB as usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self,", "created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession as", "the breakpoint (in gdb) parts = ret.split(\" \") parse = False number =", "self.currRet = None self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before", "arr.pop(0) proc_maps.append(arr) ## get the lowest Start Address offset = 0 procStartAddresss =", "== \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo):", "return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the gdb", "interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the gdb to an existing program instance", "the other breakpoints - then continue ... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode", "readFifo(self, fifo): \"\"\"read the ouput of the gdbPipe te receive the data :param", "line = line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\" in line: for", "in line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo which is", "to it :param String cmd: value containing the path to your executable :param", "''' set logging file /tmp/gdbPipe set logging on starti''' + str(args) + \"\\n\"", "False for line in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2 in line:", "self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary code in either python or GDB", "print(\"Exception during code execution: \" + str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if", "procOffset == 0: return self.process, False print(\"Found proc offset: \" + str(procOffset)) #calculate", "with. Default: True :param String startCommands: optional - Initial GDB Commands which are", "if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the", ":param String cmd: value containing the path to your executable :param Boolean interactive:", "\"\"\"This function executes a command within the gdb session :param exc: String value", "breakpoint (in gdb) parts = ret.split(\" \") parse = False number = 0", "if strip: return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo): \"\"\"read the ouput", "existing program instance instead of spawning the program :param interactive: interactive: Boolean, optional", ":param Boolean interactive: optional - open a regular GDB Window which the user", "Boolean, optional - remove the EOF delimiter automatically(this might create issues in some", "the EOF delimiter automatically(this might create issues in some cases) - default: True", "the offset between Ghidra and the process mapping here (Because of ...) 
imageBase", "optional - remove the EOF delimiter automatically(this might create issues in some cases)", "= 0 for part in parts: if parse: try: number = int(part) except:", "logging on ''' + startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs):", "if the current GDB Thread is running :return: Boolean - True if the", "#get the proc mappings from gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps =", "Address: \" + self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet = self.currRet + line", "getProcOffset(self, procName): \"\"\"Get the Proc Offset of a particular mapping :param procName: String", "+ str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated", "= bp self.breakpointAddr = None break except: continue if not finBp: continue finBp.hit()", "GDB Thread is running :return: Boolean - True if the Thread is running", "ouput of the gdbPipe te receive the data :param fifo: the fifo object", "self.currRet = self.currRet + line + \"\\n\" if \"ggdb__EOF\" in line: self.parserMode =", "interact with. Default: True :param String startCommands: optional - Initial GDB Commands which", "thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging file", "+ \"\\n\" + startCommands, api=True) self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock()", "in line: skip = True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret", "daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession as an interactive shell(the user can interact", "\") #turn multiple whitespaces into single whitespaces while \" \" in ln: ln", "ln = ln.replace(\" \", \" \") #create an array, containing the different columns", "interact with. Default: True :param startCommands: Sting - Initial GDB Commands which are", "are to be analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs", "0 or offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return", "line in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line) except Exception as e:", "= GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None self.currRet = None self.removals =", "line in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2 in line: skip =", "issues in some cases) - default: True :return: String value containing the gdb", "line in procMappings.split(\"\\n\"): if procName in line: ln = line.replace(\"\\t\", \" \") #turn", "* import sys import os from pathlib import Path from threading import Thread", "\"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc Offset of a particular mapping", "str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\") for bp in self.client.breakpoints: skip = False for", "mode :param FIFO: The filename where the fifo will be created :return: None", "- Initial GDB Commands which are executed before the program starts :return: None", "\"\"\"read the ouput of the gdbPipe te receive the data :param fifo: the", "(if gdb hits breakpoint ...) 
def checkThreadRunning(self): \"\"\"check if the current GDB Thread", "A list of functions which are to be analyzed :return: None \"\"\" self.client.analyze(funcs)", "of the breakpoint (in gdb) parts = ret.split(\" \") parse = False number", "line: skip = True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret =", "in line: finBp.deactivate() except Exception as e: print(\"Error in GDB execution of:\" +", "lowest Start Address offset = 0 procStartAddresss = 0 for i, map in", "self.process, False print(\"Found proc offset: \" + str(procOffset)) #calculate final dynamic offset self.procOffset", "Start Address offset = 0 procStartAddresss = 0 for i, map in enumerate(proc_maps):", ":param strip: Boolean, optional - remove the EOF delimiter automatically(this might create issues", "procMappings = self.excAndGet(\"i proc mappings\") proc_maps = [] #get and format the memory", "with GDB as usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName):", ":param procName: String value containing the Name of the mapping :return: The start", "line.replace(\"\\t\", \" \") #turn multiple whitespaces into single whitespaces while \" \" in", "current GDB Thread is running :return: Boolean - True if the Thread is", "Sting - Initial GDB Commands which are executed before the program starts :return:", "checkThreadRunning(self): \"\"\"check if the current GDB Thread is running :return: Boolean - True", "True bp.number = number print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return", "True: #time.sleep(0.05) line = fifo.readline() if len(line) > 2: line = line.replace(\"\\n\", \"\")", "the Ghidra project - this command will create all the functions, breakpoints and", "\"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession as an interactive shell(the", "\" \") #turn multiple whitespaces into single whitespaces while \" \" in ln:", "self.breakpointAddr: finBp = bp self.breakpointAddr = None break except: continue if not finBp:", "= fifo.readline() if len(line) > 2: line = line.replace(\"\\n\", \"\") if self.parserMode ==", "default: True :return: String value containing the gdb response unparsed \"\"\" self.currRet =", "which encapsulates the whole GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo = None", "def getProcOffset(self, procName): \"\"\"Get the Proc Offset of a particular mapping :param procName:", "## get the lowest Start Address offset = 0 procStartAddresss = 0 for", "containing the path to your executable :param Boolean interactive: optional - open a", "self.currRet + line + \"\\n\" if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def", "finBp = None try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints: if", "...) 
def checkThreadRunning(self): \"\"\"check if the current GDB Thread is running :return: Boolean", "self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function executes a command within the gdb", "fifo will be created :return: None \"\"\" print(\"setting up fifo now: \" +", "- Initial GDB Commands which are executed before the program starts :param String", "the executable with \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process", "imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process, False", "+ str(line)) print(str(e)) for line in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line)", "the function 'runtimeAnalysis' in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if", "different columns arr = ln.split(\" \") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ##", "GDB Commands which are executed before the program starts :param String args: -", "main class which encapsulates the whole GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo", "command will create all the functions, breakpoints and classes from the Ghidra Code/Comments", "framework \"\"\" def __init__(self, process=None): self.fifo = None self.process = process self.FIFO =", "an existing program instance instead of spawning the program :param interactive: interactive: Boolean,", "the Thread is running \"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running())", "if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr = None break except: continue", "\"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if", "interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the offset between Ghidra and the", "hit :return: None \"\"\" #the first breakpoint has to install the other breakpoints", "sys import os from pathlib import Path from threading import Thread from clients.GhidraCommandClient", "[] #get and format the memory mappings which are mapping the main executable", "session :param exc: String value containing the gdb command :param strip: Boolean, optional", "the proc mappings from gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps = []", "dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING", "def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry function that spawns", "gdb) parts = ret.split(\" \") parse = False number = 0 for part", "setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the gdb to an existing program", "the breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function executes", "\"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is running ... 
(if gdb hits", "Exception as e: print(\"Exception during code execution: \" + str(line)) print(str(e)) for line", "\"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo): \"\"\"read", "BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the number of the breakpoint", "= [] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it is inserted :param", "\"\"\" print(\"setting up fifo now: \" + str(FIFO)) with open(FIFO, 'r') as fifo:", "if \"Breakpoint\" in part: parse = True bp.number = number print(\"return from setup:", "removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it is inserted :param pattern: the pattern", "not finBp: continue finBp.hit() #todo - this has to be in parallel for", "self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process, False print(\"Found proc", ":return: None \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb", "of the mapping :return: The start Address of the mapped space \"\"\" while", "proc mapping\") #get the proc mappings from gdb procMappings = self.excAndGet(\"i proc mappings\")", "\"\"\"Run the function \"setupFifo\" in None-blocking mode :param FIFO: The filename where the", "procName): \"\"\"Get the Proc Offset of a particular mapping :param procName: String value", "e: print(e) if not \"File exists\" in str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self)", "0: return self.process, False print(\"Found proc offset: \" + str(procOffset)) #calculate final dynamic", "offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB", "2: line = line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\" in line:", "Exception as e: print(\"Error in GDB execution of:\" + str(line)) print(\"Exception: \" +", "in str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None", "parallel for line in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line) except Exception", "program starts :return: None \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO)", "in ln: ln = ln.replace(\" \", \" \") #create an array, containing the", "to calculate the offset between Ghidra and the process mapping here (Because of", "pass if \"Breakpoint\" in part: parse = True bp.number = number print(\"return from", "api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra project -", "running \"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if", "in procMappings.split(\"\\n\"): if procName in line: ln = line.replace(\"\\t\", \" \") #turn multiple", "regular GDB Window which the user can interact with. 
Default: True :param startCommands:", "project - this command will create all the functions, breakpoints and classes from", "self.client.breakpoints: skip = False for line in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if", "None try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in", "from :return: None \"\"\" while True: #time.sleep(0.05) line = fifo.readline() if len(line) >", "memory mappings which are mapping the main executable for line in procMappings.split(\"\\n\"): if", "an interactive shell(the user can interact with GDB as usual) - Non-blocking :return:", "skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the number", "continue ... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05)", "offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\",", "\") parse = False number = 0 for part in parts: if parse:", "optional - Initial GDB Commands which are executed before the program starts :param", "> 1: try: finBp.exec_(line) except Exception as e: print(\"Exception during code execution: \"", "the main executable for line in procMappings.split(\"\\n\"): if procName in line: ln =", "opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking mode :param", "daemon=True).start() #check if current thread is running ... (if gdb hits breakpoint ...)", "try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False", "return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry function", "line = fifo.readline() if len(line) > 2: line = line.replace(\"\\n\", \"\") if self.parserMode", "GDB execution of:\" + str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the", "the program starts :return: None \"\"\" #connect reader thread to read gdb pipe", "funcs): \"\"\"Analyze the Ghidra project - this command will create all the functions,", "executes a command within the gdb session :param exc: String value containing the", "+ str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in", "Window which the user can interact with. 
Default: True :param String startCommands: optional", "breakpoints and classes from the Ghidra Code/Comments :param funcs: A list of functions", "will be created :return: None \"\"\" print(\"setting up fifo now: \" + str(FIFO))", "from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True,", "from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main class which encapsulates the whole", "True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the gdb to an", "in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode", "will create all the functions, breakpoints and classes from the Ghidra Code/Comments :param", "fifo will be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup", "analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary code in", "self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self, fifo): \"\"\"read the ouput of the gdbPipe", "\"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary code in either python or", "while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return self.currRet def", "the Fifo which is used to read the data comming from the gdb", "str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode :return: None \"\"\"", "run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry function that spawns a", "Initial GDB Commands which are executed before the program starts :return: None \"\"\"", "= None self.process = process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as", "def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it is inserted :param pattern: the", "Commands which are executed before the program starts :return: None \"\"\" #connect reader", "self.process = gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set logging on starti''' +", "Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc Offset of a particular mapping :param", "= None self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it", "import sys import os from pathlib import Path from threading import Thread from", "\"c\" or \"continue\" in line: finBp.deactivate() except Exception as e: print(\"Error in GDB", "self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra project - this command will create", "particular mapping :param procName: String value containing the Name of the mapping :return:", "up fifo now: \" + str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo =", "/tmp/gdbPipe set logging on starti''' + str(args) + \"\\n\" + startCommands, api=True) self.gdb", "The filename where the fifo will be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,),", "is running :return: Boolean - True if the Thread is running \"\"\" #Todo", "line: for part in line.split(\" \"): if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1]", "parse = False number = 0 for part in parts: if parse: try:", "reader thread to read gdb pipe 
self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging", "function executes a command within the gdb session :param exc: String value containing", "the data comming from the gdb :param FIFO: The filename where the fifo", "is the entry function that spawns a new process and connects the debugger", "self.pid, self.gdb = gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set logging on '''", "command :param strip: Boolean, optional - remove the EOF delimiter automatically(this might create", "parse = True bp.number = number print(\"return from setup: \" + str(ret)) #self.gdb.execute(str(bp.setup))", "0: try: self.gdb.execute(line) if line[0] == \"c\" or \"continue\" in line: finBp.deactivate() except", "that spawns a new process and connects the debugger to it :param String", "the gdbPipe te receive the data :param fifo: the fifo object to read", "def checkThreadRunning(self): \"\"\"check if the current GDB Thread is running :return: Boolean -", "GDB Commands which are executed before the program starts :return: None \"\"\" #connect", "if parse: try: number = int(part) except: pass if \"Breakpoint\" in part: parse", "to be analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary", "self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else:", "line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\" in line: for part in", "need to calculate the offset between Ghidra and the process mapping here (Because", "the fifo will be created :return: None \"\"\" print(\"setting up fifo now: \"", "debugger to it :param String cmd: value containing the path to your executable", ":return: Boolean - True if the Thread is running \"\"\" #Todo -- check", "= \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return", "except Exception as e: print(\"Error in GDB execution of:\" + str(line)) print(\"Exception: \"", "i, map in enumerate(proc_maps): if i == 0 or offset > int(map[3].split(\"x\")[1],16) :", "on starti''' + str(args) + \"\\n\" + startCommands, api=True) self.gdb = self.process.gdb #self", "#create an array, containing the different columns arr = ln.split(\" \") if len(arr[0])", "self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking mode :param FIFO:", "+ str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo)", "self.parserMode = None self.breakpointAddr = None self.currRet = None self.removals = [] def", "part: parse = True bp.number = number print(\"return from setup: \" + str(ret))", "= None try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1]", "Commands which are executed before the program starts :param String args: - Arguments", "execution of:\" + str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function", "gdb response unparsed \"\"\" self.currRet = \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\")", "0 for i, map in enumerate(proc_maps): if i == 0 or offset >", "None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function 
executes a command within", "from the Ghidra Code/Comments :param funcs: A list of functions which are to", "Ghidra Code/Comments :param funcs: A list of functions which are to be analyzed", "containing the gdb command :param strip: Boolean, optional - remove the EOF delimiter", "exc, strip=True): \"\"\"This function executes a command within the gdb session :param exc:", "#time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp =", "and classes from the Ghidra Code/Comments :param funcs: A list of functions which", "procMappings.split(\"\\n\"): if procName in line: ln = line.replace(\"\\t\", \" \") #turn multiple whitespaces", "in parallel for line in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line) except", "Proc Offset of a particular mapping :param procName: String value containing the Name", "Offset of a particular mapping :param procName: String value containing the Name of", "regular GDB Window which the user can interact with. Default: True :param String", "elif self.parserMode == \"GETDAT\": self.currRet = self.currRet + line + \"\\n\" if \"ggdb__EOF\"", "with open(FIFO, 'r') as fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self,", "self.gdb.execute(line) if line[0] == \"c\" or \"continue\" in line: finBp.deactivate() except Exception as", "are mapping the main executable for line in procMappings.split(\"\\n\"): if procName in line:", "current thread is running ... (if gdb hits breakpoint ...) def checkThreadRunning(self): \"\"\"check", "if \"Breakpoint\" in line: for part in line.split(\" \"): if \"0x\" in part:", ":param exc: String value containing the gdb command :param strip: Boolean, optional -", "self.currRet def readFifo(self, fifo): \"\"\"read the ouput of the gdbPipe te receive the", "if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr)", "0 for part in parts: if parse: try: number = int(part) except: pass", "function 'runtimeAnalysis' in Non-blocking mode :return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current", "running ... (if gdb hits breakpoint ...) def checkThreadRunning(self): \"\"\"check if the current", "other breakpoints - then continue ... 
while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode =", "class which encapsulates the whole GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo =", "try: finBp.exec_(line) except Exception as e: print(\"Exception during code execution: \" + str(line))", "\"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo which is used to read the", "def analyze(self, funcs): \"\"\"Analyze the Ghidra project - this command will create all", "automatically(this might create issues in some cases) - default: True :return: String value", "if not \"File exists\" in str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode =", "while True: #time.sleep(0.05) line = fifo.readline() if len(line) > 2: line = line.replace(\"\\n\",", "def setupGdbInteractive(self): \"\"\"Setup the GdbSession as an interactive shell(the user can interact with", "self.parserMode == \"WAITBP\": if \"Breakpoint\" in line: for part in line.split(\" \"): if", "enumerate(proc_maps): if i == 0 or offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16)", "are executed before the program starts :param String args: - Arguments to start", "int(map[3].split(\"x\")[1],16) procStartAddresss = map[0] return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This", "offset: \" + str(procOffset)) #calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16)))", "python or GDB everytime a breakpoint is hit :return: None \"\"\" #the first", ":return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession as an", "in either python or GDB everytime a breakpoint is hit :return: None \"\"\"", "= str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" + str(self.procOffset)) print(\"EXECUTING GDB BP SETUP\")", "then continue ... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True:", ":return: The start Address of the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting", "in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line) except Exception as e: print(\"Exception", "skip = False for line in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2", "#check if current thread is running ... (if gdb hits breakpoint ...) 
def", "\"\\n\" + startCommands, api=True) self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we", "if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the offset between Ghidra and", "in line: ln = line.replace(\"\\t\", \" \") #turn multiple whitespaces into single whitespaces", "in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr = None break", "- remove the EOF delimiter automatically(this might create issues in some cases) -", "self.excAndGet(\"i proc mappings\") proc_maps = [] #get and format the memory mappings which", "\"\"\"Setup the GdbSession as an interactive shell(the user can interact with GDB as", "#we parse the number of the breakpoint (in gdb) parts = ret.split(\" \")", "this command will create all the functions, breakpoints and classes from the Ghidra", "list of functions which are to be analyzed :return: None \"\"\" self.client.analyze(funcs) def", "this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return", "> 2: line = line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\": if \"Breakpoint\" in", "import Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main class which encapsulates", "strip=True): \"\"\"This function executes a command within the gdb session :param exc: String", "proc offset: \" + str(procOffset)) #calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) -", "parts: if parse: try: number = int(part) except: pass if \"Breakpoint\" in part:", "bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr = None break except: continue if", "starti''' + str(args) + \"\\n\" + startCommands, api=True) self.gdb = self.process.gdb #self if", "< 2: arr.pop(0) proc_maps.append(arr) ## get the lowest Start Address offset = 0", "+ \"\\n\" if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create", "String value containing the gdb command :param strip: Boolean, optional - remove the", "GdbSession as an interactive shell(the user can interact with GDB as usual) -", "for line in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line) except Exception as", "> 0: try: self.gdb.execute(line) if line[0] == \"c\" or \"continue\" in line: finBp.deactivate()", "before the program starts :return: None \"\"\" #connect reader thread to read gdb", "- this command will create all the functions, breakpoints and classes from the", "interactive: interactive: Boolean, optional - open a regular GDB Window which the user", "self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05)", "command within the gdb session :param exc: String value containing the gdb command", "fifo): \"\"\"read the ouput of the gdbPipe te receive the data :param fifo:", "self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process, False print(\"Found proc offset: \" +", "\"\"\"This is the entry function that spawns a new process and connects the", "str(e): 
print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None self.currRet", "in line: for part in line.split(\" \"): if \"0x\" in part: self.breakpointAddr =", "print(e) if not \"File exists\" in str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode", "from the gdb :param FIFO: The filename where the fifo will be created", "mappings which are mapping the main executable for line in procMappings.split(\"\\n\"): if procName", "for line2 in self.removals: if line2 in line: skip = True if skip:", "interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry function that spawns a new process", "time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp", "None \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb =", "the functions, breakpoints and classes from the Ghidra Code/Comments :param funcs: A list", "map[0] return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the entry", "connects the debugger to it :param String cmd: value containing the path to", "logging file /tmp/gdbPipe set logging on starti''' + str(args) + \"\\n\" + startCommands,", "process=None): self.fifo = None self.process = process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except", "finBp: continue finBp.hit() #todo - this has to be in parallel for line", "os.mkfifo(self.FIFO) except Exception as e: print(e) if not \"File exists\" in str(e): print(\"sys.exit\")", ":return: String value containing the gdb response unparsed \"\"\" self.currRet = \"\" self.parserMode", "as usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the", "multiple whitespaces into single whitespaces while \" \" in ln: ln = ln.replace(\"", "args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession as an interactive shell(the user can", "= ln.split(\" \") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get the lowest", "fifo.readline() if len(line) > 2: line = line.replace(\"\\n\", \"\") if self.parserMode == \"WAITBP\":", "if procOffset == 0: return self.process, False print(\"Found proc offset: \" + str(procOffset))", "for line in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2 in line: skip", "while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning():", "logging file /tmp/gdbPipe set logging on ''' + startCommands, api=True) if interactive: self.setupGdbInteractive()", "= self.excAndGet(\"i proc mappings\") proc_maps = [] #get and format the memory mappings", "gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set", "print(\"Found proc offset: \" + str(procOffset)) #calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16)", "if line[0] == \"c\" or \"continue\" in line: finBp.deactivate() except Exception as e:", "value containing the Name of the mapping :return: The start Address of the", "'r') as fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run", "if len(line) > 0: try: self.gdb.execute(line) if line[0] == 
\"c\" or \"continue\" in", "is used to read the data comming from the gdb :param FIFO: The", "= \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo which is used to read", "set logging on starti''' + str(args) + \"\\n\" + startCommands, api=True) self.gdb =", "\\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return self.currRet", "\"GETDAT\": self.currRet = self.currRet + line + \"\\n\" if \"ggdb__EOF\" in line: self.parserMode", "classes from the Ghidra Code/Comments :param funcs: A list of functions which are", "args=\"\"): \"\"\"This is the entry function that spawns a new process and connects", "skip = True if skip: continue print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup))", "setupGdbInteractive(self): \"\"\"Setup the GdbSession as an interactive shell(the user can interact with GDB", "self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra project - this command will", "map in enumerate(proc_maps): if i == 0 or offset > int(map[3].split(\"x\")[1],16) : offset", "the Ghidra Code/Comments :param funcs: A list of functions which are to be", "SETUP\") for bp in self.client.breakpoints: skip = False for line in bp.pyExc.split(\"\\n\"): for", "whole GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo = None self.process = process", "= process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e: print(e) if", "spawns a new process and connects the debugger to it :param String cmd:", "hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr", "str(ret)) #self.gdb.execute(str(bp.setup)) self.gdb.execute(str(\"continue\")) return self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated -", ":return: None \"\"\" Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is running ... (if", "for line in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line) if line[0] ==", "get the lowest Start Address offset = 0 procStartAddresss = 0 for i,", "Default: True :param String startCommands: optional - Initial GDB Commands which are executed", "program :param interactive: interactive: Boolean, optional - open a regular GDB Window which", "/tmp/gdbPipe set logging on ''' + startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def", "- open a regular GDB Window which the user can interact with. Default:", "True if the Thread is running \"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads())", "-- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True", "a command within the gdb session :param exc: String value containing the gdb", "print(\"getting proc mapping\") #get the proc mappings from gdb procMappings = self.excAndGet(\"i proc", "= 0 for i, map in enumerate(proc_maps): if i == 0 or offset", "line + \"\\n\" if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO):", "is running ... (if gdb hits breakpoint ...) 
def checkThreadRunning(self): \"\"\"check if the", "Window which the user can interact with. Default: True :param startCommands: Sting -", "GhidraGdb: \"\"\"The main class which encapsulates the whole GhidraGdb framework \"\"\" def __init__(self,", "print(\"ADDING BP\") bp.rebuiltWithOffset(self.procOffset) bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the number of the", "i == 0 or offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss =", "Boolean interactive: optional - open a regular GDB Window which the user can", ":return: None \"\"\" while True: #time.sleep(0.05) line = fifo.readline() if len(line) > 2:", "and the process mapping here (Because of ...) imageBase = self.client.br.remote_eval(\"str(getState().getCurrentProgram().getAddressMap().getImageBase())\") procOffset =", "''' + startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the", "first breakpoint has to install the other breakpoints - then continue ... while", "the GdbSession as an interactive shell(the user can interact with GDB as usual)", "\"\"\"check if the current GDB Thread is running :return: Boolean - True if", "- Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc Offset", "try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr:", "start the executable with \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO)", "entry function that spawns a new process and connects the debugger to it", "in bp.pyExc.split(\"\\n\"): for line2 in self.removals: if line2 in line: skip = True", "instead of spawning the program :param interactive: interactive: Boolean, optional - open a", "path to your executable :param Boolean interactive: optional - open a regular GDB", "finBp.exec_(line) except Exception as e: print(\"Exception during code execution: \" + str(line)) print(str(e))", ":return: None \"\"\" print(\"setting up fifo now: \" + str(FIFO)) with open(FIFO, 'r')", "len(line) > 0: try: self.gdb.execute(line) if line[0] == \"c\" or \"continue\" in line:", "\"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the proc mappings from gdb", "if procName in line: ln = line.replace(\"\\t\", \" \") #turn multiple whitespaces into", "api=True) self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate", "the program :param interactive: interactive: Boolean, optional - open a regular GDB Window", "= gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set logging on ''' + startCommands,", "None-blocking mode :param FIFO: The filename where the fifo will be created :return:", "None \"\"\" #the first breakpoint has to install the other breakpoints - then", "self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01) if strip:", "GhidraGdb framework \"\"\" def __init__(self, process=None): self.fifo = None self.process = process self.FIFO", "exists\" in str(e): print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr =", "setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking mode :param FIFO: 
The filename", "class GhidraGdb: \"\"\"The main class which encapsulates the whole GhidraGdb framework \"\"\" def", "which is used to read the data comming from the gdb :param FIFO:", "#we need to calculate the offset between Ghidra and the process mapping here", "format the memory mappings which are mapping the main executable for line in", "remove the EOF delimiter automatically(this might create issues in some cases) - default:", "might create issues in some cases) - default: True :return: String value containing", "whitespaces into single whitespaces while \" \" in ln: ln = ln.replace(\" \",", "be in parallel for line in finBp.pyExc.split(\"\\n\"): if len(line) > 1: try: finBp.exec_(line)", "\"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function executes a command within the", "\"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo which", "has to install the other breakpoints - then continue ... while self.checkThreadRunning(): time.sleep(0.05)", "The filename where the fifo will be created :return: None \"\"\" print(\"setting up", "whitespaces while \" \" in ln: ln = ln.replace(\" \", \" \") #create", "self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp in", "will be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the", "in part: parse = True bp.number = number print(\"return from setup: \" +", "pattern): \"\"\"Removes a breakpoint before it is inserted :param pattern: the pattern to", "fifo: self.fifo = fifo print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function", "a regular GDB Window which the user can interact with. 
Default: True :param", "read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set", "of:\" + str(line)) print(\"Exception: \" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis'", "== \"c\" or \"continue\" in line: finBp.deactivate() except Exception as e: print(\"Error in", "is inserted :param pattern: the pattern to identify the breakpoint :return: None \"\"\"", "starts :return: None \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid,", "value containing the path to your executable :param Boolean interactive: optional - open", "\" + str(e)) def runtimeAnalysisNonBlock(self): \"\"\"Run the function 'runtimeAnalysis' in Non-blocking mode :return:", "def readFifo(self, fifo): \"\"\"read the ouput of the gdbPipe te receive the data", "set logging on ''' + startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self,", "(in gdb) parts = ret.split(\" \") parse = False number = 0 for", "= \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception as e: print(e) if not \"File exists\"", "#if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running(): if self.gdb.conn.root.gdb.selected_thread().is_running(): return True else: return False except Exception as e:", "of the gdbPipe te receive the data :param fifo: the fifo object to", "data comming from the gdb :param FIFO: The filename where the fifo will", "all the functions, breakpoints and classes from the Ghidra Code/Comments :param funcs: A", ":param startCommands: Sting - Initial GDB Commands which are executed before the program", "= \"\" self.parserMode = \"GETDAT\" self.gdb.execute(exc.split(\"\\n\")[0]) self.gdb.execute(\"print \\\"ggdb__EOF\\\"\") while self.parserMode == \"GETDAT\": time.sleep(0.01)", "to read the data comming from the gdb :param FIFO: The filename where", "the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the proc", "function that spawns a new process and connects the debugger to it :param", "read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set logging file /tmp/gdbPipe", "runs arbitrary code in either python or GDB everytime a breakpoint is hit", "and connects the debugger to it :param String cmd: value containing the path", "bp in self.client.breakpoints: skip = False for line in bp.pyExc.split(\"\\n\"): for line2 in", "value containing the gdb command :param strip: Boolean, optional - remove the EOF", "os from pathlib import Path from threading import Thread from clients.GhidraCommandClient import GhidraCommandClient", "= map[0] return procStartAddresss def run(self, cmd, interactive=True, startCommands=\"\", args=\"\"): \"\"\"This is the", "Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start() def setupGdbInteractive(self): \"\"\"Setup the GdbSession as an interactive shell(the user", "\"\"\"Removes a breakpoint before it is inserted :param pattern: the pattern to identify", "with \"\"\" #connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.process = gdb.debug(cmd,", "shell(the user can interact with GDB as usual) - Non-blocking :return: None \"\"\"", "\" + str(procOffset)) #calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final", "in self.removals: if line2 in line: skip = True if skip: continue 
print(\"ADDING", "as e: print(\"Error in GDB execution of:\" + str(line)) print(\"Exception: \" + str(e))", "from pathlib import Path from threading import Thread from clients.GhidraCommandClient import GhidraCommandClient class", "pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, ''' set logging file /tmp/gdbPipe set logging", "line: finBp.deactivate() except Exception as e: print(\"Error in GDB execution of:\" + str(line))", "starts :param String args: - Arguments to start the executable with \"\"\" #connect", "space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc mapping\") #get the proc mappings from", "Thread is running \"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if", "print(\"setting up fifo now: \" + str(FIFO)) with open(FIFO, 'r') as fifo: self.fifo", "executed before the program starts :param String args: - Arguments to start the", "self.fifo = None self.process = process self.FIFO = \"/tmp/gdbPipe\" try: os.mkfifo(self.FIFO) except Exception", "- then continue ... while self.checkThreadRunning(): time.sleep(0.05) #time.sleep(5) print(\"CONTINUE\") self.parserMode = \"WAITBP\" while", "The start Address of the mapped space \"\"\" while self.checkThreadRunning(): time.sleep(0.05) print(\"getting proc", "filename where the fifo will be created :return: None \"\"\" Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start()", "if current thread is running ... (if gdb hits breakpoint ...) def checkThreadRunning(self):", "this has to be in parallel for line in finBp.pyExc.split(\"\\n\"): if len(line) >", "interactive: optional - open a regular GDB Window which the user can interact", "be created :return: None \"\"\" print(\"setting up fifo now: \" + str(FIFO)) with", ":param FIFO: The filename where the fifo will be created :return: None \"\"\"", "Boolean, optional - open a regular GDB Window which the user can interact", "proc mappings from gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps = [] #get", "if \"ggdb__EOF\" in line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo", "line: self.parserMode = \"WAITBP\" def setupFifo(self, FIFO): \"\"\"Create the Fifo which is used", "part in line.split(\" \"): if \"0x\" in part: self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint", "\"\"\" while True: #time.sleep(0.05) line = fifo.readline() if len(line) > 2: line =", "final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" + str(self.procOffset))", "print(\"fiifo opened\") self.readFifo(fifo) def setupFifoNonBlock(self, Fifo): \"\"\"Run the function \"setupFifo\" in None-blocking mode", "\"setupFifo\" in None-blocking mode :param FIFO: The filename where the fifo will be", "line in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line) if line[0] == \"c\"", "in finBp.dbExc.split(\"\\n\"): if len(line) > 0: try: self.gdb.execute(line) if line[0] == \"c\" or", "in enumerate(proc_maps): if i == 0 or offset > int(map[3].split(\"x\")[1],16) : offset =", "breakpoint has to install the other breakpoints - then continue ... 
while self.checkThreadRunning():", "ln: ln = ln.replace(\" \", \" \") #create an array, containing the different", "Thread(target=self.runtimeAnalysis, daemon=True).start() #check if current thread is running ... (if gdb hits breakpoint", "for line in procMappings.split(\"\\n\"): if procName in line: ln = line.replace(\"\\t\", \" \")", "gdb procMappings = self.excAndGet(\"i proc mappings\") proc_maps = [] #get and format the", "procOffset = self.getProcOffset(Path(cmd).name) if procOffset == 0: return self.process, False print(\"Found proc offset:", ":return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary code in either", "clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main class which encapsulates the whole GhidraGdb", "in self.client.breakpoints: skip = False for line in bp.pyExc.split(\"\\n\"): for line2 in self.removals:", "the gdb to an existing program instance instead of spawning the program :param", "startCommands: optional - Initial GDB Commands which are executed before the program starts", "startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra project", "is running \"\"\" #Todo -- check this try: #print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads()) #print(dir(self.gdb.conn.root.gdb.InferiorThread)) #print(self.gdb.conn.root.gdb.selected_thread().is_running()) #if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running():", "#connect reader thread to read gdb pipe self.setupFifoNonBlock(self.FIFO) self.pid, self.gdb = gdb.attach(self.process, '''", "self.gdb = self.process.gdb #self if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() #we need to calculate the", "= ln.replace(\" \", \" \") #create an array, containing the different columns arr", ":return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc Offset of a", "\"\"\" def __init__(self, process=None): self.fifo = None self.process = process self.FIFO = \"/tmp/gdbPipe\"", "print(\"EXECUTING GDB BP SETUP\") for bp in self.client.breakpoints: skip = False for line", "self.process, True def setupGdb(self, interactive=True, startCommands=\"\"): \"\"\" Deprecated - attaches the gdb to", "either python or GDB everytime a breakpoint is hit :return: None \"\"\" #the", "self.parserMode == \"GETDAT\": time.sleep(0.01) if strip: return self.currRet.split(\"$\")[0] else: return self.currRet def readFifo(self,", "user can interact with. Default: True :param String startCommands: optional - Initial GDB", "can interact with. 
Default: True :param String startCommands: optional - Initial GDB Commands", "parse the number of the breakpoint (in gdb) parts = ret.split(\" \") parse", "Thread from clients.GhidraCommandClient import GhidraCommandClient class GhidraGdb: \"\"\"The main class which encapsulates the", "print(\"sys.exit\") return self.client = GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None self.currRet =", "the program starts :param String args: - Arguments to start the executable with", "= None break except: continue if not finBp: continue finBp.hit() #todo - this", "while \" \" in ln: ln = ln.replace(\" \", \" \") #create an", "''' set logging file /tmp/gdbPipe set logging on ''' + startCommands, api=True) if", "#calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \" +", "if self.parserMode == \"WAITBP\": if \"Breakpoint\" in line: for part in line.split(\" \"):", "which the user can interact with. Default: True :param String startCommands: optional -", "[] def removeBpByPattern(self, pattern): \"\"\"Removes a breakpoint before it is inserted :param pattern:", "functions, breakpoints and classes from the Ghidra Code/Comments :param funcs: A list of", "== 0 or offset > int(map[3].split(\"x\")[1],16) : offset = int(map[3].split(\"x\")[1],16) procStartAddresss = map[0]", "def setupFifo(self, FIFO): \"\"\"Create the Fifo which is used to read the data", ":param interactive: interactive: Boolean, optional - open a regular GDB Window which the", "for bp in self.client.breakpoints: if bp.address.split(\"x\")[1] in self.breakpointAddr: finBp = bp self.breakpointAddr =", "finBp.hit() #todo - this has to be in parallel for line in finBp.pyExc.split(\"\\n\"):", "\") if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get the lowest Start Address", "of functions which are to be analyzed :return: None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self):", "\"WAITBP\": if \"Breakpoint\" in line: for part in line.split(\" \"): if \"0x\" in", "the path to your executable :param Boolean interactive: optional - open a regular", "FIFO): \"\"\"Create the Fifo which is used to read the data comming from", "a new process and connects the debugger to it :param String cmd: value", "with. 
Default: True :param startCommands: Sting - Initial GDB Commands which are executed", "GhidraCommandClient(self) self.parserMode = None self.breakpointAddr = None self.currRet = None self.removals = []", "while self.checkThreadRunning(): time.sleep(0.05) finBp = None try: if self.breakpointAddr: #print(\"breakpoint hit\") for bp", "main executable for line in procMappings.split(\"\\n\"): if procName in line: ln = line.replace(\"\\t\",", "parts = ret.split(\" \") parse = False number = 0 for part in", "return self.currRet def readFifo(self, fifo): \"\"\"read the ouput of the gdbPipe te receive", "\"\"\"Create the Fifo which is used to read the data comming from the", "the number of the breakpoint (in gdb) parts = ret.split(\" \") parse =", "- attaches the gdb to an existing program instance instead of spawning the", "= line.replace(\"\\t\", \" \") #turn multiple whitespaces into single whitespaces while \" \"", "except: pass if \"Breakpoint\" in part: parse = True bp.number = number print(\"return", "= None self.breakpointAddr = None self.currRet = None self.removals = [] def removeBpByPattern(self,", "if len(arr[0]) < 2: arr.pop(0) proc_maps.append(arr) ## get the lowest Start Address offset", "identify the breakpoint :return: None \"\"\" self.removals.append(pattern) def excAndGet(self, exc, strip=True): \"\"\"This function", "program instance instead of spawning the program :param interactive: interactive: Boolean, optional -", "EOF delimiter automatically(this might create issues in some cases) - default: True :return:", "instance instead of spawning the program :param interactive: interactive: Boolean, optional - open", "interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock() def analyze(self, funcs): \"\"\"Analyze the Ghidra project - this command", "None \"\"\" self.client.analyze(funcs) def runtimeAnalysis(self): \"\"\"This function runs arbitrary code in either python", "String value containing the gdb response unparsed \"\"\" self.currRet = \"\" self.parserMode =", "= gdb.debug(cmd, ''' set logging file /tmp/gdbPipe set logging on starti''' + str(args)", "str(procOffset)) #calculate final dynamic offset self.procOffset = str(hex(int(procOffset.split(\"x\")[1],16) - int(imageBase,16))) print(\"final offset: \"", "file /tmp/gdbPipe set logging on ''' + startCommands, api=True) if interactive: self.setupGdbInteractive() self.runtimeAnalysisNonBlock()", "self.parserMode = \"WAITBP\" while True: time.sleep(0.05) while self.checkThreadRunning(): time.sleep(0.05) finBp = None try:", "everytime a breakpoint is hit :return: None \"\"\" #the first breakpoint has to", "set logging file /tmp/gdbPipe set logging on starti''' + str(args) + \"\\n\" +", "executed before the program starts :return: None \"\"\" #connect reader thread to read", "self.breakpointAddr = part.split(\"x\")[1] #print(\"found Breakpoint Address: \" + self.breakpointAddr) elif self.parserMode == \"GETDAT\":", "GDB Window which the user can interact with. 
Default: True :param String startCommands:", "or GDB everytime a breakpoint is hit :return: None \"\"\" #the first breakpoint", "= None self.currRet = None self.removals = [] def removeBpByPattern(self, pattern): \"\"\"Removes a", "bp.setHitLimit(0) ret = self.excAndGet(str(bp.setup)) #we parse the number of the breakpoint (in gdb)", "fifo object to read from :return: None \"\"\" while True: #time.sleep(0.05) line =", "usual) - Non-blocking :return: None \"\"\" Thread(target=self.process.interactive).start() def getProcOffset(self, procName): \"\"\"Get the Proc", "self.breakpointAddr) elif self.parserMode == \"GETDAT\": self.currRet = self.currRet + line + \"\\n\" if" ]
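The list above holds shingles from a GhidraGdb helper class that drives gdb through pwntools and reads gdb's console log back through a named pipe. As a readability aid, here is a minimal sketch of that mechanism reconstructed from the fragments; "./target" is a hypothetical binary name, and pwntools (pwnlib.gdb with api=True) is assumed to be available.

import os
from threading import Thread

from pwn import gdb  # assumption: pwntools >= 4.x provides gdb.debug(..., api=True)

FIFO = "/tmp/gdbPipe"  # same pipe path the fragments use

try:
    os.mkfifo(FIFO)
except FileExistsError:
    pass  # the pipe may already exist from an earlier run

def read_fifo(path):
    # Opening a FIFO for reading blocks until gdb opens it for writing,
    # which is why the fragments run this in a daemon thread.
    with open(path, "r") as fifo:
        while True:
            line = fifo.readline()
            if len(line) > 2:
                print("gdb:", line.rstrip())

Thread(target=read_fifo, args=(FIFO,), daemon=True).start()

# Start the target under gdb; the gdbscript redirects gdb's console output
# into the FIFO, and api=True exposes a gdb API object as io.gdb.
io = gdb.debug("./target",
               gdbscript="set logging file /tmp/gdbPipe\nset logging on",
               api=True)
io.gdb.execute("continue")  # the fragments issue gdb commands the same way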
[ "def run(): target = Adapter(Adaptee(\"Adaptee is adpated to Target by Adapter\")) target.print_weak() target.print_strong()", "print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is adpated to Target by Adapter\"))", "\")\") def print_asterisk(self): print(\"*\" + self.message + \"*\") class Adapter(Target): def __init__(self, adaptee):", "NotImplementedError def print_strong(self): raise NotImplementedError class Adaptee: def __init__(self, message): self.message = message", "Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk()", "def print_paren(self): print(\"(\" + self.message + \")\") def print_asterisk(self): print(\"*\" + self.message +", "def __init__(self, adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def", "adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is", "run(): target = Adapter(Adaptee(\"Adaptee is adpated to Target by Adapter\")) target.print_weak() target.print_strong() run()", "message def print_paren(self): print(\"(\" + self.message + \")\") def print_asterisk(self): print(\"*\" + self.message", "self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is adpated to Target by Adapter\")) target.print_weak()", "__init__(self, adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run():", "def print_asterisk(self): print(\"*\" + self.message + \"*\") class Adapter(Target): def __init__(self, adaptee): self.adaptee", "print(\"(\" + self.message + \")\") def print_asterisk(self): print(\"*\" + self.message + \"*\") class", "self.message + \")\") def print_asterisk(self): print(\"*\" + self.message + \"*\") class Adapter(Target): def", "def __init__(self, message): self.message = message def print_paren(self): print(\"(\" + self.message + \")\")", "self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is adpated to Target", "\"*\") class Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def", "NotImplementedError class Adaptee: def __init__(self, message): self.message = message def print_paren(self): print(\"(\" +", "class Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self):", "def print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is adpated to Target by", "def print_weak(self): raise NotImplementedError def print_strong(self): raise NotImplementedError class Adaptee: def __init__(self, message):", "+ \"*\") class Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren()", "print_asterisk(self): print(\"*\" + self.message + \"*\") class Adapter(Target): def __init__(self, adaptee): self.adaptee =", "print_paren(self): print(\"(\" + self.message + \")\") def print_asterisk(self): print(\"*\" + self.message + \"*\")", "def print_strong(self): raise NotImplementedError class Adaptee: def __init__(self, message): self.message = message def", "print(\"*\" + self.message + \"*\") class 
Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee", "class Target: def print_weak(self): raise NotImplementedError def print_strong(self): raise NotImplementedError class Adaptee: def", "self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target =", "= adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee", "message): self.message = message def print_paren(self): print(\"(\" + self.message + \")\") def print_asterisk(self):", "+ \")\") def print_asterisk(self): print(\"*\" + self.message + \"*\") class Adapter(Target): def __init__(self,", "+ self.message + \"*\") class Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee def", "raise NotImplementedError def print_strong(self): raise NotImplementedError class Adaptee: def __init__(self, message): self.message =", "self.message + \"*\") class Adapter(Target): def __init__(self, adaptee): self.adaptee = adaptee def print_weak(self):", "Target: def print_weak(self): raise NotImplementedError def print_strong(self): raise NotImplementedError class Adaptee: def __init__(self,", "+ self.message + \")\") def print_asterisk(self): print(\"*\" + self.message + \"*\") class Adapter(Target):", "class Adaptee: def __init__(self, message): self.message = message def print_paren(self): print(\"(\" + self.message", "print_weak(self): raise NotImplementedError def print_strong(self): raise NotImplementedError class Adaptee: def __init__(self, message): self.message", "print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is adpated to", "self.message = message def print_paren(self): print(\"(\" + self.message + \")\") def print_asterisk(self): print(\"*\"", "= message def print_paren(self): print(\"(\" + self.message + \")\") def print_asterisk(self): print(\"*\" +", "adaptee): self.adaptee = adaptee def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target", "print_strong(self): raise NotImplementedError class Adaptee: def __init__(self, message): self.message = message def print_paren(self):", "Adaptee: def __init__(self, message): self.message = message def print_paren(self): print(\"(\" + self.message +", "raise NotImplementedError class Adaptee: def __init__(self, message): self.message = message def print_paren(self): print(\"(\"", "def print_weak(self): self.adaptee.print_paren() def print_strong(self): self.adaptee.print_asterisk() def run(): target = Adapter(Adaptee(\"Adaptee is adpated", "__init__(self, message): self.message = message def print_paren(self): print(\"(\" + self.message + \")\") def" ]
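The shingles above overlap heavily because they are 12-word windows over one small Adapter-pattern example. Re-assembled here for readability; indentation and blank lines are assumptions, while the statements themselves (including the "adpated" typo in the message string) are taken verbatim from the fragments.

class Target:
    def print_weak(self):
        raise NotImplementedError

    def print_strong(self):
        raise NotImplementedError


class Adaptee:
    def __init__(self, message):
        self.message = message

    def print_paren(self):
        print("(" + self.message + ")")

    def print_asterisk(self):
        print("*" + self.message + "*")


class Adapter(Target):
    # Adapts the Adaptee interface (print_paren/print_asterisk)
    # to the Target interface (print_weak/print_strong).
    def __init__(self, adaptee):
        self.adaptee = adaptee

    def print_weak(self):
        self.adaptee.print_paren()

    def print_strong(self):
        self.adaptee.print_asterisk()


def run():
    target = Adapter(Adaptee("Adaptee is adpated to Target by Adapter"))
    target.print_weak()
    target.print_strong()


run()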
[ "0 # 기타 변수들, 정규식, 시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters", "ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\" class ChatCommon: #", "TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0", "RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success = False stop = False PAG", "= False hmac_auth_success = False stop = False PAG = None MAC =", "경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식, 시저 기본 문자열 등 message_regularexpression", "= ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success = False stop", "Python 3.4.3 x64, Windows 8.1 x64 파일 : ChatCommon.py 내용 : 그냥 공통적이고", "\"\"\" class ChatCommon: # 타입 지정을 위한 변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR =", "x64 파일 : ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\"", "class ChatCommon: # 타입 지정을 위한 변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR = 0", "x64, Windows 8.1 x64 파일 : ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는", "message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False", "' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success = False stop =", "8.1 x64 파일 : ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는 변수만 정리", "변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2", "쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한", "개발환경 : PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64 파일 : ChatCommon.py", "1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE =", "# 기타 변수들, 정규식, 시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters =", "False hmac_auth_success = False stop = False PAG = None MAC = None", "기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024", "기타 변수들, 정규식, 시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = '", "쓰이는 변수만 정리 \"\"\" class ChatCommon: # 타입 지정을 위한 변수... 그냥 자주", "등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG =", "0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의 메세지가 아닌", "<filename>Crypto-Nexus/ChatCommon.py \"\"\" 개발환경 : PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64 파일", "아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식, 시저 기본 문자열 등", "PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64 파일 : ChatCommon.py 내용 :", "\"\"\" 개발환경 : PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64 파일 :", ": ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\" class ChatCommon:", "ChatCommon: # 타입 지정을 위한 변수... 
그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION", "형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식, 시저 기본", "변수들, 정규식, 시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'", "caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success = False", "is_response_HMSG = False hmac_auth_success = False stop = False PAG = None MAC", ": PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64 파일 : ChatCommon.py 내용", "프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식,", "주로 쓰이는 변수만 정리 \"\"\" class ChatCommon: # 타입 지정을 위한 변수... 그냥", "3.4.3 x64, Windows 8.1 x64 파일 : ChatCommon.py 내용 : 그냥 공통적이고 주로", "위한 변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE =", "정규식, 시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER", "x64, Python 3.4.3 x64, Windows 8.1 x64 파일 : ChatCommon.py 내용 : 그냥", "그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 #", "= 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의 메세지가", "지정을 위한 변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE", "타입 지정을 위한 변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1", "변수만 정리 \"\"\" class ChatCommon: # 타입 지정을 위한 변수... 그냥 자주 쓰는거", "지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식, 시저", "# 프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들,", "= '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success", "= 1024 is_response_HMSG = False hmac_auth_success = False stop = False PAG =", "문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG", "# 타입 지정을 위한 변수... 
그냥 자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION =", "2 # 프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타", "공통적이고 주로 쓰이는 변수만 정리 \"\"\" class ChatCommon: # 타입 지정을 위한 변수...", "내용 : 그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\" class ChatCommon: # 타입", "TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의", "메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식, 시저 기본 문자열", "TYPE_OF_WRONG_MESSAGE = 0 # 기타 변수들, 정규식, 시저 기본 문자열 등 message_regularexpression =", ": 그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\" class ChatCommon: # 타입 지정을", "'\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success =", "시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))' caesar_letters = ' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER =", "정리 \"\"\" class ChatCommon: # 타입 지정을 위한 변수... 그냥 자주 쓰는거 TYPE_OF_CAESAR", "= 2 # 프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE = 0 #", "!\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' RECV_BUFFER = 1024 is_response_HMSG = False hmac_auth_success = False stop = False", "= 1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의 메세지가 아닌 경우 TYPE_OF_WRONG_MESSAGE", "파일 : ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\" class", "자주 쓰는거 TYPE_OF_CAESAR = 0 TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 # 프로그램에서", "TYPE_OF_TRANSPOSITION = 1 TYPE_OF_AFFINE = 2 # 프로그램에서 지정한 형식의 메세지가 아닌 경우", "그냥 공통적이고 주로 쓰이는 변수만 정리 \"\"\" class ChatCommon: # 타입 지정을 위한", "= 0 # 기타 변수들, 정규식, 시저 기본 문자열 등 message_regularexpression = '\\[(?P<type>[H]?[PM][AOS][GDC])\\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\\s]?(?P<msg>.+))'", "Windows 8.1 x64 파일 : ChatCommon.py 내용 : 그냥 공통적이고 주로 쓰이는 변수만", "1024 is_response_HMSG = False hmac_auth_success = False stop = False PAG = None" ]
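The ChatCommon record above is mostly constants; its comments say, roughly translated, "Dev environment: PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64; File: ChatCommon.py; Content: just collects the common, frequently used variables" (type codes, the message regular expression, the Caesar alphabet, buffer size, and flags). A small, hypothetical usage sketch of that regular expression, assuming the class can be imported as ChatCommon; the attribute and group names are verbatim from the fragments.

import re

from ChatCommon import ChatCommon  # hypothetical import; the file is Crypto-Nexus/ChatCommon.py

# "[HMSG]..." is presumably an HMAC-authenticated message; the named groups
# (type, hmac_value, msg) come from message_regularexpression.
m = re.match(ChatCommon.message_regularexpression, "[HMSG]0a1b2c3d hello there")
if m:
    print(m.group("type"))        # HMSG
    print(m.group("hmac_value"))  # 0a1b2c3d
    print(m.group("msg"))         # hello there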
[ "class HashTag: \"\"\"Represents a hash tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag is", "\"\"\"Just in case if we want to know the lenght.\"\"\" # return len(self._text)", "in case if we want to know the lenght.\"\"\" # return len(self._text) class", "tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag is a simple text string.\"\"\" self._text", "wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def", "\"\"\"This function represents a text rending in html format.\"\"\" return self._text # def", "differs from inheritance because the new functionalities are attached to that particular object", "in html format.\"\"\" return self._text # def len(self): # \"\"\"Just in case if", "def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main():", "def __init__(self, text): \"\"\"A HastTag is a simple text string.\"\"\" self._text = text", "simple text string.\"\"\" self._text = text def render(self): \"\"\"This function represents a text", "decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before:", "f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements a test example run of this", "= HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \",", "text def render(self): \"\"\"This function represents a text rending in html format.\"\"\" return", "a new features/functionalities to an object without changing its implementation. It differs from", "= text def render(self): \"\"\"This function represents a text rending in html format.\"\"\"", "self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in", "add a new features/functionalities to an object without changing its implementation. It differs", "\"\"\"The Decorator pattern is used to dynamically add a new features/functionalities to an", "<b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class", "text rending in html format.\"\"\" return self._text # def len(self): # \"\"\"Just in", "implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render())", "HastTag is a simple text string.\"\"\" self._text = text def render(self): \"\"\"This function", "= ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \", italic_and_bold_hello.render()) if __name__", "represents a text rending in html format.\"\"\" return self._text # def len(self): #", "\"\"\"This main function implements a test example run of this decorator pattern implementation\"\"\"", "features/functionalities to an object without changing its implementation. 
It differs from inheritance because", "the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def", "inheritance because the new functionalities are attached to that particular object on-demand, not", "new functionalities are attached to that particular object on-demand, not to the entire", "ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped", "__init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps", "render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def __init__(self, wrapped):", "return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self)", "render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements a test example run", "implements a test example run of this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\")", "a test example run of this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello", "on-demand, not to the entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag text.\"\"\"", "pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \",", "its implementation. It differs from inheritance because the new functionalities are attached to", "we want to know the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a", "example run of this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello)", "simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after:", "# \"\"\"Just in case if we want to know the lenght.\"\"\" # return", "to dynamically add a new features/functionalities to an object without changing its implementation.", "f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped", "to know the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in", "case if we want to know the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag):", "def len(self): # \"\"\"Just in case if we want to know the lenght.\"\"\"", "that particular object on-demand, not to the entire subclass.\"\"\" class HashTag: \"\"\"Represents a", "entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag text.\"\"\" def __init__(self, text): \"\"\"A", "is a simple text string.\"\"\" self._text = text def render(self): \"\"\"This function represents", "attached to that particular object on-demand, not to the entire subclass.\"\"\" class HashTag:", "It differs from inheritance because the new functionalities are attached to that particular", "main(): \"\"\"This main function implements a test example run of this decorator pattern", "because the new functionalities are attached to that particular object on-demand, not to", "tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def 
render(self): return", "not to the entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag text.\"\"\" def", "used to dynamically add a new features/functionalities to an object without changing its", "<reponame>s-c-23/Elements-of-Software-Design \"\"\"The Decorator pattern is used to dynamically add a new features/functionalities to", "subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag", "= wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\"", "are attached to that particular object on-demand, not to the entire subclass.\"\"\" class", "super().__init__(self) self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag", "main function implements a test example run of this decorator pattern implementation\"\"\" simple_hello", "without changing its implementation. It differs from inheritance because the new functionalities are", "# def len(self): # \"\"\"Just in case if we want to know the", "tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return", "BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped", "an object without changing its implementation. It differs from inheritance because the new", "__init__(self, text): \"\"\"A HastTag is a simple text string.\"\"\" self._text = text def", "function implements a test example run of this decorator pattern implementation\"\"\" simple_hello =", "a tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self):", "italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \", italic_and_bold_hello.render()) if", "class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped =", "<i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def", "a simple text string.\"\"\" self._text = text def render(self): \"\"\"This function represents a", "= wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements a", "self._text # def len(self): # \"\"\"Just in case if we want to know", "HashTag: \"\"\"Represents a hash tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag is a", "Decorator pattern is used to dynamically add a new features/functionalities to an object", "particular object on-demand, not to the entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash", "# return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def __init__(self, wrapped):", "run of this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello", "in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\"", "return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self)", "html format.\"\"\" return self._text # def len(self): # \"\"\"Just in case if we", "__init__(self, wrapped): 
super().__init__(self) self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This", "\", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \", italic_and_bold_hello.render()) if __name__ == \"__main__\": main()", "rending in html format.\"\"\" return self._text # def len(self): # \"\"\"Just in case", "render(self): \"\"\"This function represents a text rending in html format.\"\"\" return self._text #", "lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def __init__(self,", "def main(): \"\"\"This main function implements a test example run of this decorator", "len(self): # \"\"\"Just in case if we want to know the lenght.\"\"\" #", "implementation. It differs from inheritance because the new functionalities are attached to that", "def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag):", "changing its implementation. It differs from inheritance because the new functionalities are attached", "string.\"\"\" self._text = text def render(self): \"\"\"This function represents a text rending in", "a tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self):", "return self._text # def len(self): # \"\"\"Just in case if we want to", "is used to dynamically add a new features/functionalities to an object without changing", "wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a", "the new functionalities are attached to that particular object on-demand, not to the", "object on-demand, not to the entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag", "text): \"\"\"A HastTag is a simple text string.\"\"\" self._text = text def render(self):", "want to know the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag", "from inheritance because the new functionalities are attached to that particular object on-demand,", "wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main", "\"\"\"Represents a hash tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag is a simple", "self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements", "super().__init__(self) self._wrapped = wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function", "return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements a test example run of", "in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def render(self): return f\"<b>{self._wrapped.render()}</b>\"", "object without changing its implementation. 
It differs from inheritance because the new functionalities", "pattern is used to dynamically add a new features/functionalities to an object without", "def render(self): return f\"<b>{self._wrapped.render()}</b>\" class ItalicWrapper(HashTag): \"\"\"Wraps a tag in <i>\"\"\" def __init__(self,", "know the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\"", "BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \", italic_and_bold_hello.render())", "of this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello =", "\"\"\"Wraps a tag in <i>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def", "the entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag text.\"\"\" def __init__(self, text):", "a hash tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag is a simple text", "functionalities are attached to that particular object on-demand, not to the entire subclass.\"\"\"", "to the entire subclass.\"\"\" class HashTag: \"\"\"Represents a hash tag text.\"\"\" def __init__(self,", "text.\"\"\" def __init__(self, text): \"\"\"A HastTag is a simple text string.\"\"\" self._text =", "\"\"\"Wraps a tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped = wrapped def", "def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements a test example", "new features/functionalities to an object without changing its implementation. It differs from inheritance", "HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render())", "wrapped def render(self): return f\"<i>{self._wrapped.render()}</i>\" def main(): \"\"\"This main function implements a test", "len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped", "hash tag text.\"\"\" def __init__(self, text): \"\"\"A HastTag is a simple text string.\"\"\"", "print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \", italic_and_bold_hello.render()) if __name__ == \"__main__\":", "to an object without changing its implementation. 
It differs from inheritance because the", "format.\"\"\" return self._text # def len(self): # \"\"\"Just in case if we want", "text string.\"\"\" self._text = text def render(self): \"\"\"This function represents a text rending", "\"\"\"A HastTag is a simple text string.\"\"\" self._text = text def render(self): \"\"\"This", "class BoldWrapper(HashTag): \"\"\"Wraps a tag in <b>\"\"\" def __init__(self, wrapped): super().__init__(self) self._wrapped =", "bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after:", "ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \", italic_and_bold_hello.render()) if __name__ ==", "def render(self): \"\"\"This function represents a text rending in html format.\"\"\" return self._text", "function represents a text rending in html format.\"\"\" return self._text # def len(self):", "to that particular object on-demand, not to the entire subclass.\"\"\" class HashTag: \"\"\"Represents", "dynamically add a new features/functionalities to an object without changing its implementation. It", "a text rending in html format.\"\"\" return self._text # def len(self): # \"\"\"Just", "this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello = BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello)", "self._text = text def render(self): \"\"\"This function represents a text rending in html", "test example run of this decorator pattern implementation\"\"\" simple_hello = HashTag(\"#helloWorld!\") bold_hello =", "if we want to know the lenght.\"\"\" # return len(self._text) class BoldWrapper(HashTag): \"\"\"Wraps", "= BoldWrapper(simple_hello) italic_and_bold_hello = ItalicWrapper(bold_hello) print(\"before: \", simple_hello.render()) print(\"after: \", bold_hello.render()) print(\"after: \"," ]
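Like the Adapter record, these shingles are overlapping windows over a single Decorator-pattern example (from s-c-23/Elements-of-Software-Design, per the reponame tag). Re-assembled for readability; formatting is an assumption, while the statements and docstrings (including the "HastTag", "rending", and "lenght" typos) are verbatim from the fragments.

"""The Decorator pattern is used to dynamically add a new features/functionalities to
an object without changing its implementation. It differs from inheritance because the
new functionalities are attached to that particular object on-demand, not to the
entire subclass."""


class HashTag:
    """Represents a hash tag text."""

    def __init__(self, text):
        """A HastTag is a simple text string."""
        self._text = text

    def render(self):
        """This function represents a text rending in html format."""
        return self._text

    # def len(self):
    #     """Just in case if we want to know the lenght."""
    #     return len(self._text)


class BoldWrapper(HashTag):
    """Wraps a tag in <b>"""

    def __init__(self, wrapped):
        super().__init__(self)  # note: passes self as the text, verbatim from the fragments
        self._wrapped = wrapped

    def render(self):
        return f"<b>{self._wrapped.render()}</b>"


class ItalicWrapper(HashTag):
    """Wraps a tag in <i>"""

    def __init__(self, wrapped):
        super().__init__(self)
        self._wrapped = wrapped

    def render(self):
        return f"<i>{self._wrapped.render()}</i>"


def main():
    """This main function implements a test example run of this decorator pattern implementation"""
    simple_hello = HashTag("#helloWorld!")
    bold_hello = BoldWrapper(simple_hello)
    italic_and_bold_hello = ItalicWrapper(bold_hello)
    print("before: ", simple_hello.render())
    print("after: ", bold_hello.render())
    print("after: ", italic_and_bold_hello.render())


if __name__ == "__main__":
    main()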
[ "def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\", str), (\"account_id\", str)])", "import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\",", "from dataclasses import make_dataclass from fractal.core.specifications.object_of_account_specification import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec", "fractal.core.specifications.object_of_account_specification import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC =", "test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\", str), (\"account_id\", str)]) assert", "import make_dataclass from fractal.core.specifications.object_of_account_specification import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\",", "( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\",", "ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\", str),", "from fractal.core.specifications.object_of_account_specification import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC", ") def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\", str), (\"account_id\",", "= ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\", str), (\"account_id\", str)]) assert spec.is_satisfied_by(DC(**dict(id=\"abc\", account_id=\"def\")))", "spec = ObjectOfAccountSpecification(\"abc\", \"def\") DC = make_dataclass(\"DC\", [(\"id\", str), (\"account_id\", str)]) assert spec.is_satisfied_by(DC(**dict(id=\"abc\",", "make_dataclass from fractal.core.specifications.object_of_account_specification import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec = ObjectOfAccountSpecification(\"abc\", \"def\")", "dataclasses import make_dataclass from fractal.core.specifications.object_of_account_specification import ( ObjectOfAccountSpecification, ) def test_object_of_account_specification(): spec =" ]
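This record is a complete pytest-style test for ObjectOfAccountSpecification from the fractal package. Re-assembled for readability; line breaks are assumptions, and running it requires the fractal package to be installed.

from dataclasses import make_dataclass

from fractal.core.specifications.object_of_account_specification import (
    ObjectOfAccountSpecification,
)


def test_object_of_account_specification():
    # Build a throwaway dataclass with the two fields the specification inspects.
    spec = ObjectOfAccountSpecification("abc", "def")
    DC = make_dataclass("DC", [("id", str), ("account_id", str)])
    assert spec.is_satisfied_by(DC(**dict(id="abc", account_id="def")))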
[ "2048 1.428s 697.305us/record 1503.756MB/s bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s bs=4194304 count= 512", "0.573s 2.187us/record 1.829MB/s bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s bs= 16 count=", "0.757s 1.444us/record 2.770MB/s bs= 8 count= 524288 0.762s 1.454us/record 5.503MB/s bs= 16 count=", "bs= 4096 count= 131072 0.802s 6.116us/record 669.720MB/s bs= 8192 count= 131072 1.038s 7.916us/record", "bs= 2 count= 524288 1.155s 2.203us/record 0.908MB/s bs= 4 count= 262144 0.573s 2.187us/record", "512 count= 262144 0.844s 3.220us/record 159.029MB/s bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s", "if m := re.search('copied, (.*?) s, ', message): seconds = float(m.group(1)) elif m", "65536 1.511s 23.059us/record 1421.036MB/s bs= 65536 count= 32768 2.009s 61.321us/record 1068.740MB/s bs= 131072", "bs= 64 count= 524288 1.527s 2.913us/record 21.972MB/s bs= 128 count= 262144 0.758s 2.892us/record", "bs= 8192 count= 131072 0.675s 5.148us/record 1591.372MB/s bs= 16384 count= 131072 0.917s 6.992us/record", "256 2.096s 8189.414us/record 1024.323MB/s Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2 arm_freq=1000 core_freq=500 sdram_freq=400 over_voltage=0 over_voltage_sdram_p=0 over_voltage_sdram_i=0 over_voltage_sdram_c=0", "count= 512 2.097s 4094.844us/record 1024.289MB/s bs=8388608 count= 256 2.096s 8189.414us/record 1024.323MB/s Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2", "count=1048576 0.558s 0.532us/record 1.880MB/s bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s bs= 4 count=1048576", "65536 count= 262144 0.822s 3.135us/record 20902.551MB/s bs= 131072 count= 262144 1.496s 5.705us/record 22973.575MB/s", "262144 0.863s 3.293us/record 77.746MB/s bs= 512 count= 262144 0.844s 3.220us/record 159.029MB/s bs= 1024", "count= 262144 0.848s 3.236us/record 19.779MB/s bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s bs=", "bs= 64 count= 131072 0.704s 5.373us/record 11.911MB/s bs= 128 count= 131072 0.711s 5.425us/record", "14.784us/record 35462.791MB/s bs=1048576 count= 65536 1.954s 29.814us/record 35170.740MB/s bs=2097152 count= 32768 1.978s 60.353us/record", "0.943s 1.799us/record 284.672MB/s bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s bs= 2048 count=", "16384 1.858s 113.374us/record 1156.103MB/s bs= 262144 count= 8192 2.055s 250.829us/record 1045.111MB/s bs= 524288", "1.049s 4.001us/record 32757.097MB/s bs= 262144 count= 131072 0.996s 7.597us/record 34507.742MB/s bs= 524288 count=", "bs= 524288 count= 4096 1.415s 345.540us/record 1517.302MB/s bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s", "524288 0.540s 1.030us/record 3.885MB/s bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s bs= 16", "2048 count= 65536 0.655s 10.002us/record 204.760MB/s bs= 4096 count= 65536 0.688s 10.498us/record 390.177MB/s", "8192 count=1048576 0.716s 0.683us/record 12000.920MB/s bs= 16384 count=1048576 0.886s 0.845us/record 19391.838MB/s bs= 32768", "1024 count= 262144 0.599s 2.286us/record 447.998MB/s bs= 2048 count= 262144 0.656s 2.501us/record 818.834MB/s", "float(m.group(1)) elif m := re.search('bytes transferred in (.*?) 
secs', message): seconds = float(m.group(1))", "1399.933MB/s bs= 8192 count= 262144 1.018s 3.883us/record 2109.512MB/s bs= 16384 count= 131072 0.757s", "bs= 32 count= 262144 0.813s 3.101us/record 10.321MB/s bs= 64 count= 262144 0.848s 3.236us/record", "3.171us/record 5.046MB/s bs= 32 count= 262144 0.813s 3.101us/record 10.321MB/s bs= 64 count= 262144", "bs <= 1024 * 1024 * 8: args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d'", "bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s bs= 2 count= 524288 1.682s 3.209us/record 0.623MB/s", "bs= 8192 count= 65536 0.903s 13.782us/record 594.390MB/s bs= 16384 count= 65536 1.343s 20.487us/record", "1.982MB/s bs= 4 count= 524288 0.540s 1.030us/record 3.885MB/s bs= 8 count= 524288 0.537s", "1024 1.430s 1396.846us/record 1501.348MB/s bs=4194304 count= 512 1.442s 2815.664us/record 1489.632MB/s bs=8388608 count= 256", "count= 524288 1.096s 2.090us/record 1960.027MB/s bs= 8192 count= 262144 0.750s 2.861us/record 2863.609MB/s bs=", "count= 256 1.583s 6185.391us/record 1356.197MB/s ================================================================ Raspberry Pi 3 running Raspbian GNU/Linux 10", "1 count=1048576 1.507s 1.437us/record 0.696MB/s bs= 2 count= 524288 0.753s 1.437us/record 1.392MB/s bs=", "bs= 262144 count= 16384 1.045s 63.790us/record 4109.505MB/s bs= 524288 count= 8192 1.092s 133.292us/record", "count= 65536 0.629s 9.605us/record 3.332MB/s bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s bs=", "65536 0.655s 10.002us/record 204.760MB/s bs= 4096 count= 65536 0.688s 10.498us/record 390.177MB/s bs= 8192", "bs= 65536 count= 32768 2.009s 61.321us/record 1068.740MB/s bs= 131072 count= 16384 1.858s 113.374us/record", "113.177us/record 1158.110MB/s bs= 262144 count= 8192 1.801s 219.850us/record 1192.377MB/s bs= 524288 count= 4096", "0.719s 5.483us/record 2.918MB/s bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s bs= 64 count=", "1812.651MB/s bs= 524288 count= 4096 1.091s 266.418us/record 1967.912MB/s bs=1048576 count= 2048 1.372s 670.063us/record", "count= 16384 1.045s 63.790us/record 4109.505MB/s bs= 524288 count= 8192 1.092s 133.292us/record 3933.372MB/s bs=1048576", "count= 16384 1.301s 79.400us/record 3301.561MB/s bs= 524288 count= 8192 1.369s 167.107us/record 3137.440MB/s bs=1048576", "count= 524288 0.780s 1.488us/record 344.122MB/s bs= 1024 count= 524288 0.831s 1.585us/record 645.859MB/s bs=", "1.519s 23.171us/record 22626.825MB/s bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s bs=2097152 count= 16384 1.487s", "17.026us/record 3849.261MB/s bs= 131072 count= 32768 1.052s 32.093us/record 4084.183MB/s bs= 262144 count= 16384", "bs= 4 count= 524288 0.735s 1.402us/record 2.852MB/s bs= 8 count= 524288 0.740s 1.411us/record", "1.839s 1796.094us/record 1167.618MB/s bs=4194304 count= 512 1.833s 3580.527us/record 1171.421MB/s bs=8388608 count= 256 1.860s", "5.392us/record 189.911MB/s bs= 2048 count= 131072 0.751s 5.728us/record 357.517MB/s bs= 4096 count= 131072", "count= 262144 0.524s 1.999us/record 16.006MB/s bs= 64 count= 262144 0.692s 2.640us/record 24.246MB/s bs=", "0.696MB/s bs= 2 count= 524288 0.753s 1.437us/record 1.392MB/s bs= 4 count= 524288 0.757s", "131072 1.038s 7.916us/record 1034.902MB/s bs= 16384 count= 65536 0.833s 12.712us/record 1288.837MB/s bs= 32768", "bs= 256 count= 131072 0.690s 5.262us/record 48.655MB/s bs= 512 count= 131072 0.714s 5.449us/record", "0.550s 0.525us/record 15.252MB/s bs= 16 count=1048576 0.550s 0.524us/record 30.509MB/s bs= 32 count=1048576 0.550s", "0.610s 0.582us/record 3517.599MB/s bs= 4096 
count=1048576 0.648s 0.618us/record 6624.642MB/s bs= 8192 count=1048576 0.716s", "262144 0.524s 2.001us/record 7.997MB/s bs= 32 count= 262144 0.524s 1.999us/record 16.006MB/s bs= 64", "0.675s 5.148us/record 1591.372MB/s bs= 16384 count= 131072 0.917s 6.992us/record 2343.125MB/s bs= 32768 count=", "2.913us/record 21.972MB/s bs= 128 count= 262144 0.758s 2.892us/record 44.258MB/s bs= 256 count= 262144", "1.495s 45.614us/record 22988.023MB/s bs=2097152 count= 16384 1.487s 90.750us/record 23109.237MB/s bs=4194304 count= 8192 1.474s", "seconds = 0 message = str(result.stderr) if m := re.search('copied, (.*?) s, ',", "2.188us/record 0.457MB/s bs= 2 count= 524288 1.155s 2.203us/record 0.908MB/s bs= 4 count= 262144", "count= 65536 1.325s 20.212us/record 1621.207MB/s bs= 65536 count= 32768 1.282s 39.113us/record 1675.575MB/s bs=", "2100.605us/record 998.356MB/s bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count= 256 2.306s 9005.898us/record", "1.543s 376.625us/record 2784.138MB/s bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s bs=4194304 count= 1024 2.441s", "count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s bs=", "1.155s 70.499us/record 3718.413MB/s bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s bs=1048576 count= 4096", "2.184us/record 234.471MB/s bs= 1024 count= 262144 0.599s 2.286us/record 447.998MB/s bs= 2048 count= 262144", "count= 16384 2.007s 122.520us/record 34233.639MB/s bs=8388608 count= 8192 2.103s 256.698us/record 32678.930MB/s debian11$ ./bench_dd.py", "2048 1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s bs=4194304 count= 512", "48.655MB/s bs= 512 count= 131072 0.714s 5.449us/record 93.955MB/s bs= 1024 count= 131072 0.707s", "1517.302MB/s bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s", "61.321us/record 1068.740MB/s bs= 131072 count= 16384 1.858s 113.374us/record 1156.103MB/s bs= 262144 count= 8192", "262144 0.750s 2.861us/record 2863.609MB/s bs= 16384 count= 262144 1.125s 4.290us/record 3819.446MB/s bs= 32768", "131072 0.719s 5.483us/record 2.918MB/s bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s bs= 64", "262144 0.795s 3.034us/record 337.543MB/s bs= 2048 count= 262144 0.817s 3.117us/record 657.138MB/s bs= 4096", "0.899s 1.715us/record 74.630MB/s bs= 256 count= 524288 0.925s 1.764us/record 145.141MB/s bs= 512 count=", "kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s bs= 2 count=", "1 count=1048576 3.307s 3.154us/record 0.317MB/s bs= 2 count= 524288 1.682s 3.209us/record 0.623MB/s bs=", "count= 8192 1.257s 153.500us/record 1707.781MB/s bs= 524288 count= 4096 1.303s 318.062us/record 1648.385MB/s bs=1048576", "524288 1.682s 3.209us/record 0.623MB/s bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s bs= 8", "1.423us/record 11.246MB/s bs= 32 count= 524288 0.737s 1.407us/record 22.750MB/s bs= 64 count= 524288", "5.670MB/s bs= 16 count= 524288 0.746s 1.423us/record 11.246MB/s bs= 32 count= 524288 0.737s", "count= 4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608 count=", "9.577us/record 0.209MB/s bs= 4 count= 262144 2.505s 9.554us/record 0.419MB/s bs= 8 count= 131072", "result = \"\"\" Raspberry Pi 4 running FreeBSD 13-RELEASE: freebsd% python3.9 bench_dd.py bs=", "1.251s 9.546us/record 0.838MB/s bs= 16 count= 65536 0.631s 9.623us/record 1.663MB/s bs= 32 count=", "count= 
#!/usr/bin/python3
import re, subprocess

bs = 1
count = 1024 * 1024
while bs <= 1024 * 1024 * 8:
    args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count]
    result = subprocess.run(args, capture_output=True)
    seconds = 0
    message = str(result.stderr)
    if m := re.search('copied, (.*?) s, ', message):
        seconds = float(m.group(1))
    elif m := re.search('bytes transferred in (.*?) secs', message):
        seconds = float(m.group(1))
    else:
        print('Unable to parse dd output:\n%s' % message)
        break
    print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s' %
          (bs, count, seconds, seconds * 1e6 / count, bs * count / 1e6 / seconds))
    bs *= 2
    if seconds > 1:
        count /= 2
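# The two regexes above match the two dd summary formats this script expects;
# the sample lines below only illustrate their shape (exact wording varies by
# dd version) and are not captured output:
#   GNU coreutils dd (Linux) ends its stderr summary roughly like
#     "1048576 bytes (1.0 MB, 1.0 MiB) copied, 0.524288 s, 2.0 MB/s"
#   BSD dd (FreeBSD) reports roughly
#     "1048576 bytes transferred in 0.524288 secs (2000000 bytes/sec)"
# Each printed row gives the block size, record count, elapsed time, the
# per-record overhead in microseconds, and the resulting throughput in MB/s.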
result = """
Raspberry Pi 4 running FreeBSD 13-RELEASE:
freebsd% python3.9 bench_dd.py
...

Raspberry Pi 4 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
...

Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
...

Raspberry Pi 4 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
...

Raspberry Pi 3 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
...

Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
...

================================================================
Raspberry Pi 3 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
...

================================================================
Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
...

Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2
arm_freq=1000
core_freq=500
sdram_freq=400
over_voltage=0
over_voltage_sdram_p=0
over_voltage_sdram_i=0
over_voltage_sdram_c=0
$ ./bench_dd.py
...

================================================================
HP e8300, CPU i7-3770
freebsd13% ./bench_dd.py
...
debian11$ ./bench_dd.py
...
"""
count=", "32768 count= 32768 1.105s 33.717us/record 971.844MB/s bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s", "3.154us/record 0.317MB/s bs= 2 count= 524288 1.682s 3.209us/record 0.623MB/s bs= 4 count= 262144", "0.546us/record 117.174MB/s bs= 128 count=1048576 0.568s 0.542us/record 236.122MB/s bs= 256 count=1048576 0.577s 0.550us/record", "0.524us/record 30.509MB/s bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s bs= 64 count=1048576 0.553s 0.527us/record", "bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s bs= 8192 count= 262144 1.612s 6.148us/record", "count= 2048 2.070s 1010.869us/record 1037.301MB/s bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s bs=4194304 count=", "count= 262144 0.732s 2.792us/record 366.773MB/s bs= 2048 count= 262144 0.785s 2.993us/record 684.160MB/s bs=", "475.482MB/s bs= 512 count=1048576 0.583s 0.556us/record 921.523MB/s bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s", "65536 1.519s 23.171us/record 22626.825MB/s bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s bs=2097152 count= 16384", "26.481MB/s bs= 512 count= 65536 0.635s 9.687us/record 52.854MB/s bs= 1024 count= 65536 0.645s", "256 count= 524288 0.925s 1.764us/record 145.141MB/s bs= 512 count= 524288 0.943s 1.799us/record 284.672MB/s", "count=1048576 0.573s 0.546us/record 117.174MB/s bs= 128 count=1048576 0.568s 0.542us/record 236.122MB/s bs= 256 count=1048576", "4 running Debian 11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 1.464s", "1.105s 33.717us/record 971.844MB/s bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s bs= 131072 count=", "count= 16384 1.858s 113.374us/record 1156.103MB/s bs= 262144 count= 8192 2.055s 250.829us/record 1045.111MB/s bs=", "bs=8388608 count= 256 1.583s 6185.391us/record 1356.197MB/s ================================================================ Raspberry Pi 3 running Raspbian GNU/Linux", "2048 count=1048576 0.610s 0.582us/record 3517.599MB/s bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s bs= 8192", "count= 262144 1.496s 5.705us/record 22973.575MB/s bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s bs=", "131072 count= 16384 1.854s 113.177us/record 1158.110MB/s bs= 262144 count= 8192 1.801s 219.850us/record 1192.377MB/s", "1.325s 20.212us/record 1621.207MB/s bs= 65536 count= 32768 1.282s 39.113us/record 1675.575MB/s bs= 131072 count=", "running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ python3 bench_dd.py bs= 1 count=1048576", "bs= 524288 count= 16384 2.347s 143.225us/record 3660.587MB/s bs=1048576 count= 8192 3.553s 433.748us/record 2417.480MB/s", "count= 8192 2.055s 250.829us/record 1045.111MB/s bs= 524288 count= 4096 2.036s 496.960us/record 1054.989MB/s bs=1048576", "2343.125MB/s bs= 32768 count= 131072 1.385s 10.567us/record 3100.959MB/s bs= 65536 count= 65536 1.189s", "1072.520us/record 1955.351MB/s bs=4194304 count= 1024 2.454s 2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record", "2.926us/record 1399.933MB/s bs= 8192 count= 262144 1.018s 3.883us/record 2109.512MB/s bs= 16384 count= 131072", "bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s bs=4194304 count= 1024 2.441s 2383.790us/record 1759.511MB/s bs=8388608", "11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s bs=", "count= 524288 2.828s 5.393us/record 0.371MB/s bs= 4 count= 262144 1.415s 5.397us/record 0.741MB/s bs=", "0.886s 3.378us/record 1212.454MB/s bs= 8192 count= 262144 1.406s 5.365us/record 1527.034MB/s bs= 16384 count=", "count= 
524288 0.767s 1.463us/record 21.878MB/s bs= 64 count= 524288 0.897s 1.711us/record 37.394MB/s bs=", "count= 131072 0.719s 5.483us/record 2.918MB/s bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s bs=", "2.151s 2100.605us/record 998.356MB/s bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count= 256 2.306s", "bs=8388608 count= 8192 2.103s 256.698us/record 32678.930MB/s debian11$ ./bench_dd.py bs= 1 count=1048576 0.558s 0.532us/record", "3819.446MB/s bs= 32768 count= 131072 1.001s 7.638us/record 4289.905MB/s bs= 65536 count= 65536 0.975s", "to parse dd output:\\n%s' % message) break print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s' %", "90.060MB/s bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s bs= 512 count= 524288 0.780s", "32768 1.282s 39.113us/record 1675.575MB/s bs= 131072 count= 16384 1.211s 73.936us/record 1772.773MB/s bs= 262144", "5.10 $ ./bench_dd.py bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s bs= 2 count= 524288", "count= 262144 0.767s 2.926us/record 21.874MB/s bs= 128 count= 262144 0.725s 2.767us/record 46.261MB/s bs=", "0.968s 3.694us/record 1108.962MB/s bs= 8192 count= 262144 1.612s 6.148us/record 1332.376MB/s bs= 16384 count=", "1.272MB/s bs= 8 count= 262144 0.855s 3.262us/record 2.453MB/s bs= 16 count= 262144 0.831s", "262144 0.758s 2.892us/record 44.258MB/s bs= 256 count= 262144 0.760s 2.899us/record 88.300MB/s bs= 512", "1024 * 1024 * 8: args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs,", "bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s bs=8388608 count= 4096 1.588s 387.625us/record 21641.067MB/s \"\"\"", "131072 1.252s 9.549us/record 3431.527MB/s bs= 65536 count= 65536 1.116s 17.026us/record 3849.261MB/s bs= 131072", "131072 0.547s 4.170us/record 982.276MB/s bs= 8192 count= 131072 1.039s 7.929us/record 1033.159MB/s bs= 16384", "2.088s 63.717us/record 4114.190MB/s bs= 524288 count= 16384 2.347s 143.225us/record 3660.587MB/s bs=1048576 count= 8192", "bs= 1024 count= 262144 0.732s 2.792us/record 366.773MB/s bs= 2048 count= 262144 0.785s 2.993us/record", "bs= 8192 count= 262144 1.018s 3.883us/record 2109.512MB/s bs= 16384 count= 131072 0.757s 5.776us/record", "1288.837MB/s bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s bs= 65536 count= 32768 1.282s", "Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs=", "262144 1.093s 4.170us/record 491.168MB/s bs= 4096 count= 131072 0.547s 4.170us/record 982.276MB/s bs= 8192", "seconds * 1e6 / count, bs * count / 1e6 / seconds)) bs", "bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s bs=4194304", "bs= 8 count= 262144 0.855s 3.262us/record 2.453MB/s bs= 16 count= 262144 0.831s 3.171us/record", "524288 0.831s 1.585us/record 645.859MB/s bs= 2048 count= 524288 0.914s 1.742us/record 1175.405MB/s bs= 4096", "154.328us/record 3397.221MB/s bs=1048576 count= 4096 1.543s 376.625us/record 2784.138MB/s bs=2097152 count= 2048 2.041s 996.766us/record", "bs= 64 count= 524288 0.897s 1.711us/record 37.394MB/s bs= 128 count= 524288 0.899s 1.715us/record", "count= 262144 0.599s 2.286us/record 447.998MB/s bs= 2048 count= 262144 0.656s 2.501us/record 818.834MB/s bs=", "131072 count= 65536 1.834s 27.978us/record 4684.865MB/s bs= 262144 count= 32768 2.088s 63.717us/record 4114.190MB/s", "0.738s 1.408us/record 45.465MB/s bs= 128 count= 524288 0.745s 1.421us/record 90.060MB/s bs= 256 count=", "0.623MB/s bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s bs= 8 count= 262144 0.855s", 
"4096 count= 65536 0.688s 10.498us/record 390.177MB/s bs= 8192 count= 65536 0.903s 13.782us/record 594.390MB/s", "0.802s 6.116us/record 669.720MB/s bs= 8192 count= 131072 1.038s 7.916us/record 1034.902MB/s bs= 16384 count=", "count= 262144 0.750s 2.861us/record 2863.609MB/s bs= 16384 count= 262144 1.125s 4.290us/record 3819.446MB/s bs=", "64 count= 262144 0.692s 2.640us/record 24.246MB/s bs= 128 count= 262144 0.654s 2.494us/record 51.329MB/s", "count= 524288 0.540s 1.030us/record 3.885MB/s bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s bs=", "524288 0.738s 1.408us/record 45.465MB/s bs= 128 count= 524288 0.745s 1.421us/record 90.060MB/s bs= 256", "count= 262144 0.817s 3.117us/record 657.138MB/s bs= 4096 count= 262144 0.886s 3.378us/record 1212.454MB/s bs=", "524288 1.155s 2.203us/record 0.908MB/s bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s bs= 8", "234.471MB/s bs= 1024 count= 262144 0.599s 2.286us/record 447.998MB/s bs= 2048 count= 262144 0.656s", "bs= 8192 count= 262144 1.612s 6.148us/record 1332.376MB/s bs= 16384 count= 131072 1.504s 11.471us/record", "524288 1.527s 2.913us/record 21.972MB/s bs= 128 count= 262144 0.758s 2.892us/record 44.258MB/s bs= 256", "16384 count= 131072 1.294s 9.875us/record 1659.057MB/s bs= 32768 count= 65536 1.245s 19.003us/record 1724.402MB/s", "count= 32768 1.324s 40.391us/record 3245.109MB/s bs= 262144 count= 16384 1.301s 79.400us/record 3301.561MB/s bs=", "count=1048576 1.464s 1.396us/record 0.716MB/s bs= 2 count= 524288 0.729s 1.390us/record 1.439MB/s bs= 4", "524288 0.537s 1.023us/record 31.265MB/s bs= 64 count= 524288 1.527s 2.913us/record 21.972MB/s bs= 128", "262144 0.817s 3.117us/record 657.138MB/s bs= 4096 count= 262144 0.886s 3.378us/record 1212.454MB/s bs= 8192", "bs= 2048 count=1048576 0.610s 0.582us/record 3517.599MB/s bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s bs=", "16 count= 262144 0.579s 2.210us/record 7.239MB/s bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s", "bs= 256 count= 262144 0.863s 3.293us/record 77.746MB/s bs= 512 count= 262144 0.844s 3.220us/record", "count= 262144 0.996s 3.799us/record 2156.141MB/s bs= 16384 count= 262144 1.627s 6.208us/record 2639.224MB/s bs=", "2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s Raspberry Pi 4", "8192 count= 65536 0.903s 13.782us/record 594.390MB/s bs= 16384 count= 65536 1.343s 20.487us/record 799.712MB/s", "16384 1.045s 63.790us/record 4109.505MB/s bs= 524288 count= 8192 1.092s 133.292us/record 3933.372MB/s bs=1048576 count=", "1815.495MB/s bs= 2048 count=1048576 0.610s 0.582us/record 3517.599MB/s bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s", "1251.798MB/s bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s ================================================================ Raspberry Pi 2 running Raspbian", "bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s Raspberry Pi 3 running Debian 11 arm64,", "bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s bs= 64 count= 262144 0.544s 2.077us/record", "3.220us/record 159.029MB/s bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s bs= 2048 count= 262144", "1.018us/record 0.982MB/s bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s bs= 4 count= 524288", "0.533s 1.016us/record 15.741MB/s bs= 32 count= 524288 0.537s 1.023us/record 31.265MB/s bs= 64 count=", "1024 count= 262144 0.894s 3.411us/record 300.221MB/s bs= 2048 count= 262144 0.984s 3.755us/record 545.461MB/s", "Pi 4 running Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py 
bs= 1", "bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s bs= 4 count=1048576 0.551s 0.526us/record 7.611MB/s bs=", "0.917s 6.992us/record 2343.125MB/s bs= 32768 count= 131072 1.385s 10.567us/record 3100.959MB/s bs= 65536 count=", "65536 0.630s 9.606us/record 6.663MB/s bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s bs= 256", "0.548us/record 14.595MB/s bs= 16 count=1048576 0.572s 0.546us/record 29.329MB/s bs= 32 count=1048576 0.574s 0.548us/record", "count=1048576 2.294s 2.188us/record 0.457MB/s bs= 2 count= 524288 1.155s 2.203us/record 0.908MB/s bs= 4", "0.669us/record 6126.015MB/s bs= 8192 count=1048576 0.870s 0.830us/record 9870.674MB/s bs= 16384 count=1048576 1.191s 1.136us/record", "Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 5.409s 5.159us/record", "count= 262144 0.582s 2.221us/record 14.405MB/s bs= 64 count= 262144 0.767s 2.926us/record 21.874MB/s bs=", "524288 count= 4096 2.036s 496.960us/record 1054.989MB/s bs=1048576 count= 2048 2.070s 1010.869us/record 1037.301MB/s bs=2097152", "1621.207MB/s bs= 65536 count= 32768 1.282s 39.113us/record 1675.575MB/s bs= 131072 count= 16384 1.211s", "32768 count=1048576 1.414s 1.349us/record 24291.204MB/s bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s bs=", "1.191s 1.136us/record 14427.529MB/s bs= 32768 count= 524288 1.004s 1.915us/record 17109.038MB/s bs= 65536 count=", "bs= 524288 count= 4096 1.091s 266.418us/record 1967.912MB/s bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s", "16 count= 65536 0.631s 9.623us/record 1.663MB/s bs= 32 count= 65536 0.629s 9.605us/record 3.332MB/s", "1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s bs=4194304 count= 512 1.650s", "seconds = float(m.group(1)) elif m := re.search('bytes transferred in (.*?) 
secs', message): seconds", "2.072us/record 15.443MB/s bs= 64 count= 262144 0.544s 2.077us/record 30.817MB/s bs= 128 count= 262144", "count= 262144 0.520s 1.984us/record 2.016MB/s bs= 8 count= 262144 0.520s 1.982us/record 4.036MB/s bs=", "284.672MB/s bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s bs= 2048 count= 262144 0.565s", "bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s bs= 8192 count=1048576 0.716s 0.683us/record 12000.920MB/s bs=", "bs= 512 count= 524288 0.943s 1.799us/record 284.672MB/s bs= 1024 count= 524288 1.013s 1.933us/record", "2815.664us/record 1489.632MB/s bs=8388608 count= 256 1.444s 5642.461us/record 1486.693MB/s ================================================================ HP e8300, CPU i7-3770", "0.771s 11.765us/record 1392.607MB/s bs= 32768 count= 65536 1.511s 23.059us/record 1421.036MB/s bs= 65536 count=", "120.423MB/s bs= 512 count= 262144 0.572s 2.184us/record 234.471MB/s bs= 1024 count= 262144 0.599s", "1.538MB/s bs= 16 count= 131072 0.719s 5.483us/record 2.918MB/s bs= 32 count= 131072 0.674s", "262144 0.767s 2.926us/record 21.874MB/s bs= 128 count= 262144 0.725s 2.767us/record 46.261MB/s bs= 256", "34.500us/record 3799.209MB/s bs= 262144 count= 16384 1.155s 70.499us/record 3718.413MB/s bs= 524288 count= 8192", "count= 131072 1.504s 11.471us/record 1428.238MB/s bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s bs=", "2.070us/record 0.483MB/s bs= 2 count= 524288 1.069s 2.039us/record 0.981MB/s bs= 4 count= 262144", "131072 0.711s 5.425us/record 23.593MB/s bs= 256 count= 131072 0.690s 5.262us/record 48.655MB/s bs= 512", "2.058us/record 3.888MB/s bs= 16 count= 262144 0.543s 2.070us/record 7.730MB/s bs= 32 count= 262144", "bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s bs= 131072 count= 262144 1.049s 4.001us/record", "8 count= 262144 0.520s 1.982us/record 4.036MB/s bs= 16 count= 262144 0.524s 2.001us/record 7.997MB/s", "11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.171s 2.070us/record 0.483MB/s bs=", "24291.204MB/s bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s bs= 131072 count= 262144 1.049s", "524288 0.752s 1.434us/record 178.504MB/s bs= 512 count= 524288 0.780s 1.488us/record 344.122MB/s bs= 1024", "2417.480MB/s bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s", "4 count= 262144 0.520s 1.984us/record 2.016MB/s bs= 8 count= 262144 0.520s 1.982us/record 4.036MB/s", "1.038s 7.916us/record 1034.902MB/s bs= 16384 count= 65536 0.833s 12.712us/record 1288.837MB/s bs= 32768 count=", "count= 1024 1.430s 1396.846us/record 1501.348MB/s bs=4194304 count= 512 1.442s 2815.664us/record 1489.632MB/s bs=8388608 count=", "74.630MB/s bs= 256 count= 524288 0.925s 1.764us/record 145.141MB/s bs= 512 count= 524288 0.943s", "bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s bs= 65536 count= 32768 1.432s 43.706us/record", "524288 count= 4096 1.303s 318.062us/record 1648.385MB/s bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s bs=2097152", "0.636s 9.700us/record 13.195MB/s bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s bs= 512 count=", "65536 count= 65536 1.189s 18.144us/record 3611.984MB/s bs= 131072 count= 32768 1.130s 34.500us/record 3799.209MB/s", "running Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 5.409s", "3.411us/record 300.221MB/s bs= 2048 count= 262144 0.984s 3.755us/record 545.461MB/s bs= 4096 count= 262144", "count=1048576 0.550s 0.524us/record 61.048MB/s bs= 64 count=1048576 
0.553s 0.527us/record 121.398MB/s bs= 128 count=1048576", "1406.148MB/s bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s Raspberry Pi 4 running Ubuntu server", "bs= 8192 count= 131072 1.038s 7.916us/record 1034.902MB/s bs= 16384 count= 65536 0.833s 12.712us/record", "count= 2048 1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s bs=4194304 count=", "1.454us/record 5.503MB/s bs= 16 count= 524288 0.763s 1.456us/record 10.992MB/s bs= 32 count= 524288", "bs= 4096 count=1048576 0.701s 0.669us/record 6126.015MB/s bs= 8192 count=1048576 0.870s 0.830us/record 9870.674MB/s bs=", "count=1048576 1.414s 1.349us/record 24291.204MB/s bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s bs= 131072", "str(result.stderr) if m := re.search('copied, (.*?) s, ', message): seconds = float(m.group(1)) elif", "1505.548MB/s bs= 524288 count= 4096 1.415s 345.540us/record 1517.302MB/s bs=1048576 count= 2048 1.428s 697.305us/record", "131072 0.714s 5.449us/record 93.955MB/s bs= 1024 count= 131072 0.707s 5.392us/record 189.911MB/s bs= 2048", "23.171us/record 22626.825MB/s bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s bs=2097152 count= 16384 1.487s 90.750us/record", "128 count= 262144 0.552s 2.105us/record 60.802MB/s bs= 256 count= 262144 0.557s 2.126us/record 120.423MB/s", "22.840us/record 1434.649MB/s bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s bs= 131072 count= 16384", "30.509MB/s bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s bs= 64 count=1048576 0.553s 0.527us/record 121.398MB/s", "args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count] result =", "16384 1.437s 87.693us/record 1494.671MB/s bs= 262144 count= 8192 1.426s 174.119us/record 1505.548MB/s bs= 524288", "4096 count= 131072 0.547s 4.170us/record 982.276MB/s bs= 8192 count= 131072 1.039s 7.929us/record 1033.159MB/s", "bs=1048576 count= 4096 1.543s 376.625us/record 2784.138MB/s bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s bs=4194304", "2.918MB/s bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s bs= 64 count= 131072 0.704s", "count= 2048 1.428s 697.305us/record 1503.756MB/s bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s bs=4194304 count=", "count=1048576 5.409s 5.159us/record 0.194MB/s bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s bs= 4", "64 count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s", "8192 count= 262144 1.406s 5.365us/record 1527.034MB/s bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s", "count, bs * count / 1e6 / seconds)) bs *= 2 if seconds", "0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s bs= 4096 count=1048576 0.701s", "count= 4096 2.321s 566.655us/record 1850.465MB/s bs=2097152 count= 2048 2.984s 1457.168us/record 1439.197MB/s bs=4194304 count=", "1.858s 113.374us/record 1156.103MB/s bs= 262144 count= 8192 2.055s 250.829us/record 1045.111MB/s bs= 524288 count=", "4096 1.091s 266.418us/record 1967.912MB/s bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024", "* 8: args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count]", "6750.234us/record 1242.714MB/s ================================================================ Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10", "0.483MB/s bs= 2 count= 524288 1.069s 2.039us/record 0.981MB/s bs= 4 count= 262144 0.543s", "count=1048576 0.551s 0.526us/record 7.611MB/s bs= 8 
count=1048576 0.550s 0.525us/record 15.252MB/s bs= 16 count=1048576", "bs= 2 count= 524288 0.753s 1.437us/record 1.392MB/s bs= 4 count= 524288 0.757s 1.444us/record", "bs=2097152 count= 16384 1.487s 90.750us/record 23109.237MB/s bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s bs=8388608", "10 armv7, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 1.507s 1.437us/record 0.696MB/s bs=", "104.064MB/s bs= 2048 count= 65536 0.655s 10.002us/record 204.760MB/s bs= 4096 count= 65536 0.688s", "256 count= 262144 0.863s 3.293us/record 77.746MB/s bs= 512 count= 262144 0.844s 3.220us/record 159.029MB/s", "16384 count= 131072 0.757s 5.776us/record 2836.329MB/s bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s", "3580.527us/record 1171.421MB/s bs=8388608 count= 256 1.860s 7266.406us/record 1154.437MB/s Raspberry Pi 4 running Debian", "4 count= 262144 1.415s 5.397us/record 0.741MB/s bs= 8 count= 131072 0.682s 5.202us/record 1.538MB/s", "bs= 131072 count= 32768 1.130s 34.500us/record 3799.209MB/s bs= 262144 count= 16384 1.155s 70.499us/record", "bs= 16384 count= 65536 1.343s 20.487us/record 799.712MB/s bs= 32768 count= 32768 1.105s 33.717us/record", "count= 262144 0.822s 3.135us/record 20902.551MB/s bs= 131072 count= 262144 1.496s 5.705us/record 22973.575MB/s bs=", "count= 131072 0.682s 5.202us/record 1.538MB/s bs= 16 count= 131072 0.719s 5.483us/record 2.918MB/s bs=", "524288 0.763s 1.456us/record 10.992MB/s bs= 32 count= 524288 0.767s 1.463us/record 21.878MB/s bs= 64", "3 running Debian 11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.171s", "262144 count= 8192 2.055s 250.829us/record 1045.111MB/s bs= 524288 count= 4096 2.036s 496.960us/record 1054.989MB/s", "Debian 11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s", "bs= 32768 count= 131072 1.385s 10.567us/record 3100.959MB/s bs= 65536 count= 65536 1.189s 18.144us/record", "524288 0.762s 1.454us/record 5.503MB/s bs= 16 count= 524288 0.763s 1.456us/record 10.992MB/s bs= 32", "count=1048576 0.728s 0.694us/record 1.440MB/s bs= 2 count=1048576 0.573s 0.547us/record 3.658MB/s bs= 4 count=1048576", "Raspberry Pi 3 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ ./bench_dd.py bs=", "2109.512MB/s bs= 16384 count= 131072 0.757s 5.776us/record 2836.329MB/s bs= 32768 count= 131072 1.252s", "<= 1024 * 1024 * 8: args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' %", "256 1.444s 5642.461us/record 1486.693MB/s ================================================================ HP e8300, CPU i7-3770 freebsd13% ./bench_dd.py bs= 1", "count= 262144 0.543s 2.070us/record 7.730MB/s bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s bs=", "break print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s' % (bs, count, seconds, seconds * 1e6", "262144 0.822s 3.135us/record 20902.551MB/s bs= 131072 count= 262144 1.496s 5.705us/record 22973.575MB/s bs= 262144", "1.764us/record 145.141MB/s bs= 512 count= 524288 0.943s 1.799us/record 284.672MB/s bs= 1024 count= 524288", "1024 1.543s 1507.129us/record 1391.488MB/s bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s bs=8388608 count= 256", "bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s bs= 65536 count= 65536 1.365s 20.821us/record", "11.111us/record 2949.152MB/s bs= 65536 count= 65536 1.365s 20.821us/record 3147.534MB/s bs= 131072 count= 32768", "armv7, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s bs= 2", "37.394MB/s bs= 128 count= 524288 0.899s 1.715us/record 74.630MB/s bs= 256 count= 524288 0.925s", 
"bs= 2048 count= 262144 1.093s 4.170us/record 491.168MB/s bs= 4096 count= 131072 0.547s 4.170us/record", "count= 262144 0.653s 2.492us/record 102.746MB/s bs= 512 count= 262144 0.672s 2.564us/record 199.718MB/s bs=", "512 count= 65536 0.635s 9.687us/record 52.854MB/s bs= 1024 count= 65536 0.645s 9.840us/record 104.064MB/s", "256 count= 262144 0.653s 2.492us/record 102.746MB/s bs= 512 count= 262144 0.672s 2.564us/record 199.718MB/s", "2.203us/record 0.908MB/s bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s bs= 8 count= 262144", "0.565s 2.155us/record 950.259MB/s bs= 4096 count= 262144 0.671s 2.559us/record 1600.774MB/s bs= 8192 count=", "bs=8388608 count= 256 2.096s 8189.414us/record 1024.323MB/s Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2 arm_freq=1000 core_freq=500 sdram_freq=400 over_voltage=0 over_voltage_sdram_p=0", "1e6 / count, bs * count / 1e6 / seconds)) bs *= 2", "count= 4096 1.415s 345.540us/record 1517.302MB/s bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s bs=2097152 count=", "241.471MB/s bs= 256 count=1048576 0.565s 0.538us/record 475.482MB/s bs= 512 count=1048576 0.583s 0.556us/record 921.523MB/s", "3147.534MB/s bs= 131072 count= 32768 1.324s 40.391us/record 3245.109MB/s bs= 262144 count= 16384 1.301s", "0.577s 0.550us/record 465.528MB/s bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s bs= 1024 count=1048576 0.591s", "2639.224MB/s bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s bs= 65536 count= 65536 1.365s", "2784.138MB/s bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s bs=4194304 count= 1024 2.441s 2383.790us/record 1759.511MB/s", "512 2.097s 4094.844us/record 1024.289MB/s bs=8388608 count= 256 2.096s 8189.414us/record 1024.323MB/s Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2 arm_freq=1000", "= ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count] result = subprocess.run(args,", "bs= 1 count=1048576 5.409s 5.159us/record 0.194MB/s bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s", "bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s bs=8388608", "count= 524288 1.682s 3.209us/record 0.623MB/s bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s bs=", "2 count= 524288 1.155s 2.203us/record 0.908MB/s bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s", "1.972s 963.125us/record 1088.723MB/s bs=2097152 count= 1024 2.151s 2100.605us/record 998.356MB/s bs=4194304 count= 512 2.253s", "7.929us/record 1033.159MB/s bs= 16384 count= 65536 0.771s 11.765us/record 1392.607MB/s bs= 32768 count= 65536", "376.625us/record 2784.138MB/s bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s bs=4194304 count= 1024 2.441s 2383.790us/record", "4 count= 524288 0.540s 1.030us/record 3.885MB/s bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s", "3137.440MB/s bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s bs=2097152 count= 2048 2.197s 1072.520us/record 1955.351MB/s", "================================================================ Raspberry Pi 3 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ ./bench_dd.py", "16 count=1048576 0.550s 0.524us/record 30.509MB/s bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s bs= 64", "bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s bs= 512 count= 65536 0.635s 9.687us/record", "bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s bs=4194304 count= 512 2.097s 4094.844us/record 1024.289MB/s bs=8388608", "bs= 1024 count= 262144 0.795s 3.034us/record 
337.543MB/s bs= 2048 count= 262144 0.817s 3.117us/record", "22973.575MB/s bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s bs= 524288 count= 65536 1.519s", "count= 2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s Raspberry Pi", "1024 count= 65536 0.645s 9.840us/record 104.064MB/s bs= 2048 count= 65536 0.655s 10.002us/record 204.760MB/s", "64 count= 262144 0.848s 3.236us/record 19.779MB/s bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s", "131072 0.707s 5.392us/record 189.911MB/s bs= 2048 count= 131072 0.751s 5.728us/record 357.517MB/s bs= 4096", "128 count= 524288 0.899s 1.715us/record 74.630MB/s bs= 256 count= 524288 0.925s 1.764us/record 145.141MB/s", "5.11 $ ./bench_dd.py bs= 1 count=1048576 5.409s 5.159us/record 0.194MB/s bs= 2 count= 524288", "count= 524288 1.013s 1.933us/record 529.725MB/s bs= 2048 count= 262144 0.565s 2.155us/record 950.259MB/s bs=", "1.796s 438.547us/record 1195.511MB/s bs=1048576 count= 2048 1.972s 963.125us/record 1088.723MB/s bs=2097152 count= 1024 2.151s", "count= 262144 0.844s 3.220us/record 159.029MB/s bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s bs=", "0.745s 1.421us/record 90.060MB/s bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s bs= 512 count=", "count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s bs= 4096 count=1048576", "1.439MB/s bs= 4 count= 524288 0.735s 1.402us/record 2.852MB/s bs= 8 count= 524288 0.740s", "262144 2.505s 9.554us/record 0.419MB/s bs= 8 count= 131072 1.251s 9.546us/record 0.838MB/s bs= 16", "9005.898us/record 931.457MB/s Raspberry Pi 3 running Debian 11 arm64, kernel 5.10 $ ./bench_dd.py", "5046.152us/record 1662.377MB/s Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel 5.11 $", "0.529s 1.009us/record 1.982MB/s bs= 4 count= 524288 0.540s 1.030us/record 3.885MB/s bs= 8 count=", "1.211s 73.936us/record 1772.773MB/s bs= 262144 count= 8192 1.185s 144.619us/record 1812.651MB/s bs= 524288 count=", "524288 1.038s 1.979us/record 1.011MB/s bs= 4 count= 262144 0.520s 1.984us/record 2.016MB/s bs= 8", "bs= 131072 count= 16384 1.858s 113.374us/record 1156.103MB/s bs= 262144 count= 8192 2.055s 250.829us/record", "bs= 65536 count= 65536 0.975s 14.882us/record 4403.740MB/s bs= 131072 count= 65536 1.834s 27.978us/record", "count= 16384 1.155s 70.499us/record 3718.413MB/s bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s bs=1048576", "bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s bs= 2 count= 524288 1.038s 1.979us/record 1.011MB/s", "2.454s 2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s Raspberry Pi 3 running", "bs= 32 count=1048576 0.574s 0.548us/record 58.435MB/s bs= 64 count=1048576 0.573s 0.546us/record 117.174MB/s bs=", "bs= 4096 count= 262144 0.767s 2.926us/record 1399.933MB/s bs= 8192 count= 262144 1.018s 3.883us/record", "count= 4096 2.036s 496.960us/record 1054.989MB/s bs=1048576 count= 2048 2.070s 1010.869us/record 1037.301MB/s bs=2097152 count=", "* 1e6 / count, bs * count / 1e6 / seconds)) bs *=", "93.955MB/s bs= 1024 count= 131072 0.707s 5.392us/record 189.911MB/s bs= 2048 count= 131072 0.751s", "$ ./bench_dd.py bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s bs= 2 count= 524288 1.038s", "131072 count= 16384 1.437s 87.693us/record 1494.671MB/s bs= 262144 count= 8192 1.426s 174.119us/record 1505.548MB/s", "21.972MB/s bs= 128 count= 262144 0.758s 2.892us/record 44.258MB/s bs= 256 count= 262144 0.760s", "8 count= 262144 0.581s 
2.215us/record 3.611MB/s bs= 16 count= 262144 0.579s 2.210us/record 7.239MB/s", "print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s' % (bs, count, seconds, seconds * 1e6 /", "2048 count= 262144 0.656s 2.501us/record 818.834MB/s bs= 4096 count= 262144 0.767s 2.926us/record 1399.933MB/s", "131072 1.504s 11.471us/record 1428.238MB/s bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s bs= 65536", "1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608 count= 1024 6.307s 6159.189us/record", "count= 256 1.444s 5642.461us/record 1486.693MB/s ================================================================ HP e8300, CPU i7-3770 freebsd13% ./bench_dd.py bs=", "count= 131072 1.385s 10.567us/record 3100.959MB/s bs= 65536 count= 65536 1.189s 18.144us/record 3611.984MB/s bs=", "3.799us/record 2156.141MB/s bs= 16384 count= 262144 1.627s 6.208us/record 2639.224MB/s bs= 32768 count= 131072", "count= 262144 0.725s 2.767us/record 46.261MB/s bs= 256 count= 262144 0.794s 3.028us/record 84.557MB/s bs=", "366.773MB/s bs= 2048 count= 262144 0.785s 2.993us/record 684.160MB/s bs= 4096 count= 262144 0.968s", "count=1048576 0.572s 0.546us/record 29.329MB/s bs= 32 count=1048576 0.574s 0.548us/record 58.435MB/s bs= 64 count=1048576", "$ ./bench_dd.py bs= 1 count=1048576 2.171s 2.070us/record 0.483MB/s bs= 2 count= 524288 1.069s", "2048 2.070s 1010.869us/record 1037.301MB/s bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s bs=4194304 count= 512", "524288 0.745s 1.421us/record 90.060MB/s bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s bs= 512", "bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s bs= 131072 count= 16384 1.854s 113.177us/record", "32768 count= 131072 1.001s 7.638us/record 4289.905MB/s bs= 65536 count= 65536 0.975s 14.882us/record 4403.740MB/s", "count= 65536 1.343s 20.487us/record 799.712MB/s bs= 32768 count= 32768 1.105s 33.717us/record 971.844MB/s bs=", "count= 8192 2.103s 256.698us/record 32678.930MB/s debian11$ ./bench_dd.py bs= 1 count=1048576 0.558s 0.532us/record 1.880MB/s", "512 count=1048576 0.583s 0.556us/record 921.523MB/s bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048", "256 count=1048576 0.565s 0.538us/record 475.482MB/s bs= 512 count=1048576 0.583s 0.556us/record 921.523MB/s bs= 1024", "1158.110MB/s bs= 262144 count= 8192 1.801s 219.850us/record 1192.377MB/s bs= 524288 count= 4096 1.796s", "1434.649MB/s bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s bs= 131072 count= 16384 1.437s", "4096 count= 262144 0.886s 3.378us/record 1212.454MB/s bs= 8192 count= 262144 1.406s 5.365us/record 1527.034MB/s", "bs= 256 count= 262144 0.760s 2.899us/record 88.300MB/s bs= 512 count= 262144 0.768s 2.930us/record", "2.187us/record 1.829MB/s bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s bs= 16 count= 262144", "count= 262144 1.125s 4.290us/record 3819.446MB/s bs= 32768 count= 131072 1.001s 7.638us/record 4289.905MB/s bs=", "1195.511MB/s bs=1048576 count= 2048 1.972s 963.125us/record 1088.723MB/s bs=2097152 count= 1024 2.151s 2100.605us/record 998.356MB/s", "5.776us/record 2836.329MB/s bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s bs= 65536 count= 65536", "27.978us/record 4684.865MB/s bs= 262144 count= 32768 2.088s 63.717us/record 4114.190MB/s bs= 524288 count= 16384", "running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 1.507s", "2048 1.972s 963.125us/record 1088.723MB/s bs=2097152 count= 1024 2.151s 2100.605us/record 998.356MB/s bs=4194304 
count= 512", "1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s bs= 4096 count=1048576 0.701s 0.669us/record 6126.015MB/s", "128 count= 65536 0.636s 9.700us/record 13.195MB/s bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s", "bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s bs= 2 count= 524288 0.729s 1.390us/record 1.439MB/s", "0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s bs= 4096 count=1048576 0.701s 0.669us/record", "4096 1.415s 345.540us/record 1517.302MB/s bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s bs=2097152 count= 1024", "65536 0.975s 14.882us/record 4403.740MB/s bs= 131072 count= 65536 1.834s 27.978us/record 4684.865MB/s bs= 262144", "262144 0.524s 1.999us/record 16.006MB/s bs= 64 count= 262144 0.692s 2.640us/record 24.246MB/s bs= 128", "52.854MB/s bs= 1024 count= 65536 0.645s 9.840us/record 104.064MB/s bs= 2048 count= 65536 0.655s", "* 1024 * 8: args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d'", "count= 131072 0.704s 5.373us/record 11.911MB/s bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s bs=", "32768 count= 65536 1.511s 23.059us/record 1421.036MB/s bs= 65536 count= 32768 2.009s 61.321us/record 1068.740MB/s", "113.374us/record 1156.103MB/s bs= 262144 count= 8192 2.055s 250.829us/record 1045.111MB/s bs= 524288 count= 4096", "$ python3 bench_dd.py bs= 1 count=1048576 1.067s 1.018us/record 0.982MB/s bs= 2 count= 524288", "799.712MB/s bs= 32768 count= 32768 1.105s 33.717us/record 971.844MB/s bs= 65536 count= 16384 0.987s", "0.645s 9.840us/record 104.064MB/s bs= 2048 count= 65536 0.655s 10.002us/record 204.760MB/s bs= 4096 count=", "2 count= 524288 1.069s 2.039us/record 0.981MB/s bs= 4 count= 262144 0.543s 2.071us/record 1.931MB/s", "count= 65536 0.636s 9.700us/record 13.195MB/s bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s bs=", "0.741MB/s bs= 8 count= 131072 0.682s 5.202us/record 1.538MB/s bs= 16 count= 131072 0.719s", "bs=4194304 count= 1024 2.454s 2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s Raspberry", "0.550us/record 465.528MB/s bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s bs= 1024 count=1048576 0.591s 0.564us/record", "0.550s 0.524us/record 3.814MB/s bs= 4 count=1048576 0.551s 0.526us/record 7.611MB/s bs= 8 count=1048576 0.550s", "7266.406us/record 1154.437MB/s Raspberry Pi 4 running Debian 11 arm64, kernel 5.10 $ ./bench_dd.py", "Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 10.017s 9.553us/record", "bs= 8 count= 524288 0.740s 1.411us/record 5.670MB/s bs= 16 count= 524288 0.746s 1.423us/record", "0.768s 2.930us/record 174.728MB/s bs= 1024 count= 262144 0.795s 3.034us/record 337.543MB/s bs= 2048 count=", "131072 0.704s 5.373us/record 11.911MB/s bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s bs= 256", "bs= 262144 count= 8192 1.257s 153.500us/record 1707.781MB/s bs= 524288 count= 4096 1.303s 318.062us/record", "2836.329MB/s bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s bs= 65536 count= 65536 1.116s", "0.573s 0.547us/record 3.658MB/s bs= 4 count=1048576 0.565s 0.539us/record 7.418MB/s bs= 8 count=1048576 0.575s", "0.981MB/s bs= 4 count= 262144 0.543s 2.071us/record 1.931MB/s bs= 8 count= 262144 0.539s", "python3 bench_dd.py bs= 1 count=1048576 1.067s 1.018us/record 0.982MB/s bs= 2 count= 524288 0.529s", "45.614us/record 22988.023MB/s bs=2097152 count= 16384 1.487s 90.750us/record 23109.237MB/s bs=4194304 count= 8192 1.474s 179.918us/record", 
"43.706us/record 1499.482MB/s bs= 131072 count= 16384 1.437s 87.693us/record 1494.671MB/s bs= 262144 count= 8192", "0.753s 1.437us/record 1.392MB/s bs= 4 count= 524288 0.757s 1.444us/record 2.770MB/s bs= 8 count=", "65536 1.116s 17.026us/record 3849.261MB/s bs= 131072 count= 32768 1.052s 32.093us/record 4084.183MB/s bs= 262144", "16384 count= 131072 1.504s 11.471us/record 1428.238MB/s bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s", "262144 count= 16384 1.301s 79.400us/record 3301.561MB/s bs= 524288 count= 8192 1.369s 167.107us/record 3137.440MB/s", "bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s bs= 64 count=1048576 0.553s 0.527us/record 121.398MB/s bs=", "512 count= 524288 0.943s 1.799us/record 284.672MB/s bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s", "8 count= 262144 0.539s 2.058us/record 3.888MB/s bs= 16 count= 262144 0.543s 2.070us/record 7.730MB/s", "1024 2.151s 2100.605us/record 998.356MB/s bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count= 256", "count= 262144 0.894s 3.411us/record 300.221MB/s bs= 2048 count= 262144 0.984s 3.755us/record 545.461MB/s bs=", "bs= 256 count= 262144 0.794s 3.028us/record 84.557MB/s bs= 512 count= 262144 0.773s 2.951us/record", "1.406s 5.365us/record 1527.034MB/s bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s bs= 32768 count=", "1391.488MB/s bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s bs=8388608 count= 256 1.583s 6185.391us/record 1356.197MB/s", "1486.693MB/s ================================================================ HP e8300, CPU i7-3770 freebsd13% ./bench_dd.py bs= 1 count=1048576 0.728s 0.694us/record", "bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s bs= 2048 count= 262144 0.565s 2.155us/record", "%9.3fMB/s' % (bs, count, seconds, seconds * 1e6 / count, bs * count", "529.725MB/s bs= 2048 count= 262144 0.565s 2.155us/record 950.259MB/s bs= 4096 count= 262144 0.671s", "8192 count= 131072 1.038s 7.916us/record 1034.902MB/s bs= 16384 count= 65536 0.833s 12.712us/record 1288.837MB/s", "2.041s 996.766us/record 2103.957MB/s bs=4194304 count= 1024 2.441s 2383.790us/record 1759.511MB/s bs=8388608 count= 512 2.690s", "0.886s 0.845us/record 19391.838MB/s bs= 32768 count=1048576 1.414s 1.349us/record 24291.204MB/s bs= 65536 count= 524288", "count= 262144 1.415s 5.397us/record 0.741MB/s bs= 8 count= 131072 0.682s 5.202us/record 1.538MB/s bs=", "count= 262144 0.654s 2.494us/record 51.329MB/s bs= 256 count= 262144 0.653s 2.492us/record 102.746MB/s bs=", "1421.036MB/s bs= 65536 count= 32768 2.009s 61.321us/record 1068.740MB/s bs= 131072 count= 16384 1.858s", "count= 262144 0.785s 2.993us/record 684.160MB/s bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s bs=", "65536 count= 65536 1.365s 20.821us/record 3147.534MB/s bs= 131072 count= 32768 1.324s 40.391us/record 3245.109MB/s", "22626.825MB/s bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s bs=2097152 count= 16384 1.487s 90.750us/record 23109.237MB/s", "count= 131072 0.751s 5.728us/record 357.517MB/s bs= 4096 count= 131072 0.802s 6.116us/record 669.720MB/s bs=", "2048 count= 262144 0.565s 2.155us/record 950.259MB/s bs= 4096 count= 262144 0.671s 2.559us/record 1600.774MB/s", "count= 2048 2.197s 1072.520us/record 1955.351MB/s bs=4194304 count= 1024 2.454s 2396.406us/record 1750.247MB/s bs=8388608 count=", "6.663MB/s bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s bs= 256 count= 65536 0.634s", "524288 count= 4096 1.796s 438.547us/record 1195.511MB/s bs=1048576 count= 2048 1.972s 963.125us/record 
1088.723MB/s bs=2097152", "33.717us/record 971.844MB/s bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s bs= 131072 count= 16384", "0.558s 0.532us/record 1.880MB/s bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s bs= 4 count=1048576 0.551s", "0.542us/record 236.122MB/s bs= 256 count=1048576 0.577s 0.550us/record 465.528MB/s bs= 512 count=1048576 0.585s 0.558us/record", "bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s bs= 256 count= 65536 0.634s 9.667us/record", "14.882us/record 4403.740MB/s bs= 131072 count= 65536 1.834s 27.978us/record 4684.865MB/s bs= 262144 count= 32768", "1.829MB/s bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s bs= 16 count= 262144 0.579s", "256 count=1048576 0.577s 0.550us/record 465.528MB/s bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s bs= 1024", "0.582s 2.221us/record 14.405MB/s bs= 64 count= 262144 0.767s 2.926us/record 21.874MB/s bs= 128 count=", "0.629s 9.605us/record 3.332MB/s bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128 count=", "73.936us/record 1772.773MB/s bs= 262144 count= 8192 1.185s 144.619us/record 1812.651MB/s bs= 524288 count= 4096", "0.552s 2.105us/record 60.802MB/s bs= 256 count= 262144 0.557s 2.126us/record 120.423MB/s bs= 512 count=", "count=1048576 1.067s 1.018us/record 0.982MB/s bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s bs= 4", "2.016MB/s bs= 8 count= 262144 0.520s 1.982us/record 4.036MB/s bs= 16 count= 262144 0.524s", "count= 524288 0.533s 1.016us/record 15.741MB/s bs= 32 count= 524288 0.537s 1.023us/record 31.265MB/s bs=", "bs= 512 count= 131072 0.714s 5.449us/record 93.955MB/s bs= 1024 count= 131072 0.707s 5.392us/record", "kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 5.409s 5.159us/record 0.194MB/s bs= 2 count=", "2.347s 143.225us/record 3660.587MB/s bs=1048576 count= 8192 3.553s 433.748us/record 2417.480MB/s bs=2097152 count= 4096 5.754s", "s, ', message): seconds = float(m.group(1)) elif m := re.search('bytes transferred in (.*?)", "5253.455us/record 1596.779MB/s Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $", "3.883us/record 2109.512MB/s bs= 16384 count= 131072 0.757s 5.776us/record 2836.329MB/s bs= 32768 count= 131072", "bs= 524288 count= 4096 1.303s 318.062us/record 1648.385MB/s bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s", "bs=2097152 count= 1024 2.151s 2100.605us/record 998.356MB/s bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608", "5.11 $ ./bench_dd.py bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s bs= 2 count= 524288", "count= 524288 1.167s 2.226us/record 29446.678MB/s bs= 131072 count= 262144 1.049s 4.001us/record 32757.097MB/s bs=", "697.305us/record 1503.756MB/s bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s bs=4194304 count= 512 1.442s 2815.664us/record", "bs *= 2 if seconds > 1: count /= 2 result = \"\"\"", "bs= 4 count= 262144 0.520s 1.984us/record 2.016MB/s bs= 8 count= 262144 0.520s 1.982us/record", "sdram_freq=400 over_voltage=0 over_voltage_sdram_p=0 over_voltage_sdram_i=0 over_voltage_sdram_c=0 $ ./bench_dd.py bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s", "63.717us/record 4114.190MB/s bs= 524288 count= 16384 2.347s 143.225us/record 3660.587MB/s bs=1048576 count= 8192 3.553s", "bs= 2048 count= 262144 0.785s 2.993us/record 684.160MB/s bs= 4096 count= 262144 0.968s 3.694us/record", "1 count=1048576 2.071s 1.975us/record 0.506MB/s bs= 2 count= 524288 1.038s 1.979us/record 1.011MB/s bs=", "bs= 16 count= 524288 0.533s 1.016us/record 15.741MB/s bs= 32 count= 524288 0.537s 
1.023us/record", "bs= 128 count= 524288 0.745s 1.421us/record 90.060MB/s bs= 256 count= 524288 0.752s 1.434us/record", "count= 131072 1.468s 11.200us/record 23406.614MB/s bs= 524288 count= 65536 1.519s 23.171us/record 22626.825MB/s bs=1048576", "32 count= 262144 0.524s 1.999us/record 16.006MB/s bs= 64 count= 262144 0.692s 2.640us/record 24.246MB/s", "131072 0.682s 5.202us/record 1.538MB/s bs= 16 count= 131072 0.719s 5.483us/record 2.918MB/s bs= 32", "6.208us/record 2639.224MB/s bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s bs= 65536 count= 65536", "2.505s 9.554us/record 0.419MB/s bs= 8 count= 131072 1.251s 9.546us/record 0.838MB/s bs= 16 count=", "bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s bs= 8 count= 262144 0.581s 2.215us/record", "0.556us/record 921.523MB/s bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record", "bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s bs= 2 count= 524288 1.155s 2.203us/record 0.908MB/s", "524288 0.537s 1.025us/record 7.805MB/s bs= 16 count= 524288 0.533s 1.016us/record 15.741MB/s bs= 32", "0.855s 3.262us/record 2.453MB/s bs= 16 count= 262144 0.831s 3.171us/record 5.046MB/s bs= 32 count=", "count= 65536 1.519s 23.171us/record 22626.825MB/s bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s bs=2097152 count=", "1428.960MB/s bs=2097152 count= 1024 1.839s 1796.094us/record 1167.618MB/s bs=4194304 count= 512 1.833s 3580.527us/record 1171.421MB/s", "971.844MB/s bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s bs= 131072 count= 16384 1.854s", "3.553s 433.748us/record 2417.480MB/s bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s", "4096 1.862s 454.695us/record 2306.109MB/s bs=2097152 count= 2048 2.197s 1072.520us/record 1955.351MB/s bs=4194304 count= 1024", "1 count=1048576 0.728s 0.694us/record 1.440MB/s bs= 2 count=1048576 0.573s 0.547us/record 3.658MB/s bs= 4", "262144 0.671s 2.559us/record 1600.774MB/s bs= 8192 count= 262144 0.996s 3.799us/record 2156.141MB/s bs= 16384", "count= 262144 0.656s 2.501us/record 818.834MB/s bs= 4096 count= 262144 0.767s 2.926us/record 1399.933MB/s bs=", "0.863s 3.293us/record 77.746MB/s bs= 512 count= 262144 0.844s 3.220us/record 159.029MB/s bs= 1024 count=", "1507.129us/record 1391.488MB/s bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s bs=8388608 count= 256 1.583s 6185.391us/record", "bs= 64 count= 524288 0.738s 1.408us/record 45.465MB/s bs= 128 count= 524288 0.745s 1.421us/record", "2.306s 9005.898us/record 931.457MB/s Raspberry Pi 3 running Debian 11 arm64, kernel 5.10 $", "Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ ./bench_dd.py bs=", "1.860s 7266.406us/record 1154.437MB/s Raspberry Pi 4 running Debian 11 arm64, kernel 5.10 $", "transferred in (.*?) 
secs', message): seconds = float(m.group(1)) else: print('Unable to parse dd", "34507.742MB/s bs= 524288 count= 131072 1.938s 14.784us/record 35462.791MB/s bs=1048576 count= 65536 1.954s 29.814us/record", "16384 1.155s 70.499us/record 3718.413MB/s bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s bs=1048576 count=", "count= 262144 0.984s 3.755us/record 545.461MB/s bs= 4096 count= 262144 1.106s 4.219us/record 970.906MB/s bs=", "524288 count= 4096 1.091s 266.418us/record 1967.912MB/s bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s bs=2097152", "921.523MB/s bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s", "3.209us/record 0.623MB/s bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s bs= 8 count= 262144", "1.415s 5.397us/record 0.741MB/s bs= 8 count= 131072 0.682s 5.202us/record 1.538MB/s bs= 16 count=", "178.504MB/s bs= 512 count= 524288 0.780s 1.488us/record 344.122MB/s bs= 1024 count= 524288 0.831s", "4.036MB/s bs= 16 count= 262144 0.524s 2.001us/record 7.997MB/s bs= 32 count= 262144 0.524s", "7.597us/record 34507.742MB/s bs= 524288 count= 131072 1.938s 14.784us/record 35462.791MB/s bs=1048576 count= 65536 1.954s", "count /= 2 result = \"\"\" Raspberry Pi 4 running FreeBSD 13-RELEASE: freebsd%", "bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s bs= 64 count= 262144 0.767s 2.926us/record", "32768 count= 65536 1.497s 22.840us/record 1434.649MB/s bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s", "bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s bs=", "0.870s 0.830us/record 9870.674MB/s bs= 16384 count=1048576 1.191s 1.136us/record 14427.529MB/s bs= 32768 count= 524288", "2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s Raspberry Pi 3 running Ubuntu", "131072 count= 262144 1.049s 4.001us/record 32757.097MB/s bs= 262144 count= 131072 0.996s 7.597us/record 34507.742MB/s", "4114.190MB/s bs= 524288 count= 16384 2.347s 143.225us/record 3660.587MB/s bs=1048576 count= 8192 3.553s 433.748us/record", "16384 2.347s 143.225us/record 3660.587MB/s bs=1048576 count= 8192 3.553s 433.748us/record 2417.480MB/s bs=2097152 count= 4096", "1.834s 27.978us/record 4684.865MB/s bs= 262144 count= 32768 2.088s 63.717us/record 4114.190MB/s bs= 524288 count=", "server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s", "262144 1.106s 4.219us/record 970.906MB/s bs= 8192 count= 131072 0.675s 5.148us/record 1591.372MB/s bs= 16384", "950.259MB/s bs= 4096 count= 262144 0.671s 2.559us/record 1600.774MB/s bs= 8192 count= 262144 0.996s", "8 count=1048576 0.550s 0.525us/record 15.252MB/s bs= 16 count=1048576 0.550s 0.524us/record 30.509MB/s bs= 32", "GNU/Linux 10 armv7, kernel 5.10 $ python3 bench_dd.py bs= 1 count=1048576 1.067s 1.018us/record", "3350.625us/record 1251.798MB/s bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s ================================================================ Raspberry Pi 2 running", "6.116us/record 669.720MB/s bs= 8192 count= 131072 1.038s 7.916us/record 1034.902MB/s bs= 16384 count= 65536", "bs= 65536 count= 262144 0.822s 3.135us/record 20902.551MB/s bs= 131072 count= 262144 1.496s 5.705us/record", "while bs <= 1024 * 1024 * 8: args = ['dd', 'if=/dev/zero', 'of=/dev/null',", "bs= 16 count= 262144 0.831s 3.171us/record 5.046MB/s bs= 32 count= 262144 0.813s 3.101us/record", "32768 2.088s 63.717us/record 4114.190MB/s bs= 524288 count= 
16384 2.347s 143.225us/record 3660.587MB/s bs=1048576 count=", "1.009us/record 1.982MB/s bs= 4 count= 524288 0.540s 1.030us/record 3.885MB/s bs= 8 count= 524288", "1.437us/record 1.392MB/s bs= 4 count= 524288 0.757s 1.444us/record 2.770MB/s bs= 8 count= 524288", "262144 0.543s 2.070us/record 7.730MB/s bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s bs= 64", "1.833s 3580.527us/record 1171.421MB/s bs=8388608 count= 256 1.860s 7266.406us/record 1154.437MB/s Raspberry Pi 4 running", "./bench_dd.py bs= 1 count=1048576 0.558s 0.532us/record 1.880MB/s bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s", "8 count= 262144 0.855s 3.262us/record 2.453MB/s bs= 16 count= 262144 0.831s 3.171us/record 5.046MB/s", "131072 count= 262144 1.496s 5.705us/record 22973.575MB/s bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s", "1033.159MB/s bs= 16384 count= 65536 0.771s 11.765us/record 1392.607MB/s bs= 32768 count= 65536 1.511s", "count= 65536 1.245s 19.003us/record 1724.402MB/s bs= 65536 count= 32768 1.227s 37.450us/record 1749.962MB/s bs=", "0.543s 2.071us/record 1.931MB/s bs= 8 count= 262144 0.539s 2.058us/record 3.888MB/s bs= 16 count=", "465.528MB/s bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s bs= 1024 count=1048576 0.591s 0.564us/record 1815.495MB/s", "Debian 11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.171s 2.070us/record 0.483MB/s", "262144 0.692s 2.640us/record 24.246MB/s bs= 128 count= 262144 0.654s 2.494us/record 51.329MB/s bs= 256", "3.658MB/s bs= 4 count=1048576 0.565s 0.539us/record 7.418MB/s bs= 8 count=1048576 0.575s 0.548us/record 14.595MB/s", "1024 count= 524288 1.013s 1.933us/record 529.725MB/s bs= 2048 count= 262144 0.565s 2.155us/record 950.259MB/s", "32768 1.978s 60.353us/record 34748.329MB/s bs=4194304 count= 16384 2.007s 122.520us/record 34233.639MB/s bs=8388608 count= 8192", "bs= 131072 count= 16384 1.437s 87.693us/record 1494.671MB/s bs= 262144 count= 8192 1.426s 174.119us/record", "0.539s 2.058us/record 3.888MB/s bs= 16 count= 262144 0.543s 2.070us/record 7.730MB/s bs= 32 count=", "1.414s 1.349us/record 24291.204MB/s bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s bs= 131072 count=", "0.546us/record 29.329MB/s bs= 32 count=1048576 0.574s 0.548us/record 58.435MB/s bs= 64 count=1048576 0.573s 0.546us/record", "bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s bs= 256 count= 131072 0.690s 5.262us/record", "4096 count=1048576 0.701s 0.669us/record 6126.015MB/s bs= 8192 count=1048576 0.870s 0.830us/record 9870.674MB/s bs= 16384", "1242.714MB/s ================================================================ Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $", "5.365us/record 1527.034MB/s bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s bs= 32768 count= 65536", "message): seconds = float(m.group(1)) elif m := re.search('bytes transferred in (.*?) 
secs', message):", "bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s Raspberry", "count= 32768 1.282s 39.113us/record 1675.575MB/s bs= 131072 count= 16384 1.211s 73.936us/record 1772.773MB/s bs=", "4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608 count= 1024", "bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s", "1.488us/record 344.122MB/s bs= 1024 count= 524288 0.831s 1.585us/record 645.859MB/s bs= 2048 count= 524288", "'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count] result = subprocess.run(args, capture_output=True) seconds =", "23109.237MB/s bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s bs=8388608 count= 4096 1.588s 387.625us/record 21641.067MB/s", "count= 1024 2.151s 2100.605us/record 998.356MB/s bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count=", "262144 0.848s 3.236us/record 19.779MB/s bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s bs= 256", "173.523MB/s bs= 1024 count= 262144 0.799s 3.050us/record 335.763MB/s bs= 2048 count= 262144 1.093s", "bs= 16 count= 262144 0.579s 2.210us/record 7.239MB/s bs= 32 count= 262144 0.582s 2.221us/record", "3245.109MB/s bs= 262144 count= 16384 1.301s 79.400us/record 3301.561MB/s bs= 524288 count= 8192 1.369s", "5.503MB/s bs= 16 count= 524288 0.763s 1.456us/record 10.992MB/s bs= 32 count= 524288 0.767s", "0.532us/record 1.880MB/s bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s bs= 4 count=1048576 0.551s 0.526us/record", "1.468s 11.200us/record 23406.614MB/s bs= 524288 count= 65536 1.519s 23.171us/record 22626.825MB/s bs=1048576 count= 32768", "0.565s 0.538us/record 475.482MB/s bs= 512 count=1048576 0.583s 0.556us/record 921.523MB/s bs= 1024 count=1048576 0.608s", "./bench_dd.py bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s bs= 2 count= 524288 0.729s 1.390us/record", "8192 count= 262144 1.612s 6.148us/record 1332.376MB/s bs= 16384 count= 131072 1.504s 11.471us/record 1428.238MB/s", "512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s Raspberry Pi 3", "2 count=1048576 0.573s 0.547us/record 3.658MB/s bs= 4 count=1048576 0.565s 0.539us/record 7.418MB/s bs= 8", "64 count=1048576 0.553s 0.527us/record 121.398MB/s bs= 128 count=1048576 0.556s 0.530us/record 241.471MB/s bs= 256", "5.262us/record 48.655MB/s bs= 512 count= 131072 0.714s 5.449us/record 93.955MB/s bs= 1024 count= 131072", "count=1048576 0.583s 0.556us/record 921.523MB/s bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576", "2.494us/record 51.329MB/s bs= 256 count= 262144 0.653s 2.492us/record 102.746MB/s bs= 512 count= 262144", "count= 524288 0.746s 1.423us/record 11.246MB/s bs= 32 count= 524288 0.737s 1.407us/record 22.750MB/s bs=", "1501.348MB/s bs=4194304 count= 512 1.442s 2815.664us/record 1489.632MB/s bs=8388608 count= 256 1.444s 5642.461us/record 1486.693MB/s", "seconds, seconds * 1e6 / count, bs * count / 1e6 / seconds))", "0.540s 1.030us/record 3.885MB/s bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s bs= 16 count=", "15.252MB/s bs= 16 count=1048576 0.550s 0.524us/record 30.509MB/s bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s", "1.185s 144.619us/record 1812.651MB/s bs= 524288 count= 4096 1.091s 266.418us/record 1967.912MB/s bs=1048576 count= 2048", "16384 count= 262144 1.125s 4.290us/record 3819.446MB/s bs= 32768 count= 131072 1.001s 7.638us/record 
4289.905MB/s", "6159.189us/record 1361.966MB/s Raspberry Pi 4 running Ubuntu server 21.04 arm64, kernel 5.11 $", "16384 count= 65536 0.833s 12.712us/record 1288.837MB/s bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s", "0.557s 2.126us/record 120.423MB/s bs= 512 count= 262144 0.572s 2.184us/record 234.471MB/s bs= 1024 count=", "1192.377MB/s bs= 524288 count= 4096 1.796s 438.547us/record 1195.511MB/s bs=1048576 count= 2048 1.972s 963.125us/record", "65536 count= 32768 1.227s 37.450us/record 1749.962MB/s bs= 131072 count= 16384 1.264s 77.148us/record 1698.972MB/s", "1.252s 9.549us/record 3431.527MB/s bs= 65536 count= 65536 1.116s 17.026us/record 3849.261MB/s bs= 131072 count=", "1.432s 43.706us/record 1499.482MB/s bs= 131072 count= 16384 1.437s 87.693us/record 1494.671MB/s bs= 262144 count=", "9.623us/record 1.663MB/s bs= 32 count= 65536 0.629s 9.605us/record 3.332MB/s bs= 64 count= 65536", "2.221us/record 14.405MB/s bs= 64 count= 262144 0.767s 2.926us/record 21.874MB/s bs= 128 count= 262144", "174.119us/record 1505.548MB/s bs= 524288 count= 4096 1.415s 345.540us/record 1517.302MB/s bs=1048576 count= 2048 1.428s", "3.101us/record 10.321MB/s bs= 64 count= 262144 0.848s 3.236us/record 19.779MB/s bs= 128 count= 262144", "1.125s 4.290us/record 3819.446MB/s bs= 32768 count= 131072 1.001s 7.638us/record 4289.905MB/s bs= 65536 count=", "7.239MB/s bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s bs= 64 count= 262144 0.767s", "bs= 2 count= 524288 1.069s 2.039us/record 0.981MB/s bs= 4 count= 262144 0.543s 2.071us/record", "3.332MB/s bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128 count= 65536 0.636s", "(.*?) s, ', message): seconds = float(m.group(1)) elif m := re.search('bytes transferred in", "bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s ================================================================ Raspberry Pi 2 running Raspbian GNU/Linux", "1955.351MB/s bs=4194304 count= 1024 2.454s 2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s", "count=1048576 10.017s 9.553us/record 0.105MB/s bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s bs= 4", "16.006MB/s bs= 64 count= 262144 0.692s 2.640us/record 24.246MB/s bs= 128 count= 262144 0.654s", "count=1048576 0.568s 0.542us/record 236.122MB/s bs= 256 count=1048576 0.577s 0.550us/record 465.528MB/s bs= 512 count=1048576", "16 count= 262144 0.543s 2.070us/record 7.730MB/s bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s", "1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s", "3431.527MB/s bs= 65536 count= 65536 1.116s 17.026us/record 3849.261MB/s bs= 131072 count= 32768 1.052s", "262144 0.794s 3.028us/record 84.557MB/s bs= 512 count= 262144 0.773s 2.951us/record 173.523MB/s bs= 1024", "1.915us/record 17109.038MB/s bs= 65536 count= 262144 0.822s 3.135us/record 20902.551MB/s bs= 131072 count= 262144", "131072 1.385s 10.567us/record 3100.959MB/s bs= 65536 count= 65536 1.189s 18.144us/record 3611.984MB/s bs= 131072", "1596.779MB/s Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ python3", "0.672s 2.564us/record 199.718MB/s bs= 1024 count= 262144 0.732s 2.792us/record 366.773MB/s bs= 2048 count=", "1.543s 1507.129us/record 1391.488MB/s bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s bs=8388608 count= 256 1.583s", "bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s bs=1048576 count= 4096 1.543s 376.625us/record 2784.138MB/s", "5.705us/record 22973.575MB/s 
bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s bs= 524288 count= 65536", "running FreeBSD 13-RELEASE: freebsd% python3.9 bench_dd.py bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s bs=", "1.392MB/s bs= 4 count= 524288 0.757s 1.444us/record 2.770MB/s bs= 8 count= 524288 0.762s", "count= 262144 0.579s 2.210us/record 7.239MB/s bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s bs=", "count= 1024 2.084s 2035.068us/record 1030.507MB/s bs=4194304 count= 512 2.097s 4094.844us/record 1024.289MB/s bs=8388608 count=", "\"\"\" Raspberry Pi 4 running FreeBSD 13-RELEASE: freebsd% python3.9 bench_dd.py bs= 1 count=1048576", "4 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ python3 bench_dd.py bs= 1", "count= 131072 1.038s 7.916us/record 1034.902MB/s bs= 16384 count= 65536 0.833s 12.712us/record 1288.837MB/s bs=", "29446.678MB/s bs= 131072 count= 262144 1.049s 4.001us/record 32757.097MB/s bs= 262144 count= 131072 0.996s", "19.779MB/s bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s bs= 256 count= 262144 0.863s", "bs= 8 count= 262144 0.520s 1.982us/record 4.036MB/s bs= 16 count= 262144 0.524s 2.001us/record", "262144 count= 16384 1.155s 70.499us/record 3718.413MB/s bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s", "318.062us/record 1648.385MB/s bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s bs=2097152 count= 1024 1.839s 1796.094us/record", "./bench_dd.py bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s bs= 2 count= 524288 1.155s 2.203us/record", "1.092s 133.292us/record 3933.372MB/s bs=1048576 count= 4096 2.321s 566.655us/record 1850.465MB/s bs=2097152 count= 2048 2.984s", "2.564us/record 199.718MB/s bs= 1024 count= 262144 0.732s 2.792us/record 366.773MB/s bs= 2048 count= 262144", "16 count= 524288 0.533s 1.016us/record 15.741MB/s bs= 32 count= 524288 0.537s 1.023us/record 31.265MB/s", "0.581s 2.215us/record 3.611MB/s bs= 16 count= 262144 0.579s 2.210us/record 7.239MB/s bs= 32 count=", "21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s bs=", "0.714s 5.449us/record 93.955MB/s bs= 1024 count= 131072 0.707s 5.392us/record 189.911MB/s bs= 2048 count=", "64 count= 262144 0.544s 2.077us/record 30.817MB/s bs= 128 count= 262144 0.552s 2.105us/record 60.802MB/s", "2.852MB/s bs= 8 count= 524288 0.740s 1.411us/record 5.670MB/s bs= 16 count= 524288 0.746s", "454.695us/record 2306.109MB/s bs=2097152 count= 2048 2.197s 1072.520us/record 1955.351MB/s bs=4194304 count= 1024 2.454s 2396.406us/record", "0.833s 12.712us/record 1288.837MB/s bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s bs= 65536 count=", "16 count= 262144 0.831s 3.171us/record 5.046MB/s bs= 32 count= 262144 0.813s 3.101us/record 10.321MB/s", "1.938s 14.784us/record 35462.791MB/s bs=1048576 count= 65536 1.954s 29.814us/record 35170.740MB/s bs=2097152 count= 32768 1.978s", "0.844s 3.220us/record 159.029MB/s bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s bs= 2048 count=", "bs= 32768 count=1048576 1.414s 1.349us/record 24291.204MB/s bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s", "count= 524288 0.897s 1.711us/record 37.394MB/s bs= 128 count= 524288 0.899s 1.715us/record 74.630MB/s bs=", "4 running FreeBSD 13-RELEASE: freebsd% python3.9 bench_dd.py bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s", "645.859MB/s bs= 2048 count= 524288 0.914s 1.742us/record 1175.405MB/s bs= 4096 count= 524288 1.096s", "2048 count= 524288 0.914s 1.742us/record 1175.405MB/s bs= 4096 count= 524288 1.096s 2.090us/record 1960.027MB/s", "= 1 
count = 1024 * 1024 while bs <= 1024 * 1024", "2 count=1048576 0.550s 0.524us/record 3.814MB/s bs= 4 count=1048576 0.551s 0.526us/record 7.611MB/s bs= 8", "count= 524288 0.745s 1.421us/record 90.060MB/s bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s bs=", "32678.930MB/s debian11$ ./bench_dd.py bs= 1 count=1048576 0.558s 0.532us/record 1.880MB/s bs= 2 count=1048576 0.550s", "9.605us/record 3.332MB/s bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128 count= 65536", "bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s bs= 4 count= 262144 2.505s 9.554us/record", "4084.183MB/s bs= 262144 count= 16384 1.045s 63.790us/record 4109.505MB/s bs= 524288 count= 8192 1.092s", "2.084s 2035.068us/record 1030.507MB/s bs=4194304 count= 512 2.097s 4094.844us/record 1024.289MB/s bs=8388608 count= 256 2.096s", "count= 262144 0.692s 2.640us/record 24.246MB/s bs= 128 count= 262144 0.654s 2.494us/record 51.329MB/s bs=", "./bench_dd.py bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s bs= 2 count= 524288 5.021s 9.577us/record", "bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s bs= 256 count= 262144 0.863s 3.293us/record", "count= 65536 1.365s 20.821us/record 3147.534MB/s bs= 131072 count= 32768 1.324s 40.391us/record 3245.109MB/s bs=", "131072 count= 16384 1.264s 77.148us/record 1698.972MB/s bs= 262144 count= 8192 1.257s 153.500us/record 1707.781MB/s", "566.655us/record 1850.465MB/s bs=2097152 count= 2048 2.984s 1457.168us/record 1439.197MB/s bs=4194304 count= 1024 3.431s 3350.625us/record", "11.246MB/s bs= 32 count= 524288 0.737s 1.407us/record 22.750MB/s bs= 64 count= 524288 0.738s", "8192 count= 131072 0.675s 5.148us/record 1591.372MB/s bs= 16384 count= 131072 0.917s 6.992us/record 2343.125MB/s", "1.030us/record 3.885MB/s bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s bs= 16 count= 524288", "bs= 128 count= 262144 0.654s 2.494us/record 51.329MB/s bs= 256 count= 262144 0.653s 2.492us/record", "bs= 4 count= 262144 2.505s 9.554us/record 0.419MB/s bs= 8 count= 131072 1.251s 9.546us/record", "bs= 4096 count= 262144 0.671s 2.559us/record 1600.774MB/s bs= 8192 count= 262144 0.996s 3.799us/record", "953.187MB/s bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s Raspberry Pi 3 running Debian 11", "2.926us/record 21.874MB/s bs= 128 count= 262144 0.725s 2.767us/record 46.261MB/s bs= 256 count= 262144", "1.324s 40.391us/record 3245.109MB/s bs= 262144 count= 16384 1.301s 79.400us/record 3301.561MB/s bs= 524288 count=", "0.547s 4.170us/record 982.276MB/s bs= 8192 count= 131072 1.039s 7.929us/record 1033.159MB/s bs= 16384 count=", "512 2.584s 5046.152us/record 1662.377MB/s Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel", "5.483us/record 2.918MB/s bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s bs= 64 count= 131072", "3 running Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576", "7.418MB/s bs= 8 count=1048576 0.575s 0.548us/record 14.595MB/s bs= 16 count=1048576 0.572s 0.546us/record 29.329MB/s", "bs= 32768 count= 131072 1.001s 7.638us/record 4289.905MB/s bs= 65536 count= 65536 0.975s 14.882us/record", "512 count= 524288 0.780s 1.488us/record 344.122MB/s bs= 1024 count= 524288 0.831s 1.585us/record 645.859MB/s", "2 result = \"\"\" Raspberry Pi 4 running FreeBSD 13-RELEASE: freebsd% python3.9 bench_dd.py", "5.159us/record 0.194MB/s bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s bs= 4 count= 262144", "0.704s 5.373us/record 11.911MB/s bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s bs= 256 count=", "2.441s 2383.790us/record 1759.511MB/s 
bs=8388608 count= 512 2.690s 5253.455us/record 1596.779MB/s Raspberry Pi 4 running", "524288 5.021s 9.577us/record 0.209MB/s bs= 4 count= 262144 2.505s 9.554us/record 0.419MB/s bs= 8", "2.077us/record 30.817MB/s bs= 128 count= 262144 0.552s 2.105us/record 60.802MB/s bs= 256 count= 262144", "arm_freq=1000 core_freq=500 sdram_freq=400 over_voltage=0 over_voltage_sdram_p=0 over_voltage_sdram_i=0 over_voltage_sdram_c=0 $ ./bench_dd.py bs= 1 count=1048576 2.071s", "bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s bs= 32768 count= 65536 1.245s 19.003us/record", "import re, subprocess bs = 1 count = 1024 * 1024 while bs", "0.711s 5.425us/record 23.593MB/s bs= 256 count= 131072 0.690s 5.262us/record 48.655MB/s bs= 512 count=", "0.751s 5.728us/record 357.517MB/s bs= 4096 count= 131072 0.802s 6.116us/record 669.720MB/s bs= 8192 count=", "524288 1.167s 2.226us/record 29446.678MB/s bs= 131072 count= 262144 1.049s 4.001us/record 32757.097MB/s bs= 262144", "1.096s 2.090us/record 1960.027MB/s bs= 8192 count= 262144 0.750s 2.861us/record 2863.609MB/s bs= 16384 count=", "bs= 2048 count= 131072 0.751s 5.728us/record 357.517MB/s bs= 4096 count= 131072 0.802s 6.116us/record", "count= 524288 1.155s 2.203us/record 0.908MB/s bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s bs=", "131072 count= 32768 1.130s 34.500us/record 3799.209MB/s bs= 262144 count= 16384 1.155s 70.499us/record 3718.413MB/s", "0.908MB/s bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s bs= 8 count= 262144 0.581s", "0.105MB/s bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s bs= 4 count= 262144 2.505s", "4 count= 524288 0.757s 1.444us/record 2.770MB/s bs= 8 count= 524288 0.762s 1.454us/record 5.503MB/s", "bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s Raspberry Pi 4 running Ubuntu server 21.04", "count= 32768 1.432s 43.706us/record 1499.482MB/s bs= 131072 count= 16384 1.437s 87.693us/record 1494.671MB/s bs=", "262144 0.579s 2.210us/record 7.239MB/s bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s bs= 64", "count= 65536 1.497s 22.840us/record 1434.649MB/s bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s bs=", "(bs, count, seconds, seconds * 1e6 / count, bs * count / 1e6", "1037.301MB/s bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s bs=4194304 count= 512 2.097s 4094.844us/record 1024.289MB/s", "1024 count= 262144 0.799s 3.050us/record 335.763MB/s bs= 2048 count= 262144 1.093s 4.170us/record 491.168MB/s", "0.758s 2.892us/record 44.258MB/s bs= 256 count= 262144 0.760s 2.899us/record 88.300MB/s bs= 512 count=", "262144 0.573s 2.187us/record 1.829MB/s bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s bs= 16", "684.160MB/s bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s bs= 8192 count= 262144 1.612s", "Raspbian GNU/Linux 10 armv7, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.294s 2.188us/record", "65536 0.833s 12.712us/record 1288.837MB/s bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s bs= 65536", "524288 count= 8192 1.369s 167.107us/record 3137.440MB/s bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s bs=2097152", "count= 8192 1.369s 167.107us/record 3137.440MB/s bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s bs=2097152 count=", "23406.614MB/s bs= 524288 count= 65536 1.519s 23.171us/record 22626.825MB/s bs=1048576 count= 32768 1.495s 45.614us/record", "20.212us/record 1621.207MB/s bs= 65536 count= 32768 1.282s 39.113us/record 1675.575MB/s bs= 131072 count= 16384", "65536 count= 65536 0.975s 14.882us/record 4403.740MB/s bs= 131072 count= 
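
# Note on the two regexes above: the GNU coreutils dd used on the Linux runs
# below reports "... copied, <seconds> s, ..." on stderr, while the BSD dd on
# the FreeBSD runs reports "... bytes transferred in <seconds> secs ...".
# The sample lines in this self-check are illustrative only (not captured from
# the machines below), and the function is never called by the loop above.
def _selfcheck_dd_parsers():
    gnu = '1048576 bytes (1.0 MB, 1.0 MiB) copied, 0.524 s, 2.0 MB/s'
    bsd = '1048576 bytes transferred in 0.524 secs (2000610 bytes/sec)'
    assert re.search('copied, (.*?) s, ', gnu).group(1) == '0.524'
    assert re.search('bytes transferred in (.*?) secs', bsd).group(1) == '0.524'

# The `result` string below collects the runs captured on each machine.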
result = """
[only the first line of each run is preserved below; the remaining per-block-size rows were garbled beyond recovery]

Raspberry Pi 4 running FreeBSD 13-RELEASE:
freebsd% python3.9 bench_dd.py
bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s
...

Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ python3 bench_dd.py
bs= 1 count=1048576 1.067s 1.018us/record 0.982MB/s
...

Raspberry Pi 4 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s
...

Raspberry Pi 4 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
bs= 1 count=1048576 5.409s 5.159us/record 0.194MB/s
...

================================================================
Raspberry Pi 3 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 1.507s 1.437us/record 0.696MB/s
...

Raspberry Pi 3 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 2.171s 2.070us/record 0.483MB/s
...

Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s
...

================================================================
Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s
...

Overclocking
https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2
arm_freq=1000
core_freq=500
sdram_freq=400
over_voltage=0
over_voltage_sdram_p=0
over_voltage_sdram_i=0
over_voltage_sdram_c=0
$ ./bench_dd.py
bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s
...

================================================================
HP e8300, CPU i7-3770
freebsd13% ./bench_dd.py
bs= 1 count=1048576 0.728s 0.694us/record 1.440MB/s
...
debian11$ ./bench_dd.py
bs= 1 count=1048576 0.558s 0.532us/record 1.880MB/s
...
"""
'if=/dev/zero',", "8192 count= 262144 0.750s 2.861us/record 2863.609MB/s bs= 16384 count= 262144 1.125s 4.290us/record 3819.446MB/s", "bs= 8192 count=1048576 0.716s 0.683us/record 12000.920MB/s bs= 16384 count=1048576 0.886s 0.845us/record 19391.838MB/s bs=", "2.951us/record 173.523MB/s bs= 1024 count= 262144 0.799s 3.050us/record 335.763MB/s bs= 2048 count= 262144", "2.767us/record 46.261MB/s bs= 256 count= 262144 0.794s 3.028us/record 84.557MB/s bs= 512 count= 262144", "bs= 4 count=1048576 0.551s 0.526us/record 7.611MB/s bs= 8 count=1048576 0.550s 0.525us/record 15.252MB/s bs=", "4096 1.796s 438.547us/record 1195.511MB/s bs=1048576 count= 2048 1.972s 963.125us/record 1088.723MB/s bs=2097152 count= 1024", "bs= 131072 count= 16384 1.854s 113.177us/record 1158.110MB/s bs= 262144 count= 8192 1.801s 219.850us/record", "0.194MB/s bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s bs= 4 count= 262144 1.415s", "3.431s 3350.625us/record 1251.798MB/s bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s ================================================================ Raspberry Pi 2", "9.553us/record 0.105MB/s bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s bs= 4 count= 262144", "0.982MB/s bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s bs= 4 count= 524288 0.540s", "bs= 32768 count= 65536 1.245s 19.003us/record 1724.402MB/s bs= 65536 count= 32768 1.227s 37.450us/record", "0.506MB/s bs= 2 count= 524288 1.038s 1.979us/record 1.011MB/s bs= 4 count= 262144 0.520s", "2048 count= 262144 0.785s 2.993us/record 684.160MB/s bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s", "running Ubuntu server 21.04 arm64, kernel 5.11 $ ./bench_dd.py bs= 1 count=1048576 10.017s", "524288 count= 8192 1.264s 154.328us/record 3397.221MB/s bs=1048576 count= 4096 1.543s 376.625us/record 2784.138MB/s bs=2097152", "32757.097MB/s bs= 262144 count= 131072 0.996s 7.597us/record 34507.742MB/s bs= 524288 count= 131072 1.938s", "count= 256 1.860s 7266.406us/record 1154.437MB/s Raspberry Pi 4 running Debian 11 arm64, kernel", "count= 524288 0.943s 1.799us/record 284.672MB/s bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s bs=", "1024 count= 262144 0.795s 3.034us/record 337.543MB/s bs= 2048 count= 262144 0.817s 3.117us/record 657.138MB/s", "count=1048576 3.307s 3.154us/record 0.317MB/s bs= 2 count= 524288 1.682s 3.209us/record 0.623MB/s bs= 4", "0.824s 3.144us/record 1.272MB/s bs= 8 count= 262144 0.855s 3.262us/record 2.453MB/s bs= 16 count=", "262144 count= 8192 1.257s 153.500us/record 1707.781MB/s bs= 524288 count= 4096 1.303s 318.062us/record 1648.385MB/s", "seconds)) bs *= 2 if seconds > 1: count /= 2 result =", "1.067s 1.018us/record 0.982MB/s bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s bs= 4 count=", ":= re.search('copied, (.*?) 
s, ', message): seconds = float(m.group(1)) elif m := re.search('bytes", "1054.989MB/s bs=1048576 count= 2048 2.070s 1010.869us/record 1037.301MB/s bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s", "count= 16384 1.211s 73.936us/record 1772.773MB/s bs= 262144 count= 8192 1.185s 144.619us/record 1812.651MB/s bs=", "16384 count= 262144 1.627s 6.208us/record 2639.224MB/s bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s", "bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s bs= 131072 count= 16384 1.437s 87.693us/record", "6126.015MB/s bs= 8192 count=1048576 0.870s 0.830us/record 9870.674MB/s bs= 16384 count=1048576 1.191s 1.136us/record 14427.529MB/s", "204.760MB/s bs= 4096 count= 65536 0.688s 10.498us/record 390.177MB/s bs= 8192 count= 65536 0.903s", "1171.421MB/s bs=8388608 count= 256 1.860s 7266.406us/record 1154.437MB/s Raspberry Pi 4 running Debian 11", "bs= 64 count= 262144 0.848s 3.236us/record 19.779MB/s bs= 128 count= 262144 0.848s 3.235us/record", "bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s bs=4194304", "32768 1.227s 37.450us/record 1749.962MB/s bs= 131072 count= 16384 1.264s 77.148us/record 1698.972MB/s bs= 262144", "0.785s 2.993us/record 684.160MB/s bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s bs= 8192 count=", "1.421us/record 90.060MB/s bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s bs= 512 count= 524288", "0.897s 1.711us/record 37.394MB/s bs= 128 count= 524288 0.899s 1.715us/record 74.630MB/s bs= 256 count=", "16384 1.487s 90.750us/record 23109.237MB/s bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s bs=8388608 count= 4096", "count= 262144 1.406s 5.365us/record 1527.034MB/s bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s bs=", "131072 0.674s 5.143us/record 6.222MB/s bs= 64 count= 131072 0.704s 5.373us/record 11.911MB/s bs= 128", "262144 1.018s 3.883us/record 2109.512MB/s bs= 16384 count= 131072 0.757s 5.776us/record 2836.329MB/s bs= 32768", "970.906MB/s bs= 8192 count= 131072 0.675s 5.148us/record 1591.372MB/s bs= 16384 count= 131072 0.917s", "65536 0.629s 9.605us/record 3.332MB/s bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128", "3.885MB/s bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s bs= 16 count= 524288 0.533s", "3.262us/record 2.453MB/s bs= 16 count= 262144 0.831s 3.171us/record 5.046MB/s bs= 32 count= 262144", "8 count= 131072 0.682s 5.202us/record 1.538MB/s bs= 16 count= 131072 0.719s 5.483us/record 2.918MB/s", "bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s bs= 8 count= 262144 0.855s 3.262us/record", "4096 1.303s 318.062us/record 1648.385MB/s bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s bs=2097152 count= 1024", "bs= 262144 count= 32768 2.088s 63.717us/record 4114.190MB/s bs= 524288 count= 16384 2.347s 143.225us/record", "freebsd13% ./bench_dd.py bs= 1 count=1048576 0.728s 0.694us/record 1.440MB/s bs= 2 count=1048576 0.573s 0.547us/record", "count / 1e6 / seconds)) bs *= 2 if seconds > 1: count", "1.365s 20.821us/record 3147.534MB/s bs= 131072 count= 32768 1.324s 40.391us/record 3245.109MB/s bs= 262144 count=", "bs= 16 count= 262144 0.524s 2.001us/record 7.997MB/s bs= 32 count= 262144 0.524s 1.999us/record", "0.583s 0.556us/record 921.523MB/s bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s", "seconds > 1: count /= 2 result = \"\"\" Raspberry Pi 4 running", "1648.385MB/s bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s bs=2097152 count= 
1024 1.839s 1796.094us/record 1167.618MB/s", "2.105us/record 60.802MB/s bs= 256 count= 262144 0.557s 2.126us/record 120.423MB/s bs= 512 count= 262144", "count=1048576 0.640s 0.611us/record 3353.923MB/s bs= 4096 count=1048576 0.701s 0.669us/record 6126.015MB/s bs= 8192 count=1048576", "262144 1.627s 6.208us/record 2639.224MB/s bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s bs= 65536", "1024 2.454s 2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s Raspberry Pi 3", "bs= 32 count= 524288 0.537s 1.023us/record 31.265MB/s bs= 64 count= 524288 1.527s 2.913us/record", "0.655s 10.002us/record 204.760MB/s bs= 4096 count= 65536 0.688s 10.498us/record 390.177MB/s bs= 8192 count=", "14.595MB/s bs= 16 count=1048576 0.572s 0.546us/record 29.329MB/s bs= 32 count=1048576 0.574s 0.548us/record 58.435MB/s", "count= 16384 1.437s 87.693us/record 1494.671MB/s bs= 262144 count= 8192 1.426s 174.119us/record 1505.548MB/s bs=", "bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s bs= 2048 count= 262144 0.984s 3.755us/record", "34748.329MB/s bs=4194304 count= 16384 2.007s 122.520us/record 34233.639MB/s bs=8388608 count= 8192 2.103s 256.698us/record 32678.930MB/s", "0.763s 1.456us/record 10.992MB/s bs= 32 count= 524288 0.767s 1.463us/record 21.878MB/s bs= 64 count=", "1.018s 3.883us/record 2109.512MB/s bs= 16384 count= 131072 0.757s 5.776us/record 2836.329MB/s bs= 32768 count=", "1749.962MB/s bs= 131072 count= 16384 1.264s 77.148us/record 1698.972MB/s bs= 262144 count= 8192 1.257s", "count= 16384 0.987s 60.240us/record 1087.909MB/s bs= 131072 count= 16384 1.854s 113.177us/record 1158.110MB/s bs=", "433.748us/record 2417.480MB/s bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record", "1.282s 39.113us/record 1675.575MB/s bs= 131072 count= 16384 1.211s 73.936us/record 1772.773MB/s bs= 262144 count=", "262144 count= 16384 1.045s 63.790us/record 4109.505MB/s bs= 524288 count= 8192 1.092s 133.292us/record 3933.372MB/s", "bs=2097152 count= 1024 1.839s 1796.094us/record 1167.618MB/s bs=4194304 count= 512 1.833s 3580.527us/record 1171.421MB/s bs=8388608", "count= 262144 0.768s 2.930us/record 174.728MB/s bs= 1024 count= 262144 0.795s 3.034us/record 337.543MB/s bs=", "262144 0.654s 2.494us/record 51.329MB/s bs= 256 count= 262144 0.653s 2.492us/record 102.746MB/s bs= 512", "0.996s 7.597us/record 34507.742MB/s bs= 524288 count= 131072 1.938s 14.784us/record 35462.791MB/s bs=1048576 count= 65536", "1024 1.839s 1796.094us/record 1167.618MB/s bs=4194304 count= 512 1.833s 3580.527us/record 1171.421MB/s bs=8388608 count= 256", "1659.057MB/s bs= 32768 count= 65536 1.245s 19.003us/record 1724.402MB/s bs= 65536 count= 32768 1.227s", "2.096s 8189.414us/record 1024.323MB/s Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2 arm_freq=1000 core_freq=500 sdram_freq=400 over_voltage=0 over_voltage_sdram_p=0 over_voltage_sdram_i=0 over_voltage_sdram_c=0 $", "0.656s 2.501us/record 818.834MB/s bs= 4096 count= 262144 0.767s 2.926us/record 1399.933MB/s bs= 8192 count=", "9.875us/record 1659.057MB/s bs= 32768 count= 65536 1.245s 19.003us/record 1724.402MB/s bs= 65536 count= 32768", "1.627s 6.208us/record 2639.224MB/s bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s bs= 65536 count=", "Pi 3 running Debian 11 arm64, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576", "count= 65536 0.645s 9.840us/record 104.064MB/s bs= 2048 count= 65536 0.655s 10.002us/record 204.760MB/s bs=", "count=1048576 0.575s 0.548us/record 
14.595MB/s bs= 16 count=1048576 0.572s 0.546us/record 29.329MB/s bs= 32 count=1048576", "count=1048576 0.716s 0.683us/record 12000.920MB/s bs= 16384 count=1048576 0.886s 0.845us/record 19391.838MB/s bs= 32768 count=1048576", "1.854s 113.177us/record 1158.110MB/s bs= 262144 count= 8192 1.801s 219.850us/record 1192.377MB/s bs= 524288 count=", "Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ python3 bench_dd.py", "bs= 512 count= 262144 0.844s 3.220us/record 159.029MB/s bs= 1024 count= 262144 0.894s 3.411us/record", "0.371MB/s bs= 4 count= 262144 1.415s 5.397us/record 0.741MB/s bs= 8 count= 131072 0.682s", "524288 0.757s 1.444us/record 2.770MB/s bs= 8 count= 524288 0.762s 1.454us/record 5.503MB/s bs= 16", "64 count=1048576 0.573s 0.546us/record 117.174MB/s bs= 128 count=1048576 0.568s 0.542us/record 236.122MB/s bs= 256", "count= 262144 0.543s 2.071us/record 1.931MB/s bs= 8 count= 262144 0.539s 2.058us/record 3.888MB/s bs=", "11.471us/record 1428.238MB/s bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s bs= 65536 count= 32768", "bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s bs= 65536 count= 65536 1.116s 17.026us/record", "1.402us/record 2.852MB/s bs= 8 count= 524288 0.740s 1.411us/record 5.670MB/s bs= 16 count= 524288", "10 armv7, kernel 5.10 $ ./bench_dd.py bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s bs=", "bs= 1024 count= 262144 0.799s 3.050us/record 335.763MB/s bs= 2048 count= 262144 1.093s 4.170us/record", "1e6 / seconds)) bs *= 2 if seconds > 1: count /= 2", "= str(result.stderr) if m := re.search('copied, (.*?) s, ', message): seconds = float(m.group(1))", "79.400us/record 3301.561MB/s bs= 524288 count= 8192 1.369s 167.107us/record 3137.440MB/s bs=1048576 count= 4096 1.862s", "count= 131072 1.938s 14.784us/record 35462.791MB/s bs=1048576 count= 65536 1.954s 29.814us/record 35170.740MB/s bs=2097152 count=", "bs= 2048 count= 262144 0.817s 3.117us/record 657.138MB/s bs= 4096 count= 262144 0.886s 3.378us/record", "2048 2.984s 1457.168us/record 1439.197MB/s bs=4194304 count= 1024 3.431s 3350.625us/record 1251.798MB/s bs=8388608 count= 512", "1.091s 266.418us/record 1967.912MB/s bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024 1.543s", "e8300, CPU i7-3770 freebsd13% ./bench_dd.py bs= 1 count=1048576 0.728s 0.694us/record 1.440MB/s bs= 2", "1.612s 6.148us/record 1332.376MB/s bs= 16384 count= 131072 1.504s 11.471us/record 1428.238MB/s bs= 32768 count=", "$ ./bench_dd.py bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s bs= 2 count= 524288 1.155s", "1 count = 1024 * 1024 while bs <= 1024 * 1024 *", "131072 1.456s 11.111us/record 2949.152MB/s bs= 65536 count= 65536 1.365s 20.821us/record 3147.534MB/s bs= 131072", "262144 1.496s 5.705us/record 22973.575MB/s bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s bs= 524288", "998.356MB/s bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s", "bs= 1 count=1048576 1.067s 1.018us/record 0.982MB/s bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s", "0.564us/record 1815.495MB/s bs= 2048 count=1048576 0.610s 0.582us/record 3517.599MB/s bs= 4096 count=1048576 0.648s 0.618us/record", "16384 2.007s 122.520us/record 34233.639MB/s bs=8388608 count= 8192 2.103s 256.698us/record 32678.930MB/s debian11$ ./bench_dd.py bs=", "262144 0.760s 2.899us/record 88.300MB/s bs= 512 count= 262144 0.768s 2.930us/record 174.728MB/s bs= 1024", "262144 0.544s 2.077us/record 30.817MB/s bs= 128 count= 262144 0.552s 
2.105us/record 60.802MB/s bs= 256", "4 count= 262144 0.573s 2.187us/record 1.829MB/s bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s", "0.635s 9.687us/record 52.854MB/s bs= 1024 count= 65536 0.645s 9.840us/record 104.064MB/s bs= 2048 count=", "1024 count=1048576 0.608s 0.580us/record 1764.989MB/s bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s bs= 4096", "32768 count= 524288 1.004s 1.915us/record 17109.038MB/s bs= 65536 count= 262144 0.822s 3.135us/record 20902.551MB/s", "2.993us/record 684.160MB/s bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s bs= 8192 count= 262144", "bs= 1024 count= 524288 0.831s 1.585us/record 645.859MB/s bs= 2048 count= 524288 0.914s 1.742us/record", "bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s bs=8388608", "0.524s 2.001us/record 7.997MB/s bs= 32 count= 262144 0.524s 1.999us/record 16.006MB/s bs= 64 count=", "bs=1048576 count= 4096 2.321s 566.655us/record 1850.465MB/s bs=2097152 count= 2048 2.984s 1457.168us/record 1439.197MB/s bs=4194304", "bs= 262144 count= 16384 1.301s 79.400us/record 3301.561MB/s bs= 524288 count= 8192 1.369s 167.107us/record", "262144 0.653s 2.492us/record 102.746MB/s bs= 512 count= 262144 0.672s 2.564us/record 199.718MB/s bs= 1024", "3517.599MB/s bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s bs= 8192 count=1048576 0.716s 0.683us/record 12000.920MB/s", "5.373us/record 11.911MB/s bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s bs= 256 count= 131072", "2383.790us/record 1759.511MB/s bs=8388608 count= 512 2.690s 5253.455us/record 1596.779MB/s Raspberry Pi 4 running Raspbian", "bs= 1024 count= 262144 0.599s 2.286us/record 447.998MB/s bs= 2048 count= 262144 0.656s 2.501us/record", "11.200us/record 23406.614MB/s bs= 524288 count= 65536 1.519s 23.171us/record 22626.825MB/s bs=1048576 count= 32768 1.495s", "bs= 65536 count= 32768 1.227s 37.450us/record 1749.962MB/s bs= 131072 count= 16384 1.264s 77.148us/record", "5.728us/record 357.517MB/s bs= 4096 count= 131072 0.802s 6.116us/record 669.720MB/s bs= 8192 count= 131072", "256 count= 524288 0.752s 1.434us/record 178.504MB/s bs= 512 count= 524288 0.780s 1.488us/record 344.122MB/s", "32768 1.130s 34.500us/record 3799.209MB/s bs= 262144 count= 16384 1.155s 70.499us/record 3718.413MB/s bs= 524288", "128 count= 262144 0.725s 2.767us/record 46.261MB/s bs= 256 count= 262144 0.794s 3.028us/record 84.557MB/s", "32.093us/record 4084.183MB/s bs= 262144 count= 16384 1.045s 63.790us/record 4109.505MB/s bs= 524288 count= 8192", "13-RELEASE: freebsd% python3.9 bench_dd.py bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s bs= 2 count=", "77.148us/record 1698.972MB/s bs= 262144 count= 8192 1.257s 153.500us/record 1707.781MB/s bs= 524288 count= 4096", "bs = 1 count = 1024 * 1024 while bs <= 1024 *", "5.409s 5.159us/record 0.194MB/s bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s bs= 4 count=", "bs= 2048 count= 524288 0.914s 1.742us/record 1175.405MB/s bs= 4096 count= 524288 1.096s 2.090us/record", "13.195MB/s bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s bs= 512 count= 65536 0.635s", "1.801s 219.850us/record 1192.377MB/s bs= 524288 count= 4096 1.796s 438.547us/record 1195.511MB/s bs=1048576 count= 2048", "2048 count= 262144 1.093s 4.170us/record 491.168MB/s bs= 4096 count= 131072 0.547s 4.170us/record 982.276MB/s", "count= 262144 0.794s 3.028us/record 84.557MB/s bs= 512 count= 262144 0.773s 2.951us/record 173.523MB/s bs=", "16384 count= 65536 1.343s 20.487us/record 799.712MB/s bs= 32768 
count= 32768 1.105s 33.717us/record 971.844MB/s", "0.757s 5.776us/record 2836.329MB/s bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s bs= 65536 count=", "8192 1.369s 167.107us/record 3137.440MB/s bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s bs=2097152 count= 2048", "1.369s 167.107us/record 3137.440MB/s bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s bs=2097152 count= 2048 2.197s", "Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10 $ python3 bench_dd.py bs=", "bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s bs= 16 count= 262144 0.579s 2.210us/record", "12.712us/record 1288.837MB/s bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s bs= 65536 count= 32768", "count= 524288 1.527s 2.913us/record 21.972MB/s bs= 128 count= 262144 0.758s 2.892us/record 44.258MB/s bs=", "1.456us/record 10.992MB/s bs= 32 count= 524288 0.767s 1.463us/record 21.878MB/s bs= 64 count= 524288", "1.487s 90.750us/record 23109.237MB/s bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s bs=8388608 count= 4096 1.588s", "7.730MB/s bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s bs= 64 count= 262144 0.544s", "count= 131072 0.714s 5.449us/record 93.955MB/s bs= 1024 count= 131072 0.707s 5.392us/record 189.911MB/s bs=", "8: args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count] result", "0.831s 1.585us/record 645.859MB/s bs= 2048 count= 524288 0.914s 1.742us/record 1175.405MB/s bs= 4096 count=", "0.903s 13.782us/record 594.390MB/s bs= 16384 count= 65536 1.343s 20.487us/record 799.712MB/s bs= 32768 count=", "'bs=%d' % bs, 'count=%d' % count] result = subprocess.run(args, capture_output=True) seconds = 0", "message = str(result.stderr) if m := re.search('copied, (.*?) s, ', message): seconds =", "3.117us/record 657.138MB/s bs= 4096 count= 262144 0.886s 3.378us/record 1212.454MB/s bs= 8192 count= 262144", "10.321MB/s bs= 64 count= 262144 0.848s 3.236us/record 19.779MB/s bs= 128 count= 262144 0.848s", "/ count, bs * count / 1e6 / seconds)) bs *= 2 if", "0.987s 60.240us/record 1087.909MB/s bs= 131072 count= 16384 1.854s 113.177us/record 1158.110MB/s bs= 262144 count=", "over_voltage_sdram_c=0 $ ./bench_dd.py bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s bs= 2 count= 524288", "count= 1024 2.454s 2396.406us/record 1750.247MB/s bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s Raspberry Pi", "count= 65536 0.771s 11.765us/record 1392.607MB/s bs= 32768 count= 65536 1.511s 23.059us/record 1421.036MB/s bs=", "count= 65536 1.189s 18.144us/record 3611.984MB/s bs= 131072 count= 32768 1.130s 34.500us/record 3799.209MB/s bs=", "1.167s 2.226us/record 29446.678MB/s bs= 131072 count= 262144 1.049s 4.001us/record 32757.097MB/s bs= 262144 count=", "65536 0.636s 9.700us/record 13.195MB/s bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s bs= 512", "5.143us/record 6.222MB/s bs= 64 count= 131072 0.704s 5.373us/record 11.911MB/s bs= 128 count= 131072", "21.874MB/s bs= 128 count= 262144 0.725s 2.767us/record 46.261MB/s bs= 256 count= 262144 0.794s", "250.829us/record 1045.111MB/s bs= 524288 count= 4096 2.036s 496.960us/record 1054.989MB/s bs=1048576 count= 2048 2.070s", "2.036s 496.960us/record 1054.989MB/s bs=1048576 count= 2048 2.070s 1010.869us/record 1037.301MB/s bs=2097152 count= 1024 2.084s", "'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count] result = subprocess.run(args, capture_output=True) seconds", "1967.912MB/s bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s bs=2097152 count= 1024 1.543s 1507.129us/record 
1391.488MB/s", "657.138MB/s bs= 4096 count= 262144 0.886s 3.378us/record 1212.454MB/s bs= 8192 count= 262144 1.406s", "6.992us/record 2343.125MB/s bs= 32768 count= 131072 1.385s 10.567us/record 3100.959MB/s bs= 65536 count= 65536", "count= 1024 3.431s 3350.625us/record 1251.798MB/s bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s ================================================================ Raspberry", "m := re.search('bytes transferred in (.*?) secs', message): seconds = float(m.group(1)) else: print('Unable", "65536 count= 16384 0.987s 60.240us/record 1087.909MB/s bs= 131072 count= 16384 1.854s 113.177us/record 1158.110MB/s", "bs= 4096 count= 524288 1.096s 2.090us/record 1960.027MB/s bs= 8192 count= 262144 0.750s 2.861us/record", "16 count= 524288 0.763s 1.456us/record 10.992MB/s bs= 32 count= 524288 0.767s 1.463us/record 21.878MB/s", "60.353us/record 34748.329MB/s bs=4194304 count= 16384 2.007s 122.520us/record 34233.639MB/s bs=8388608 count= 8192 2.103s 256.698us/record", "count= 32768 1.130s 34.500us/record 3799.209MB/s bs= 262144 count= 16384 1.155s 70.499us/record 3718.413MB/s bs=", "524288 2.828s 5.393us/record 0.371MB/s bs= 4 count= 262144 1.415s 5.397us/record 0.741MB/s bs= 8", "7.997MB/s bs= 32 count= 262144 0.524s 1.999us/record 16.006MB/s bs= 64 count= 262144 0.692s", "262144 0.886s 3.378us/record 1212.454MB/s bs= 8192 count= 262144 1.406s 5.365us/record 1527.034MB/s bs= 16384", "3.236us/record 19.779MB/s bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s bs= 256 count= 262144", "0.690s 5.262us/record 48.655MB/s bs= 512 count= 131072 0.714s 5.449us/record 93.955MB/s bs= 1024 count=", "131072 count= 32768 1.052s 32.093us/record 4084.183MB/s bs= 262144 count= 16384 1.045s 63.790us/record 4109.505MB/s", "3.028us/record 84.557MB/s bs= 512 count= 262144 0.773s 2.951us/record 173.523MB/s bs= 1024 count= 262144", "count= 262144 0.795s 3.034us/record 337.543MB/s bs= 2048 count= 262144 0.817s 3.117us/record 657.138MB/s bs=", "bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s bs= 128 count= 65536 0.636s 9.700us/record", "0.996s 3.799us/record 2156.141MB/s bs= 16384 count= 262144 1.627s 6.208us/record 2639.224MB/s bs= 32768 count=", "1.390us/record 1.439MB/s bs= 4 count= 524288 0.735s 1.402us/record 2.852MB/s bs= 8 count= 524288", "bs= 128 count= 262144 0.552s 2.105us/record 60.802MB/s bs= 256 count= 262144 0.557s 2.126us/record", "1301.324MB/s bs=8388608 count= 256 1.583s 6185.391us/record 1356.197MB/s ================================================================ Raspberry Pi 3 running Raspbian", "8 count= 131072 1.251s 9.546us/record 0.838MB/s bs= 16 count= 65536 0.631s 9.623us/record 1.663MB/s", "1.025us/record 7.805MB/s bs= 16 count= 524288 0.533s 1.016us/record 15.741MB/s bs= 32 count= 524288", "1.933us/record 529.725MB/s bs= 2048 count= 262144 0.565s 2.155us/record 950.259MB/s bs= 4096 count= 262144", "145.141MB/s bs= 512 count= 524288 0.943s 1.799us/record 284.672MB/s bs= 1024 count= 524288 1.013s", "seconds = float(m.group(1)) else: print('Unable to parse dd output:\\n%s' % message) break print('bs=%7d", "65536 1.189s 18.144us/record 3611.984MB/s bs= 131072 count= 32768 1.130s 34.500us/record 3799.209MB/s bs= 262144", "8192 3.553s 433.748us/record 2417.480MB/s bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s bs=4194304 count= 2048", "1.954s 29.814us/record 35170.740MB/s bs=2097152 count= 32768 1.978s 60.353us/record 34748.329MB/s bs=4194304 count= 16384 2.007s", "count= 512 1.650s 3223.105us/record 1301.324MB/s bs=8388608 count= 256 1.583s 
6185.391us/record 1356.197MB/s ================================================================ Raspberry", "1.411us/record 5.670MB/s bs= 16 count= 524288 0.746s 1.423us/record 11.246MB/s bs= 32 count= 524288", "236.122MB/s bs= 256 count=1048576 0.577s 0.550us/record 465.528MB/s bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s", "1024 count= 131072 0.707s 5.392us/record 189.911MB/s bs= 2048 count= 131072 0.751s 5.728us/record 357.517MB/s" ]
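The fragments above all come from one small benchmark driver. A runnable reassembly is sketched below; it is pieced together from those fragments (so details such as integer versus float count handling are best guesses) and needs Python 3.8+ for the := operator.

import re
import subprocess

# Sweep dd block sizes from 1 byte to 8 MiB, copying /dev/zero to /dev/null,
# and report per-record latency and throughput for each block size.
bs = 1
count = 1024 * 1024
while bs <= 1024 * 1024 * 8:
    args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count]
    result = subprocess.run(args, capture_output=True)
    seconds = 0
    message = str(result.stderr)
    if m := re.search('copied, (.*?) s, ', message):                  # GNU coreutils dd
        seconds = float(m.group(1))
    elif m := re.search('bytes transferred in (.*?) secs', message):  # BSD dd
        seconds = float(m.group(1))
    else:
        print('Unable to parse dd output:\n%s' % message)
        break
    print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s'
          % (bs, count, seconds, seconds * 1e6 / count, bs * count / 1e6 / seconds))
    bs *= 2
    if seconds > 1:   # keep runs short: halve the record count once a pass exceeds 1 s
        count //= 2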
[ "@author: yangqiang @contact: <EMAIL> @file: __init__.py.py @time: 2020/4/3 14:49 \"\"\" from .easyPlog import", "<reponame>whuhit/easyPlog<filename>easyPlog/__init__.py \"\"\" @author: yangqiang @contact: <EMAIL> @file: __init__.py.py @time: 2020/4/3 14:49 \"\"\" from", "yangqiang @contact: <EMAIL> @file: __init__.py.py @time: 2020/4/3 14:49 \"\"\" from .easyPlog import Plog", "\"\"\" @author: yangqiang @contact: <EMAIL> @file: __init__.py.py @time: 2020/4/3 14:49 \"\"\" from .easyPlog" ]
[ "Color, Colors from .grid import Location, Annotation, Annotations, Grid from .minimap import Minimap", "import Location, Annotation, Annotations, Grid from .minimap import Minimap from .astar import AStarSearch", "from .colors import Color, Colors from .grid import Location, Annotation, Annotations, Grid from", ".colors import Color, Colors from .grid import Location, Annotation, Annotations, Grid from .minimap", "Colors from .grid import Location, Annotation, Annotations, Grid from .minimap import Minimap from", "from .grid import Location, Annotation, Annotations, Grid from .minimap import Minimap from .astar", "import Color, Colors from .grid import Location, Annotation, Annotations, Grid from .minimap import", "<filename>models/__init__.py from .colors import Color, Colors from .grid import Location, Annotation, Annotations, Grid", ".grid import Location, Annotation, Annotations, Grid from .minimap import Minimap from .astar import" ]
[ "self.file = reader.efile(path, key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else: self.json =", "reader class dataBase: def __init__(self, path, key): self.file = reader.efile(path, key); if(len(self.file.data) >", "class dataBase: def __init__(self, path, key): self.file = reader.efile(path, key); if(len(self.file.data) > 0):", "if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode()", "path, key): self.file = reader.efile(path, key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else:", "0): self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode() def save(self):", "import imports.fileReader as reader class dataBase: def __init__(self, path, key): self.file = reader.efile(path,", "def __init__(self, path, key): self.file = reader.efile(path, key); if(len(self.file.data) > 0): self.json =", "dataBase: def __init__(self, path, key): self.file = reader.efile(path, key); if(len(self.file.data) > 0): self.json", "import json import imports.fileReader as reader class dataBase: def __init__(self, path, key): self.file", "= reader.efile(path, key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\")", "key): self.file = reader.efile(path, key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else: self.json", "= json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode() def save(self): self.file.data =", "self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode() def save(self): self.file.data", "> 0): self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode() def", "json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode() def save(self): self.file.data = json.dumps(self.json)", "json import imports.fileReader as reader class dataBase: def __init__(self, path, key): self.file =", "as reader class dataBase: def __init__(self, path, key): self.file = reader.efile(path, key); if(len(self.file.data)", "key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"] =", "else: self.json = json.loads(\"{}\") self.json[\"key\"] = key.decode() def save(self): self.file.data = json.dumps(self.json) self.file.save();", "imports.fileReader as reader class dataBase: def __init__(self, path, key): self.file = reader.efile(path, key);", "__init__(self, path, key): self.file = reader.efile(path, key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data)", "reader.efile(path, key); if(len(self.file.data) > 0): self.json = json.loads(self.file.data) else: self.json = json.loads(\"{}\") self.json[\"key\"]" ]
[ "defined. \"\"\" from flask import Blueprint from flask import current_app as app from", "Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response() @main_blueprint.route('/health') def health(): app.prom_init.up_gauge.set(1) return make_response()", "import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response() @main_blueprint.route('/health') def", "make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response() @main_blueprint.route('/health') def health():", "import Blueprint from flask import current_app as app from flask import make_response main_blueprint", "This is where all the general routes and controllers are defined. \"\"\" from", "the general routes and controllers are defined. \"\"\" from flask import Blueprint from", "= Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response() @main_blueprint.route('/health') def health(): app.prom_init.up_gauge.set(1) return", "import current_app as app from flask import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/')", "are defined. \"\"\" from flask import Blueprint from flask import current_app as app", "from flask import current_app as app from flask import make_response main_blueprint = Blueprint('main_blueprint',", "app from flask import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return", "flask import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response() @main_blueprint.route('/health')", "is where all the general routes and controllers are defined. \"\"\" from flask", "as app from flask import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index():", "and controllers are defined. \"\"\" from flask import Blueprint from flask import current_app", "general routes and controllers are defined. \"\"\" from flask import Blueprint from flask", "Blueprint from flask import current_app as app from flask import make_response main_blueprint =", "flask import current_app as app from flask import make_response main_blueprint = Blueprint('main_blueprint', __name__)", "\"\"\" This is where all the general routes and controllers are defined. \"\"\"", "flask import Blueprint from flask import current_app as app from flask import make_response", "current_app as app from flask import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def", "from flask import Blueprint from flask import current_app as app from flask import", "all the general routes and controllers are defined. \"\"\" from flask import Blueprint", "from flask import make_response main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response()", "main_blueprint = Blueprint('main_blueprint', __name__) @main_blueprint.route('/') def index(): return make_response() @main_blueprint.route('/health') def health(): app.prom_init.up_gauge.set(1)", "controllers are defined. \"\"\" from flask import Blueprint from flask import current_app as", "\"\"\" from flask import Blueprint from flask import current_app as app from flask", "where all the general routes and controllers are defined. 
\"\"\" from flask import", "routes and controllers are defined. \"\"\" from flask import Blueprint from flask import" ]
[ "from django.urls import path from . import views app_name = \"trade\" urlpatterns =", "import views app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/', views.see_trade,", ". import views app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/',", "views app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/', views.see_trade, name=\"see\"),", "django.urls import path from . import views app_name = \"trade\" urlpatterns = [", "= \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/', views.see_trade, name=\"see\"), path('change/<str:other_username>', views.change_trade,", "app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/', views.see_trade, name=\"see\"), path('change/<str:other_username>',", "\"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/', views.see_trade, name=\"see\"), path('change/<str:other_username>', views.change_trade, name=\"change\"),", "import path from . import views app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/',", "from . import views app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"),", "path from . import views app_name = \"trade\" urlpatterns = [ path('start/<str:receiver_username>/', views.start,", "urlpatterns = [ path('start/<str:receiver_username>/', views.start, name=\"start\"), path('<str:other_username>/', views.see_trade, name=\"see\"), path('change/<str:other_username>', views.change_trade, name=\"change\"), ]" ]
[ "+= 1 elif split == 'test': scene = h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images',", "if teller != '': description += ['<teller>'] + nltk.word_tokenize(teller) if drawer != '':", "current turn: merge with next turn if hamming_distance < 1: prev_bow = bow", "idx))) image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords) description", "= np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img, (128, 128)) # load spelling corrections", "128. / h scaling_ratio = np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img, (128, 128))", "+= 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords',", "len(bow) GT_BOW[image] = bow[:, 0] scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1)", "if drawer in chitchat: drawer = '' # replace with spelling suggestions returned", "description = [w for w in description if w not in string.punctuation] bow", "tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer,", "c_val += 1 elif split == 'test': scene = h5_test.create_group(str(c_test)) c_test += 1", "parse and read raw CoDraw data and save it in HDF5 format for", "split_coords = lambda x: [int(c) for c in x.split(',')] bow = np.array([split_coords(b) for", "description if w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id,", "images_path = keys['codraw_images'] background_img = cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords", "'bye', 'hello'] # start saving data into hdf5; loop over all scenes c_train", "splits[0] split_coords = lambda x: [int(c) for c in x.split(',')] bow = np.array([split_coords(b)", "= np.zeros((bow_dim)) description = [] for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)]", "dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id) else:", "scale_y, 1]) background_img = cv2.resize(background_img, (128, 128)) # load spelling corrections - obtained", "w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] #", "tqdm import tqdm import yaml with open('config.yml', 'r') as f: keys = yaml.load(f,", "GeNeVA-GAN \"\"\" from glob import glob import json import os import pickle import", "and len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token in spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset =", "= [] coordinates = [] with open(scene_file, 'r') as f: scene = json.load(f)", "msg[:offset] after = msg[offset:] after = after.replace(tok, tok_replace, 1) return before + after", "= GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in this turn hamming_distance = np.sum(bow", "1 continue # queue image, instruction, objects bow, object coordinates for saving if", "to be removed 
chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello']", "[] objects = [] coordinates = [] with open(scene_file, 'r') as f: scene", "new object is added in image for current turn: merge with next turn", "scaling parameters h, w, _ = background_img.shape scale_x = 128. / w scale_y", "1: prev_bow = bow idx += 1 continue # queue image, instruction, objects", "hdf5 files for train, val, test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val =", "\"\"\" Script to parse and read raw CoDraw data and save it in", "chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello'] # start saving", "if len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128,", "i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in this", "'ok', 'alright', 'okay', 'thanks', 'bye', 'hello'] # start saving data into hdf5; loop", "test split = scene_file.split('/')[-1].split('_')[0] images = [] utterences = [] objects = []", "if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in spell_check[teller]['flaggedTokens']: tok", "= len(bow) GT_BOW[image] = bow[:, 0] scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3,", "for b in splits[1].split()]) bow_dim = len(bow) GT_BOW[image] = bow[:, 0] scaling =", "open(spell_check, 'rb') as f: spell_check = pickle.load(f) # create hdf5 files for train,", "flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset,", "['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello'] # start saving data into", "= [w for w in description if w not in chitchat] description =", "is added in image for current turn: merge with next turn if hamming_distance", "- prev_bow) turn = scene['dialog'][i] # lowercase all messages teller = str.lower(turn['msg_t']) drawer", "description += ['<drawer>'] + nltk.word_tokenize(drawer) description = [w for w in description if", "# mark purely chitchat turns to be removed chitchat = ['hi', 'done', 'ok',", "as f: spell_check = pickle.load(f) # create hdf5 files for train, val, test", "c_train = 0 c_val = 0 c_test = 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))):", "in chitchat] description = [w for w in description if w not in", "in splits[1].split()]) bow_dim = len(bow) GT_BOW[image] = bow[:, 0] scaling = scaling_ratio *", "chitchat turns if teller in chitchat: teller = '' if drawer in chitchat:", "json import os import pickle import string import cv2 import h5py import nltk", "train / val / test split = scene_file.split('/')[-1].split('_')[0] images = [] utterences =", "'r') as f: for line in f: splits = line.split('\\t') image = splits[0]", "'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img)", "spelling suggestions returned by Bing Spell Check API if teller in spell_check and", "chitchat turns to be removed chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks',", "# replace with spelling suggestions returned by Bing Spell Check API if teller", "+= 1 prev_bow = bow # add current scene's data to hdf5 if", "(bag of words) dicts for each image bow_dim = 0 GT_BOW = {}", "start saving data into hdf5; loop over all scenes 
c_train = 0 c_val", "= [] objects = [] coordinates = [] with open(scene_file, 'r') as f:", "'train': scene = h5_train.create_group(str(c_train)) c_train += 1 elif split == 'val': scene =", "== '': continue # if no new object is added in image for", "import nltk import numpy as np from tqdm import tqdm import yaml with", "0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join('", "read raw CoDraw data and save it in HDF5 format for GeNeVA-GAN \"\"\"", "if scene belongs to train / val / test split = scene_file.split('/')[-1].split('_')[0] images", "image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ',", "'' # replace with spelling suggestions returned by Bing Spell Check API if", "turns to be removed chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye',", "description = [] for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new", "== 'test': scene = h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str)", "in this turn hamming_distance = np.sum(bow - prev_bow) turn = scene['dialog'][i] # lowercase", "turn: merge with next turn if hamming_distance < 1: prev_bow = bow idx", "= 0 c_test = 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if scene", "Bing Spell Check API if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for", "pickle.load(f) # create hdf5 files for train, val, test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'),", "glob import glob import json import os import pickle import string import cv2", "idx += 1 continue # queue image, instruction, objects bow, object coordinates for", "words) dicts for each image bow_dim = 0 GT_BOW = {} GT_OBJECTS =", "= '' if drawer in chitchat: drawer = '' # replace with spelling", "if turn['abs_d'] == '': continue # if no new object is added in", "# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. \"\"\" Script", "merge with next turn if turn['abs_d'] == '': continue # if no new", "data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id) else: print(scene_id) if __name__ ==", "description = [w for w in description if w not in chitchat] description", "<filename>scripts/codraw_dataset_generation/codraw_raw_to_hdf5.py # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. \"\"\"", "objects bow, object coordinates for saving if len(description) > 0: image = cv2.imread(os.path.join(images_path,", "current turn: merge with next turn if turn['abs_d'] == '': continue # if", "Check API with open(spell_check, 'rb') as f: spell_check = pickle.load(f) # create hdf5", "len(images) > 0: if split == 'train': scene = h5_train.create_group(str(c_train)) c_train += 1", "+= 1 continue # queue image, instruction, objects bow, object coordinates for saving", "parameters h, w, _ = background_img.shape scale_x = 128. 
/ w scale_y =", "import glob import json import os import pickle import string import cv2 import", "= bow idx += 1 continue # queue image, instruction, objects bow, object", "f: for line in f: splits = line.split('\\t') image = splits[0] split_coords =", "'r') as f: scene = json.load(f) scene_id = scene['image_id'] # loop over turns", "codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width, scaling parameters h, w, _ =", "scene belongs to train / val / test split = scene_file.split('/')[-1].split('_')[0] images =", "len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace) if", "h5_train.create_group(str(c_train)) c_train += 1 elif split == 'val': scene = h5_val.create_group(str(c_val)) c_val +=", "cv2 import h5py import nltk import numpy as np from tqdm import tqdm", "split = scene_file.split('/')[-1].split('_')[0] images = [] utterences = [] objects = [] coordinates", "b in splits[1].split()]) bow_dim = len(bow) GT_BOW[image] = bow[:, 0] scaling = scaling_ratio", "# loop over turns in a single scene idx = 0 prev_bow =", "tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer in spell_check", "= [] idx += 1 prev_bow = bow # add current scene's data", "len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset']", "import string import cv2 import h5py import nltk import numpy as np from", "yaml with open('config.yml', 'r') as f: keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok,", "val, test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test", "= cv2.resize(background_img, (128, 128)) # load spelling corrections - obtained via Bing Spell", "mark purely chitchat turns to be removed chitchat = ['hi', 'done', 'ok', 'alright',", "load spelling corrections - obtained via Bing Spell Check API with open(spell_check, 'rb')", "= flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting tokens: <teller>,", "with next turn if turn['abs_d'] == '': continue # if no new object", "= cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ', description))", "== 'train': scene = h5_train.create_group(str(c_train)) c_train += 1 elif split == 'val': scene", "bow_dim = 0 GT_BOW = {} GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as", "for flagged_token in spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) ==", "[int(c) for c in x.split(',')] bow = np.array([split_coords(b) for b in splits[1].split()]) bow_dim", "data into hdf5; loop over all scenes c_train = 0 c_val = 0", "tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if scene belongs to train / val / test split", "lowercase all messages teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear chitchat turns", "by Bing Spell Check API if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0:", "0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) # mark purely 
chitchat", "for train, val, test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'),", "= background_img.shape scale_x = 128. / w scale_y = 128. / h scaling_ratio", "images = [] utterences = [] objects = [] coordinates = [] with", "x: [int(c) for c in x.split(',')] bow = np.array([split_coords(b) for b in splits[1].split()])", "f: keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before = msg[:offset]", "# set height, width, scaling parameters h, w, _ = background_img.shape scale_x =", "{} GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as f: for line in f:", "= 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if scene belongs to train", "numpy as np from tqdm import tqdm import yaml with open('config.yml', 'r') as", "a single scene idx = 0 prev_bow = np.zeros((bow_dim)) description = [] for", "np from tqdm import tqdm import yaml with open('config.yml', 'r') as f: keys", "scene = h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences),", "in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in this turn", "GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there is no image for current turn: merge with", "scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id) else: print(scene_id) if __name__", "flagged_token in spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1", "= {} GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as f: for line in", "[] idx += 1 prev_bow = bow # add current scene's data to", "c_train += 1 elif split == 'val': scene = h5_val.create_group(str(c_val)) c_val += 1", "/ w scale_y = 128. 
/ h scaling_ratio = np.array([scale_x, scale_y, 1]) background_img", "with next turn if hamming_distance < 1: prev_bow = bow idx += 1", "bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there is no", "scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id) else: print(scene_id) if __name__ == '__main__': create_h5()", "description if w not in chitchat] description = [w for w in description", "images.append(image) utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords) description = [] idx += 1 prev_bow", "scene = h5_train.create_group(str(c_train)) c_train += 1 elif split == 'val': scene = h5_val.create_group(str(c_val))", "belongs to train / val / test split = scene_file.split('/')[-1].split('_')[0] images = []", "HDF5 format for GeNeVA-GAN \"\"\" from glob import glob import json import os", "GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as f: for line in f: splits", "into hdf5; loop over all scenes c_train = 0 c_val = 0 c_test", "objects and bow (bag of words) dicts for each image bow_dim = 0", "objects added in this turn hamming_distance = np.sum(bow - prev_bow) turn = scene['dialog'][i]", "replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0: for", "replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting tokens: <teller>, <drawer> if teller !=", "import tqdm import yaml with open('config.yml', 'r') as f: keys = yaml.load(f, Loader=yaml.FullLoader)", "h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background',", "prev_bow) turn = scene['dialog'][i] # lowercase all messages teller = str.lower(turn['msg_t']) drawer =", "# new objects added in this turn hamming_distance = np.sum(bow - prev_bow) turn", "= GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there is no image", "'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set objects and bow", "spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace =", "import h5py import nltk import numpy as np from tqdm import tqdm import", "image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords) description =", "h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set objects and", "h scaling_ratio = np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img, (128, 128)) # load", "== 'val': scene = h5_val.create_group(str(c_val)) c_val += 1 elif split == 'test': scene", "if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token in spell_check[drawer]['flaggedTokens']: tok", "len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token in 
spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset']", "# queue image, instruction, objects bow, object coordinates for saving if len(description) >", "+= 1 elif split == 'val': scene = h5_val.create_group(str(c_val)) c_val += 1 elif", "keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before = msg[:offset] after", "= keys['codraw_images'] background_img = cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords =", "_ = background_img.shape scale_x = 128. / w scale_y = 128. / h", "spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width, scaling parameters h,", "over all scenes c_train = 0 c_val = 0 c_test = 0 for", "= [] for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects", "from tqdm import tqdm import yaml with open('config.yml', 'r') as f: keys =", "for c in x.split(',')] bow = np.array([split_coords(b) for b in splits[1].split()]) bow_dim =", "Licensed under the MIT license. \"\"\" Script to parse and read raw CoDraw", "be removed chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello'] #", "= replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0:", "pickle import string import cv2 import h5py import nltk import numpy as np", "chitchat] description = [w for w in description if w not in string.punctuation]", "h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set objects and bow (bag of", "= [] utterences = [] objects = [] coordinates = [] with open(scene_file,", "'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img)", "elif split == 'test': scene = h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images', data=images) dt", "splits[1].split()]) bow_dim = len(bow) GT_BOW[image] = bow[:, 0] scaling = scaling_ratio * np.expand_dims(bow[:,", "if w not in chitchat] description = [w for w in description if", "after = after.replace(tok, tok_replace, 1) return before + after def create_h5(): # load", "is no image for current turn: merge with next turn if turn['abs_d'] ==", "prev_bow = bow # add current scene's data to hdf5 if len(images) >", "background_img = cv2.resize(background_img, (128, 128)) # load spelling corrections - obtained via Bing", "of words) dicts for each image bow_dim = 0 GT_BOW = {} GT_OBJECTS", "1) return before + after def create_h5(): # load required keys scenes_path =", "scene_file.split('/')[-1].split('_')[0] images = [] utterences = [] objects = [] coordinates = []", "+= ['<drawer>'] + nltk.word_tokenize(drawer) description = [w for w in description if w", "scaling_ratio = np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img, (128, 128)) # load spelling", "1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting", "to parse and read raw CoDraw data and save it in HDF5 format", "'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 
'hello'] # start saving data into hdf5;", "data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id)", "# if there is no image for current turn: merge with next turn", "identify if scene belongs to train / val / test split = scene_file.split('/')[-1].split('_')[0]", "add delimiting tokens: <teller>, <drawer> if teller != '': description += ['<teller>'] +", "cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ', description)) objects.append(bow)", "turn = scene['dialog'][i] # lowercase all messages teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d'])", "= h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id) else: print(scene_id)", "GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) # mark purely chitchat turns to be", "- obtained via Bing Spell Check API with open(spell_check, 'rb') as f: spell_check", "GT_BOW[image] = bow[:, 0] scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image]", "as f: keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before =", "= {} with open(codraw_extracted_coords, 'r') as f: for line in f: splits =", "0 c_val = 0 c_test = 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify", "test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test =", "image, instruction, objects bow, object coordinates for saving if len(description) > 0: image", "1]) background_img = cv2.resize(background_img, (128, 128)) # load spelling corrections - obtained via", "turn hamming_distance = np.sum(bow - prev_bow) turn = scene['dialog'][i] # lowercase all messages", "+ nltk.word_tokenize(teller) if drawer != '': description += ['<drawer>'] + nltk.word_tokenize(drawer) description =", "h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects',", "obtained via Bing Spell Check API with open(spell_check, 'rb') as f: spell_check =", "'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords)", "each image bow_dim = 0 GT_BOW = {} GT_OBJECTS = {} with open(codraw_extracted_coords,", "image = splits[0] split_coords = lambda x: [int(c) for c in x.split(',')] bow", "in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token']", "'thanks', 'bye', 'hello'] # start saving data into hdf5; loop over all scenes", "'hello'] # start saving data into hdf5; loop over all scenes c_train =", "drawer = str.lower(turn['msg_d']) # clear chitchat turns if teller in chitchat: teller =", "c_test = 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # 
identify if scene belongs to", "spelling corrections - obtained via Bing Spell Check API with open(spell_check, 'rb') as", "h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set objects and bow (bag of words) dicts", "height, width, scaling parameters h, w, _ = background_img.shape scale_x = 128. /", "format for GeNeVA-GAN \"\"\" from glob import glob import json import os import", "instruction, objects bow, object coordinates for saving if len(description) > 0: image =", "idx = 0 prev_bow = np.zeros((bow_dim)) description = [] for i in range(len(scene['dialog'])):", "idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there is no image for current", "= replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting tokens: <teller>, <drawer> if teller", "/ h scaling_ratio = np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img, (128, 128)) #", "= 0 c_val = 0 c_test = 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): #", "no image for current turn: merge with next turn if turn['abs_d'] == '':", "Copyright (c) Microsoft Corporation. # Licensed under the MIT license. \"\"\" Script to", "for w in description if w not in chitchat] description = [w for", "= (bow[:, 1:] * scaling).astype(int) # mark purely chitchat turns to be removed", "> 0: if split == 'train': scene = h5_train.create_group(str(c_train)) c_train += 1 elif", "in chitchat: teller = '' if drawer in chitchat: drawer = '' #", "if hamming_distance < 1: prev_bow = bow idx += 1 continue # queue", "not in chitchat] description = [w for w in description if w not", "suggestions returned by Bing Spell Check API if teller in spell_check and len(spell_check[teller]['flaggedTokens'])", "val / test split = scene_file.split('/')[-1].split('_')[0] images = [] utterences = [] objects", "tok_replace, 1) return before + after def create_h5(): # load required keys scenes_path", "keys['codraw_scenes'] images_path = keys['codraw_images'] background_img = cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check']", "license. \"\"\" Script to parse and read raw CoDraw data and save it", "= flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer in spell_check and", "= scene['image_id'] # loop over turns in a single scene idx = 0", "next turn if turn['abs_d'] == '': continue # if no new object is", "in chitchat: drawer = '' # replace with spelling suggestions returned by Bing", "tok_replace) # add delimiting tokens: <teller>, <drawer> if teller != '': description +=", "w not in chitchat] description = [w for w in description if w", "assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace)", "= keys['codraw_extracted_coordinates'] # set height, width, scaling parameters h, w, _ = background_img.shape", "utterences = [] objects = [] coordinates = [] with open(scene_file, 'r') as", "scale_x = 128. / w scale_y = 128. 
/ h scaling_ratio = np.array([scale_x,", "= flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer", "objects = [] coordinates = [] with open(scene_file, 'r') as f: scene =", "coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there is no image for current turn:", "raw CoDraw data and save it in HDF5 format for GeNeVA-GAN \"\"\" from", "purely chitchat turns to be removed chitchat = ['hi', 'done', 'ok', 'alright', 'okay',", "add current scene's data to hdf5 if len(images) > 0: if split ==", "if w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)]", "msg[offset:] after = after.replace(tok, tok_replace, 1) return before + after def create_h5(): #", "in spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace", "tok, tok_offset, tok_replace) if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token", "scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) #", "\"\"\" from glob import glob import json import os import pickle import string", "0: for flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions'])", "h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path,", "= keys['codraw_scenes'] images_path = keys['codraw_images'] background_img = cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check =", "1:] * scaling).astype(int) # mark purely chitchat turns to be removed chitchat =", "under the MIT license. 
\"\"\" Script to parse and read raw CoDraw data", "h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width,", "return before + after def create_h5(): # load required keys scenes_path = keys['codraw_scenes']", "tok_replace) if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token in spell_check[drawer]['flaggedTokens']:", "!= '': description += ['<teller>'] + nltk.word_tokenize(teller) if drawer != '': description +=", "for line in f: splits = line.split('\\t') image = splits[0] split_coords = lambda", "API if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in spell_check[teller]['flaggedTokens']:", "dicts for each image bow_dim = 0 GT_BOW = {} GT_OBJECTS = {}", "= after.replace(tok, tok_replace, 1) return before + after def create_h5(): # load required", "tokens: <teller>, <drawer> if teller != '': description += ['<teller>'] + nltk.word_tokenize(teller) if", "queue image, instruction, objects bow, object coordinates for saving if len(description) > 0:", "prev_bow = np.zeros((bow_dim)) description = [] for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id,", "import yaml with open('config.yml', 'r') as f: keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg,", "[] utterences = [] objects = [] coordinates = [] with open(scene_file, 'r')", "nltk import numpy as np from tqdm import tqdm import yaml with open('config.yml',", "1 elif split == 'val': scene = h5_val.create_group(str(c_val)) c_val += 1 elif split", "no new object is added in image for current turn: merge with next", "np.sum(bow - prev_bow) turn = scene['dialog'][i] # lowercase all messages teller = str.lower(turn['msg_t'])", "drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting tokens: <teller>, <drawer> if", "this turn hamming_distance = np.sum(bow - prev_bow) turn = scene['dialog'][i] # lowercase all", "f: scene = json.load(f) scene_id = scene['image_id'] # loop over turns in a", "drawer = '' # replace with spelling suggestions returned by Bing Spell Check", "!= '': description += ['<drawer>'] + nltk.word_tokenize(drawer) description = [w for w in", "data and save it in HDF5 format for GeNeVA-GAN \"\"\" from glob import", "open(scene_file, 'r') as f: scene = json.load(f) scene_id = scene['image_id'] # loop over", "split == 'val': scene = h5_val.create_group(str(c_val)) c_val += 1 elif split == 'test':", "in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if scene belongs to train / val / test", "Check API if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in", "(128, 128)) # load spelling corrections - obtained via Bing Spell Check API", "from glob import glob import json import os import pickle import string import", "tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion']", "# add delimiting tokens: <teller>, <drawer> if teller != '': description += ['<teller>']", "for current turn: merge with next turn if hamming_distance < 1: prev_bow =", "turns in a single scene idx = 0 prev_bow = np.zeros((bow_dim)) description =", "bow, object coordinates for saving if len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id,", "description += ['<teller>'] + 
nltk.word_tokenize(teller) if drawer != '': description += ['<drawer>'] +", "offset, tok_replace): before = msg[:offset] after = msg[offset:] after = after.replace(tok, tok_replace, 1)", "== 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) # add", "for current turn: merge with next turn if turn['abs_d'] == '': continue #", "= h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt)", "and bow (bag of words) dicts for each image bow_dim = 0 GT_BOW", "def replace_at_offset(msg, tok, offset, tok_replace): before = msg[:offset] after = msg[offset:] after =", "delimiting tokens: <teller>, <drawer> if teller != '': description += ['<teller>'] + nltk.word_tokenize(teller)", "import os import pickle import string import cv2 import h5py import nltk import", "GT_BOW = {} GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as f: for line", "if teller in chitchat: teller = '' if drawer in chitchat: drawer =", "tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting tokens:", "with open(spell_check, 'rb') as f: spell_check = pickle.load(f) # create hdf5 files for", "# if no new object is added in image for current turn: merge", "in f: splits = line.split('\\t') image = splits[0] split_coords = lambda x: [int(c)", "coordinates = [] with open(scene_file, 'r') as f: scene = json.load(f) scene_id =", "split == 'train': scene = h5_train.create_group(str(c_train)) c_train += 1 elif split == 'val':", "for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in", "turn if turn['abs_d'] == '': continue # if no new object is added", "(128, 128)) images.append(image) utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords) description = [] idx +=", "def create_h5(): # load required keys scenes_path = keys['codraw_scenes'] images_path = keys['codraw_images'] background_img", "= str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear chitchat turns if teller in chitchat:", "and read raw CoDraw data and save it in HDF5 format for GeNeVA-GAN", "cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height,", "w in description if w not in chitchat] description = [w for w", "axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) # mark purely chitchat turns", "keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width, scaling parameters", "!= 0: for flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert", "1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates))", "<teller>, <drawer> if teller != '': description += ['<teller>'] + nltk.word_tokenize(teller) if drawer", "# identify if scene belongs to train / val / test split =", "1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) # mark purely 
chitchat turns to", "[] with open(scene_file, 'r') as f: scene = json.load(f) scene_id = scene['image_id'] #", "required keys scenes_path = keys['codraw_scenes'] images_path = keys['codraw_images'] background_img = cv2.imread(keys['codraw_background']) h5_path =", "teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in spell_check[teller]['flaggedTokens']: tok =", "[w for w in description if w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id,", "= pickle.load(f) # create hdf5 files for train, val, test h5_train = h5py.File(os.path.join(h5_path,", "'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set objects and bow (bag", "+ nltk.word_tokenize(drawer) description = [w for w in description if w not in", "width, scaling parameters h, w, _ = background_img.shape scale_x = 128. / w", "'' if drawer in chitchat: drawer = '' # replace with spelling suggestions", "idx += 1 prev_bow = bow # add current scene's data to hdf5", "dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id', data=scene_id) else: print(scene_id) if __name__ == '__main__':", "', description)) objects.append(bow) coordinates.append(coords) description = [] idx += 1 prev_bow = bow", "CoDraw data and save it in HDF5 format for GeNeVA-GAN \"\"\" from glob", "it in HDF5 format for GeNeVA-GAN \"\"\" from glob import glob import json", "line.split('\\t') image = splits[0] split_coords = lambda x: [int(c) for c in x.split(',')]", "bow (bag of words) dicts for each image bow_dim = 0 GT_BOW =", "in description if w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords =", "open(codraw_extracted_coords, 'r') as f: for line in f: splits = line.split('\\t') image =", "hdf5; loop over all scenes c_train = 0 c_val = 0 c_test =", "Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before = msg[:offset] after = msg[offset:] after", "'test': scene = h5_test.create_group(str(c_test)) c_test += 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences',", "os import pickle import string import cv2 import h5py import nltk import numpy", "in HDF5 format for GeNeVA-GAN \"\"\" from glob import glob import json import", "drawer in chitchat: drawer = '' # replace with spelling suggestions returned by", "tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller,", "object is added in image for current turn: merge with next turn if", "image for current turn: merge with next turn if hamming_distance < 1: prev_bow", "removed chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello'] # start", "1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer in", "/ test split = scene_file.split('/')[-1].split('_')[0] images = [] utterences = [] objects =", "= ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello'] # start saving data", "in a single scene idx = 0 prev_bow = np.zeros((bow_dim)) description = []", "'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 
'codraw_test.h5'), 'w') h5_train.create_dataset('background',", "objects.append(bow) coordinates.append(coords) description = [] idx += 1 prev_bow = bow # add", "0 GT_BOW = {} GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as f: for", "scene idx = 0 prev_bow = np.zeros((bow_dim)) description = [] for i in", "with spelling suggestions returned by Bing Spell Check API if teller in spell_check", "save it in HDF5 format for GeNeVA-GAN \"\"\" from glob import glob import", "c_val = 0 c_test = 0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if", "0] scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:]", "hamming_distance < 1: prev_bow = bow idx += 1 continue # queue image,", "= line.split('\\t') image = splits[0] split_coords = lambda x: [int(c) for c in", "range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in this turn hamming_distance", "= lambda x: [int(c) for c in x.split(',')] bow = np.array([split_coords(b) for b", "load required keys scenes_path = keys['codraw_scenes'] images_path = keys['codraw_images'] background_img = cv2.imread(keys['codraw_background']) h5_path", "= flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok,", "(c) Microsoft Corporation. # Licensed under the MIT license. \"\"\" Script to parse", "for each image bow_dim = 0 GT_BOW = {} GT_OBJECTS = {} with", "via Bing Spell Check API with open(spell_check, 'rb') as f: spell_check = pickle.load(f)", "image for current turn: merge with next turn if turn['abs_d'] == '': continue", "turns if teller in chitchat: teller = '' if drawer in chitchat: drawer", "== 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer", "'': description += ['<teller>'] + nltk.word_tokenize(teller) if drawer != '': description += ['<drawer>']", "= splits[0] split_coords = lambda x: [int(c) for c in x.split(',')] bow =", "API with open(spell_check, 'rb') as f: spell_check = pickle.load(f) # create hdf5 files", "in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token in spell_check[drawer]['flaggedTokens']: tok = flagged_token['token']", "GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there is no image for", "data to hdf5 if len(images) > 0: if split == 'train': scene =", "[w for w in description if w not in chitchat] description = [w", "spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset", "flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1", "= 0 GT_BOW = {} GT_OBJECTS = {} with open(codraw_extracted_coords, 'r') as f:", "in description if w not in chitchat] description = [w for w in", "scene's data to hdf5 if len(images) > 0: if split == 'train': scene", "added in this turn hamming_distance = np.sum(bow - prev_bow) turn = scene['dialog'][i] #", "turn if hamming_distance < 1: prev_bow = bow idx += 1 continue #", "drawer != '': description += ['<drawer>'] + nltk.word_tokenize(drawer) description = [w for w", "json.load(f) scene_id = scene['image_id'] # loop over turns in a single scene idx", "128. 
/ w scale_y = 128. / h scaling_ratio = np.array([scale_x, scale_y, 1])", "corrections - obtained via Bing Spell Check API with open(spell_check, 'rb') as f:", "in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace", "object coordinates for saving if len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx)))", "scene['dialog'][i] # lowercase all messages teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear", "keys['codraw_extracted_coordinates'] # set height, width, scaling parameters h, w, _ = background_img.shape scale_x", "turn['abs_d'] == '': continue # if no new object is added in image", "= scene['dialog'][i] # lowercase all messages teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) #", "idx)] # new objects added in this turn hamming_distance = np.sum(bow - prev_bow)", "bow_dim = len(bow) GT_BOW[image] = bow[:, 0] scaling = scaling_ratio * np.expand_dims(bow[:, 0],", "{} with open(codraw_extracted_coords, 'r') as f: for line in f: splits = line.split('\\t')", "0: if split == 'train': scene = h5_train.create_group(str(c_train)) c_train += 1 elif split", "str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear chitchat turns if teller in chitchat: teller", "continue # queue image, instruction, objects bow, object coordinates for saving if len(description)", "scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects)) scene.create_dataset('coords', data=np.array(coordinates)) scene.create_dataset('scene_id',", "# set objects and bow (bag of words) dicts for each image bow_dim", "(bow[:, 1:] * scaling).astype(int) # mark purely chitchat turns to be removed chitchat", "* np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) # mark", "0: for flagged_token in spell_check[drawer]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions'])", "elif split == 'val': scene = h5_val.create_group(str(c_val)) c_val += 1 elif split ==", "data=background_img) # set objects and bow (bag of words) dicts for each image", "create hdf5 files for train, val, test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val", "= scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int)", "tok_offset, tok_replace) # add delimiting tokens: <teller>, <drawer> if teller != '': description", "keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width, scaling parameters h, w, _", "utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords) description = [] idx += 1 prev_bow =", "if split == 'train': scene = h5_train.create_group(str(c_train)) c_train += 1 elif split ==", "len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128))", "f: splits = line.split('\\t') image = splits[0] split_coords = lambda x: [int(c) for", "tok_offset, tok_replace) if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0: for flagged_token in", "h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = 
h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background',", "nltk.word_tokenize(teller) if drawer != '': description += ['<drawer>'] + nltk.word_tokenize(drawer) description = [w", "Corporation. # Licensed under the MIT license. \"\"\" Script to parse and read", "flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace) if drawer in spell_check and len(spell_check[drawer]['flaggedTokens'])", "yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before = msg[:offset] after = msg[offset:]", "= [w for w in description if w not in string.punctuation] bow =", "scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] *", "open('config.yml', 'r') as f: keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace):", "Spell Check API with open(spell_check, 'rb') as f: spell_check = pickle.load(f) # create", "0 prev_bow = np.zeros((bow_dim)) description = [] for i in range(len(scene['dialog'])): bow =", "< 1: prev_bow = bow idx += 1 continue # queue image, instruction,", "with open(scene_file, 'r') as f: scene = json.load(f) scene_id = scene['image_id'] # loop", "<drawer> if teller != '': description += ['<teller>'] + nltk.word_tokenize(teller) if drawer !=", "= msg[:offset] after = msg[offset:] after = after.replace(tok, tok_replace, 1) return before +", "over turns in a single scene idx = 0 prev_bow = np.zeros((bow_dim)) description", "import cv2 import h5py import nltk import numpy as np from tqdm import", "= json.load(f) scene_id = scene['image_id'] # loop over turns in a single scene", "all messages teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear chitchat turns if", "coordinates.append(coords) description = [] idx += 1 prev_bow = bow # add current", "= str.lower(turn['msg_d']) # clear chitchat turns if teller in chitchat: teller = ''", "string import cv2 import h5py import nltk import numpy as np from tqdm", "bow # add current scene's data to hdf5 if len(images) > 0: if", "if no new object is added in image for current turn: merge with", "scene = h5_val.create_group(str(c_val)) c_val += 1 elif split == 'test': scene = h5_test.create_group(str(c_test))", "np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img, (128, 128)) # load spelling corrections -", "'alright', 'okay', 'thanks', 'bye', 'hello'] # start saving data into hdf5; loop over", "= 128. / w scale_y = 128. 
/ h scaling_ratio = np.array([scale_x, scale_y,", "+ after def create_h5(): # load required keys scenes_path = keys['codraw_scenes'] images_path =", "hamming_distance = np.sum(bow - prev_bow) turn = scene['dialog'][i] # lowercase all messages teller", "assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset, tok_replace)", "before + after def create_h5(): # load required keys scenes_path = keys['codraw_scenes'] images_path", "= h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'),", "'r') as f: keys = yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before", "GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in this turn hamming_distance = np.sum(bow -", "after = msg[offset:] after = after.replace(tok, tok_replace, 1) return before + after def", "for saving if len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image =", "= bow # add current scene's data to hdf5 if len(images) > 0:", "w in description if w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords", "as f: for line in f: splits = line.split('\\t') image = splits[0] split_coords", "= '' # replace with spelling suggestions returned by Bing Spell Check API", "c_test += 1 scene.create_dataset('images', data=images) dt = h5py.special_dtype(vlen=str) scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt) scene.create_dataset('objects', data=np.array(objects))", "spell_check = pickle.load(f) # create hdf5 files for train, val, test h5_train =", "# add current scene's data to hdf5 if len(images) > 0: if split", "h5_test.create_dataset('background', data=background_img) # set objects and bow (bag of words) dicts for each", "scale_y = 128. / h scaling_ratio = np.array([scale_x, scale_y, 1]) background_img = cv2.resize(background_img,", "glob import json import os import pickle import string import cv2 import h5py", "= cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set", "not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if", "flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok, tok_offset,", "= keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width, scaling parameters h, w,", "for w in description if w not in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)]", "[] for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added", "the MIT license. \"\"\" Script to parse and read raw CoDraw data and", "= np.array([split_coords(b) for b in splits[1].split()]) bow_dim = len(bow) GT_BOW[image] = bow[:, 0]", "h, w, _ = background_img.shape scale_x = 128. 
/ w scale_y = 128.", "before = msg[:offset] after = msg[offset:] after = after.replace(tok, tok_replace, 1) return before", "Spell Check API if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0: for flagged_token", "if drawer != '': description += ['<drawer>'] + nltk.word_tokenize(drawer) description = [w for", "set height, width, scaling parameters h, w, _ = background_img.shape scale_x = 128.", "tok, tok_offset, tok_replace) # add delimiting tokens: <teller>, <drawer> if teller != '':", "to hdf5 if len(images) > 0: if split == 'train': scene = h5_train.create_group(str(c_train))", "= yaml.load(f, Loader=yaml.FullLoader) def replace_at_offset(msg, tok, offset, tok_replace): before = msg[:offset] after =", "= keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] # set height, width, scaling", "background_img = cv2.imread(keys['codraw_background']) h5_path = keys['codraw_hdf5_folder'] spell_check = keys['codraw_spell_check'] codraw_extracted_coords = keys['codraw_extracted_coordinates'] #", "np.zeros((bow_dim)) description = [] for i in range(len(scene['dialog'])): bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] #", "lambda x: [int(c) for c in x.split(',')] bow = np.array([split_coords(b) for b in", "[] coordinates = [] with open(scene_file, 'r') as f: scene = json.load(f) scene_id", "description)) objects.append(bow) coordinates.append(coords) description = [] idx += 1 prev_bow = bow #", "'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) #", "teller = '' if drawer in chitchat: drawer = '' # replace with", "= scene_file.split('/')[-1].split('_')[0] images = [] utterences = [] objects = [] coordinates =", "if there is no image for current turn: merge with next turn if", "next turn if hamming_distance < 1: prev_bow = bow idx += 1 continue", "spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace =", "= bow[:, 0] scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] =", "len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) #", "in x.split(',')] bow = np.array([split_coords(b) for b in splits[1].split()]) bow_dim = len(bow) GT_BOW[image]", "teller in chitchat: teller = '' if drawer in chitchat: drawer = ''", "flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] drawer =", "Script to parse and read raw CoDraw data and save it in HDF5", "import json import os import pickle import string import cv2 import h5py import", "'': description += ['<drawer>'] + nltk.word_tokenize(drawer) description = [w for w in description", "scene = json.load(f) scene_id = scene['image_id'] # loop over turns in a single", "as np from tqdm import tqdm import yaml with open('config.yml', 'r') as f:", "h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w') h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w')", "h5py import nltk import numpy as np from tqdm import tqdm import yaml", 
"description = [] idx += 1 prev_bow = bow # add current scene's", "in string.punctuation] bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)] # if there", "str.lower(turn['msg_d']) # clear chitchat turns if teller in chitchat: teller = '' if", "tqdm import yaml with open('config.yml', 'r') as f: keys = yaml.load(f, Loader=yaml.FullLoader) def", "for flagged_token in spell_check[teller]['flaggedTokens']: tok = flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) ==", "h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set", "0 for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if scene belongs to train /", "# Licensed under the MIT license. \"\"\" Script to parse and read raw", "to train / val / test split = scene_file.split('/')[-1].split('_')[0] images = [] utterences", "= h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img) h5_test.create_dataset('background', data=background_img) # set objects", "# create hdf5 files for train, val, test h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w')", "in image for current turn: merge with next turn if hamming_distance < 1:", "saving data into hdf5; loop over all scenes c_train = 0 c_val =", "np.expand_dims(bow[:, 0], axis=1).repeat(3, 1) GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int) # mark purely", "single scene idx = 0 prev_bow = np.zeros((bow_dim)) description = [] for i", "bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)] # new objects added in this turn hamming_distance =", "flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller =", "'': continue # if no new object is added in image for current", "= msg[offset:] after = after.replace(tok, tok_replace, 1) return before + after def create_h5():", "import numpy as np from tqdm import tqdm import yaml with open('config.yml', 'r')", "prev_bow = bow idx += 1 continue # queue image, instruction, objects bow,", "> 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image, (128, 128)) images.append(image)", "bow idx += 1 continue # queue image, instruction, objects bow, object coordinates", "scaling).astype(int) # mark purely chitchat turns to be removed chitchat = ['hi', 'done',", "create_h5(): # load required keys scenes_path = keys['codraw_scenes'] images_path = keys['codraw_images'] background_img =", "all scenes c_train = 0 c_val = 0 c_test = 0 for scene_file", "1 prev_bow = bow # add current scene's data to hdf5 if len(images)", "scene_id = scene['image_id'] # loop over turns in a single scene idx =", "cv2.resize(image, (128, 128)) images.append(image) utterences.append(str.join(' ', description)) objects.append(bow) coordinates.append(coords) description = [] idx", "['<teller>'] + nltk.word_tokenize(teller) if drawer != '': description += ['<drawer>'] + nltk.word_tokenize(drawer) description", "new objects added in this turn hamming_distance = np.sum(bow - prev_bow) turn =", "w, _ = background_img.shape scale_x = 128. / w scale_y = 128. 
/", "= h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w') h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w') h5_train.create_dataset('background', data=background_img) h5_val.create_dataset('background', data=background_img)", "# load required keys scenes_path = keys['codraw_scenes'] images_path = keys['codraw_images'] background_img = cv2.imread(keys['codraw_background'])", "teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear chitchat turns if teller in", "= h5_train.create_group(str(c_train)) c_train += 1 elif split == 'val': scene = h5_val.create_group(str(c_val)) c_val", "= flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller = replace_at_offset(teller, tok,", "as f: scene = json.load(f) scene_id = scene['image_id'] # loop over turns in", "np.array([split_coords(b) for b in splits[1].split()]) bow_dim = len(bow) GT_BOW[image] = bow[:, 0] scaling", "'val': scene = h5_val.create_group(str(c_val)) c_val += 1 elif split == 'test': scene =", "there is no image for current turn: merge with next turn if turn['abs_d']", "merge with next turn if hamming_distance < 1: prev_bow = bow idx +=", "messages teller = str.lower(turn['msg_t']) drawer = str.lower(turn['msg_d']) # clear chitchat turns if teller", "hdf5 if len(images) > 0: if split == 'train': scene = h5_train.create_group(str(c_train)) c_train", "current scene's data to hdf5 if len(images) > 0: if split == 'train':", "saving if len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image = cv2.resize(image,", "'rb') as f: spell_check = pickle.load(f) # create hdf5 files for train, val,", "flagged_token['suggestions'][0]['suggestion'] drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace) # add delimiting tokens: <teller>, <drawer>", "128)) # load spelling corrections - obtained via Bing Spell Check API with", "added in image for current turn: merge with next turn if hamming_distance <", "'okay', 'thanks', 'bye', 'hello'] # start saving data into hdf5; loop over all", "= flagged_token['token'] tok_offset = flagged_token['offset'] assert len(flagged_token['suggestions']) == 1 tok_replace = flagged_token['suggestions'][0]['suggestion'] teller", "coordinates for saving if len(description) > 0: image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx))) image", "Bing Spell Check API with open(spell_check, 'rb') as f: spell_check = pickle.load(f) #", "replace with spelling suggestions returned by Bing Spell Check API if teller in", "= 0 prev_bow = np.zeros((bow_dim)) description = [] for i in range(len(scene['dialog'])): bow", "with open(codraw_extracted_coords, 'r') as f: for line in f: splits = line.split('\\t') image", "c in x.split(',')] bow = np.array([split_coords(b) for b in splits[1].split()]) bow_dim = len(bow)", "scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))): # identify if scene belongs to train / val /", "background_img.shape scale_x = 128. / w scale_y = 128. / h scaling_ratio =", "import pickle import string import cv2 import h5py import nltk import numpy as", "* scaling).astype(int) # mark purely chitchat turns to be removed chitchat = ['hi',", "w scale_y = 128. 
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Script to parse and read raw CoDraw data and save it in HDF5 format for GeNeVA-GAN
"""
from glob import glob
import json
import os
import pickle
import string

import cv2
import h5py
import nltk
import numpy as np
import yaml
from tqdm import tqdm

with open('config.yml', 'r') as f:
    keys = yaml.load(f, Loader=yaml.FullLoader)


def replace_at_offset(msg, tok, offset, tok_replace):
    before = msg[:offset]
    after = msg[offset:]
    after = after.replace(tok, tok_replace, 1)
    return before + after


def create_h5():
    # load required keys
    scenes_path = keys['codraw_scenes']
    images_path = keys['codraw_images']
    background_img = cv2.imread(keys['codraw_background'])
    h5_path = keys['codraw_hdf5_folder']
    spell_check = keys['codraw_spell_check']
    codraw_extracted_coords = keys['codraw_extracted_coordinates']

    h, w, _ = background_img.shape
    scale_x = 128. / w
    scale_y = 128. / h
    scaling_ratio = np.array([scale_x, scale_y, 1])
    background_img = cv2.resize(background_img, (128, 128))

    # load spelling corrections - obtained via Bing Spell Check API
    with open(spell_check, 'rb') as f:
        spell_check = pickle.load(f)

    # create hdf5 files for train, val, test
    h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w')
    h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w')
    h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w')
    h5_train.create_dataset('background', data=background_img)
    h5_val.create_dataset('background', data=background_img)
    h5_test.create_dataset('background', data=background_img)

    # set objects and bow (bag of words) dicts for each image
    bow_dim = 0
    GT_BOW = {}
    GT_OBJECTS = {}
    with open(codraw_extracted_coords, 'r') as f:
        for line in f:
            splits = line.split('\t')
            image = splits[0]
            split_coords = lambda x: [int(c) for c in x.split(',')]
            bow = np.array([split_coords(b) for b in splits[1].split()])
            bow_dim = len(bow)
            GT_BOW[image] = bow[:, 0]
            scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1)
            GT_OBJECTS[image] = (bow[:,

    # start saving data into hdf5; loop over all scenes
    c_train = 0
    c_val = 0
    c_test = 0
    for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))):
        # identify if scene belongs to train / val / test
        split = scene_file.split('/')[-1].split('_')[0]
        images = []
        utterences = []
        objects = []
        coordinates = []
        description = []
        with open(scene_file, 'r') as f:
            scene = json.load(f)
        scene_id = scene['image_id']

        # loop over turns in a single scene
        idx = 0
        prev_bow = np.zeros((bow_dim))
        for i in range(len(scene['dialog'])):
            turn = scene['dialog'][i]
            # lowercase all messages
            teller = str.lower(turn['msg_t'])
            drawer = str.lower(turn['msg_d'])
            # clear chitchat turns
            if teller in chitchat:
                teller = ''
            if drawer in chitchat:
                drawer = ''
            # replace with spelling suggestions returned by Bing Spell Check API
            if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0:
                for flagged_token in spell_check[teller]['flaggedTokens']:
                    tok = flagged_token['token']
                    tok_offset = flagged_token['offset']
                    assert ...
                    teller = replace_at_offset(teller, tok, tok_offset, tok_replace)
            if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0:
                for flagged_token in spell_check[drawer]['flaggedTokens']:
                    tok = flagged_token['token']
                    tok_offset = flagged_token['offset']
                    assert ...
                    drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace)

            if teller != '':
                description += ['<teller>'] + nltk.word_tokenize(teller)
            if drawer != '':
                description += ['<drawer>'] + nltk.word_tokenize(drawer)
            description = [w for w in description if w not in chitchat]
            description = [w for w in description if w not in string.punctuation]

            bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)]
            coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)]
            # if there is no image for current turn: merge with next turn
            if turn['abs_d'] == '':
                continue
            # if no new object is added in image for current turn: merge with next turn
            if np.sum(bow - prev_bow) == 0:
                continue

            ...
            image = cv2.resize(image, (128, 128))
            images.append(image)
            utterences.append(str.join(' ', description))
            objects.append(bow)
            coordinates.append(coords)
            description = []
            idx += 1

        if len(images) > 0:
            if split == 'train':
                scene = h5_train.create_group(str(c_train))
                c_train += 1
            elif split == 'val':
                scene = h5_val.create_group(str(c_val))
                c_val += 1
            elif split == 'test':
                scene = h5_test.create_group(str(c_test))
                c_test += 1
            scene.create_dataset('images', data=images)
            dt = h5py.special_dtype(vlen=str)
            scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt)
            scene.create_dataset('objects', data=np.array(objects))
            scene.create_dataset('coords', data=np.array(coordinates))
            scene.create_dataset('scene_id', data=scene_id)
        else:
            print(scene_id)
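The replace_at_offset helper in the listing above splices a single spelling correction into a message at a character offset, replacing only the first occurrence of the flagged token from that point on. A minimal standalone sketch of its behaviour; the message, offset, and suggestion below are invented for illustration and only mimic the shape of one flagged token from a Bing Spell Check response:

# standalone sketch; helper copied from the script above, inputs are made up
def replace_at_offset(msg, tok, offset, tok_replace):
    before = msg[:offset]
    after = msg[offset:]
    after = after.replace(tok, tok_replace, 1)
    return before + after

msg = "draw a sun in the top rigt corner"
flagged = {'token': 'rigt', 'offset': 22, 'suggestion': 'right'}
print(replace_at_offset(msg, flagged['token'], flagged['offset'], flagged['suggestion']))
# -> draw a sun in the top right corner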
<gh_stars>10-100
#!/usr/bin/python3
import sys
import re as regex

# Our scripts
import tools
import settings


def main():
    tools.pprint()
    current_version = settings.get_setting('current_eo_version')
    latest_version = settings.get_setting('latest_eo_version')
    is_latest_version, latest_version = is_update_needed(current_version, latest_version)
    if is_latest_version:
        return
    found_files = tools.search_files_by_pattern('../../', 'pom.xml', recursive=True)
    update_version_in_files(found_files, latest_version)
    settings.set_setting('current_eo_version', latest_version)
    tools.pprint('EO version updated\n')


def is_update_needed(current_version, latest_version):
    compare = tools.version_compare(current_version, latest_version)
    is_latest_version = False
    if compare == 1:
        latest_version = current_version
        tools.pprint(f'Manual update latest EO version to {latest_version}', status='WARN')
    elif compare == 0:
        is_latest_version = True
        tools.pprint('We use latest EO version', status='PASS')
        tools.pprint()
    else:
        tools.pprint(f'We use old EO version: "{current_version}"', status='WARN')
    tools.pprint(f'Start updating files')
    return is_latest_version, latest_version


def update_version_in_files(files, latest_version):
    tools.pprint('Updating version')
    count_changed_files = 0
    pattern = r'<eolang\.version>.*<\/eolang\.version>'
    latest_version_declaration = f'<eolang.version>{latest_version}</eolang.version>'
    for file in files:
        with open(file, 'r') as f:
            data = f.read()
        result = regex.search(pattern, data)
        if (not result) or (latest_version_declaration in result.group()):
            continue
        new_data = regex.sub(pattern, latest_version_declaration, data)
        with open(file, 'w') as f:
            f.write(new_data)
        count_changed_files += 1
    tools.pprint(f'{count_changed_files} files updated')
    return count_changed_files


if __name__ == '__main__':
    tools.move_to_script_dir(sys.argv[0])
    main()
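update_version_in_files above is a plain regex rewrite of the <eolang.version> property in each pom.xml, skipping files that already declare the target version. A minimal sketch of the same substitution on an in-memory string, using the standard library re module; the version numbers are made up for illustration:

import re

pom = """<properties>
  <eolang.version>0.21.0</eolang.version>
</properties>"""

pattern = r'<eolang\.version>.*<\/eolang\.version>'
replacement = '<eolang.version>0.22.1</eolang.version>'

# only rewrite when the property exists and is not already at the target version
if re.search(pattern, pom) and replacement not in pom:
    pom = re.sub(pattern, replacement, pom)
print(pom)  # the <eolang.version> line now reads 0.22.1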
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#     http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
"""
Count all test cases
"""
import os
import sys
import unittest
import coverage
from coverage import CoverageException

suite = unittest.TestSuite()
BASE_PATH = os.path.join(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
TEST_CASE_PATH = os.path.join(BASE_PATH, "test")
cov = coverage.coverage(include=[BASE_PATH + "/javcra/*"],
                        omit=["*__init__.py", "*/check_requires/*.py", "*/api/obscloud.py"])


def specify_case(file_path):
    """
    Test specify test cases
    Args:
        file_path: test cases file path
    Returns:
        discover result
    """
    discover = unittest.defaultTestLoader.discover(
        file_path, pattern="test*.py", top_level_dir=file_path
    )
    return discover


if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    args = sys.argv
    cov.start()
    test_case_files = [
        os.path.join(TEST_CASE_PATH, "test_start/"),
        os.path.join(TEST_CASE_PATH, "test_modify/"),
        os.path.join(TEST_CASE_PATH, "test_check/"),
        os.path.join(TEST_CASE_PATH, "test_release/")
    ]
    errors = []
    failures = []
    for file in test_case_files:
        runner_result = runner.run(specify_case(file))
        errors.extend(runner_result.errors)
        failures.extend(runner_result.failures)
    if any([errors, failures]):
        sys.exit(1)
    cov.stop()
    try:
        cov.report(show_missing=True)
        # cov.html_report()
    except CoverageException:
        print("No data to report")
        sys.exit(1)
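The runner above combines unittest discovery with coverage measurement: start coverage, discover and run each test package, then stop and report. A minimal sketch of the same pattern, assuming a hypothetical "tests" directory and a placeholder "mypackage" source path:

import unittest
import coverage

# "mypackage" and "tests" are placeholders; point them at a real package and test folder
cov = coverage.Coverage(include=["*/mypackage/*"])
cov.start()

suite = unittest.defaultTestLoader.discover("tests", pattern="test*.py", top_level_dir="tests")
result = unittest.TextTestRunner().run(suite)

cov.stop()
if result.errors or result.failures:
    raise SystemExit(1)
cov.report(show_missing=True)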
<reponame>PyUtilib/PyUtilib<gh_stars>10-100
import pyutilib.workflow
import pyutilib.component.core

# @usage:
import tasks_yz

driver = pyutilib.workflow.TaskDriver()
driver.register_task('TaskZ')
driver.register_task('TaskY')
print(driver.parse_args(['TaskZ', '--x=3', '--y=4']))
print(driver.parse_args(['TaskY', '--X=3', '--Y=4']))
# @:usage
import os
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch
from torch.autograd import Variable
from load_memmap import *


class ToTensor:
    """Convert ndarrays in data to Tensors."""

    @staticmethod
    def __call__(data):
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        # data = data.transpose((1, 0))
        data = np.array([data])
        data = torch.Tensor(data)
        if torch.cuda.is_available():
            data = data.cuda()
        return data


class AxonDataset(Dataset):
    """ Inherits pytorch Dataset class to load Axon Dataset """

    def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train',
                 transform=None, resize=None, normalise=False, read='npy'):
        """
        :param data_name (string)- data name to load/ save
        :param folder- location of dataset
        :param type - train or test dataset
        """
        self.data_name = data_name
        self.read = read
        self.transform = transform
        self.resize = resize
        self.normalise = normalise
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        if self.read == 'npy':
            self.x_data, self.y_data, _ = load_dataset(type, folder, data_name)
            self.len_data = len(self.x_data)
        elif self.read == 'image':
            self.folder = os.path.join(__location__, self.data_name, 'train')
            images_original = [img for img in os.listdir(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original"))]
            images_mask = [img for img in os.listdir(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask"))]
            self.images_mask = images_mask
            self.images_original = images_original
            self.images_mask.sort()
            self.images_original.sort()

    def __getitem__(self, idx):
        """gets samples from data according to idx
        :param idx- index to take
        example: data[10] -to get the 10th data sample"""
        if self.read == 'npy':
            if self.resize:
                sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize, self.resize))
                sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize, self.resize))
            else:
                sample_x_data = self.x_data[idx]
                sample_y_data = self.y_data[idx]
        elif self.read == 'image':
            data_path = self.images_original[idx]
            mask_path = self.images_mask[idx]
            sample_x_data = plt.imread(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original", data_path))
            sample_y_data = (plt.imread(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask", mask_path))).astype(float)
        sample_x_data = torch.Tensor(sample_x_data)
        sample_y_data = torch.Tensor(sample_y_data)
        if len(sample_x_data.shape) == 2:
            sample_x_data.unsqueeze_(0)
        if len(sample_y_data.shape) == 2:
            sample_y_data.unsqueeze_(0)
        # normalise between [-1,1]
        if self.normalise:
            sample_x_data = 2 * ((sample_x_data - torch.min(sample_x_data)) /
                                 (torch.max(sample_x_data) - torch.min(sample_x_data))) - 1
        data = [sample_x_data, sample_y_data]
        return data


class SyntheticDataset(Dataset):
    """ Inherits pytorch Dataset class to load Synthetic Axon Dataset """

    def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None):
        """
        :param num - number of data to generate
        :param data_name (string)- data name to load/ save
        :param type - train or test dataset
        """
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy')
        name_y = os.path.join(__location__, 'npy_data/' + data_name + '_y_data_' + type + '.npy')
        name_y_points = os.path.join(__location__, 'npy_data/' + data_name + '_y_points_data_' + type + '.npy')
        try:
            self.x_data = np.load(name_x)
            self.y_data = np.load(name_y)
            ...
        except:
            # if no dataset currently created, generate a new synthetic dataset with parameters args
            print('no dataset with the name')
            ...
        self.transform = transform
        self.resize = resize

    def read_tensor_dataset(self):
        """ converts dataset to tensors """
        tt = ToTensor()
        x_data = tt(self.x_data)
        y_data = tt(self.y_data)
        return x_data, y_data

    def __len__(self):
        """ get length of data example: len(data) """
        return self.len_data

    def __getitem__(self, idx):
        """gets samples from data according to idx
        :param idx- index to take
        example: data[10] -to get the 10th data sample"""
        if self.resize:
            sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize, self.resize))
            sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize, self.resize))
        else:
            sample_x_data = self.x_data[idx]
            sample_y_data = self.y_data[idx]
        sample_x_data = np.expand_dims(sample_x_data, axis=0)
        sample_y_data = np.expand_dims(sample_y_data, axis=0)
        sample_x_data = torch.Tensor(sample_x_data)
        sample_y_data = torch.Tensor(sample_y_data)
        data = [sample_x_data, sample_y_data]
        return data

    @staticmethod
    def data_to_tensor(x_data, y_data):
        """takes data and splits into a list of tensors- of which each list contains
        tensors of several samples (i.e. one id)
        :param x_data - the data
        :param y_data - the labels
        """
        tt = ToTensor()
        x_train_temp = tt(x_data)
        y_train_temp = tt(y_data)
        data = [x_train_temp, y_train_temp]
        return data

    @staticmethod
    def data_ids_to_tensor_list(x_data, y_data, ids):
        """takes data and splits into a list of tensors- of which each list contains
        tensors of several samples (i.e. one id)
        :param x_data - the data
        :param y_data - the labels
        :param ids - the ids corresponding to each sample
        """
        tt = ToTensor()
        unique_ids = np.unique(ids)
        data = [None] * unique_ids.size
        len = np.zeros(unique_ids.size).astype(int)
        for i in np.arange(unique_ids.size):
            ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int)
            len[i] = int(ind_id.size)
            x_train_temp = tt(x_data[ind_id])
            y_train_temp = tt(y_data[ind_id])
            data[i] = [x_train_temp[0], y_train_temp[0], len[i]]
        max_len = int(np.max(len))
        return data, max_len

    @staticmethod
    def create_variable(tensor):
        """wrap with gpu if available
        :param tensor - the tensor to wrap with Variable
        """
        # Do cuda() before wrapping with variable
        if torch.cuda.is_available():
            return Variable(tensor.cuda())
        else:
            return Variable(tensor)
os.path.join(os.path.dirname(os.path.abspath(__file__)),", "= data.cuda() return data @staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data and splits into", "= (plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\", mask_path))).astype( float) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data)", "splits into a list of tensors- of which each list contains tensors of", "y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]] max_len = int(np.max(len)) return data,", "data, max_len @staticmethod def create_variable(tensor): \"\"\"creates a Variable tensor with gpu if available", "tensor with gpu if available :param tensor - the tensor to wrap with", "resize self.normalise = normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy':", "list of tensors- of which each list contains tensors of several samples (i.e.", "if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else:", "self.len_data = len(self.x_data) elif self.read == 'image': self.folder = os.path.join(__location__,self.data_name,'train') images_original = [img", "+ data_name + '_y_points_data_' + type + '.npy') try: self.x_data = np.load(name_x, mmap_mode='r')", "in data to Tensors.\"\"\" @staticmethod def __call__(data): # swap color axis because #", "self.x_data = np.load(name_x, mmap_mode='r') self.y_data = np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points) except: #", "sample_y_data = np.expand_dims(sample_y_data, axis=0) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) data = [sample_x_data,", "- the labels \"\"\" tt = ToTensor() x_train_temp = tt(x_data) y_train_temp = tt(y_data)", "self.transform = transform self.resize = resize def read_tensor_dataset(self): \"\"\" converts dataset to tensors", "in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\"))] self.images_mask = images_mask self.images_original = images_original self.images_mask.sort() self.images_original.sort() self.len_data", ":param tensor - the tensor to wrap with Variable \"\"\" # Do cuda()", "'.npy') name_y_points = os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type + '.npy') try:", "'_x_data_' + type + '.npy') name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' +", "\"\"\" tt = ToTensor() unique_ids = np.unique(ids) data = [None] * unique_ids.size len", "self.read == 'npy': self.x_data, self.y_data, _ = load_dataset(type, folder, data_name) self.len_data = len(self.x_data)", "= normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': self.x_data, self.y_data,", "\"\"\" :param num - number of data to generate :param data_name (string)- data", "\"\"\"creates a Variable tensor with gpu if available :param tensor - the tensor", "example: len(data) \"\"\" return self.len_data def __getitem__(self, idx): \"\"\"gets samples from data according", "to load Synthetic Axon Dataset \"\"\" def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None):", "len = np.zeros(unique_ids.size).astype(int) for i in np.arange(unique_ids.size): ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int) len[i]", "(plt.imread( 
os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\", mask_path))).astype( float) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) if", "if len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0) if len(sample_y_data.shape) == 2: sample_y_data.unsqueeze_(0) # normalise between", "normalise between [-1,1] if self.normalise: sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data))", "- train or test dataset \"\"\" self.data_name = data_name self.read = read self.transform", "os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\", mask_path))).astype( float) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) if len(sample_x_data.shape)", "data_name + '_x_data_' + type + '.npy') name_y = os.path.join(__location__,'npy_data/' + data_name +", "data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': if self.resize:", "torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch from torch.autograd", "length of data example: len(data) \"\"\" return (len(self.x_data)) def __getitem__(self, idx): \"\"\"gets samples", "= transform self.resize = resize def read_tensor_dataset(self): \"\"\" converts dataset to tensors \"\"\"", "def __getitem__(self, idx): \"\"\"gets samples from data according to idx :param idx- index", "= ToTensor() x_train_temp = tt(x_data) y_train_temp = tt(y_data) data = [x_train_temp, y_train_temp] return", "__getitem__(self, idx): \"\"\"gets samples from data according to idx :param idx- index to", "np.zeros(unique_ids.size).astype(int) for i in np.arange(unique_ids.size): ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int) len[i] = int(ind_id.size)", "sample_y_data = self.y_data[idx] elif self.read == 'image': data_path = self.images_original[idx] mask_path = self.images_mask[idx]", "Axon Dataset \"\"\" def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'): \"\"\"", "return data class ToTensor: \"\"\"Convert ndarrays in data to Tensors.\"\"\" @staticmethod def __call__(data):", "torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1 data = [sample_x_data, sample_y_data] return data", "and splits into a list of tensors- of which each list contains tensors", "to wrap with Variable \"\"\" # Do cuda() before wrapping with variable if", "load_dataset(type, folder, data_name) self.len_data = len(self.x_data) elif self.read == 'image': self.folder = os.path.join(__location__,self.data_name,'train')", "10th data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': if", "os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))] images_mask = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\"))] self.images_mask", "sample_x_data.unsqueeze_(0) if len(sample_y_data.shape) == 2: sample_y_data.unsqueeze_(0) # normalise between [-1,1] if self.normalise: sample_x_data", "- the labels :param ids - the ids corresponding to each sample \"\"\"", "os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy') name_y = os.path.join(__location__,'npy_data/'", "__len__(self): \"\"\" get length of data 
example: len(data) \"\"\" return self.len_data def __getitem__(self,", "\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x = os.path.join(__location__, 'npy_data/' + data_name +", "\"\"\"\" Inherits pytorch Dataset class to load Axon Dataset \"\"\" def __init__(self, data_name='crops64_axons_only',", "create_variable(tensor): \"\"\"creates a Variable tensor with gpu if available :param tensor - the", "data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data and splits into a list of tensors- of", "sample_y_data] return data class ToTensor: \"\"\"Convert ndarrays in data to Tensors.\"\"\" @staticmethod def", "self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data", "class AxonDataset(Dataset): \"\"\"\" Inherits pytorch Dataset class to load Axon Dataset \"\"\" def", "@staticmethod def __call__(data): # swap color axis because # numpy image: H x", "unique_ids = np.unique(ids) data = [None] * unique_ids.size len = np.zeros(unique_ids.size).astype(int) for i", "a new synthetic dataset with parameters args print('no dataset with the name') self.data_name", "in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))] images_mask = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\"))]", "def read_tensor_dataset(self): \"\"\" converts dataset to tensors \"\"\" tt = ToTensor() x_data =", "train or test dataset \"\"\" self.data_name = data_name self.read = read self.transform =", "self.images_original.sort() self.len_data = len(images_original) def __len__(self): \"\"\" get length of data example: len(data)", "Axon Dataset \"\"\" def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None): \"\"\" :param num", "mmap_mode='r') self.y_data_points = np.load(name_y_points) except: # if no dataset currently created, generate a", "data class SyntheticDataset(Dataset): \"\"\"\" Inherits pytorch Dataset class to load Synthetic Axon Dataset", "self.normalise = normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': self.x_data,", "os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\"))] self.images_mask = images_mask self.images_original = images_original self.images_mask.sort() self.images_original.sort() self.len_data =", "mask_path))).astype( float) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) if len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0)", "sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0) sample_y_data = np.expand_dims(sample_y_data,", "+ type + '.npy') name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type", "available :param tensor - the tensor to wrap with Variable \"\"\" # Do", "@staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data and splits into a list of tensors-", "(i.e. 
one id) :param x_data - the data :param y_data - the labels", "one id) :param x_data - the data :param y_data - the labels :param", ":param num - number of data to generate :param data_name (string)- data name", "ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int) len[i] = int(ind_id.size) x_train_temp = tt(x_data[ind_id]) y_train_temp =", "+ data_name + '_y_data_' + type + '.npy') name_y_points = os.path.join(__location__,'npy_data/' + data_name", "+ '.npy') try: self.x_data = np.load(name_x, mmap_mode='r') self.y_data = np.load(name_y, mmap_mode='r') self.y_data_points =", "-to get the 10th data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read", "os.path.dirname(__file__))) name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy')", "= self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read == 'image': data_path = self.images_original[idx] mask_path", "data = data.cuda() return data @staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data and splits", "data_path = self.images_original[idx] mask_path = self.images_mask[idx] sample_x_data = plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\", data_path))", "ids): \"\"\"takes data and splits into a list of tensors- of which each", "\"original\", data_path)) sample_y_data = (plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\", mask_path))).astype( float) sample_x_data = torch.Tensor(sample_x_data)", "'image': data_path = self.images_original[idx] mask_path = self.images_mask[idx] sample_x_data = plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\",", "y_train_temp[0], len[i]] max_len = int(np.max(len)) return data, max_len @staticmethod def create_variable(tensor): \"\"\"creates a", "int(ind_id.size) x_train_temp = tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]] max_len", "tt = ToTensor() unique_ids = np.unique(ids) data = [None] * unique_ids.size len =", ":param data_name (string)- data name to load/ save :param folder- location of dataset", "self.data_name = data_name self.read = read self.transform = transform self.resize = resize self.normalise", "ids)[0].astype(int) len[i] = int(ind_id.size) x_train_temp = tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0],", "i in np.arange(unique_ids.size): ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int) len[i] = int(ind_id.size) x_train_temp =", "tt = ToTensor() x_data = tt(self.x_data) y_data = tt(self.y_data) return x_data, y_data def", "length of data example: len(data) \"\"\" return self.len_data def __getitem__(self, idx): \"\"\"gets samples", "id) :param x_data - the data :param y_data - the labels :param ids", "swap color axis because # numpy image: H x W x C #", "new synthetic dataset with parameters args print('no dataset with the name') self.data_name =", "'npy': if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize))", "x_train_temp = tt(x_data) y_train_temp = tt(y_data) data = [x_train_temp, y_train_temp] return data @staticmethod", "from load_memmap import * class AxonDataset(Dataset): \"\"\"\" Inherits pytorch Dataset class to load", "which each list contains tensors of several samples (i.e. 
one id) :param x_data", "self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0) sample_y_data = np.expand_dims(sample_y_data, axis=0) sample_x_data = torch.Tensor(sample_x_data) sample_y_data", "dataset with the name') self.data_name = data_name self.transform = transform self.resize = resize", "import transforms, utils import torch from torch.autograd import Variable from load_memmap import *", "x_data - the data :param y_data - the labels \"\"\" tt = ToTensor()", "(1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0)", "len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0) if len(sample_y_data.shape) == 2: sample_y_data.unsqueeze_(0) # normalise between [-1,1]", "dataset :param type - train or test dataset \"\"\" self.data_name = data_name self.read", "transform self.resize = resize def read_tensor_dataset(self): \"\"\" converts dataset to tensors \"\"\" tt", "data and splits into a list of tensors- of which each list contains", "Dataset, DataLoader from torchvision import transforms, utils import torch from torch.autograd import Variable", "tensors of several samples (i.e. one id) :param x_data - the data :param", "def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'): \"\"\" :param data_name (string)-", "Synthetic Axon Dataset \"\"\" def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None): \"\"\" :param", "elif self.read == 'image': data_path = self.images_original[idx] mask_path = self.images_mask[idx] sample_x_data = plt.imread(", "= self.images_original[idx] mask_path = self.images_mask[idx] sample_x_data = plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\", data_path)) sample_y_data", "dataset currently created, generate a new synthetic dataset with parameters args print('no dataset", "for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))] images_mask = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),", ") - 1 data = [sample_x_data, sample_y_data] return data class SyntheticDataset(Dataset): \"\"\"\" Inherits", "1 data = [sample_x_data, sample_y_data] return data class SyntheticDataset(Dataset): \"\"\"\" Inherits pytorch Dataset", "= np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read", "os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type +", "= data_name self.read = read self.transform = transform self.resize = resize self.normalise =", "torch.cuda.is_available(): data = data.cuda() return data @staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data and", "data = [x_train_temp, y_train_temp] return data @staticmethod def data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data", "os.path.join(__location__,self.data_name,'train') images_original = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))] images_mask = [img", "of data example: len(data) \"\"\" return (len(self.x_data)) def __getitem__(self, idx): \"\"\"gets samples from", "= len(self.x_data) elif self.read == 'image': self.folder = os.path.join(__location__,self.data_name,'train') 
images_original = [img for", "tensor to wrap with Variable \"\"\" # Do cuda() before wrapping with variable", "self.folder, \"original\", data_path)) sample_y_data = (plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\", mask_path))).astype( float) sample_x_data =", "# numpy image: H x W x C # torch image: C X", "self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read == 'image': data_path", "self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0) sample_y_data", "the data :param y_data - the labels \"\"\" tt = ToTensor() x_train_temp =", "data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'): \"\"\" :param data_name (string)- data name", "to take example: data[10] -to get the 10th data sample\"\"\" if self.resize: sample_x_data", "torch image: C X H X W #data = data.transpose((1, 0)) data =", "- torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1 data = [sample_x_data, sample_y_data] return", "type='val', transform=None, resize=None): \"\"\" :param num - number of data to generate :param", "contains tensors of several samples (i.e. one id) :param x_data - the data", "return data, max_len @staticmethod def create_variable(tensor): \"\"\"creates a Variable tensor with gpu if", "= read self.transform = transform self.resize = resize self.normalise = normalise __location__ =", "normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': self.x_data, self.y_data, _", "or test dataset \"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x = os.path.join(__location__, 'npy_data/'", "== 'npy': self.x_data, self.y_data, _ = load_dataset(type, folder, data_name) self.len_data = len(self.x_data) elif", ":param x_data - the data :param y_data - the labels :param ids -", "test dataset \"\"\" self.data_name = data_name self.read = read self.transform = transform self.resize", "resize def read_tensor_dataset(self): \"\"\" converts dataset to tensors \"\"\" tt = ToTensor() x_data", "== 'image': data_path = self.images_original[idx] mask_path = self.images_mask[idx] sample_x_data = plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder,", "= tt(self.y_data) return x_data, y_data def __len__(self): \"\"\" get length of data example:", "load Axon Dataset \"\"\" def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'):", "= torch.Tensor(data) if torch.cuda.is_available(): data = data.cuda() return data @staticmethod def data_to_tensor(x_data, y_data):", "type + '.npy') name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type +", "numpy image: H x W x C # torch image: C X H", "os.path.dirname(__file__))) if self.read == 'npy': if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data", "with the name') self.data_name = data_name self.transform = transform self.resize = resize def", "example: data[10] -to get the 10th data sample\"\"\" if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]),", "(1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) 
else: sample_x_data = self.x_data[idx] sample_y_data =", "tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]] max_len = int(np.max(len)) return", "= self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0) sample_y_data = np.expand_dims(sample_y_data, axis=0) sample_x_data = torch.Tensor(sample_x_data)", "import Dataset, DataLoader from torchvision import transforms, utils import torch from torch.autograd import", "H x W x C # torch image: C X H X W", "data_name + '_y_data_' + type + '.npy') name_y_points = os.path.join(__location__,'npy_data/' + data_name +", "np.load(name_y_points) except: # if no dataset currently created, generate a new synthetic dataset", "print('no dataset with the name') self.data_name = data_name self.transform = transform self.resize =", "x W x C # torch image: C X H X W #data", "'.npy') name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type + '.npy') name_y_points", "def __len__(self): \"\"\" get length of data example: len(data) \"\"\" return self.len_data def", "- the data :param y_data - the labels \"\"\" tt = ToTensor() x_train_temp", "each sample \"\"\" tt = ToTensor() unique_ids = np.unique(ids) data = [None] *", "self.y_data = np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points) except: # if no dataset currently", "x_data, y_data def __len__(self): \"\"\" get length of data example: len(data) \"\"\" return", "data = [None] * unique_ids.size len = np.zeros(unique_ids.size).astype(int) for i in np.arange(unique_ids.size): ind_id", "self.len_data = len(images_original) def __len__(self): \"\"\" get length of data example: len(data) \"\"\"", "if self.read == 'npy': if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data =", "= ToTensor() x_data = tt(self.x_data) y_data = tt(self.y_data) return x_data, y_data def __len__(self):", "+ data_name + '_x_data_' + type + '.npy') name_y = os.path.join(__location__,'npy_data/' + data_name", "y_data = tt(self.y_data) return x_data, y_data def __len__(self): \"\"\" get length of data", "location of dataset :param type - train or test dataset \"\"\" self.data_name =", "import torch from torch.autograd import Variable from load_memmap import * class AxonDataset(Dataset): \"\"\"\"", "len(images_original) def __len__(self): \"\"\" get length of data example: len(data) \"\"\" return self.len_data", "ToTensor() x_data = tt(self.x_data) y_data = tt(self.y_data) return x_data, y_data def __len__(self): \"\"\"", "to each sample \"\"\" tt = ToTensor() unique_ids = np.unique(ids) data = [None]", "@staticmethod def create_variable(tensor): \"\"\"creates a Variable tensor with gpu if available :param tensor", "data @staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data and splits into a list of", "data_name (string)- data name to load/ save :param type - train or test", "y_data - the labels :param ids - the ids corresponding to each sample", "len(data) \"\"\" return (len(self.x_data)) def __getitem__(self, idx): \"\"\"gets samples from data according to", "sample \"\"\" tt = ToTensor() unique_ids = np.unique(ids) data = [None] * unique_ids.size", "10th data sample\"\"\" if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]),", "gpu if available :param tensor - the tensor to wrap with Variable \"\"\"", "os.path.dirname(__file__))) if self.read == 'npy': 
self.x_data, self.y_data, _ = load_dataset(type, folder, data_name) self.len_data", "y_data, ids): \"\"\"takes data and splits into a list of tensors- of which", "# Do cuda() before wrapping with variable if torch.cuda.is_available(): return Variable(tensor.cuda()) else: return", "currently created, generate a new synthetic dataset with parameters args print('no dataset with", "__location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_'", "type - train or test dataset \"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x", "sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read == 'image': data_path = self.images_original[idx]", "the data :param y_data - the labels :param ids - the ids corresponding", "== 'image': self.folder = os.path.join(__location__,self.data_name,'train') images_original = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder,", "read self.transform = transform self.resize = resize self.normalise = normalise __location__ = os.path.realpath(", "__location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': if self.resize: sample_x_data =", "2: sample_y_data.unsqueeze_(0) # normalise between [-1,1] if self.normalise: sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/", "from data according to idx :param idx- index to take example: data[10] -to", "color axis because # numpy image: H x W x C # torch", "transform=None, resize=None): \"\"\" :param num - number of data to generate :param data_name", "tt(self.x_data) y_data = tt(self.y_data) return x_data, y_data def __len__(self): \"\"\" get length of", "return x_data, y_data def __len__(self): \"\"\" get length of data example: len(data) \"\"\"", "# if no dataset currently created, generate a new synthetic dataset with parameters", "dataset to tensors \"\"\" tt = ToTensor() x_data = tt(self.x_data) y_data = tt(self.y_data)", "= 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1 data = [sample_x_data,", "idx- index to take example: data[10] -to get the 10th data sample\"\"\" __location__", "Inherits pytorch Dataset class to load Synthetic Axon Dataset \"\"\" def __init__(self, num=50000,", "self.folder = os.path.join(__location__,self.data_name,'train') images_original = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))] images_mask", "axis=0) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) data = [sample_x_data, sample_y_data] return data", "tt = ToTensor() x_train_temp = tt(x_data) y_train_temp = tt(y_data) data = [x_train_temp, y_train_temp]", "return data class SyntheticDataset(Dataset): \"\"\"\" Inherits pytorch Dataset class to load Synthetic Axon", "os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1,", "x_data = tt(self.x_data) y_data = tt(self.y_data) return x_data, y_data def __len__(self): \"\"\" get", "[-1,1] if self.normalise: sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) -", "synthetic dataset with parameters args print('no dataset with the name') self.data_name = data_name", "normalise=False, 
read='npy'): \"\"\" :param data_name (string)- data name to load/ save :param folder-", "parameters args print('no dataset with the name') self.data_name = data_name self.transform = transform", "with parameters args print('no dataset with the name') self.data_name = data_name self.transform =", "data[10] -to get the 10th data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if", "x C # torch image: C X H X W #data = data.transpose((1,", "with Variable \"\"\" # Do cuda() before wrapping with variable if torch.cuda.is_available(): return", "[img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))] images_mask = [img for img in", "= tt(y_data) data = [x_train_temp, y_train_temp] return data @staticmethod def data_ids_to_tensor_list(x_data, y_data, ids):", "\"\"\" get length of data example: len(data) \"\"\" return (len(self.x_data)) def __getitem__(self, idx):", "__len__(self): \"\"\" get length of data example: len(data) \"\"\" return (len(self.x_data)) def __getitem__(self,", "image: C X H X W #data = data.transpose((1, 0)) data = np.array([data])", "torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) if len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0) if len(sample_y_data.shape) == 2:", "= np.load(name_y_points) except: # if no dataset currently created, generate a new synthetic", "torch.Tensor(sample_y_data) data = [sample_x_data, sample_y_data] return data class ToTensor: \"\"\"Convert ndarrays in data", "to tensors \"\"\" tt = ToTensor() x_data = tt(self.x_data) y_data = tt(self.y_data) return", "= tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]] max_len = int(np.max(len)) return data, max_len", "name to load/ save :param folder- location of dataset :param type - train", "if self.normalise: sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1", "x_data - the data :param y_data - the labels :param ids - the", "num=50000, data_name='syn256', type='val', transform=None, resize=None): \"\"\" :param num - number of data to", "data = [sample_x_data, sample_y_data] return data class ToTensor: \"\"\"Convert ndarrays in data to", "if torch.cuda.is_available(): data = data.cuda() return data @staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data", "np.nonzero(unique_ids[i] == ids)[0].astype(int) len[i] = int(ind_id.size) x_train_temp = tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i]", "ToTensor() x_train_temp = tt(x_data) y_train_temp = tt(y_data) data = [x_train_temp, y_train_temp] return data", "os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': self.x_data, self.y_data, _ = load_dataset(type, folder, data_name)", "name') self.data_name = data_name self.transform = transform self.resize = resize def read_tensor_dataset(self): \"\"\"", "sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data", "data_name='syn256', type='val', transform=None, resize=None): \"\"\" :param num - number of data to generate", "torch.Tensor(sample_y_data) if len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0) if len(sample_y_data.shape) == 2: sample_y_data.unsqueeze_(0) # normalise", "resize=None, normalise=False, read='npy'): \"\"\" :param data_name (string)- data name to load/ save :param", "idx- index to take example: 
data[10] -to get the 10th data sample\"\"\" if", "data name to load/ save :param folder- location of dataset :param type -", "sample_x_data = plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\", data_path)) sample_y_data = (plt.imread( os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\",", "Variable tensor with gpu if available :param tensor - the tensor to wrap", "len[i] = int(ind_id.size) x_train_temp = tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0],", "self.y_data, _ = load_dataset(type, folder, data_name) self.len_data = len(self.x_data) elif self.read == 'image':", "np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read ==", "to Tensors.\"\"\" @staticmethod def __call__(data): # swap color axis because # numpy image:", "sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] elif", "= torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) if len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0) if len(sample_y_data.shape) ==", "to load Axon Dataset \"\"\" def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False,", "images_mask self.images_original = images_original self.images_mask.sort() self.images_original.sort() self.len_data = len(images_original) def __len__(self): \"\"\" get", "(1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read == 'image':", "= torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) data = [sample_x_data, sample_y_data] return data class ToTensor:", "DataLoader from torchvision import transforms, utils import torch from torch.autograd import Variable from", "SyntheticDataset(Dataset): \"\"\"\" Inherits pytorch Dataset class to load Synthetic Axon Dataset \"\"\" def", ":param folder- location of dataset :param type - train or test dataset \"\"\"", "folder, data_name) self.len_data = len(self.x_data) elif self.read == 'image': self.folder = os.path.join(__location__,self.data_name,'train') images_original", "example: data[10] -to get the 10th data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__)))", "+ type + '.npy') name_y_points = os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type", "= torch.Tensor(sample_y_data) data = [sample_x_data, sample_y_data] return data class ToTensor: \"\"\"Convert ndarrays in", "idx :param idx- index to take example: data[10] -to get the 10th data", "= np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx]", "else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0) sample_y_data =", "each list contains tensors of several samples (i.e. 
one id) :param x_data -", "to idx :param idx- index to take example: data[10] -to get the 10th", "+ type + '.npy') try: self.x_data = np.load(name_x, mmap_mode='r') self.y_data = np.load(name_y, mmap_mode='r')", "ndarrays in data to Tensors.\"\"\" @staticmethod def __call__(data): # swap color axis because", "np.expand_dims(sample_x_data, axis=0) sample_y_data = np.expand_dims(sample_y_data, axis=0) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) data", "= np.array([data]) data = torch.Tensor(data) if torch.cuda.is_available(): data = data.cuda() return data @staticmethod", "W #data = data.transpose((1, 0)) data = np.array([data]) data = torch.Tensor(data) if torch.cuda.is_available():", "__init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'): \"\"\" :param data_name (string)- data", "- 1 data = [sample_x_data, sample_y_data] return data class SyntheticDataset(Dataset): \"\"\"\" Inherits pytorch", "data @staticmethod def data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data and splits into a list", "+ '.npy') name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type + '.npy')", "of tensors- of which each list contains tensors of several samples (i.e. one", "data = np.array([data]) data = torch.Tensor(data) if torch.cuda.is_available(): data = data.cuda() return data", "def data_to_tensor(x_data, y_data): \"\"\"takes data and splits into a list of tensors- of", "name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type + '.npy') name_y_points =", "@staticmethod def data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data and splits into a list of", "read_tensor_dataset(self): \"\"\" converts dataset to tensors \"\"\" tt = ToTensor() x_data = tt(self.x_data)", "type='train', transform=None, resize=None, normalise=False, read='npy'): \"\"\" :param data_name (string)- data name to load/", "= os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy') name_y =", "def create_variable(tensor): \"\"\"creates a Variable tensor with gpu if available :param tensor -", "sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data) if len(sample_x_data.shape) == 2: sample_x_data.unsqueeze_(0) if len(sample_y_data.shape)", "torchvision import transforms, utils import torch from torch.autograd import Variable from load_memmap import", "sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': if self.resize: sample_x_data", "= np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points) except: # if no dataset currently created,", "\"\"\" :param data_name (string)- data name to load/ save :param folder- location of", "[sample_x_data, sample_y_data] return data class ToTensor: \"\"\"Convert ndarrays in data to Tensors.\"\"\" @staticmethod", "wrap with Variable \"\"\" # Do cuda() before wrapping with variable if torch.cuda.is_available():", "y_train_temp] return data @staticmethod def data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data and splits into", "self.normalise: sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1 data", "pytorch Dataset class to load Axon Dataset \"\"\" def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train',", "return data @staticmethod def data_to_tensor(x_data, y_data): 
\"\"\"takes data and splits into a list", "data example: len(data) \"\"\" return self.len_data def __getitem__(self, idx): \"\"\"gets samples from data", "def data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data and splits into a list of tensors-", "self.images_mask = images_mask self.images_original = images_original self.images_mask.sort() self.images_original.sort() self.len_data = len(images_original) def __len__(self):", "torch.min(sample_x_data)) ) - 1 data = [sample_x_data, sample_y_data] return data class SyntheticDataset(Dataset): \"\"\"\"", "= np.expand_dims(sample_x_data, axis=0) sample_y_data = np.expand_dims(sample_y_data, axis=0) sample_x_data = torch.Tensor(sample_x_data) sample_y_data = torch.Tensor(sample_y_data)", "id) :param x_data - the data :param y_data - the labels \"\"\" tt", "- the data :param y_data - the labels :param ids - the ids", "get the 10th data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read ==", "= tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]] max_len = int(np.max(len))", "H X W #data = data.transpose((1, 0)) data = np.array([data]) data = torch.Tensor(data)", "tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]] max_len = int(np.max(len)) return data, max_len @staticmethod", "os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type + '.npy') try: self.x_data = np.load(name_x,", "'image': self.folder = os.path.join(__location__,self.data_name,'train') images_original = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"original\"))]", "tensors- of which each list contains tensors of several samples (i.e. 
one id)", "= np.unique(ids) data = [None] * unique_ids.size len = np.zeros(unique_ids.size).astype(int) for i in", "folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'): \"\"\" :param data_name (string)- data name to", "data_name + '_y_points_data_' + type + '.npy') try: self.x_data = np.load(name_x, mmap_mode='r') self.y_data", "self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data = np.expand_dims(sample_x_data, axis=0) sample_y_data = np.expand_dims(sample_y_data, axis=0) sample_x_data", "transforms, utils import torch from torch.autograd import Variable from load_memmap import * class", "(string)- data name to load/ save :param type - train or test dataset", "name to load/ save :param type - train or test dataset \"\"\" __location__", "dataset with parameters args print('no dataset with the name') self.data_name = data_name self.transform", "get length of data example: len(data) \"\"\" return (len(self.x_data)) def __getitem__(self, idx): \"\"\"gets", "type + '.npy') name_y_points = os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type +", "self.resize = resize def read_tensor_dataset(self): \"\"\" converts dataset to tensors \"\"\" tt =", "load/ save :param folder- location of dataset :param type - train or test", "int(np.max(len)) return data, max_len @staticmethod def create_variable(tensor): \"\"\"creates a Variable tensor with gpu", "= [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\"))] self.images_mask = images_mask self.images_original =", "np from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch", "into a list of tensors- of which each list contains tensors of several", "if len(sample_y_data.shape) == 2: sample_y_data.unsqueeze_(0) # normalise between [-1,1] if self.normalise: sample_x_data =", "'npy': self.x_data, self.y_data, _ = load_dataset(type, folder, data_name) self.len_data = len(self.x_data) elif self.read", "data_name (string)- data name to load/ save :param folder- location of dataset :param", "transform self.resize = resize self.normalise = normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if", "name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy') name_y", "self.images_mask.sort() self.images_original.sort() self.len_data = len(images_original) def __len__(self): \"\"\" get length of data example:", "samples (i.e. 
one id) :param x_data - the data :param y_data - the", "= images_original self.images_mask.sort() self.images_original.sort() self.len_data = len(images_original) def __len__(self): \"\"\" get length of", "take example: data[10] -to get the 10th data sample\"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(),", "torch from torch.autograd import Variable from load_memmap import * class AxonDataset(Dataset): \"\"\"\" Inherits", "data_name self.read = read self.transform = transform self.resize = resize self.normalise = normalise", "self.len_data def __getitem__(self, idx): \"\"\"gets samples from data according to idx :param idx-", "samples from data according to idx :param idx- index to take example: data[10]", "Dataset \"\"\" def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None): \"\"\" :param num -", "data sample\"\"\" if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data = np.resize(np.array([self.y_data[idx]]), (1,", "- the ids corresponding to each sample \"\"\" tt = ToTensor() unique_ids =", "np.unique(ids) data = [None] * unique_ids.size len = np.zeros(unique_ids.size).astype(int) for i in np.arange(unique_ids.size):", "of several samples (i.e. one id) :param x_data - the data :param y_data", "(len(self.x_data)) def __getitem__(self, idx): \"\"\"gets samples from data according to idx :param idx-", "* unique_ids.size len = np.zeros(unique_ids.size).astype(int) for i in np.arange(unique_ids.size): ind_id = np.nonzero(unique_ids[i] ==", "a Variable tensor with gpu if available :param tensor - the tensor to", "the 10th data sample\"\"\" if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize)) sample_y_data =", "= [x_train_temp, y_train_temp] return data @staticmethod def data_ids_to_tensor_list(x_data, y_data, ids): \"\"\"takes data and", "self.x_data, self.y_data, _ = load_dataset(type, folder, data_name) self.len_data = len(self.x_data) elif self.read ==", "self.folder, \"original\"))] images_mask = [img for img in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, \"mask\"))] self.images_mask =", "[None] * unique_ids.size len = np.zeros(unique_ids.size).astype(int) for i in np.arange(unique_ids.size): ind_id = np.nonzero(unique_ids[i]", "as np from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import", "C # torch image: C X H X W #data = data.transpose((1, 0))", "= os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read == 'npy': if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]),", "ids corresponding to each sample \"\"\" tt = ToTensor() unique_ids = np.unique(ids) data", "+ '.npy') name_y_points = os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type + '.npy')", "= int(ind_id.size) x_train_temp = tt(x_data[ind_id]) y_train_temp = tt(y_data[ind_id]) data[i] = [x_train_temp[0], y_train_temp[0], len[i]]", "'.npy') try: self.x_data = np.load(name_x, mmap_mode='r') self.y_data = np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points)", "len[i]] max_len = int(np.max(len)) return data, max_len @staticmethod def create_variable(tensor): \"\"\"creates a Variable", "'_y_points_data_' + type + '.npy') try: self.x_data = np.load(name_x, mmap_mode='r') self.y_data = np.load(name_y,", "y_train_temp = tt(y_data) data = [x_train_temp, y_train_temp] return data @staticmethod def 
data_ids_to_tensor_list(x_data, y_data,", "np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points) except: # if no dataset currently created, generate", "\"\"\" self.data_name = data_name self.read = read self.transform = transform self.resize = resize", "data :param y_data - the labels :param ids - the ids corresponding to", "example: len(data) \"\"\" return (len(self.x_data)) def __getitem__(self, idx): \"\"\"gets samples from data according", "take example: data[10] -to get the 10th data sample\"\"\" if self.resize: sample_x_data =", "of which each list contains tensors of several samples (i.e. one id) :param", "of dataset :param type - train or test dataset \"\"\" self.data_name = data_name", "\"\"\"Convert ndarrays in data to Tensors.\"\"\" @staticmethod def __call__(data): # swap color axis", "- number of data to generate :param data_name (string)- data name to load/", "dataset \"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x = os.path.join(__location__, 'npy_data/' + data_name", "(torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1 data = [sample_x_data, sample_y_data] return data class", "\"\"\" tt = ToTensor() x_train_temp = tt(x_data) y_train_temp = tt(y_data) data = [x_train_temp,", "- train or test dataset \"\"\" __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) name_x =", "= data.transpose((1, 0)) data = np.array([data]) data = torch.Tensor(data) if torch.cuda.is_available(): data =", "self.x_data[idx] sample_y_data = self.y_data[idx] elif self.read == 'image': data_path = self.images_original[idx] mask_path =", "= os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type + '.npy') try: self.x_data =", "= resize self.normalise = normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) if self.read ==", "axis because # numpy image: H x W x C # torch image:", "\"mask\"))] self.images_mask = images_mask self.images_original = images_original self.images_mask.sort() self.images_original.sort() self.len_data = len(images_original) def", "according to idx :param idx- index to take example: data[10] -to get the", "- torch.min(sample_x_data)) ) - 1 data = [sample_x_data, sample_y_data] return data class SyntheticDataset(Dataset):", "np.load(name_x, mmap_mode='r') self.y_data = np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points) except: # if no", "mmap_mode='r') self.y_data = np.load(name_y, mmap_mode='r') self.y_data_points = np.load(name_y_points) except: # if no dataset", "= np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize)) else: sample_x_data = self.x_data[idx] sample_y_data = self.y_data[idx] sample_x_data =", "data.cuda() return data @staticmethod def data_to_tensor(x_data, y_data): \"\"\"takes data and splits into a", "= transform self.resize = resize self.normalise = normalise __location__ = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__)))", "in np.arange(unique_ids.size): ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int) len[i] = int(ind_id.size) x_train_temp = tt(x_data[ind_id])", "tensors \"\"\" tt = ToTensor() x_data = tt(self.x_data) y_data = tt(self.y_data) return x_data,", "elif self.read == 'image': self.folder = os.path.join(__location__,self.data_name,'train') images_original = [img for img in", "get the 10th data sample\"\"\" if self.resize: sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, 
import os                        # os.path is used throughout; the import was not present in the recovered fragment
import matplotlib.pyplot as plt  # plt.imread is used in the 'image' branch; the import was not present in the recovered fragment
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch
from torch.autograd import Variable
from load_memmap import *


class AxonDataset(Dataset):
    """ Inherits pytorch Dataset class to load Axon Dataset """

    def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train',
                 transform=None, resize=None, normalise=False, read='npy'):
        """
        :param data_name (string)- data name to load/ save
        :param folder- location of dataset
        :param type - train or test dataset
        """
        self.data_name = data_name
        self.read = read
        # assumption: these assignments are implied by the constructor parameters and their
        # later use in __getitem__, but were not directly recoverable from the source
        self.folder = folder
        self.transform = transform
        self.resize = resize
        self.normalise = normalise

        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        if self.read == 'npy':
            # assumption: the trailing arguments of load_dataset() are truncated in the source
            self.x_data, self.y_data, _ = load_dataset(type, folder, data_name)
            self.len_data = len(self.x_data)
        elif self.read == 'image':
            self.folder = os.path.join(__location__, self.data_name, 'train')
            images_original = [img for img in os.listdir(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original"))]
            images_mask = [img for img in os.listdir(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask"))]
            self.images_mask = images_mask
            self.images_original = images_original
            self.images_mask.sort()
            self.images_original.sort()
            self.len_data = len(images_original)

    def __len__(self):
        """ get length of data example: len(data) """
        return self.len_data

    def __getitem__(self, idx):
        """gets samples from data according to idx
        :param idx- index to take example: data[10] -to get the 10th data sample"""
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        if self.read == 'npy':
            if self.resize:
                sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize, self.resize))
                sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize, self.resize))
            else:
                sample_x_data = self.x_data[idx]
                sample_y_data = self.y_data[idx]
        elif self.read == 'image':
            data_path = self.images_original[idx]
            mask_path = self.images_mask[idx]
            sample_x_data = plt.imread(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original", data_path))
            sample_y_data = (plt.imread(
                os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask", mask_path))).astype(float)
        sample_x_data = torch.Tensor(sample_x_data)
        sample_y_data = torch.Tensor(sample_y_data)
        if len(sample_x_data.shape) == 2:
            sample_x_data.unsqueeze_(0)
        if len(sample_y_data.shape) == 2:
            sample_y_data.unsqueeze_(0)
        # normalise between [-1,1]
        if self.normalise:
            sample_x_data = 2 * ((sample_x_data - torch.min(sample_x_data)) /
                                 (torch.max(sample_x_data) - torch.min(sample_x_data))) - 1
        data = [sample_x_data, sample_y_data]
        return data


class SyntheticDataset(Dataset):
    """ Inherits pytorch Dataset class to load Synthetic Axon Dataset """

    def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None):
        """
        :param num - number of data to generate
        :param data_name (string)- data name to load/ save
        :param type - train or test dataset
        """
        __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy')
        name_y = os.path.join(__location__, 'npy_data/' + data_name + '_y_data_' + type + '.npy')
        name_y_points = os.path.join(__location__, 'npy_data/' + data_name + '_y_points_data_' + type + '.npy')
        try:
            self.x_data = np.load(name_x, mmap_mode='r')
            self.y_data = np.load(name_y, mmap_mode='r')
            self.y_data_points = np.load(name_y_points)
        except:
            # if no dataset currently created, generate a new synthetic dataset with parameters args
            print('no dataset with the name')
        self.data_name = data_name
        self.transform = transform
        self.resize = resize

    def read_tensor_dataset(self):
        """ converts dataset to tensors """
        tt = ToTensor()
        x_data = tt(self.x_data)
        y_data = tt(self.y_data)
        # assumption: the return statement is truncated in the source; returning both tensors
        return x_data, y_data

    def __len__(self):
        """ get length of data example: len(data) """
        return (len(self.x_data))

    def __getitem__(self, idx):
        """gets samples from data according to idx
        :param idx- index to take example: data[10] -to get the 10th data sample"""
        if self.resize:
            sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize, self.resize))
            sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize, self.resize))
        else:
            sample_x_data = self.x_data[idx]
            sample_y_data = self.y_data[idx]
            sample_x_data = np.expand_dims(sample_x_data, axis=0)
            sample_y_data = np.expand_dims(sample_y_data, axis=0)
        sample_x_data = torch.Tensor(sample_x_data)
        sample_y_data = torch.Tensor(sample_y_data)
        data = [sample_x_data, sample_y_data]
        return data


class ToTensor:
    """Convert ndarrays in data to Tensors."""

    @staticmethod
    def __call__(data):
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        #data = data.transpose((1, 0))
        data = np.array([data])
        data = torch.Tensor(data)
        if torch.cuda.is_available():
            data = data.cuda()
        return data

    @staticmethod
    def data_to_tensor(x_data, y_data):
        """takes data and converts it into a pair of tensors
        :param x_data - the data
        :param y_data - the labels """
        tt = ToTensor()
        x_train_temp = tt(x_data)
        y_train_temp = tt(y_data)
        data = [x_train_temp, y_train_temp]
        return data

    @staticmethod
    def data_ids_to_tensor_list(x_data, y_data, ids):
        """takes data and splits into a list of tensors- of which each holds several samples (i.e. one id)
        :param x_data - the data
        :param y_data - the labels
        :param ids - the ids corresponding to each sample """
        tt = ToTensor()
        unique_ids = np.unique(ids)
        data = [None] * unique_ids.size
        len = np.zeros(unique_ids.size).astype(int)
        for i in np.arange(unique_ids.size):
            ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int)
            len[i] = int(ind_id.size)
            x_train_temp = tt(x_data[ind_id])
            y_train_temp = tt(y_data[ind_id])
            data[i] = [x_train_temp[0], y_train_temp[0], len[i]]
        max_len = int(np.max(len))
        return data, max_len

    @staticmethod
    def create_variable(tensor):
        """creates a Variable tensor with gpu if available
        :param tensor - the tensor to wrap with Variable """
        # Do cuda() before wrapping with variable
        if torch.cuda.is_available():
            return Variable(tensor.cuda())
        else:
            return Variable(tensor)
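A minimal usage sketch for the dataset classes above (assuming the .npy files or image folders referenced by the constructor defaults exist on disk; the DataLoader arguments are illustrative only):

from torch.utils.data import DataLoader

train_set = AxonDataset(data_name='crops64_axons_only', folder='axon_data', type='train')
train_loader = DataLoader(train_set, batch_size=16, shuffle=True)

for x_batch, y_batch in train_loader:
    # __getitem__ returns [image, mask], so the default collate_fn yields one batched tensor per element
    print(x_batch.shape, y_batch.shape)
    break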
[ "<reponame>Gerard-007/deenux from CustomerClass import Customer import random class Order: num_of_orders = 0 def", "= 0 def __init__(self): self.order_ID = random.randint(00000000, 999999999) self.customer_ID =Customer.business_ID self.ship_to_party_ID = Customer.business_address", "import random class Order: num_of_orders = 0 def __init__(self): self.order_ID = random.randint(00000000, 999999999)", "class Order: num_of_orders = 0 def __init__(self): self.order_ID = random.randint(00000000, 999999999) self.customer_ID =Customer.business_ID", "import Customer import random class Order: num_of_orders = 0 def __init__(self): self.order_ID =", "random class Order: num_of_orders = 0 def __init__(self): self.order_ID = random.randint(00000000, 999999999) self.customer_ID", "num_of_orders = 0 def __init__(self): self.order_ID = random.randint(00000000, 999999999) self.customer_ID =Customer.business_ID self.ship_to_party_ID =", "Order: num_of_orders = 0 def __init__(self): self.order_ID = random.randint(00000000, 999999999) self.customer_ID =Customer.business_ID self.ship_to_party_ID", "Customer import random class Order: num_of_orders = 0 def __init__(self): self.order_ID = random.randint(00000000,", "from CustomerClass import Customer import random class Order: num_of_orders = 0 def __init__(self):", "CustomerClass import Customer import random class Order: num_of_orders = 0 def __init__(self): self.order_ID" ]
[ "together=together) if result[0] : include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' +", "self.yaml_loader(filename) if result[0] : (content, yaml_data) = result[2:] else : self.logger.error('检测yaml文件' + file", "+ '类型为include语法成功') return (True, content) def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''):", "元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles' , 'main') : result =", "+ '未通过yaml语法检测,' + result[1]) data = { 'main' : content, 'include': include_content, 'roles':", "False else : sub_preserve = preserve if not name : name = roles_path", "name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述", "'roles': {name:content_dict}, } if preserve : result = self.write2db(name, data, 'roles', describe=describe) if", "失败为False,返回失败原因 ''' if file_type not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file", "+ result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return (True, content) def roles(self, roles_path,", "+ result[1]) return (False, '文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,'", "this_basedir = os.path.dirname(filename) include_content = {} roles_content = {} for file, file_type in", "not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if", "self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1])", "describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye", "'失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None or not this_basedir:", "describe=describe) elif yaml_tpye == 'include' : result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name,", "(True, data) else : return (True, content) def include(self, file, this_basedir=None, file_type='main', preserve=True,", "+ '未通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的roles名为' + roles", "self.check_roles(content_dict) if result[0] : includefile_dict = result[1] for file, file_type in includefile_dict.items() :", "return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename +", "describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return", "''' if preserve and together: sub_preserve = False else : sub_preserve = preserve", "file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称", "preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容", "this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' +", "file + 
'未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件' + file + '未通过语法检测,' +", ": self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1]) return", "filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '未通过yaml语法检测,' +", "+ '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件'", "os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not content_dict['templates'] : del content_dict['templates'] result = self.check_roles(content_dict)", "describe=describe) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return", "include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' +", "{}, 'include': include_content, 'roles': {name:content_dict}, } if preserve : result = self.write2db(name, data,", "= result[1] for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type,", "name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return", ": self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1]) return", "= self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1])", "元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件'", "is None or not this_basedir: this_roles_path = roles_path else : try : this_roles_path", "(False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功')", "name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if", "{name:content_dict}, } if preserve : result = self.write2db(name, data, 'roles', describe=describe) if not", "'未通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的roles名为' + roles +", "+ '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if preserve : result = self.write2db(name,", ":return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in ('main', 'tasks', 'var') :", "not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件'", "result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024) if result[0] : temp_list", "失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles' , 'main') : result = self.main(this_path, preserve=preserve,", "get_pathlist class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''): '''", "file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型", "file + '未通过yaml语法检测,' + result[1]) 
else : file = os.path.basename(file) include_content.update({file:result[1]}) for roles", "= result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False,", "result[0] : (filename, content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件' + filename +", "self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is", "+ this_dir + '/main.yaml' result = read_file(yaml_file) if not result[0] : if this_dir", "+ filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' + filename + '转化成yaml数据时失败,'", "this_roles_path + '/' + this_dir + '/main.yaml' result = read_file(yaml_file) if not result[0]", "+ result[1]) return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1]) result =", "= preserve result = self.yaml_loader(filename) if result[0] : (filename, content, yaml_data) = result[1:]", "get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024) if result[0] : temp_list = result[1]", "data, 'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' +", "and together: sub_preserve = False else : sub_preserve = preserve result = self.yaml_loader(filename)", "+ result[1]) return (False, '文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,'", "= self.write2db(name, content, 'include', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + file +", "self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result = self.check_include(yaml_data,", "result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件'", "'include': include_content, 'roles': roles_content, } if preserve : result = self.write2db(name, data, 'main',", ": filename = file else : try : filename = this_basedir + '/'", "os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together)", "成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles' , 'main') : result = self.main(this_path,", "+ filename + '未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename) include_content = {} roles_content", ", 'main') : result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe) elif yaml_tpye ==", "'/main.yaml' result = read_file(yaml_file) if not result[0] : if this_dir == 'tasks' :", "self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir is None", "result[0] : temp_list = result[1] for temp in temp_list : result = read_file(temp)", "'类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if preserve : result = self.write2db(name, content,", "+ file + '未通过yaml语法检测,' + result[1]) else : file = os.path.basename(file) include_content.update({file:result[1]}) for", "'语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None or not this_basedir: this_roles_path = roles_path", "= result[1] temp_dir = this_roles_path + '/templates/' content_dict['templates'] = {} result = get_pathlist(temp_dir,", "+ result[1]) return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type) if not result[0] :", "name:yaml文件内容写入数据的名称 
describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in ('main',", "preserve=preserve, name=name, describe=describe) elif yaml_tpye == 'roles' : result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve,", "+ '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '未通过yaml语法检测,' + result[1])", "+ filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的include文件名为'", ": self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename", "in roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2])", "from library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False,", "result = self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' +", "filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的include文件名为' +", "+ '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir is None or not this_basedir", "'失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir + '/main.yaml语法错误,' +", "+ result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together : return (True, data)", ": result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' +", "this_basedir + '/' + file except : filename = file result = self.yaml_loader(filename)", "'roles': roles_content, } if preserve : result = self.write2db(name, data, 'main', describe=describe) if", "preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述", "yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def", "return (False, this_dir + '/main.yaml不存在') continue else : content_dict[this_dir] = result[1] temp_dir =", ": file = os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list : result = self.roles(roles,", "temp_list : result = read_file(temp) if result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file] =", "'未通过yaml语法检测,' + result[1]) data = { 'main' : content, 'include': include_content, 'roles': roles_content,", "result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件'", "this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml不存在')", "result = self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:'", "+ '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename", "together=together, name=name, describe=describe) elif yaml_tpye == 'include' : result = self.include(this_path, this_basedir=this_basedir, file_type='tasks',", "name : name = roles_path result = self._isrolesname(name) if not result : 
self.logger.error('检测yaml文件roles名为'", "= {} roles_content = {} for file, file_type in includefile_dict.items() : result =", "result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together : return (True, data) else", "def main(self, filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件", "'/main.yaml语法错误,原因:' + result[1]) return (False, this_dir + '/main.yaml语法错误,' + result[1]) data = {", "name = roles_path result = self._isrolesname(name) if not result : self.logger.error('检测yaml文件roles名为' + roles_path", "+ roles_path + '失败,' + this_dir + '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在')", "result[1]) return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data)", "result[1]) return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件'", "self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件'", "result = self.write2db(name, data, 'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path", "self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为'", "sub_preserve = False else : sub_preserve = preserve result = self.yaml_loader(filename) if result[0]", "(content, yaml_data) = result[2:] else : self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1])", "read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True,", "+ filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '未通过yaml语法检测,'", "'参数file_type错误') result = self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:'", "成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' +", "filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1]) data = { 'main'", "this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型", "roles + '未通过yaml语法检测,' + result[1]) data = { 'main' : content, 'include': include_content,", "self.write2db(name, data, 'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,'", "{} roles_content = {} for file, file_type in includefile_dict.items() : result = self.include(file,", "'meta', 'defaults') : yaml_file = this_roles_path + '/' + this_dir + '/main.yaml' result", "result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,' +", "self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')", "+ '中的roles名为' + roles + '未通过yaml语法检测,' + result[1]) data = { 'main' :", "this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 
together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 '''", "else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye +", "= preserve if not name : name = roles_path result = self._isrolesname(name) if", "if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False,", "+ '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查", "for roles in roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0]", "main(self, filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称", ": temp_list = result[1] for temp in temp_list : result = read_file(temp) if", ": result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye == 'roles'", "result[1]) result = self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' + file +", "roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2]) roles", "not this_basedir: this_roles_path = roles_path else : try : this_roles_path = this_basedir +", ": del content_dict['templates'] result = self.check_roles(content_dict) if result[0] : includefile_dict = result[1] for", "filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' + filename + '转化成yaml数据时失败,' +", "(True, content) def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数", "not this_basedir : filename = file else : try : filename = this_basedir", "= read_file(yaml_file) if not result[0] : if this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' +", "result[1]) data = { 'main' : content, 'include': include_content, 'roles': roles_content, } if", "else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' +", "'main' : {}, 'include': include_content, 'roles': {name:content_dict}, } if preserve : result =", "library.connecter.ansible.yaml import Yaml_Base from library.utils.file import read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base):", "if preserve and together: sub_preserve = False else : sub_preserve = preserve result", "file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0]", "= this_roles_path + '/templates/' content_dict['templates'] = {} result = get_pathlist(temp_dir, get_death=0, max_size=4 *", "+ result[1]) return (False, result[1]) if preserve : result = self.write2db(name, content, 'include',", "result[1] for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve)", "return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') 
return (True, content)", "yaml_data) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return", "+ filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1]) data = {", "file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file", "('tasks', 'vars', 'handlers', 'meta', 'defaults') : yaml_file = this_roles_path + '/' + this_dir", "filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1]) else : file =", "'类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件' +", "= {} for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') : yaml_file =", "= self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]})", "+ filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1]) else : file", "not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库'", "失败为False,返回失败原因 ''' if preserve and together: sub_preserve = False else : sub_preserve =", "from library.utils.file import read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self, this_path,", ": self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' + filename", "include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典))", "(False, this_dir + '/main.yaml语法错误,' + result[1]) data = { 'main' : {}, 'include':", "include_content, 'roles': {name:content_dict}, } if preserve : result = self.write2db(name, data, 'roles', describe=describe)", "for temp in temp_list : result = read_file(temp) if result[0] : temp_file =", "content_dict['templates'] : del content_dict['templates'] result = self.check_roles(content_dict) if result[0] : includefile_dict = result[1]", "self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file) if not", "result[0] : (roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:'", "preserve and together: sub_preserve = False else : sub_preserve = preserve result =", "Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数", "return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together :", "self._isrolesname(name) if not result : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')", "= self.yaml_loader(filename) if result[0] : (filename, content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件'", "if result[0] : include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename", "'转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) = result[1:]", "result : 
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if", "library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='',", "preserve and together: sub_preserve = False else : sub_preserve = preserve if not", "+ roles + '未通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的roles名为'", ":return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if preserve and together:", ": result = self.write2db(name, content, 'include', describe=describe) if not result[0] : self.logger.error('检测yaml文件' +", "+ result[1]) data = { 'main' : content, 'include': include_content, 'roles': roles_content, }", "1024 * 1024) if result[0] : temp_list = result[1] for temp in temp_list", "result[0] : (content, yaml_data) = result[2:] else : self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:'", "this_roles_path = roles_path include_content = {} for this_dir in ('tasks', 'vars', 'handlers', 'meta',", "this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库", "not result : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else :", ": self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result =", "'不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename,", ":return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together: sub_preserve = False else", "+ '未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1])", "+ '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self,", "= os.path.dirname(filename) include_content = {} roles_content = {} for file, file_type in includefile_dict.items()", "in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误')", "if this_basedir is None or not this_basedir : filename = file else :", "(roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1])", "元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together: sub_preserve = False else :", "this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' +", "else : try : filename = this_basedir + '/' + file except :", "'中的roles名为' + roles + '未通过yaml语法检测,' + result[1]) data = { 'main' : content,", ": name = roles_path result = self._isrolesname(name) if not result : self.logger.error('检测yaml文件roles名为' +", "= { 'main' : content, 'include': include_content, 'roles': roles_content, } if preserve :", "result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not content_dict['templates'] : del", ": filename = file result = self.yaml_loader(filename) if 
result[0] : (content, yaml_data) =", "+ filename + '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if result[0] : (roles_list,", "result[1]) return (False, '文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,' +", "self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1]) return (False,", ": try : this_roles_path = this_basedir + '/roles/' + roles_path except : this_roles_path", "= result[1] for temp in temp_list : result = read_file(temp) if result[0] :", "self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1]) return (False,", "if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:'", "else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1])", "= self.write2db(name, data, 'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path +", "+ roles_path + '失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' +", ": result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' +", "= self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' +", "self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml不存在') return (False, this_dir +", "preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in", "result[1]) return (False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1]) else : include_content.update({file:result[1]})", "filename + '类型为include语法成功') return (True, content) def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='',", "yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename, preserve=True, together=False, name='', describe=''): '''", "preserve=preserve, together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False,", "+ '失败,' + this_dir + '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue else", "name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述", "roles_path include_content = {} for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') :", "+ roles_path + '成功') if together : return (True, content_dict, include_content) else :", "return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' +", "(True, content) def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数", "else : content_dict[this_dir] = result[1] temp_dir = this_roles_path + '/templates/' content_dict['templates'] = {}", "yaml_data) = result[2:] else : self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return", "= {} if preserve and together: sub_preserve = False else : sub_preserve =", ": this_roles_path = roles_path include_content = {} for this_dir in 
('tasks', 'vars', 'handlers',", "== 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml不存在') return", "from library.connecter.ansible.yaml import Yaml_Base from library.utils.file import read_file from library.utils.path import get_pathlist class", "filename + '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict)", "'成功') if together : return (True, content_dict, include_content) else : return (True, {},", "if together : return (True, content_dict, include_content) else : return (True, {}, {})", "'失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功')", "{} if preserve and together: sub_preserve = False else : sub_preserve = preserve", "self.write2db(name, content, 'include', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:'", "continue else : content_dict[this_dir] = result[1] temp_dir = this_roles_path + '/templates/' content_dict['templates'] =", "'/templates/' content_dict['templates'] = {} result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024)", "'参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename, preserve=True, together=False, name='',", ":参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因", "成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if preserve and together: sub_preserve =", ": result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2]) roles =", "self.yaml_loader(filename) if result[0] : (filename, content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件' +", "{} for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') : yaml_file = this_roles_path", "None or not this_basedir: this_roles_path = roles_path else : try : this_roles_path =", "+ file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if preserve : result", "'无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together : return (True,", "'main') : result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe) elif yaml_tpye == 'include'", "or not this_basedir : filename = file else : try : filename =", "False else : sub_preserve = preserve result = self.yaml_loader(filename) if result[0] : (filename,", "yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye", "content, 'include': include_content, 'roles': roles_content, } if preserve : result = self.write2db(name, data,", "else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1])", "preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述", 
"''' if file_type not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file +", "roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' +", "(filename, content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' +", "return (False, '文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1])", "file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type) if", "(False, result[1]) if preserve : result = self.write2db(name, content, 'include', describe=describe) if not", "if result[0] : includefile_dict = result[1] for file, file_type in includefile_dict.items() : result", "if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1]) return (False,", "result[1]) return result if this_basedir is None or not this_basedir : filename =", "self.write2db(name, data, 'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:'", "def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件", "if not result[0] : if this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path +", "if this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir +", "+ result[1]) return (False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1]) else :", "Yaml_Base from library.utils.file import read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self,", "else : if this_basedir is None or not this_basedir: this_roles_path = roles_path else", "result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' + filename", ": self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir is", "+ result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' +", "+ result[1]) return (False, '文件' + filename + '未通过yaml语法检测,' + result[1]) this_basedir =", "成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together: sub_preserve = False else : sub_preserve", "if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False,", "None or not this_basedir : filename = file else : try : filename", "(False, '文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1]) data", "roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件' +", "together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {}", "roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None or", "return (False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1]) else : include_content.update({file:result[1]}) else", "+ filename + '类型为include语法成功') return (True, content) def roles(self, roles_path, this_basedir=None, preserve=True, 
together=False,", "result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir", "yaml_tpye='main', preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库", "'roles' : result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data'", "filename + '类型为full_roles或者main语法成功') if together : return (True, data) else : return (True,", "} if preserve : result = self.write2db(name, data, 'roles', describe=describe) if not result[0]", "'类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1]) result", "= read_file(temp) if result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not", "检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空))", "+ result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return", ": (filename, content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:'", ": return (True, content) def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): '''", "file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye == 'roles' : result = self.roles(this_path, this_basedir=this_basedir,", "''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return", "data) else : return (True, content) def include(self, file, this_basedir=None, file_type='main', preserve=True, name='',", "'include', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1])", "data, 'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' +", "describe=describe) elif yaml_tpye == 'roles' : result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name,", "'文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1]) else :", "else : sub_preserve = preserve if not name : name = roles_path result", "result[1]) return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together", "''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return", "'/main.yaml不存在') continue else : content_dict[this_dir] = result[1] temp_dir = this_roles_path + '/templates/' content_dict['templates']", "= os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not content_dict['templates'] : del content_dict['templates'] result =", "try : filename = this_basedir + '/' + file except : filename =", ": self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if 
this_basedir", ": include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为'", "roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if preserve and together: sub_preserve = False", "result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' +", "+ yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename, preserve=True, together=False, name='', describe=''):", "file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir is None or not", "+ file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' +", "(False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None or not this_basedir: this_roles_path =", "result[1]) data = { 'main' : {}, 'include': include_content, 'roles': {name:content_dict}, } if", "(False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1]) else : include_content.update({file:result[1]}) else :", "if result[0] : (filename, content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件' + filename", "this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称", "class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器", "return result if this_basedir is None or not this_basedir : filename = file", "roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles", "self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' + filename", "content_dict['templates'] result = self.check_roles(content_dict) if result[0] : includefile_dict = result[1] for file, file_type", "+ '/main.yaml' result = read_file(yaml_file) if not result[0] : if this_dir == 'tasks'", "'类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件'", "filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together :", "= file else : try : filename = this_basedir + '/' + file", "together : return (True, data) else : return (True, content) def include(self, file,", "this_roles_path = roles_path else : try : this_roles_path = this_basedir + '/roles/' +", "name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles' ,", "= os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve,", "return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename, preserve=True,", "(False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return result def main(self, 
filename, preserve=True, together=False,", "os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:'", "if not name : name = roles_path result = self._isrolesname(name) if not result", "if file_type not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误')", "this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录", "+ '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename +", "+ result[1]) return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' +", "if together : return (True, data) else : return (True, content) def include(self,", "if result[0] : (roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' + filename +", "read_file(temp) if result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not content_dict['templates']", "{} result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024) if result[0] :", "return (False, '文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1])", "this_dir + '/main.yaml不存在') continue else : content_dict[this_dir] = result[1] temp_dir = this_roles_path +", "result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye", "result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye == 'roles' :", "= this_basedir + '/' + file except : filename = file result =", "this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if", "zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together: sub_preserve = False", "(False, '文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1]) else", "'/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue else : content_dict[this_dir] = result[1] temp_dir", "if preserve and together: sub_preserve = False else : sub_preserve = preserve if", "else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' +", "'tasks', 'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result =", ": (roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' +", "if result[0] : temp_list = result[1] for temp in temp_list : result =", "describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles' , 'main')", "not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,'", "includefile_dict = result[1] for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir,", "self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : 
include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else", "'通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together : return (True,", "= roles_path include_content = {} for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults')", "('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result", "= result[2:] else : self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False,", "'include': include_content, 'roles': {name:content_dict}, } if preserve : result = self.write2db(name, data, 'roles',", "not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' +", "result[1]) return (False, '文件' + filename + '未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename)", "roles + '未通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的roles名为' +", "+ '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的include文件名为' + file", "''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空),", "元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if preserve and together: sub_preserve", "if this_basedir is None or not this_basedir: this_roles_path = roles_path else : try", "self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together : return (True, content_dict, include_content) else", "result = self.yaml_loader(filename) if result[0] : (filename, content, yaml_data) = result[1:] else :", "zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if preserve and", "os.path.dirname(filename) include_content = {} roles_content = {} for file, file_type in includefile_dict.items() :", "for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') : yaml_file = this_roles_path +", "describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果,", "'文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if", "router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称", "+ result[1]) data = { 'main' : {}, 'include': include_content, 'roles': {name:content_dict}, }", "+ roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件'", "this_roles_path = this_basedir + '/roles/' + roles_path except : this_roles_path = roles_path include_content", "{ 'main' : content, 'include': include_content, 'roles': roles_content, } if preserve : result", "filename = 
file result = self.yaml_loader(filename) if result[0] : (content, yaml_data) = result[2:]", "= self.check_roles(content_dict) if result[0] : includefile_dict = result[1] for file, file_type in includefile_dict.items()", ":参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 '''", "name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together: sub_preserve", "filename + '未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename) include_content = {} roles_content =", "not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1])", ": self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' + result[1])", "+ '/' + this_dir + '/main.yaml' result = read_file(yaml_file) if not result[0] :", "yaml_tpye == 'roles' : result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else", "= self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye +", "+ result[1]) result = self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) = result[1:] else", "content_dict[this_dir] = result[1] temp_dir = this_roles_path + '/templates/' content_dict['templates'] = {} result =", "+ file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file) if not result[0]", ": includefile_dict = result[1] for file, file_type in includefile_dict.items() : result = self.include(file,", ": (content, yaml_data) = result[2:] else : self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' +", "data = { 'main' : content, 'include': include_content, 'roles': roles_content, } if preserve", "include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' +", "= { 'main' : {}, 'include': include_content, 'roles': {name:content_dict}, } if preserve :", "'失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件' + file +", ": self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml不存在') return (False, this_dir", "file except : filename = file result = self.yaml_loader(filename) if result[0] : (content,", "} if preserve : result = self.write2db(name, data, 'main', describe=describe) if not result[0]", "in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] :", "this_dir + '/main.yaml语法错误,' + result[1]) data = { 'main' : {}, 'include': include_content,", "this_dir + '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue else : content_dict[this_dir] =", "result if this_basedir is None or not this_basedir : filename = file else", ": self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file) if", "return (True, content) def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确", "+ '未通过语法检测,' + 
result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path +", "+ '/' + file except : filename = file result = self.yaml_loader(filename) if", "return result def main(self, filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合", "+ '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None or not", "+ file except : filename = file result = self.yaml_loader(filename) if result[0] :", "result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return (True,", "result[1]) else : file = os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list : result", "else : self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result", "together: sub_preserve = False else : sub_preserve = preserve if not name :", "result[1] for temp in temp_list : result = read_file(temp) if result[0] : temp_file", "sub_preserve = preserve if not name : name = roles_path result = self._isrolesname(name)", "self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件'", "this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 '''", ":参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典))", "if not result : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else", "not name : name = roles_path result = self._isrolesname(name) if not result :", "preserve : result = self.write2db(name, data, 'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为'", "== 'include' : result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye", "return (True, data) else : return (True, content) def include(self, file, this_basedir=None, file_type='main',", "sub_preserve = preserve result = self.yaml_loader(filename) if result[0] : (filename, content, yaml_data) =", "(False, '文件' + filename + '未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename) include_content =", "if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False,", "result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if", "file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if preserve : result =", "'handlers', 'meta', 'defaults') : yaml_file = this_roles_path + '/' + this_dir + '/main.yaml'", "result[1]) result = self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) = result[1:] else :", ": result = read_file(temp) if result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1]", "name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 
name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 '''", "includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return", "+ '成功') if together : return (True, content_dict, include_content) else : return (True,", "yaml_tpye == 'include' : result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif", "file_type not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return", "= roles_path result = self._isrolesname(name) if not result : self.logger.error('检测yaml文件roles名为' + roles_path +", "not content_dict['templates'] : del content_dict['templates'] result = self.check_roles(content_dict) if result[0] : includefile_dict =", "content_dict = {} if preserve and together: sub_preserve = False else : sub_preserve", "include_content = {} roles_content = {} for file, file_type in includefile_dict.items() : result", "elif yaml_tpye == 'roles' : result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe)", "together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data'", "for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if", ": return (True, data) else : return (True, content) def include(self, file, this_basedir=None,", "self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye == 'roles' : result =", "+ roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir", ": sub_preserve = preserve result = self.yaml_loader(filename) if result[0] : (filename, content, yaml_data)", "检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果,", ": result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe) elif yaml_tpye == 'include' :", "result[1]) if preserve : result = self.write2db(name, content, 'include', describe=describe) if not result[0]", "temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not content_dict['templates'] : del content_dict['templates'] result", ": temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if not content_dict['templates'] : del content_dict['templates']", "result = self.write2db(name, content, 'include', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + file", "roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容", "1024) if result[0] : temp_list = result[1] for temp in temp_list : result", "include_content, 'roles': roles_content, } if preserve : result = self.write2db(name, data, 'main', describe=describe)", "content, 'include', 
describe=describe) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' +", "sub_preserve = False else : sub_preserve = preserve if not name : name", "= {} for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type,", "preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容", "this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type", "except : this_roles_path = roles_path include_content = {} for this_dir in ('tasks', 'vars',", ": self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1]) return", "self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename +", "describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together: sub_preserve =", "return (False, this_dir + '/main.yaml语法错误,' + result[1]) data = { 'main' : {},", "= this_basedir + '/roles/' + roles_path except : this_roles_path = roles_path include_content =", "os from library.connecter.ansible.yaml import Yaml_Base from library.utils.file import read_file from library.utils.path import get_pathlist", "self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' + filename +", "'未通过语法检测,' + result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,'", "== 'roles' : result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else :", "'类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir is None or not this_basedir :", "+ roles + '未通过yaml语法检测,' + result[1]) data = { 'main' : content, 'include':", "+ '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) =", "'main' : content, 'include': include_content, 'roles': roles_content, } if preserve : result =", "'include' : result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye ==", "'类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename + '中的include文件名为' + file +", "'类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type) if not result[0]", "result = self._isrolesname(name) if not result : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return", "'类型为full_roles或者main语法成功') if together : return (True, data) else : return (True, content) def", "filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' +", "= self.yaml_loader(filename) if result[0] : (content, yaml_data) = result[2:] else : self.logger.error('检测yaml文件' +", ": yaml_file = this_roles_path + '/' + this_dir + '/main.yaml' result = read_file(yaml_file)", "self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, 
'文件' + filename +", "* 1024) if result[0] : temp_list = result[1] for temp in temp_list :", "file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename", "self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False,", "filename = file else : try : filename = this_basedir + '/' +", "* 1024 * 1024) if result[0] : temp_list = result[1] for temp in", "in ('full_roles' , 'main') : result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe) elif", "'未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename) include_content = {} roles_content = {} for", "roles in roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] :", ": filename = this_basedir + '/' + file except : filename = file", ": content_dict[this_dir] = result[1] temp_dir = this_roles_path + '/templates/' content_dict['templates'] = {} result", "result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件' +", "'/main.yaml语法错误,' + result[1]) data = { 'main' : {}, 'include': include_content, 'roles': {name:content_dict},", "or not this_basedir: this_roles_path = roles_path else : try : this_roles_path = this_basedir", "+ this_dir + '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue else : content_dict[this_dir]", "file else : try : filename = this_basedir + '/' + file except", ": try : filename = this_basedir + '/' + file except : filename", "= self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' + filename +", "result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2]) roles = os.path.basename(roles)", "return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None or not this_basedir: this_roles_path", "file + '未通过语法检测,' + result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path", "max_size=4 * 1024 * 1024) if result[0] : temp_list = result[1] for temp", "= os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles +", "+ roles_path except : this_roles_path = roles_path include_content = {} for this_dir in", "+ result[1]) return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if", "{ 'main' : {}, 'include': include_content, 'roles': {name:content_dict}, } if preserve : result", "import Yaml_Base from library.utils.file import read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base): def", "+ '未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename) include_content = {} roles_content = {}", "+ filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库'", "is None or not this_basedir : filename = file else : try :", ": include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:'", "return (True, content) def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确", 
"self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if preserve :", "roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库", "this_basedir is None or not this_basedir: this_roles_path = roles_path else : try :", "preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file +", "data = { 'main' : {}, 'include': include_content, 'roles': {name:content_dict}, } if preserve", "+ result[1]) else : file = os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list :", "preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else : self.logger.error('检测yaml文件'", "result = self.yaml_loader(filename) if result[0] : (content, yaml_data) = result[2:] else : self.logger.error('检测yaml文件'", "'类型为include语法成功') return (True, content) def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): '''", "file result = self.yaml_loader(filename) if result[0] : (content, yaml_data) = result[2:] else :", "+ '中的include文件名为' + file + '未通过yaml语法检测,' + result[1]) else : file = os.path.basename(file)", "get_death=0, max_size=4 * 1024 * 1024) if result[0] : temp_list = result[1] for", "(False, '文件' + filename + '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if result[0]", "result = self.check_roles(content_dict) if result[0] : includefile_dict = result[1] for file, file_type in", "zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles' , 'main') :", "result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return (True, content) def roles(self, roles_path, this_basedir=None,", "filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库", "this_basedir : filename = file else : try : filename = this_basedir +", "temp_dir = this_roles_path + '/templates/' content_dict['templates'] = {} result = get_pathlist(temp_dir, get_death=0, max_size=4", "else : sub_preserve = preserve result = self.yaml_loader(filename) if result[0] : (filename, content,", ": sub_preserve = preserve if not name : name = roles_path result =", "roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir +", "preserve : result = self.write2db(name, data, 'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件'", "result[0] : includefile_dict = result[1] for file, file_type in includefile_dict.items() : result =", "else : return (True, content) def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''):", "del content_dict['templates'] result = self.check_roles(content_dict) if result[0] : includefile_dict = result[1] for file,", "= self._isrolesname(name) if not result : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False,", "if not content_dict['templates'] : del content_dict['templates'] result = self.check_roles(content_dict) if result[0] : 
includefile_dict", ": {}, 'include': include_content, 'roles': {name:content_dict}, } if preserve : result = self.write2db(name,", ": self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件' + filename", "else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir +", "together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and together:", "return (False, '文件' + filename + '未通过yaml语法检测,' + result[1]) this_basedir = os.path.dirname(filename) include_content", "'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1])", "'类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1]) return (False, '文件' + filename +", "preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in", ": content, 'include': include_content, 'roles': roles_content, } if preserve : result = self.write2db(name,", "(False, '参数file_type错误') result = self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件' + file +", "not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return (False, '文件'", "+ '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir + '/main.yaml语法错误,' + result[1]) data =", "'/' + file except : filename = file result = self.yaml_loader(filename) if result[0]", ":参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因", "'未通过yaml语法检测,' + result[1]) else : file = os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list", "describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in ('main', 'tasks',", "in ('tasks', 'vars', 'handlers', 'meta', 'defaults') : yaml_file = this_roles_path + '/' +", "temp in temp_list : result = read_file(temp) if result[0] : temp_file = os.path.basename(temp)", "result[1]) this_basedir = os.path.dirname(filename) include_content = {} roles_content = {} for file, file_type", "if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result", "and together: sub_preserve = False else : sub_preserve = preserve if not name", "roles_content = {} for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir,", "'vars', 'handlers', 'meta', 'defaults') : yaml_file = this_roles_path + '/' + this_dir +", "''' if yaml_tpye in ('full_roles' , 'main') : result = self.main(this_path, preserve=preserve, together=together,", "together: sub_preserve = False else : sub_preserve = preserve result = self.yaml_loader(filename) if", "content_dict['templates'][temp_file] = result[1] if not content_dict['templates'] : del content_dict['templates'] result = self.check_roles(content_dict) if", "(False, 
result[1]) result = self.check_include(yaml_data, file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' + file", ": result = self.write2db(name, data, 'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件' +", "roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因", "return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if", "not in ('main', 'tasks', 'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False,", "'roles包含的include文件' + file + '未通过语法检测,' + result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为'", ": if this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir", "(False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together : return", "'var') : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file)", "('full_roles' , 'main') : result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe) elif yaml_tpye", "name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明", "self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together : return (True, data) else :", "self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return (True, content) def roles(self, roles_path, this_basedir=None, preserve=True,", "describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict = {} if preserve", "def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径", "+ '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件' + file", "library.utils.file import read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None,", "this_basedir is None or not this_basedir : filename = file else : try", "result[1]) return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename", "zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not in ('main', 'tasks', 'var')", "yaml_file = this_roles_path + '/' + this_dir + '/main.yaml' result = read_file(yaml_file) if", "+ '/templates/' content_dict['templates'] = {} result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 *", "result = self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件'", "+ '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir + '/main.yaml语法错误,'", "include_content.update({file:result[1]}) for roles in roles_list : result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together) if", 
"include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径", "+ file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir is None or", "name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve", "= False else : sub_preserve = preserve result = self.yaml_loader(filename) if result[0] :", "result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' +", "+ this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir + '/main.yaml语法错误,' + result[1])", "+ '/roles/' + roles_path except : this_roles_path = roles_path include_content = {} for", "includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为'", "except : filename = file result = self.yaml_loader(filename) if result[0] : (content, yaml_data)", "return (False, result[1]) if preserve : result = self.write2db(name, content, 'include', describe=describe) if", "+ '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1])", "describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明", "result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together : return (True, content_dict, include_content)", "file_type=file_type) if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return", "this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') : yaml_file = this_roles_path + '/'", "this_basedir=this_basedir, preserve=sub_preserve, together=together) if result[0] : include_content.update(result[2]) roles = os.path.basename(roles) roles_content.update({roles:result[1]}) else :", "+ roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写') else : if this_basedir is None", "= False else : sub_preserve = preserve if not name : name =", "try : this_roles_path = this_basedir + '/roles/' + roles_path except : this_roles_path =", "+ file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type)", "'defaults') : yaml_file = this_roles_path + '/' + this_dir + '/main.yaml' result =", "return (False, '参数file_type错误') result = self._isinclude(file) if not result[0] : self.logger.error('检测yaml文件' + file", "result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path", "together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if yaml_tpye in ('full_roles'", "roles_path except : this_roles_path = roles_path include_content = {} for this_dir in ('tasks',", "'不是接受值,只能接受full_roles、main、include、roles') return result def main(self, filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径", 
"+ '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together : return", "''' content_dict = {} if preserve and together: sub_preserve = False else :", "if preserve : result = self.write2db(name, data, 'roles', describe=describe) if not result[0] :", "检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空))", "= self.check_main(yaml_data) if result[0] : (roles_list, includefile_dict) = result[1:] else : self.logger.error('检测yaml文件' +", "file = os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list : result = self.roles(roles, this_basedir=this_basedir,", "preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1]) return", "result = read_file(temp) if result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file] = result[1] if", "roles_path else : try : this_roles_path = this_basedir + '/roles/' + roles_path except", "import os from library.connecter.ansible.yaml import Yaml_Base from library.utils.file import read_file from library.utils.path import", "filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1]) return (False, '文件' +", "file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if", "'/roles/' + roles_path except : this_roles_path = roles_path include_content = {} for this_dir", "= self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe) elif yaml_tpye == 'roles' : result", "name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' +", "this_dir + '/main.yaml' result = read_file(yaml_file) if not result[0] : if this_dir ==", "result[0] : if this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' +", "yaml_tpye in ('full_roles' , 'main') : result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe)", "result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1])", "preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,返回内容为(True,roles下所有文件内容(格式为字典,可能为空), roles下所有文件中include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' content_dict =", "roles_path + '失败,' + this_dir + '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue", ": self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')", "+ '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1])", "roles_path + '成功') if together : return (True, content_dict, include_content) else : return", "+ filename + 
'类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1]) return (False, '文件'", "result[1]) return (False, '文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,' +", "'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1])", "together=False, name='', describe=''): ''' 检测来自文件的yaml语法等是否正确的路由器 :参数 filename:文件 name:名称 this_basedir:目录 yaml_tpye:yaml文件类型 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称", "else : file = os.path.basename(file) include_content.update({file:result[1]}) for roles in roles_list : result =", "file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,include文件内容(格式为字典,可能为空)) 失败为False,返回失败原因 ''' if file_type not", "else : try : this_roles_path = this_basedir + '/roles/' + roles_path except :", "= result[1] if not content_dict['templates'] : del content_dict['templates'] result = self.check_roles(content_dict) if result[0]", "import read_file from library.utils.path import get_pathlist class Read_File(Yaml_Base): def router(self, this_path, this_basedir=None, yaml_tpye='main',", "result = self.write2db(name, data, 'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + filename", "includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件'", "preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if preserve and", "''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录 file:文件 this_path:引用时的路径 file_type:类型 preserve:是否写入数据库 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果,", "+ '类型为full_roles或者main语法成功') if together : return (True, data) else : return (True, content)", "if preserve : result = self.write2db(name, data, 'main', describe=describe) if not result[0] :", "+ '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1]) return (False, result[1]) result = self.check_include(yaml_data, file_type=file_type) if not", "content) def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''): ''' 检测include文件的语法等是否正确 :参数 this_basedir:引用该文件的上级目录", "+ '/main.yaml语法错误,' + result[1]) data = { 'main' : {}, 'include': include_content, 'roles':", "filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果, 成功为true,文件内容(格式为字典)) 失败为False,返回失败原因 ''' if", "read_file(yaml_file) if not result[0] : if this_dir == 'tasks' : self.logger.error('检测yaml文件roles名为' + roles_path", "+ file + '未通过语法检测,原因:' + result[1]) return (False, 'roles包含的include文件' + file + '未通过语法检测,'", "检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数 filename:文件 name:名称 preserve:是否写入数据库 together:是否返回该main下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明 :return 元组,第一个为执行结果,", "= result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1]) return (False,", "this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe) else : self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return", "+ file + 
'未通过语法检测,' + result[1]) else : include_content.update({file:result[1]}) else : self.logger.error('检测yaml文件roles名为' +", "= roles_path else : try : this_roles_path = this_basedir + '/roles/' + roles_path", "content, yaml_data) = result[1:] else : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1])", "roles_content, } if preserve : result = self.write2db(name, data, 'main', describe=describe) if not", "失败为False,返回失败原因 ''' content_dict = {} if preserve and together: sub_preserve = False else", "= self.write2db(name, data, 'main', describe=describe) if not result[0] : self.logger.error('检测yaml文件' + filename +", "'/' + this_dir + '/main.yaml' result = read_file(yaml_file) if not result[0] : if", "file + '类型为include语法失败,参数file_type错误') return (False, '参数file_type错误') result = self._isinclude(file) if not result[0] :", "= {} result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024) if result[0]", "preserve : result = self.write2db(name, content, 'include', describe=describe) if not result[0] : self.logger.error('检测yaml文件'", "+ '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue else : content_dict[this_dir] = result[1]", ": self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,' + result[1])", "{} for file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve)", ": if this_basedir is None or not this_basedir: this_roles_path = roles_path else :", "if not result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False,", "roles_path + '失败,无法写入数据库,' + result[1]) return (False, '无法写入数据库,' + result[1]) self.logger.info('检测yaml文件roles名为' + roles_path", "'文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1]) data =", "+ result[1]) self.logger.info('检测yaml文件roles名为' + roles_path + '成功') if together : return (True, content_dict,", "= get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024) if result[0] : temp_list =", "'中的include文件名为' + file + '未通过yaml语法检测,' + result[1]) else : file = os.path.basename(file) include_content.update({file:result[1]})", "= self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path +", "self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles') return", "+ result[1]) return (False, this_dir + '/main.yaml语法错误,' + result[1]) data = { 'main'", "+ result[1]) this_basedir = os.path.dirname(filename) include_content = {} roles_content = {} for file,", "= this_roles_path + '/' + this_dir + '/main.yaml' result = read_file(yaml_file) if not", "'无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return (True, content) def roles(self,", "result def main(self, filename, preserve=True, together=False, name='', describe=''): ''' 检测main文件的语法等是否正确,如果含有include或/和roles,会逐个检查 include:只能为相对路径 roles:只能为字母和数字组合 :参数", "(False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功') return (True, content) def", "= file result = self.yaml_loader(filename) if result[0] : (content, yaml_data) = result[2:] else", "(False, this_dir + '/main.yaml不存在') continue else : content_dict[this_dir] = result[1] temp_dir = this_roles_path", "this_roles_path + 
'/templates/' content_dict['templates'] = {} result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024", ": self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1]) return (False, result[1]) if preserve", "temp_list = result[1] for temp in temp_list : result = read_file(temp) if result[0]", "result[1]) return (False, this_dir + '/main.yaml语法错误,' + result[1]) data = { 'main' :", "'失败,' + this_dir + '/main.yaml不存在') return (False, this_dir + '/main.yaml不存在') continue else :", "together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/ roles_path:引用该roles的main文件写的roles路径 preserve:是否写入数据库 together:是否返回该roles下所有文件内容 name:yaml文件内容写入数据的名称 describe:yaml文件内容写入数据的描述 zhname:yaml文件内容写入数据的中文名称,很简短说明", "= self.main(this_path, preserve=preserve, together=together, name=name, describe=describe) elif yaml_tpye == 'include' : result =", "this_basedir: this_roles_path = roles_path else : try : this_roles_path = this_basedir + '/roles/'", "+ filename + '类型为full_roles或者main语法成功') if together : return (True, data) else : return", ": result = self.write2db(name, data, 'roles', describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' +", "'类型为include语法失败,但无法写入数据库,原因:' + result[1]) return (False, '无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为include语法成功')", "describe=describe) if not result[0] : self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1]) return", ": this_roles_path = this_basedir + '/roles/' + roles_path except : this_roles_path = roles_path", "describe=describe) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1]) return", "result[0] : self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1]) return result if this_basedir", "content) def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''): ''' 检测单个roles的语法等是否正确 :参数 this_basedir:引用该roles的main文件的上级目录,例如/opt/lykops/example/ansible/roles/nginx/main.yaml引用一个roles,那么该值为/opt/lykops/example/ansible/roles/nginx/", "file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1])", "file, file_type in includefile_dict.items() : result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not", "in temp_list : result = read_file(temp) if result[0] : temp_file = os.path.basename(temp) content_dict['templates'][temp_file]", "self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve) if not result[0] : self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:'", "+ filename + '通过yaml语法检测,但无法写入数据库' + result[1]) self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功') if together", "filename = this_basedir + '/' + file except : filename = file result", "+ result[1]) return result if this_basedir is None or not this_basedir : filename", "content_dict['templates'] = {} result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024) if", "this_dir + '/main.yaml语法错误,原因:' + result[1]) return (False, this_dir + '/main.yaml语法错误,' + result[1]) data", "'文件' + filename + '转化成yaml数据时失败,' + result[1]) result = self.check_main(yaml_data) if result[0] :", "+ '/main.yaml不存在') continue else : 
class Read_File(Yaml_Base):
    def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''):
        '''
        Router that checks whether YAML syntax coming from a file is correct.
        :param filename: file
               name: name
               this_basedir: directory
               yaml_tpye: type of the YAML file
               preserve: whether to write the result to the database
               together: whether to return the contents of all files under this main file
               name: name under which the YAML content is stored in the database
               describe: description stored with the YAML content
               zhname: Chinese display name stored with the YAML content, a very short note
        :return: tuple; the first element is the result: True with the file content (as a dict) on success,
                 False with the reason on failure
        '''
        if yaml_tpye in ('full_roles', 'main'):
            result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe)
        elif yaml_tpye == 'include':
            result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe)
        elif yaml_tpye == 'roles':
            result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe)
        else:
            self.logger.error('YAML syntax check failed, reason: argument yaml_data ' + yaml_tpye + ' is not an accepted value, only full_roles, main, include and roles are accepted')
            return (False, 'argument yaml_data ' + yaml_tpye + ' is not an accepted value, only full_roles, main, include and roles are accepted')
        return result

    # Only fragments of the remaining methods survive in the source. They show that:
    # - main() loads the file with self.yaml_loader(), derives this_basedir from
    #   os.path.dirname(filename), checks every include and roles entry it finds,
    #   collects their contents into include_content / roles_content keyed by
    #   os.path.basename(), and returns (False, '... did not pass the YAML syntax check ...')
    #   when any of them fails.
    # - include() validates the file with self._isinclude() and the YAML conversion and,
    #   when preserve is set, stores it with self.write2db(name, content, 'include',
    #   describe=describe), returning (False, 'could not write to the database, ...') on failure.
    # - roles() requires a roles name accepted by self._isrolesname(), resolves the roles
    #   directory as this_basedir + '/roles/' + roles_path (falling back to roles_path),
    #   reads the main.yaml under each of 'tasks', 'vars', 'handlers', 'meta', 'defaults'
    #   (tasks/main.yaml is mandatory), gathers template files via get_pathlist(temp_dir,
    #   get_death=0, ...) into content_dict['templates'], and validates everything with
    #   self.check_roles() before optionally persisting it.
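A hypothetical usage sketch, not taken from the original project: calling the router above on one playbook main file without persisting the result. The constructor call, the file path and the keyword values are assumptions; logger, yaml_loader and write2db are expected to come from the Yaml_Base base class.

# Illustrative only; constructor arguments and the path are assumptions.
checker = Read_File()
ok, data = checker.router('/srv/ansible/site.yaml', yaml_tpye='main',
                          preserve=False, together=True, name='site')
if ok:
    print(data)                # parsed YAML content as a dict
else:
    print('check failed:', data)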
<reponame>joehwang/auto-door
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import redis
import re
import os


class CvsScrapyPipeline(object):
    def open_spider(self, spider):
        self._redis = redis.Redis(host='redis', port=6379,
                                  decode_responses=True, password=os.getenv("REDISPWD"))

    def process_item(self, item, spider):
        if item.get("addr"):
            tags = re.findall(r"(\D*[縣市])?(\D*[區鎮鄉市])?(\D*[村里])?(\D*[路大道街])", item["addr"])
            item["tags"] = ",".join(filter(None, [a for b in tags for a in b]))
            # (\D*[縣市])(\D*[區鎮鄉市])(\D*[路大道街])
            self._redis.set(item["name"], item["addr"])
            self._redis.set(item["phone"], item["name"])
            self._redis.lpush(item["kind"], item["name"])
        return item

    def close_spider(self, spider):
        pass
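Not part of the original repository: a sketch of how this pipeline would typically be enabled in the project's settings.py, as the comment above suggests. The module path cvs_scrapy.pipelines is a guess based on the class name, and the priority value 300 is arbitrary.

# Assumed settings.py entry (module path is a guess, not confirmed by the source):
ITEM_PIPELINES = {
    "cvs_scrapy.pipelines.CvsScrapyPipeline": 300,
}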
<gh_stars>1-10
import sqlite3


class DBHelper:
    def __init__(self, dbname="expenses.sqlite"):
        self.dbname = dbname
        self.conn = sqlite3.connect(dbname)

    def setup(self):
        outcome = "CREATE TABLE IF NOT EXISTS outcome (date date, value float, comment varchar(50))"
        income = "CREATE TABLE IF NOT EXISTS income (date date, value float, comment varchar(50))"
        self.conn.execute(outcome)
        self.conn.execute(income)
        self.conn.commit()

    def add_income(self, date, value, comment):
        stmt = "INSERT INTO income (date, value, comment) VALUES (?, ?, ?)"
        args = (date, value, comment)
        self.conn.execute(stmt, args)
        self.conn.commit()

    def add_outcome(self, date, value, comment):
        stmt = "INSERT INTO outcome (date, value, comment) VALUES (?, ?, ?)"
        args = (date, value, comment)
        self.conn.execute(stmt, args)
        self.conn.commit()

    def delete_income(self, value, comment):
        stmt = "DELETE FROM income WHERE value = (?) AND comment = (?)"
        args = (value, comment)
        self.conn.execute(stmt, args)
        self.conn.commit()

    def delete_outcome(self, value, comment):
        stmt = "DELETE FROM outcome WHERE value = (?) AND comment = (?)"
        args = (value, comment)
        self.conn.execute(stmt, args)
        self.conn.commit()

    def get_income(self, month):
        cur = self.conn.cursor()
        stmt = "SELECT * FROM income WHERE strftime('%m', date) = '" + month + "'"
        cur.execute(stmt)
        rows = cur.fetchall()
        return rows

    def get_total_income(self, month):
        cur = self.conn.cursor()
        stmt = "SELECT SUM(value) FROM income WHERE strftime('%m', date) = '" + month + "'"
        cur.execute(stmt)
        total = cur.fetchone()
        return total[0]

    def get_outcome(self, month):
        cur = self.conn.cursor()
        stmt = "SELECT * FROM outcome WHERE strftime('%m', date) = '" + month + "'"
        cur.execute(stmt)
        rows = cur.fetchall()
        return rows

    def get_total_outcome(self, month):
        cur = self.conn.cursor()
        stmt = "SELECT SUM(value) FROM outcome WHERE strftime('%m', date) = '" + month + "'"
        cur.execute(stmt)
        total = cur.fetchone()
        return total[0]
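A minimal usage sketch of the helper above, not taken from the original project: it creates the tables, inserts one income and one outcome row, and reads the monthly income total back. The file name and values are illustrative.

# Illustrative usage of DBHelper (values are made up):
db = DBHelper("expenses.sqlite")
db.setup()
db.add_income("2021-05-03", 1200.0, "salary")
db.add_outcome("2021-05-10", 35.5, "groceries")
print(db.get_total_income("05"))   # strftime('%m', date) yields zero-padded month strings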
<filename>tests/utils.py<gh_stars>1-10
from dataclasses import dataclass

from autodidaqt.instrument import LogicalAxisSpecification
from autodidaqt.mock import MockMotionController


@dataclass
class CoordinateOffsets:
    x_off: float = 0
    y_off: float = 0
    z_off: float = 0


class LogicalMockMotionController(MockMotionController):
    r = 3.14159 / 4

    # cartesian
    x_y_z = LogicalAxisSpecification(
        {
            "stages[0]": lambda state, x, y, z: x - y,
            "stages[1]": lambda state, x, y, z: x + y,
            "stages[2]": lambda state, x, y, z: z,
        },
        {
            "x": lambda state, s0, s1, s2: (s0 + s1) / 2,
            "y": lambda state, s0, s1, s2: (s1 - s0) / 2,
            "z": lambda state, s0, s1, s2: s2,
        },
        initial_coords=(0, 0, 0),
    )  # (x,y,z) = (0,0,0)

    # stateful transform: offset coordinates
    offset_x_y_z = LogicalAxisSpecification(
        {
            "stages[0]": lambda state, x, y, z: x + state.x_off,
            "stages[1]": lambda state, x, y, z: y + state.y_off,
            "stages[2]": lambda state, x, y, z: z + state.z_off,
        },
        {
            "x": lambda state, s0, s1, s2: s0 - state.x_off,
            "y": lambda state, s0, s1, s2: s1 - state.y_off,
            "z": lambda state, s0, s1, s2: s2 - state.z_off,
        },
        initial_coords=(0, 0, 0),
        state=CoordinateOffsets,
    )
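A standalone sketch, assumed rather than taken from the original test suite: the two dictionaries in each LogicalAxisSpecification are forward (logical to stage) and inverse (stage to logical) coordinate transforms, so composing them should return the original point. The helper names below are invented for illustration.

# Plain-Python round-trip check of the cartesian pair above, without autodidaqt:
def to_stages(x, y, z):
    return (x - y, x + y, z)                      # stage positions

def to_logical(s0, s1, s2):
    return ((s0 + s1) / 2, (s1 - s0) / 2, s2)     # logical (x, y, z)

assert to_logical(*to_stages(1.0, 2.0, 3.0)) == (1.0, 2.0, 3.0)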
<reponame>msamunetogetoge/BookRecommendationApp
from functools import wraps

from django.shortcuts import render

from Login.models import M_User


def need_login(redirect_field_name: str, err_msg: str, view_func=None):
    """Use this where login is required.
    When the user is not logged in, render redirect_field_name with {'msg': err_msg} attached.

    Args:
        redirect_field_name (str): page to show when the user is not logged in
        view_func (function, optional): view function.
        err_msg (str): message to show when the user is not logged in.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            if request.user.is_authenticated:
                return func(request, *args, **kwargs)
            msg = {"msg": err_msg}
            return render(request, redirect_field_name, msg)
        return wrapper
    return decorator
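A hypothetical example of applying the decorator, not present in the original file; the template names, the message and the view body are invented for illustration.

from django.shortcuts import render

@need_login("login.html", "Please log in first.")
def bookshelf(request):
    # only reached when request.user.is_authenticated is True
    return render(request, "bookshelf.html", {})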
import json
from urllib import request

import requests  # for rest api

repository_url = 'http://10.3.100.22:8080'
restpath = '/rest'
xmlpath = '/xmlui'


def get_communities():
    communities = request.urlopen(repository_url + restpath + '/communities')
    communities_json = communities.read().decode('utf-8')
    communities_load = json.loads(communities_json)
    communities_processed = []
    for dictionary in communities_load:
        if dictionary['name'] and dictionary['name'] != '':
            communities_processed.append(dictionary)
    # print(communities_processed)
    with open("test.json", 'w') as jsonfile:
        text = json.dumps(communities_processed)
        jsonfile.write(text)
    return communities_processed


def get_by_year(cp):
    for dictionary in cp:
        try:
            year = int(dictionary['name'])
            id = dictionary['id']
            print(year)
            # ccj = curr_collections.read().decode('utf-8')
        except:
            year = 0
        if year != 0:
            path = repository_url + dictionary['link'] + '/collections'
            print(path)
            curr_collections = request.urlopen(path)
            curr_json = json.loads(curr_collections.read().decode('utf-8'))
            print(curr_json[0]['handle'])
            path += str(curr_json[0]['id'])
            temp = requests.get(path)
            print(temp)


if __name__ == '__main__':
    get_by_year(get_communities())
curr_collections.read().decode('utf-8') except: year = 0 if year != 0: path =", "= '/xmlui' def get_communities(): communities = request.urlopen(repository_url + restpath + '/communities') communities_json =", "+ '/communities') communities_json = communities.read().decode('utf-8') communities_load = json.loads(communities_json) communities_processed = [] for dictionary", "communities_processed.append(dictionary) #print(communities_processed) with open(\"test.json\", 'w') as jsonfile: text = json.dumps(communities_processed) jsonfile.write(text) return communities_processed", "for dictionary in communities_load: if dictionary['name'] and dictionary['name'] != '': communities_processed.append(dictionary) #print(communities_processed) with", "= 0 if year != 0: path = repository_url + dictionary['link'] + '/collections'", "with open(\"test.json\", 'w') as jsonfile: text = json.dumps(communities_processed) jsonfile.write(text) return communities_processed def get_by_year(cp):", "dictionary['name'] != '': communities_processed.append(dictionary) #print(communities_processed) with open(\"test.json\", 'w') as jsonfile: text = json.dumps(communities_processed)", "for dictionary in cp: try: year = int(dictionary['name']) id = dictionary['id'] print(year) #ccj", "'/rest' xmlpath = '/xmlui' def get_communities(): communities = request.urlopen(repository_url + restpath + '/communities')", "json.dumps(communities_processed) jsonfile.write(text) return communities_processed def get_by_year(cp): for dictionary in cp: try: year =", "+ dictionary['link'] + '/collections' print(path) curr_collections = request.urlopen(path) curr_json = json.loads(curr_collections.read().decode('utf-8')) print(curr_json[0]['handle']) path", "communities_json = communities.read().decode('utf-8') communities_load = json.loads(communities_json) communities_processed = [] for dictionary in communities_load:", "if year != 0: path = repository_url + dictionary['link'] + '/collections' print(path) curr_collections", "id = dictionary['id'] print(year) #ccj = curr_collections.read().decode('utf-8') except: year = 0 if year", "api repository_url = 'http://10.3.100.22:8080' restpath = '/rest' xmlpath = '/xmlui' def get_communities(): communities", "= json.dumps(communities_processed) jsonfile.write(text) return communities_processed def get_by_year(cp): for dictionary in cp: try: year", "= communities.read().decode('utf-8') communities_load = json.loads(communities_json) communities_processed = [] for dictionary in communities_load: if", "request.urlopen(path) curr_json = json.loads(curr_collections.read().decode('utf-8')) print(curr_json[0]['handle']) path += str(curr_json[0]['id']) temp = requests.get(path) print(temp) if", "if dictionary['name'] and dictionary['name'] != '': communities_processed.append(dictionary) #print(communities_processed) with open(\"test.json\", 'w') as jsonfile:", "def get_communities(): communities = request.urlopen(repository_url + restpath + '/communities') communities_json = communities.read().decode('utf-8') communities_load", "= int(dictionary['name']) id = dictionary['id'] print(year) #ccj = curr_collections.read().decode('utf-8') except: year = 0", "= '/rest' xmlpath = '/xmlui' def get_communities(): communities = request.urlopen(repository_url + restpath +", "communities = request.urlopen(repository_url + restpath + '/communities') communities_json = communities.read().decode('utf-8') communities_load = json.loads(communities_json)", "json from urllib import request import requests #for rest api 
repository_url = 'http://10.3.100.22:8080'", "'/communities') communities_json = communities.read().decode('utf-8') communities_load = json.loads(communities_json) communities_processed = [] for dictionary in" ]
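The row above appears to hold n-grams of a small DSpace REST API script: get_communities() lists communities with a non-empty name and dumps them to test.json, and get_by_year() walks communities whose name parses as a year and fetches their collections. The reassembly below assumes the visible windows cover the whole file; it deliberately keeps the original's rough edges (the bare except, the unused id variable, the commented-out lines) so it matches the n-grams, and the hard-coded repository_url points at a private host, so the network calls only succeed in that environment.

import json
from urllib import request

import requests  # for rest api

repository_url = 'http://10.3.100.22:8080'
restpath = '/rest'
xmlpath = '/xmlui'


def get_communities():
    # Fetch all communities, keep those with a non-empty name, dump them to test.json.
    communities = request.urlopen(repository_url + restpath + '/communities')
    communities_json = communities.read().decode('utf-8')
    communities_load = json.loads(communities_json)
    communities_processed = []
    for dictionary in communities_load:
        if dictionary['name'] and dictionary['name'] != '':
            communities_processed.append(dictionary)
    # print(communities_processed)
    with open("test.json", 'w') as jsonfile:
        text = json.dumps(communities_processed)
        jsonfile.write(text)
    return communities_processed


def get_by_year(cp):
    # Communities whose name is a year get their first collection's handle printed.
    for dictionary in cp:
        try:
            year = int(dictionary['name'])
            id = dictionary['id']  # unused in the visible windows
            print(year)
            # ccj = curr_collections.read().decode('utf-8')
        except:
            year = 0
        if year != 0:
            path = repository_url + dictionary['link'] + '/collections'
            print(path)
            curr_collections = request.urlopen(path)
            curr_json = json.loads(curr_collections.read().decode('utf-8'))
            print(curr_json[0]['handle'])
            path += str(curr_json[0]['id'])
            temp = requests.get(path)
            print(temp)


if __name__ == '__main__':
    get_by_year(get_communities())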
[ "security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop):", "pulumi.runtime from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security", "you cannot use a Network Security Group with in-line Network Security Rules in", "are doing! *** import pulumi import pulumi.runtime from .. import utilities, tables class", "provides both a standalone Network Security Rule resource, and allows for Network Security", "the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by", "Network security groups enable inbound or outbound traffic to be enabled or denied.", "not edit by hand unless you're certain you know what you are doing!", "__name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with the", "conflict of rule settings and will overwrite rules. \"\"\" def __init__(__self__, __name__, __opts__=None,", "Do not edit by hand unless you're certain you know what you are", "if not isinstance(__name__, str): raise TypeError('Expected resource name to be a string') if", "contains a list of network security rules. Network security groups enable inbound or", "TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if not", "Terraform currently provides both a standalone Network Security Rule resource, and allows for", "with in-line Network Security Rules in conjunction with any Network Security Rule resources.", "raise TypeError('Missing required property location') __props__['location'] = location __props__['name'] = name if not", "required property location') __props__['location'] = location __props__['name'] = name if not resource_group_name: raise", "= security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self,", "~> **NOTE on Network Security Groups and Network Security Rules:** Terraform currently provides", "security groups enable inbound or outbound traffic to be enabled or denied. ~>", "generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not", "WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***", "import pulumi.runtime from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network", "__props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return", "defined in-line within the Network Security Group resource. At this time you cannot", "= resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__,", "any Network Security Rule resources. Doing so will cause a conflict of rule", "isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ =", "utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group that contains a", "Security Rule resources. 
Doing so will cause a conflict of rule settings and", "Security Rules in conjunction with any Network Security Rule resources. Doing so will", "NetworkSecurityGroup resource with the given unique name, props, and options.\"\"\" if not __name__:", "__props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop): return", "Network Security Groups and Network Security Rules:** Terraform currently provides both a standalone", "*** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool.", "Security Rule resource, and allows for Network Security Rules to be defined in-line", "resource name argument (for URN creation)') if not isinstance(__name__, str): raise TypeError('Expected resource", "not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__", "by hand unless you're certain you know what you are doing! *** import", "pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict()", "will overwrite rules. \"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None):", "list of network security rules. Network security groups enable inbound or outbound traffic", "pulumi import pulumi.runtime from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a", "options to be a ResourceOptions instance') __props__ = dict() if not location: raise", "super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop", "unless you're certain you know what you are doing! *** import pulumi import", "Group resource. At this time you cannot use a Network Security Group with", "of rule settings and will overwrite rules. \"\"\" def __init__(__self__, __name__, __opts__=None, location=None,", "Group with in-line Network Security Rules in conjunction with any Network Security Rule", "__opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with the given", "not location: raise TypeError('Missing required property location') __props__['location'] = location __props__['name'] = name", "*** # *** Do not edit by hand unless you're certain you know", "not resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules", "be a ResourceOptions instance') __props__ = dict() if not location: raise TypeError('Missing required", "if not location: raise TypeError('Missing required property location') __props__['location'] = location __props__['name'] =", "by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit", "\"\"\"Create a NetworkSecurityGroup resource with the given unique name, props, and options.\"\"\" if", "Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're", "import pulumi import pulumi.runtime from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages", "Rules in conjunction with any Network Security Rule resources. 
Doing so will cause", "= location __props__['name'] = name if not resource_group_name: raise TypeError('Missing required property resource_group_name')", "you know what you are doing! *** import pulumi import pulumi.runtime from ..", "property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__(", "rule settings and will overwrite rules. \"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None,", "translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop", "a network security group that contains a list of network security rules. Network", "location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with the given unique", "resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags']", "Rules to be defined in-line within the Network Security Group resource. At this", "= name if not resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name", "security group that contains a list of network security rules. Network security groups", "name, props, and options.\"\"\" if not __name__: raise TypeError('Missing resource name argument (for", "resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup',", "def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or", "will cause a conflict of rule settings and will overwrite rules. \"\"\" def", "name to be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected", "import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group that contains", "group that contains a list of network security rules. Network security groups enable", "to be enabled or denied. ~> **NOTE on Network Security Groups and Network", "Rule resources. Doing so will cause a conflict of rule settings and will", "be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options", "Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand", "if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a", "so will cause a conflict of rule settings and will overwrite rules. \"\"\"", "in-line within the Network Security Group resource. At this time you cannot use", "Network Security Rules in conjunction with any Network Security Rule resources. 
Doing so", "raise TypeError('Expected resource name to be a string') if __opts__ and not isinstance(__opts__,", "# *** Do not edit by hand unless you're certain you know what", "Security Rules:** Terraform currently provides both a standalone Network Security Rule resource, and", "and Network Security Rules:** Terraform currently provides both a standalone Network Security Rule", "what you are doing! *** import pulumi import pulumi.runtime from .. import utilities,", "resource, and allows for Network Security Rules to be defined in-line within the", "coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge", "the Network Security Group resource. At this time you cannot use a Network", "URN creation)') if not isinstance(__name__, str): raise TypeError('Expected resource name to be a", "that contains a list of network security rules. Network security groups enable inbound", "traffic to be enabled or denied. ~> **NOTE on Network Security Groups and", "*** Do not edit by hand unless you're certain you know what you", "be defined in-line within the Network Security Group resource. At this time you", "a NetworkSecurityGroup resource with the given unique name, props, and options.\"\"\" if not", "cannot use a Network Security Group with in-line Network Security Rules in conjunction", "# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform", "resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with the given unique name, props,", "\"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup", "= dict() if not location: raise TypeError('Missing required property location') __props__['location'] = location", "props, and options.\"\"\" if not __name__: raise TypeError('Missing resource name argument (for URN", "and will overwrite rules. \"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None,", "a list of network security rules. Network security groups enable inbound or outbound", "security rules. Network security groups enable inbound or outbound traffic to be enabled", "resource options to be a ResourceOptions instance') __props__ = dict() if not location:", "Network Security Rule resources. Doing so will cause a conflict of rule settings", "__name__: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(__name__, str):", "outbound traffic to be enabled or denied. ~> **NOTE on Network Security Groups", "**NOTE on Network Security Groups and Network Security Rules:** Terraform currently provides both", "be enabled or denied. ~> **NOTE on Network Security Groups and Network Security", "isinstance(__name__, str): raise TypeError('Expected resource name to be a string') if __opts__ and", "(tfgen) Tool. *** # *** Do not edit by hand unless you're certain", "name if not resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules']", "this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** #", "you're certain you know what you are doing! *** import pulumi import pulumi.runtime", "inbound or outbound traffic to be enabled or denied. ~> **NOTE on Network", "overwrite rules. 
\"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create", "location') __props__['location'] = location __props__['name'] = name if not resource_group_name: raise TypeError('Missing required", "a Network Security Group with in-line Network Security Rules in conjunction with any", "know what you are doing! *** import pulumi import pulumi.runtime from .. import", "a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to", "# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen)", "ResourceOptions instance') __props__ = dict() if not location: raise TypeError('Missing required property location')", "rules. \"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a", "__props__['name'] = name if not resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] =", "Network Security Rules:** Terraform currently provides both a standalone Network Security Rule resource,", "groups enable inbound or outbound traffic to be enabled or denied. ~> **NOTE", "Security Rules to be defined in-line within the Network Security Group resource. At", "argument (for URN creation)') if not isinstance(__name__, str): raise TypeError('Expected resource name to", "from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group", "network security group that contains a list of network security rules. Network security", "both a standalone Network Security Rule resource, and allows for Network Security Rules", "this time you cannot use a Network Security Group with in-line Network Security", "security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with the given unique name, props, and", "location: raise TypeError('Missing required property location') __props__['location'] = location __props__['name'] = name if", "Network Security Rule resource, and allows for Network Security Rules to be defined", "if not resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] =", "NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group that contains a list of network", "'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self,", "__name__, __props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop):", "not __name__: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(__name__,", "conjunction with any Network Security Rule resources. Doing so will cause a conflict", "Rules:** Terraform currently provides both a standalone Network Security Rule resource, and allows", "and allows for Network Security Rules to be defined in-line within the Network", "Network Security Group resource. 
At this time you cannot use a Network Security", "to be a ResourceOptions instance') __props__ = dict() if not location: raise TypeError('Missing", "the given unique name, props, and options.\"\"\" if not __name__: raise TypeError('Missing resource", "Tool. *** # *** Do not edit by hand unless you're certain you", "currently provides both a standalone Network Security Rule resource, and allows for Network", "property location') __props__['location'] = location __props__['name'] = name if not resource_group_name: raise TypeError('Missing", "__init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with", "on Network Security Groups and Network Security Rules:** Terraform currently provides both a", "TypeError('Missing required property location') __props__['location'] = location __props__['name'] = name if not resource_group_name:", "__opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions", "Network Security Group with in-line Network Security Rules in conjunction with any Network", "hand unless you're certain you know what you are doing! *** import pulumi", "__opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)", "and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance')", ".. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group that", "\"\"\" Manages a network security group that contains a list of network security", "to be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource", "standalone Network Security Rule resource, and allows for Network Security Rules to be", "name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource with the given unique name,", "given unique name, props, and options.\"\"\" if not __name__: raise TypeError('Missing resource name", "dict() if not location: raise TypeError('Missing required property location') __props__['location'] = location __props__['name']", "class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group that contains a list of", "raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(__name__, str): raise", "edit by hand unless you're certain you know what you are doing! ***", "Doing so will cause a conflict of rule settings and will overwrite rules.", "tags=None): \"\"\"Create a NetworkSecurityGroup resource with the given unique name, props, and options.\"\"\"", "resource with the given unique name, props, and options.\"\"\" if not __name__: raise", "if not __name__: raise TypeError('Missing resource name argument (for URN creation)') if not", "or denied. ~> **NOTE on Network Security Groups and Network Security Rules:** Terraform", "cause a conflict of rule settings and will overwrite rules. \"\"\" def __init__(__self__,", "unique name, props, and options.\"\"\" if not __name__: raise TypeError('Missing resource name argument", "resources. Doing so will cause a conflict of rule settings and will overwrite", "certain you know what you are doing! 
*** import pulumi import pulumi.runtime from", "TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags", "enabled or denied. ~> **NOTE on Network Security Groups and Network Security Rules:**", "or outbound traffic to be enabled or denied. ~> **NOTE on Network Security", "def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None): \"\"\"Create a NetworkSecurityGroup resource", "At this time you cannot use a Network Security Group with in-line Network", "required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup,", "resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__)", "Security Group with in-line Network Security Rules in conjunction with any Network Security", "time you cannot use a Network Security Group with in-line Network Security Rules", "settings and will overwrite rules. \"\"\" def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None,", "Security Groups and Network Security Rules:** Terraform currently provides both a standalone Network", "and options.\"\"\" if not __name__: raise TypeError('Missing resource name argument (for URN creation)')", "allows for Network Security Rules to be defined in-line within the Network Security", "with the given unique name, props, and options.\"\"\" if not __name__: raise TypeError('Missing", "name argument (for URN creation)') if not isinstance(__name__, str): raise TypeError('Expected resource name", "not isinstance(__name__, str): raise TypeError('Expected resource name to be a string') if __opts__", "use a Network Security Group with in-line Network Security Rules in conjunction with", "(for URN creation)') if not isinstance(__name__, str): raise TypeError('Expected resource name to be", "string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be", "Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless", "Network Security Rules to be defined in-line within the Network Security Group resource.", "__props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def", "creation)') if not isinstance(__name__, str): raise TypeError('Expected resource name to be a string')", "to be defined in-line within the Network Security Group resource. At this time", "str): raise TypeError('Expected resource name to be a string') if __opts__ and not", "file was generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** # ***", "raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if", "resource name to be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise", "Manages a network security group that contains a list of network security rules.", "__self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def", "tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\" Manages a network security group that contains a list", "*** import pulumi import pulumi.runtime from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource): \"\"\"", "network security rules. Network security groups enable inbound or outbound traffic to be", "a ResourceOptions instance') __props__ = dict() if not location: raise TypeError('Missing required property", "you are doing! *** import pulumi import pulumi.runtime from .. import utilities, tables", "__props__['location'] = location __props__['name'] = name if not resource_group_name: raise TypeError('Missing required property", "TypeError('Missing resource name argument (for URN creation)') if not isinstance(__name__, str): raise TypeError('Expected", "was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do", "doing! *** import pulumi import pulumi.runtime from .. import utilities, tables class NetworkSecurityGroup(pulumi.CustomResource):", "within the Network Security Group resource. At this time you cannot use a", "location __props__['name'] = name if not resource_group_name: raise TypeError('Missing required property resource_group_name') __props__['resource_group_name']", "resource. At this time you cannot use a Network Security Group with in-line", "in conjunction with any Network Security Rule resources. Doing so will cause a", "in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing", "TypeError('Expected resource name to be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):", "of network security rules. Network security groups enable inbound or outbound traffic to", "__props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags'] = tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__,", "Groups and Network Security Rules:** Terraform currently provides both a standalone Network Security", "enable inbound or outbound traffic to be enabled or denied. ~> **NOTE on", "denied. ~> **NOTE on Network Security Groups and Network Security Rules:** Terraform currently", "__props__ = dict() if not location: raise TypeError('Missing required property location') __props__['location'] =", "raise TypeError('Missing required property resource_group_name') __props__['resource_group_name'] = resource_group_name __props__['security_rules'] = security_rules __props__['tags'] =", "= tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)", "a conflict of rule settings and will overwrite rules. \"\"\" def __init__(__self__, __name__,", "Security Group resource. 
At this time you cannot use a Network Security Group", "for Network Security Rules to be defined in-line within the Network Security Group", "instance') __props__ = dict() if not location: raise TypeError('Missing required property location') __props__['location']", "rules. Network security groups enable inbound or outbound traffic to be enabled or", "options.\"\"\" if not __name__: raise TypeError('Missing resource name argument (for URN creation)') if", "tags super(NetworkSecurityGroup, __self__).__init__( 'azure:network/networkSecurityGroup:NetworkSecurityGroup', __name__, __props__, __opts__) def translate_output_property(self, prop): return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or", "a standalone Network Security Rule resource, and allows for Network Security Rules to", "with any Network Security Rule resources. Doing so will cause a conflict of", "Rule resource, and allows for Network Security Rules to be defined in-line within" ]
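The row above appears to hold n-grams of a generated Pulumi provider file: the azure NetworkSecurityGroup resource emitted by the Terraform bridge (tfgen). The reassembly below follows the visible windows; note that it is not runnable on its own, because the relative import from .. import utilities, tables only resolves inside the original provider package, and the exact line layout of the generated file is an assumption.

# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import pulumi
import pulumi.runtime

from .. import utilities, tables


class NetworkSecurityGroup(pulumi.CustomResource):
    """
    Manages a network security group that contains a list of network security rules.
    Network security groups enable inbound or outbound traffic to be enabled or denied.

    ~> **NOTE on Network Security Groups and Network Security Rules:** Terraform currently
    provides both a standalone Network Security Rule resource, and allows for Network
    Security Rules to be defined in-line within the Network Security Group resource.
    At this time you cannot use a Network Security Group with in-line Network Security
    Rules in conjunction with any Network Security Rule resources. Doing so will cause
    a conflict of rule settings and will overwrite rules.
    """
    def __init__(__self__, __name__, __opts__=None, location=None, name=None,
                 resource_group_name=None, security_rules=None, tags=None):
        """Create a NetworkSecurityGroup resource with the given unique name, props, and options."""
        if not __name__:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(__name__, str):
            raise TypeError('Expected resource name to be a string')
        if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        __props__ = dict()

        if not location:
            raise TypeError('Missing required property location')
        __props__['location'] = location

        __props__['name'] = name

        if not resource_group_name:
            raise TypeError('Missing required property resource_group_name')
        __props__['resource_group_name'] = resource_group_name

        __props__['security_rules'] = security_rules
        __props__['tags'] = tags

        super(NetworkSecurityGroup, __self__).__init__(
            'azure:network/networkSecurityGroup:NetworkSecurityGroup',
            __name__,
            __props__,
            __opts__)

    def translate_output_property(self, prop):
        # camelCase provider property -> snake_case Python attribute
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # snake_case Python input -> camelCase provider property
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop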
[ "or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index += 1 #breaks once all bullets", "return [] else: index = 0 while True: if len(bullets) == 0: break", "checks if bullets have crossed the display. If a bullet has passed the", "remove it #returns an empty list if the bullet list is empty if", "if index == len(bullets): break # returns empty list if all bullets have", "crossed # the game window if bullets[index][0] < 0 or bullets[index][0]> 900 or", "have been removed if len(bullets) == 0: return [] # returns the new", "0: return [] else: index = 0 while True: if len(bullets) == 0:", "display. If a bullet has passed the # current game window, remove it", "y coordinates (in relation to pixels) have crossed # the game window if", "if len(bullets) == 0: return [] # returns the new list of bullets", "else: index = 0 while True: if len(bullets) == 0: break # checks", "bullets have crossed the display. If a bullet has passed the # current", "game window if bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1]", "> 900: bullets.remove(bullets[index]) else: index += 1 #breaks once all bullets within list", "the display. If a bullet has passed the # current game window, remove", "or y coordinates (in relation to pixels) have crossed # the game window", "window, remove it #returns an empty list if the bullet list is empty", "relation to pixels) have crossed # the game window if bullets[index][0] < 0", "if bullet's x or y coordinates (in relation to pixels) have crossed #", "list has been checked if index == len(bullets): break # returns empty list", "within list has been checked if index == len(bullets): break # returns empty", "# current game window, remove it #returns an empty list if the bullet", "have crossed the display. If a bullet has passed the # current game", "900: bullets.remove(bullets[index]) else: index += 1 #breaks once all bullets within list has", "if the bullet list is empty if len(bullets) == 0: return [] else:", "checks if bullet's x or y coordinates (in relation to pixels) have crossed", "index = 0 while True: if len(bullets) == 0: break # checks if", "index == len(bullets): break # returns empty list if all bullets have been", "# checks if bullet's x or y coordinates (in relation to pixels) have", "# the game window if bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0", "bullets have been removed if len(bullets) == 0: return [] # returns the", "empty list if the bullet list is empty if len(bullets) == 0: return", "bullets_remove(bullets): # checks if bullets have crossed the display. 
If a bullet has", "== len(bullets): break # returns empty list if all bullets have been removed", "0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index", "window if bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] >", "empty list if all bullets have been removed if len(bullets) == 0: return", "been checked if index == len(bullets): break # returns empty list if all", "has been checked if index == len(bullets): break # returns empty list if", "else: index += 1 #breaks once all bullets within list has been checked", "been removed if len(bullets) == 0: return [] # returns the new list", "passed the # current game window, remove it #returns an empty list if", "is empty if len(bullets) == 0: return [] else: index = 0 while", "bullet has passed the # current game window, remove it #returns an empty", "it #returns an empty list if the bullet list is empty if len(bullets)", "if bullets have crossed the display. If a bullet has passed the #", "list is empty if len(bullets) == 0: return [] else: index = 0", "# checks if bullets have crossed the display. If a bullet has passed", "0 while True: if len(bullets) == 0: break # checks if bullet's x", "+= 1 #breaks once all bullets within list has been checked if index", "current game window, remove it #returns an empty list if the bullet list", "x or y coordinates (in relation to pixels) have crossed # the game", "[] else: index = 0 while True: if len(bullets) == 0: break #", "list if all bullets have been removed if len(bullets) == 0: return []", "bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index += 1", "== 0: return [] else: index = 0 while True: if len(bullets) ==", "if len(bullets) == 0: return [] else: index = 0 while True: if", "bullets[index][1] > 900: bullets.remove(bullets[index]) else: index += 1 #breaks once all bullets within", "== 0: break # checks if bullet's x or y coordinates (in relation", "or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index += 1 #breaks once", "If a bullet has passed the # current game window, remove it #returns", "# returns empty list if all bullets have been removed if len(bullets) ==", "bullet's x or y coordinates (in relation to pixels) have crossed # the", "has passed the # current game window, remove it #returns an empty list", "True: if len(bullets) == 0: break # checks if bullet's x or y", "1 #breaks once all bullets within list has been checked if index ==", "an empty list if the bullet list is empty if len(bullets) == 0:", "bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index])", "a bullet has passed the # current game window, remove it #returns an", "empty if len(bullets) == 0: return [] else: index = 0 while True:", "(in relation to pixels) have crossed # the game window if bullets[index][0] <", "bullet list is empty if len(bullets) == 0: return [] else: index =", "< 0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else:", "removed if len(bullets) == 0: return [] # returns the new list of", "game window, remove it #returns an empty list if the bullet list is", "all bullets have been removed if len(bullets) == 0: return [] # returns", "returns empty list if all bullets have been removed if len(bullets) == 0:", "<reponame>AnshDubey1999/Customized-Space-Shooter 
def bullets_remove(bullets): # checks if bullets have crossed the display. If a", "bullets within list has been checked if index == len(bullets): break # returns", "the game window if bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0 or", "once all bullets within list has been checked if index == len(bullets): break", "coordinates (in relation to pixels) have crossed # the game window if bullets[index][0]", "0: break # checks if bullet's x or y coordinates (in relation to", "break # returns empty list if all bullets have been removed if len(bullets)", "len(bullets) == 0: return [] else: index = 0 while True: if len(bullets)", "if len(bullets) == 0: break # checks if bullet's x or y coordinates", "bullets.remove(bullets[index]) else: index += 1 #breaks once all bullets within list has been", "pixels) have crossed # the game window if bullets[index][0] < 0 or bullets[index][0]>", "def bullets_remove(bullets): # checks if bullets have crossed the display. If a bullet", "while True: if len(bullets) == 0: break # checks if bullet's x or", "to pixels) have crossed # the game window if bullets[index][0] < 0 or", "len(bullets) == 0: break # checks if bullet's x or y coordinates (in", "= 0 while True: if len(bullets) == 0: break # checks if bullet's", "#returns an empty list if the bullet list is empty if len(bullets) ==", "if bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900:", "bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index += 1 #breaks once all", "len(bullets): break # returns empty list if all bullets have been removed if", "== 0: return [] # returns the new list of bullets else: return", "break # checks if bullet's x or y coordinates (in relation to pixels)", "all bullets within list has been checked if index == len(bullets): break #", "crossed the display. If a bullet has passed the # current game window,", "index += 1 #breaks once all bullets within list has been checked if", "len(bullets) == 0: return [] # returns the new list of bullets else:", "#breaks once all bullets within list has been checked if index == len(bullets):", "have crossed # the game window if bullets[index][0] < 0 or bullets[index][0]> 900", "or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index +=", "900 or bullets[index][1]<0 or bullets[index][1] > 900: bullets.remove(bullets[index]) else: index += 1 #breaks", "list if the bullet list is empty if len(bullets) == 0: return []", "if all bullets have been removed if len(bullets) == 0: return [] #", "0: return [] # returns the new list of bullets else: return bullets", "the # current game window, remove it #returns an empty list if the", "the bullet list is empty if len(bullets) == 0: return [] else: index", "checked if index == len(bullets): break # returns empty list if all bullets" ]
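The row above appears to hold n-grams of a small helper from the AnshDubey1999/Customized-Space-Shooter repository (the <reponame> marker is a dataset artifact, not code) that culls bullets once they leave a 900x900 pixel game window. The reassembly below assumes each bullet is an [x, y] pair in pixels and that the visible windows cover the whole function; comments stay close to the original wording. It is plain Python and runs as-is.

def bullets_remove(bullets):
    # checks if bullets have crossed the display. If a bullet has passed the
    # current game window, remove it
    # returns an empty list if the bullet list is empty
    if len(bullets) == 0:
        return []
    else:
        index = 0
        while True:
            if len(bullets) == 0:
                break
            # checks if bullet's x or y coordinates (in relation to pixels) have
            # crossed the game window
            if bullets[index][0] < 0 or bullets[index][0] > 900 \
                    or bullets[index][1] < 0 or bullets[index][1] > 900:
                bullets.remove(bullets[index])
            else:
                index += 1
            # breaks once all bullets within the list have been checked
            if index == len(bullets):
                break
        # returns an empty list if all bullets have been removed
        if len(bullets) == 0:
            return []
        # returns the new list of bullets
        else:
            return bullets


# example: bullets_remove([[10, 20], [950, 40], [-5, 300]]) returns [[10, 20]]
# (the passed-in list is also trimmed in place)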
[]
[ "num + 1 print(str(num) + \".\", Listas.nombre[n]) op = int(input(\"Introduzca una opcion: \"))", "comun: Nombre Común :param orden: Orden :type n: int :type c: int :type", "num = 0 n=0 for n in range(24): \"\"\" La lista de los", "una función que despliega la lista de los 2 tipos de clases de", "int :type f: int :type g: int :type n_c: int :type o: int", "int :type c: int :type e: int :type f: int :type g: int", "Listas import os def clases(opc): \"\"\" Clases es una función que despliega la", ":type f: int :type g: int :type n_c: int :type o: int \"\"\"", "in range(24): \"\"\" La lista de los animales esta compuesto por: :param nombre:", "esta compuesto por: :param nombre: Nombre :param clase: Clase :param especie: Especie :param", "usuario todos los animales que hay en el parque y su taxonomía. \"\"\"", "e: int :type f: int :type g: int :type n_c: int :type o:", "+ \".\", Listas.nombre[n]) op = int(input(\"Introduzca una opcion: \")) os.system('cls') print(Listas.nombre[op - 1])", "clases\") num = 0 opm = int(input(\"Introduza una opcion: \")) if opm ==", "- 1]) print(Listas.clase[op - 1]) print(Listas.especies[op - 1]) print(Listas.familia[op - 1]) print(Listas.genero[op -", "clase: Clase :param especie: Especie :param familia: Familia :param genero: Genero :param nombre", "especie: Especie :param familia: Familia :param genero: Genero :param nombre comun: Nombre Común", "print(Listas.orden[n]) return num if __name__ == '__main__': print(\"Menu\") print(\"1. Lista de animales\") print(\"2.", "relacionado con el parque Nacional SUMMIT, mostrando al usuario todos los animales que", "tipos de clases de animales que hay en el parque, en este caso", "Nombre :param clase: Clase :param especie: Especie :param familia: Familia :param genero: Genero", "int :type e: int :type f: int :type g: int :type n_c: int", "su taxonomía. \"\"\" from app import Listas import os def clases(opc): \"\"\" Clases", ":param orden: Orden :type n: int :type c: int :type e: int :type", "= int(input(\"Introduzca una opcion: \")) os.system('cls') print(Listas.nombre[op - 1]) print(Listas.clase[op - 1]) print(Listas.especies[op", "1 print(str(num) + \".\", Listas.nombre[n]) op = int(input(\"Introduzca una opcion: \")) os.system('cls') print(Listas.nombre[op", "por clases\") num = 0 opm = int(input(\"Introduza una opcion: \")) if opm", "0 n=0 for n in range(24): \"\"\" La lista de los animales esta", "op = int(input(\"Introduzca una opcion: \")) os.system('cls') print(Listas.nombre[op - 1]) print(Listas.clase[op - 1])", "los 2 tipos de clases de animales que hay en el parque, en", "mostrando al usuario todos los animales que hay en el parque y su", "es una función que despliega la lista de los 2 tipos de clases", "Común :param orden: Orden :type n: int :type c: int :type e: int", "Lista de animales\") print(\"2. 
Lista de animales por clases\") num = 0 opm", "== \"Mammalia\": num = num + 1 print(str(num) + \".\", Listas.nombre[n]) print(Listas.especies[n]) print(Listas.familia[n])", "función que despliega la lista de los 2 tipos de clases de animales", "== 'Aves': num = num + 1 print(str(num) + \".\", Listas.nombre[n]) print(Listas.especies[n]) print(Listas.familia[n])", "SUMMIT, mostrando al usuario todos los animales que hay en el parque y", "0 for n in range(24): num = num + 1 print(str(num) + \".\",", ":param clase: Clase :param especie: Especie :param familia: Familia :param genero: Genero :param", "num + 1 print(str(num) + \".\", Listas.nombre[n]) print(Listas.especies[n]) print(Listas.familia[n]) print(Listas.genero[n]) print(Listas.nombre_comun[n]) print(Listas.orden[n]) elif", "\".\", Listas.nombre[n]) print(Listas.especies[n]) print(Listas.familia[n]) print(Listas.genero[n]) print(Listas.nombre_comun[n]) print(Listas.orden[n]) return num if __name__ == '__main__':", "La lista de los animales esta compuesto por: :param nombre: Nombre :param clase:", "__name__ == '__main__': print(\"Menu\") print(\"1. Lista de animales\") print(\"2. Lista de animales por", ":type e: int :type f: int :type g: int :type n_c: int :type", "num = num + 1 print(str(num) + \".\", Listas.nombre[n]) op = int(input(\"Introduzca una", "print(Listas.orden[op - 1]) else: os.system('cls') print(\"1.Aves\") print(\"2. Mammalia\") opc = int(input(\"Introduzca una opcion:", "1]) print(Listas.genero[op - 1]) print(Listas.nombre_comun[op - 1]) print(Listas.orden[op - 1]) else: os.system('cls') print(\"1.Aves\")", "1]) else: os.system('cls') print(\"1.Aves\") print(\"2. Mammalia\") opc = int(input(\"Introduzca una opcion: \")) clases(opc)", "o: int \"\"\" if opc == 1: if Listas.clase[n] == 'Aves': num =", "de animales por clases\") num = 0 opm = int(input(\"Introduza una opcion: \"))", "num if __name__ == '__main__': print(\"Menu\") print(\"1. Lista de animales\") print(\"2. Lista de", "print(Listas.familia[op - 1]) print(Listas.genero[op - 1]) print(Listas.nombre_comun[op - 1]) print(Listas.orden[op - 1]) else:", "print(Listas.nombre_comun[n]) print(Listas.orden[n]) return num if __name__ == '__main__': print(\"Menu\") print(\"1. 

"""
Project Summit es un programa relacionado con el parque Nacional SUMMIT, mostrando al usuario
todos los animales que hay en el parque y su taxonomía.
"""
from app import Listas
import os


def clases(opc):
    """
    Clases es una función que despliega la lista de los 2 tipos de clases de animales
    que hay en el parque, en este caso Aves y Mamalia
    """
    num = 0
    n = 0
    for n in range(24):
        """
        La lista de los animales esta compuesto por:
        :param nombre: Nombre
        :param clase: Clase
        :param especie: Especie
        :param familia: Familia
        :param genero: Genero
        :param nombre comun: Nombre Común
        :param orden: Orden
        :type n: int
        :type c: int
        :type e: int
        :type f: int
        :type g: int
        :type n_c: int
        :type o: int
        """
        if opc == 1:
            if Listas.clase[n] == 'Aves':
                num = num + 1
                print(str(num) + ".", Listas.nombre[n])
                print(Listas.especies[n])
                print(Listas.familia[n])
                print(Listas.genero[n])
                print(Listas.nombre_comun[n])
                print(Listas.orden[n])
        elif opc == 2:
            if Listas.clase[n] == "Mammalia":
                num = num + 1
                print(str(num) + ".", Listas.nombre[n])
                print(Listas.especies[n])
                print(Listas.familia[n])
                print(Listas.genero[n])
                print(Listas.nombre_comun[n])
                print(Listas.orden[n])
    return num


if __name__ == '__main__':
    print("Menu")
    print("1. Lista de animales")
    print("2. Lista de animales por clases")
    num = 0
    opm = int(input("Introduza una opcion: "))
    if opm == 1:
        n = 0
        for n in range(24):
            num = num + 1
            print(str(num) + ".", Listas.nombre[n])
        op = int(input("Introduzca una opcion: "))
        os.system('cls')
        print(Listas.nombre[op - 1])
        print(Listas.clase[op - 1])
        print(Listas.especies[op - 1])
        print(Listas.familia[op - 1])
        print(Listas.genero[op - 1])
        print(Listas.nombre_comun[op - 1])
        print(Listas.orden[op - 1])
    else:
        os.system('cls')
        print("1.Aves")
        print("2. Mammalia")
        opc = int(input("Introduzca una opcion: "))
[ "class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a time there were three little sisters;", "href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and", "well.</p> <p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4", "import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import BeautifulSoup soup", "dados = r.text from bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title)", "a time there were three little sisters; and their names were <a href=\"http://example.com/elsie\"", "= \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon", "requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's", "of a well.</p> <p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text", "print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The", "= BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo =", "BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='')", "upon a time there were three little sisters; and their names were <a", "three little sisters; and their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\"", "requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify())", "import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(),", "3 - WebScraping/Teste2.py '''import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc", "<html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a time", "Dormouse's story</b></p> <p class=\"story\">Once upon a time there were three little sisters; and", "lived at the bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import requests r =", "names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\"", "<body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a time there were three", 
"class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import BeautifulSoup", "r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import BeautifulSoup soup = BeautifulSoup(dados,", "their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a", "print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+') arquivo.write(soup.get_text()) import webbrowser", "they lived at the bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import requests r", "bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)'''", "<filename>Python 3 - WebScraping/Teste2.py '''import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)'''", "class=\"story\">Once upon a time there were three little sisters; and their names were", "id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the bottom of", "href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the bottom of a well.</p> <p", "class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the bottom", "id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived", "id=\"link3\">Tillie</a>; and they lived at the bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import", "= requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser')", "time there were three little sisters; and their names were <a href=\"http://example.com/elsie\" class=\"sister\"", "and they lived at the bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import requests", "story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a time there were", "'''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+') arquivo.write(soup.get_text()) import", "- WebScraping/Teste2.py '''import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc =", "bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados =", "class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the bottom of a well.</p> <p class=\"story\">...</p>\"\"\"", "r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head>", "from bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') 
'''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name)", "print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p", "were three little sisters; and their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a", "Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a time there", "<p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a time there were three little", "\"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once upon a", "story</b></p> <p class=\"story\">Once upon a time there were three little sisters; and their", "print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+') arquivo.write(soup.get_text()) import webbrowser webbrowser.open('www.google.com',", "sisters; and their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a>", "there were three little sisters; and their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>,", "requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p", "were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\"", "<a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>;", "soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo", "r.text from bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string)", "<a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived at", "a well.</p> <p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from", "'''import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The", "little sisters; and their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\"", "print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+') arquivo.write(soup.get_text()) import webbrowser webbrowser.open('www.google.com', new=0, autoraise=True)", "and <a href=\"http://example.com/tillie\" 
class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the bottom of a", "WebScraping/Teste2.py '''import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\"", "BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+')", "requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import BeautifulSoup soup =", "and their names were <a href=\"http://example.com/elsie\" class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and", "print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+') arquivo.write(soup.get_text()) import webbrowser webbrowser.open('www.google.com', new=0,", "html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p> <p class=\"story\">Once", "<a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the bottom of a well.</p>", "at the bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp')", "the bottom of a well.</p> <p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados", "<p class=\"story\">Once upon a time there were three little sisters; and their names", "print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's", "= requests.get('https://www.investopedia.com/terms/f/forex.asp') print(r.text) print(r.encoding) print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body>", "'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name) print(soup.title.string) print(soup.title.parent.name) print(soup.p)''' #print(soup.get_text(), end='') arquivo = open('Teste.txt','w+') arquivo.write(soup.get_text())", "print(r.headers) print(r.status_code)''' html_doc = \"\"\" <html><head><title>The Dormouse's story</title></head> <body> <p class=\"title\"><b>The Dormouse's story</b></p>", "<p class=\"story\">...</p>\"\"\" import requests r = requests.get('https://www.investopedia.com/terms/f/forex.asp') dados = r.text from bs4 import", "href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they lived at the", "class=\"sister\" id=\"link1\">Elsie</a>, <a href=\"http://example.com/lacie\" class=\"sister\" id=\"link2\">Lacie</a> and <a href=\"http://example.com/tillie\" class=\"sister\" id=\"link3\">Tillie</a>; and they", "= r.text from bs4 import BeautifulSoup soup = BeautifulSoup(dados, 'html.parser') '''print(soup.prettify()) print(soup.title) print(soup.title.name)" ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # #", "= is_alive or each.is_alive() timeout = (time.time() - begin) if timeout >= 5:", ">= 5: break processes = [] for each in processes: each.start() is_alive =", "License. # You may obtain a copy of the License at # #", "5: break processes = [] for each in processes: each.start() is_alive = True", "under the License. import multiprocessing import time class MultipleProcess(object): @staticmethod def process(func, *args):", "= [args[0][node]] + [arg for arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p)", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "+ [arg for arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes)", "class MultipleProcess(object): @staticmethod def process(func, *args): processes = [] for node in args[0]:", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= time.time() while is_alive: is_alive = False for each in processes: is_alive =", "this file except in compliance with the License. # You may obtain a", "processes: each.start() begin = time.time() while is_alive: is_alive = False for each in", "for each in processes: each.start() is_alive = True while is_alive: is_alive = False", "4: is_alive = True for each in processes: each.start() begin = time.time() while", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "the specific language governing permissions and # limitations under the License. import multiprocessing", "*args): processes = [] for node in args[0]: new_args = [args[0][node]] + [arg", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "[args[0][node]] + [arg for arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if", "# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0", "processes: each.start() is_alive = True while is_alive: is_alive = False for each in", "# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "(time.time() - begin) if timeout >= 5: break processes = [] for each", "arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive", "Google LLC # # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. # See the License for the specific", "permissions and # limitations under the License. import multiprocessing import time class MultipleProcess(object):", "in compliance with the License. 
# You may obtain a copy of the", "args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive = True for each in processes:", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the", "use this file except in compliance with the License. # You may obtain", "is_alive = is_alive or each.is_alive() timeout = (time.time() - begin) if timeout >=", "not use this file except in compliance with the License. # You may", "= True while is_alive: is_alive = False for each in processes: is_alive =", "License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0", "args[0]: new_args = [args[0][node]] + [arg for arg in args[1:]] p = multiprocessing.Process(target=func,", "timeout = (time.time() - begin) if timeout >= 5: break processes = []", "See the License for the specific language governing permissions and # limitations under", "[arg for arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) ==", "is_alive = False for each in processes: is_alive = is_alive or each.is_alive() timeout", "language governing permissions and # limitations under the License. import multiprocessing import time", "You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 #", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "specific language governing permissions and # limitations under the License. import multiprocessing import", "License, Version 2.0 (the \"License\"); # you may not use this file except", "limitations under the License. import multiprocessing import time class MultipleProcess(object): @staticmethod def process(func,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "time.time() while is_alive: is_alive = False for each in processes: is_alive = is_alive", "copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "processes.append(p) if len(processes) == 4: is_alive = True for each in processes: each.start()", "is_alive = True while is_alive: is_alive = False for each in processes: is_alive", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "OF ANY KIND, either express or implied. 
# See the License for the", "[] for each in processes: each.start() is_alive = True while is_alive: is_alive =", "2.0 (the \"License\"); # you may not use this file except in compliance", "in processes: each.start() is_alive = True while is_alive: is_alive = False for each", "# # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "each.is_alive() timeout = (time.time() - begin) if timeout >= 5: break processes =", "https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "# you may not use this file except in compliance with the License.", "of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for each in processes: each.start() begin = time.time() while is_alive: is_alive = False", "agreed to in writing, software # distributed under the License is distributed on", "def process(func, *args): processes = [] for node in args[0]: new_args = [args[0][node]]", "== 4: is_alive = True for each in processes: each.start() begin = time.time()", "timeout >= 5: break processes = [] for each in processes: each.start() is_alive", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "for arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4:", "each.start() begin = time.time() while is_alive: is_alive = False for each in processes:", "obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless", "(the \"License\"); # you may not use this file except in compliance with", "if len(processes) == 4: is_alive = True for each in processes: each.start() begin", "- begin) if timeout >= 5: break processes = [] for each in", "each in processes: each.start() is_alive = True while is_alive: is_alive = False for", "# # Unless required by applicable law or agreed to in writing, software", "and # limitations under the License. import multiprocessing import time class MultipleProcess(object): @staticmethod", "True while is_alive: is_alive = False for each in processes: is_alive = is_alive", "express or implied. # See the License for the specific language governing permissions", "each in processes: each.start() begin = time.time() while is_alive: is_alive = False for", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "[] for node in args[0]: new_args = [args[0][node]] + [arg for arg in", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "while is_alive: is_alive = False for each in processes: is_alive = is_alive or", "LLC # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in args[0]: new_args = [args[0][node]] + [arg for arg in args[1:]] p =", "in processes: is_alive = is_alive or each.is_alive() timeout = (time.time() - begin) if", "= True for each in processes: each.start() begin = time.time() while is_alive: is_alive", "or each.is_alive() timeout = (time.time() - begin) if timeout >= 5: break processes", "either express or implied. 
# See the License for the specific language governing", "= [] for node in args[0]: new_args = [args[0][node]] + [arg for arg", "processes: is_alive = is_alive or each.is_alive() timeout = (time.time() - begin) if timeout", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "each.start() is_alive = True while is_alive: is_alive = False for each in processes:", "multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive = True for each in", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "the License. import multiprocessing import time class MultipleProcess(object): @staticmethod def process(func, *args): processes", "at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "governing permissions and # limitations under the License. import multiprocessing import time class", "for each in processes: is_alive = is_alive or each.is_alive() timeout = (time.time() -", "License. import multiprocessing import time class MultipleProcess(object): @staticmethod def process(func, *args): processes =", "import time class MultipleProcess(object): @staticmethod def process(func, *args): processes = [] for node", "# Copyright 2019 Google LLC # # Licensed under the Apache License, Version", "file except in compliance with the License. # You may obtain a copy", "# limitations under the License. import multiprocessing import time class MultipleProcess(object): @staticmethod def", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "False for each in processes: is_alive = is_alive or each.is_alive() timeout = (time.time()", "node in args[0]: new_args = [args[0][node]] + [arg for arg in args[1:]] p", "License for the specific language governing permissions and # limitations under the License.", "a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "len(processes) == 4: is_alive = True for each in processes: each.start() begin =", "the License. # You may obtain a copy of the License at #", "= (time.time() - begin) if timeout >= 5: break processes = [] for", "if timeout >= 5: break processes = [] for each in processes: each.start()", "to in writing, software # distributed under the License is distributed on an", "= multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive = True for each", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "is_alive: is_alive = False for each in processes: is_alive = is_alive or each.is_alive()", "= False for each in processes: is_alive = is_alive or each.is_alive() timeout =", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "processes = [] for each in processes: each.start() is_alive = True while is_alive:", "implied. 
# See the License for the specific language governing permissions and #", "import multiprocessing import time class MultipleProcess(object): @staticmethod def process(func, *args): processes = []", "= [] for each in processes: each.start() is_alive = True while is_alive: is_alive", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "required by applicable law or agreed to in writing, software # distributed under", "@staticmethod def process(func, *args): processes = [] for node in args[0]: new_args =", "applicable law or agreed to in writing, software # distributed under the License", "begin = time.time() while is_alive: is_alive = False for each in processes: is_alive", "break processes = [] for each in processes: each.start() is_alive = True while", "is_alive = True for each in processes: each.start() begin = time.time() while is_alive:", "args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive = True", "p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive = True for", "time class MultipleProcess(object): @staticmethod def process(func, *args): processes = [] for node in", "True for each in processes: each.start() begin = time.time() while is_alive: is_alive =", "or agreed to in writing, software # distributed under the License is distributed", "multiprocessing import time class MultipleProcess(object): @staticmethod def process(func, *args): processes = [] for", "processes = [] for node in args[0]: new_args = [args[0][node]] + [arg for", "or implied. # See the License for the specific language governing permissions and", "for node in args[0]: new_args = [args[0][node]] + [arg for arg in args[1:]]", "in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,)) processes.append(p) if len(processes) == 4: is_alive =", "<filename>silk/utils/multipleprocess.py # Copyright 2019 Google LLC # # Licensed under the Apache License,", "new_args = [args[0][node]] + [arg for arg in args[1:]] p = multiprocessing.Process(target=func, args=(new_args,))", "each in processes: is_alive = is_alive or each.is_alive() timeout = (time.time() - begin)", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "begin) if timeout >= 5: break processes = [] for each in processes:", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "in processes: each.start() begin = time.time() while is_alive: is_alive = False for each", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "process(func, *args): processes = [] for node in args[0]: new_args = [args[0][node]] +", "with the License. # You may obtain a copy of the License at", "MultipleProcess(object): @staticmethod def process(func, *args): processes = [] for node in args[0]: new_args", "in writing, software # distributed under the License is distributed on an \"AS", "is_alive or each.is_alive() timeout = (time.time() - begin) if timeout >= 5: break", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
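A small usage sketch for MultipleProcess.process follows, based on what the loop over args[0] implies: the first argument behaves like a dict of nodes, and each worker receives a list made of [args[0][node]] plus the remaining positional arguments. The work function and the sample dict are invented for illustration and are not part of the original module.

# Illustrative only: how the helper above appears intended to be called.
def work(packed_args):
    node_value, label = packed_args    # matches [args[0][node]] + [arg for arg in args[1:]]
    print(label, node_value)

if __name__ == "__main__":
    nodes = {"node1": 1, "node2": 2, "node3": 3}   # placeholder node dict
    MultipleProcess.process(work, nodes, "demo")   # starts workers in batches of four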
[ "for position in positions if position.side == \"short\" ] # Compute how much", "None: self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset] ) else: self.last_entry_at[position.asset] = position.entry_at def _initialize_order_books(self):", "time.sleep(0.1) except Exception as e: logger.error(\"[!] Error: \", exc_info=True) raise Exception if __name__", "self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index() self.cached_pricing = pricing pricing = pricing.unstack().swaplevel(0, 1, axis=1)", "commission[\"spread\"]) ) / (1 - (commission[\"exit\"] + commission[\"spread\"])) ) if position.side == \"short\":", "= self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now, ) long_positions = [ position for position", "True: assert self.order_criterion == \"capital\" def _set_params(self): # Set params which has dependency", "ordered is None: return self.last_entry_at[position.asset] = now if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+]", "axis=0) ids = [ self.dataset_builder_params[\"asset_to_id\"][target_coin] for target_coin in self.tradable_coins ] return inputs, ids", "last_trade_on is None: return True else: if int((now - last_trade_on).total_seconds() // 60) >=", "(pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"] >= self.positive_probability_bins) ] negative_assets = self.tradable_coins[ (pred_dict[\"predictions\"] <=", "self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\":", "+ 1 + (commission[\"entry\"] + commission[\"spread\"]) ) / (1 - (commission[\"exit\"] + commission[\"spread\"]))", "base_features = build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for target_coin in self.tradable_coins:", "from dataclasses import dataclass from config import CFG from trainer.models import PredictorV1 from", "Entry with capital base cache_to_order = nan_to_zero( value=(capital * self.entry_ratio) ) # Handle", "predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1 else:", ") if ordered is None: return self.last_entry_at[position.asset] = now if self.exit_if_achieved is True:", "self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission is not True: commission[\"entry\"] = 0 commission[\"exit\"]", "predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start: demon of trader\") n_traded = 0", ") positions = self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now, ) long_positions = [ position", "\"longshort\"): for order_asset in negative_assets: self.entry_order( positions=positions, asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing, now=now, )", "int((now - last_sync_on).total_seconds() // 60) if sync_min_delta == 1: last_trade_on = self.usecase.get_last_trade_on() if", "None if self.skip_executable_order_check is True: assert self.order_criterion == \"capital\" def _set_params(self): # Set", "Set init to handle limit order self.assets_to_limit_order = [] # 
Entry order if", "): return True return False def check_if_executable_order(self, position): if self.skip_executable_order_check is True: is_enough_ammount", "= last_sync_on - pd.Timedelta( minutes=(1320 + CFG.EXP_MODEL_PARAMS[\"lookback_window\"] - 1) ) query_end_on = last_sync_on", "# Currently update_position_if_already_have is not supported. already_have = self.check_if_already_have( positions=positions, position=position ) if", "return True else: if int((now - last_trade_on).total_seconds() // 60) >= 1: return True", "= CFG.PROBABILITY_BINS # Set data builder params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [", "Handle max_holding_minutes if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)},", "positions, cache_to_order, positive_assets, negative_assets, pricing, predictions, now, ): # Set init to handle", "= 0 if position.side == \"long\": assert prediction >= 0 price_to_achieve = (", "= CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"]", "not in self.assets_to_limit_order: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty,", "time import ccxt import requests import urllib3 import joblib import pandas as pd", "self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins = ( probability_bins.loc[", "self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded", "= getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase = Usecase() possible_in_debt", "positions=positions, asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) # Limit order if len(self.assets_to_limit_order) >", "<filename>services/src/trader/trader_v1.py import os import gc import time import ccxt import requests import urllib3", "# Set init to handle limit order self.assets_to_limit_order = [] # Entry order", "== 0: return # if opposite position exists, we dont entry if (", "# Set data builder params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"),", "self.order_criterion == \"cache\": if cache > 0: cache_to_order = nan_to_zero( value=(cache * self.entry_ratio)", "import os import gc import time import ccxt import requests import urllib3 import", "self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp,", "return inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on - pd.Timedelta( minutes=(1320 +", "now: pd.Timestamp): if last_sync_on is None: return False sync_min_delta = int((now - last_sync_on).total_seconds()", "probability_bins.loc[ self.negative_probability_threshold ][index] def _build_dataset_builder(self): 
feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\"))", "if position.is_exited is not True ] return positions def check_if_opposite_position_exists(self, positions, order_asset, order_side):", "CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ] self.negative_probability_threshold = CFG.REPORT_PARAMS[ \"negative_probability_threshold\" ] self.adjust_prediction =", "positions, order_asset, order_side): if order_side == \"long\": opposite_side = \"short\" if order_side ==", "self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", ) def _store_last_entry_at(self): joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH)", "for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) if", "(position.side == \"short\") and (position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit:", "if n_traded == 60: self.custom_cli = CustomClient() n_traded = 0 # Main try:", "features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs = np.stack(inputs, axis=0) ids", "continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price", "= { key.replace(\"-\", \"/\"): value for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } self.dataset_builder_params[\"id_to_asset\"] =", "supported. 
already_have = self.check_if_already_have( positions=positions, position=position ) if already_have is True: self.last_entry_at[position.asset] =", "( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold ][index]", "# Entry with capital base cache_to_order = nan_to_zero( value=(capital * self.entry_ratio) ) #", "} def _set_test_params(self): if CFG.TEST_MODE is True: assert self.custom_cli.test_mode is True self.entry_ratio =", "Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have is not supported.", "{} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"), column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"]", ") / (1 + (commission[\"exit\"] + commission[\"spread\"])) ) return price_to_achieve def entry_order(self, positions,", "CFG from trainer.models import PredictorV1 from database.usecase import Usecase from exchange.custom_client import CustomClient", "is None: return False sync_min_delta = int((now - last_sync_on).total_seconds() // 60) if sync_min_delta", "else: # Get extra 1 candle, cause it has potential to be changed.", "= Usecase() possible_in_debt = False commission = {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\": 0.0004}", "in self.assets_to_limit_order: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve(", "return entry_price = pricing[asset] qty = cache_to_order / entry_price position = Position( asset=asset,", "self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs", "is True: self.last_entry_at[position.asset] = now return executable_order = self.check_if_executable_order(position=position) if executable_order is True:", "= {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check = True # To prevent", "pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing", "0).all().all() self.positive_entry_bins = None self.negative_entry_bins = None self.exit_bins = None self.positive_probability_bins = None", "self.position_side in (\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order,", "target_coin in self.tradable_coins ] return inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on", "positions: if (exist_position.asset == position.asset) and ( exist_position.side == position.side ): return True", ") if position.side == \"short\": assert prediction <= 0 price_to_achieve = ( entry_price", "self.negative_entry_bins = None self.exit_bins = None self.positive_probability_bins = None self.negative_probability_bins = None if", "else: if int((now - last_trade_on).total_seconds() // 60) >= 1: return True return 
False", "= pricing[asset] qty = cache_to_order / entry_price position = Position( asset=asset, side=side, qty=qty,", "self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, max_holding\") continue # Handle exit signal", "= bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_cache & is_enough_ammount def compute_price_to_achieve(self, position,", "self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if", "build_positive_and_negative_assets(self, pred_dict): # Set assets which has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >=", "= last_sync_on if self.cached_pricing is None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else:", "if self.order_criterion == \"cache\": if cache > 0: cache_to_order = nan_to_zero( value=(cache *", "position.qty commission_to_order = cache_to_order * ( self.commission[\"entry\"] + self.commission[\"spread\"] ) return cache_to_order +", "= build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for target_coin in self.tradable_coins: to_input", "assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions,", "pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) (", "opposite_side = \"long\" for exist_position in positions: if (exist_position.asset == order_asset) and (", "self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"], ) return pd.concat([features, class_features], axis=1)[ self.dataset_builder_params[\"features_columns\"] ].sort_index() def _build_inputs(self, features):", "Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True) for position in positions: if self.last_entry_at[position.asset] is not None:", "build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for target_coin in self.tradable_coins: to_input =", "if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, max_holding\") continue", "end_on=query_end_on ) pricing = pd.concat( [ self.cached_pricing[ query_start_on : self.cached_pricing.index.levels[0][-2] ], pricing, ]", "nan_to_zero( value=(capital * self.entry_ratio) ) # Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions,", "logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side == \"short\") and (position.asset in positive_assets):", "\"/\") for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if CFG.TEST_MODE is True:", "pricing, ] ).sort_index() self.cached_pricing = pricing pricing = pricing.unstack().swaplevel(0, 
1, axis=1) features =", "target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1)", "accept False adjust_prediction assert self.adjust_prediction is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS", "return pd.concat([features, class_features], axis=1)[ self.dataset_builder_params[\"features_columns\"] ].sort_index() def _build_inputs(self, features): features, base_features = build_X_and_BX(", "predictions=None): if predictions is not None: prediction = predictions[position.asset] else: if position.side ==", "= self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order exists, we skip it. if len(orders)", "== \"long\": opposite_side = \"short\" if order_side == \"short\": opposite_side = \"long\" for", "opposite_side ): return True return False def compute_cost_to_order(self, position): cache_to_order = position.entry_price *", "database.usecase import Usecase from exchange.custom_client import CustomClient from .utils import nan_to_zero from logging", "] return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp): if last_sync_on is", "{capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" ) if self.compound_interest is", "+ self.commission[\"spread\"] ) return cache_to_order + commission_to_order def check_if_already_have(self, positions, position): for exist_position", "1: last_trade_on = self.usecase.get_last_trade_on() if last_trade_on is None: return True else: if int((now", "self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None:", "feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"],", "column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] =", "probability_bins, index): assert (prediction_abs_bins >= 0).all().all() assert (probability_bins >= 0).all().all() self.positive_entry_bins = None", "= [ position for position in positions if position.side == \"long\" ] short_positions", "it has potential to be changed. 
pricing = self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on ) pricing", "urllib3 import joblib import pandas as pd import numpy as np from dataclasses", "import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase", "+ 1 - (commission[\"entry\"] + commission[\"spread\"]) ) / (1 + (commission[\"exit\"] + commission[\"spread\"]))", "( (prediction * self.achieve_ratio) + 1 + (commission[\"entry\"] + commission[\"spread\"]) ) / (1", "assert self.custom_cli.test_mode is True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index): assert", "else: cache_to_order = 0 elif self.order_criterion == \"capital\": # Entry with capital base", "= PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", ) def _store_last_entry_at(self): joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH) def", "logger.info(f\"[+] Entry: {str(position)}\") def handle_entry( self, positions, cache_to_order, positive_assets, negative_assets, pricing, predictions, now,", "if os.path.exists(LAST_ENTRY_AT_FILE_PATH): self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded last_entry_at\") else: self.last_entry_at = {key:", "Exit: {str(position)}, opposite\") continue # Delete exited positions positions = [ position for", "self.negative_entry_bins = -( prediction_abs_bins.loc[ int(self.negative_entry_threshold.split(\"*\")[0]) ] * float(self.negative_entry_threshold.split(\"*\")[-1]) )[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[", "is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def handle_entry( self, positions, cache_to_order, positive_assets, negative_assets,", "self.order_criterion == \"capital\" def _set_params(self): # Set params which has dependency on trader", "(position.side == \"long\") and (position.asset in positive_assets): continue if (position.side == \"short\") and", "prediction_abs_bins.loc[ self.positive_entry_threshold ][index] if isinstance(self.negative_entry_threshold, str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins = -(", "= (now - position_entry_at).total_seconds() // 60 # Handle min_holding_minutes if passed_minutes <= self.min_holding_minutes:", "/ (1 - (commission[\"exit\"] + commission[\"spread\"])) ) if position.side == \"short\": assert prediction", "assert self.order_criterion == \"capital\" def _set_params(self): # Set params which has dependency on", "= pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on)", "entry_price, predictions=None): if predictions is not None: prediction = predictions[position.asset] else: if position.side", "= probability_bins.loc[ self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins =", "Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" ) if self.compound_interest is False: cache_to_order = self.entry_ratio else: if", "self.cached_pricing[ query_start_on : self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index() self.cached_pricing = pricing 
pricing =", "CFG.PROBABILITY_BINS # Set data builder params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\",", "# Keep position if matched if (position.side == \"long\") and (position.asset in positive_assets):", "\"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] = { key.replace(\"-\", \"/\"): value for", "for target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0,", "= CFG.REPORT_PARAMS[\"compound_interest\"] self.order_criterion = CFG.REPORT_PARAMS[\"order_criterion\"] self.exit_if_achieved = CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission =", "from .utils import nan_to_zero from logging import getLogger from common_utils_svc import initialize_trader_logger, Position", "if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[ int(self.positive_entry_threshold.split(\"*\")[0]) ] * float(self.positive_entry_threshold.split(\"*\")[-1]) )[index]", "order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start:", "== \"short\": opposite_side = \"long\" for exist_position in positions: if (exist_position.asset == order_asset)", "price_to_achieve def entry_order(self, positions, asset, side, cache_to_order, pricing, now): if cache_to_order == 0:", "(prediction_abs_bins >= 0).all().all() assert (probability_bins >= 0).all().all() self.positive_entry_bins = None self.negative_entry_bins = None", "position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_cache & is_enough_ammount def compute_price_to_achieve(self, position, entry_price, predictions=None):", "CFG.REPORT_PARAMS[\"adjust_prediction\"] # Currently we accept False adjust_prediction assert self.adjust_prediction is False self.prediction_abs_bins =", ") return pred_dict def build_positive_and_negative_assets(self, pred_dict): # Set assets which has signals positive_assets", "return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp): if last_sync_on is None:", "which has dependency on trader logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio", "on trader logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] *", "== \"short\": assert prediction <= 0 price_to_achieve = ( entry_price * ( (prediction", "def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if", "positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, max_holding\") continue # Handle exit signal if", "= ( entry_price * ( (prediction * self.achieve_ratio) + 1 + (commission[\"entry\"] +", "def _initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) #", "candle, cause 
it has potential to be changed. pricing = self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on", ">= 1: return True return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order(", "* self.achieve_ratio) + 1 - (commission[\"entry\"] + commission[\"spread\"]) ) / (1 + (commission[\"exit\"]", "CFG.TEST_MODE is True: assert self.custom_cli.test_mode is True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins,", "True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins >= 0).all().all()", "if (position.side == \"long\") and (position.asset in positive_assets): continue if (position.side == \"short\")", "To prevent api limitation def __post_init__(self): self.custom_cli = CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params()", "CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we", "isinstance(self.negative_entry_threshold, str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins = -( prediction_abs_bins.loc[ int(self.negative_entry_threshold.split(\"*\")[0]) ] *", "not supported. already_have = self.check_if_already_have( positions=positions, position=position ) if already_have is True: self.last_entry_at[position.asset]", "label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def", "max_holding\") continue # Handle exit signal if (position.side == \"long\") and (position.asset in", "= pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs = np.stack(inputs,", "def handle_entry( self, positions, cache_to_order, positive_assets, negative_assets, pricing, predictions, now, ): # Set", "in self.negative_probability_threshold: self.negative_probability_bins = ( probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ] * float(self.negative_probability_threshold.split(\"*\")[-1]) )[index] else: self.negative_probability_bins", "= 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins >= 0).all().all() assert (probability_bins", "cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\"", "entry_price * ( (prediction * self.achieve_ratio) + 1 + (commission[\"entry\"] + commission[\"spread\"]) )", "None: prediction = predictions[position.asset] else: if position.side == \"long\": prediction = self.positive_entry_bins[position.asset] if", "self.positive_entry_bins) & (pred_dict[\"probabilities\"] >= self.positive_probability_bins) ] negative_assets = self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) &", "Handle relogin if n_traded == 60: self.custom_cli = CustomClient() n_traded = 0 #", "prediction = predictions[position.asset] else: if position.side == \"long\": prediction = self.positive_entry_bins[position.asset] 
if position.side", "limitation def __post_init__(self): self.custom_cli = CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins,", "already limit order exists, we skip it. if len(orders) >= 1: continue assert", "cache > 0: cache_to_order = nan_to_zero( value=(cache * self.entry_ratio) ) else: cache_to_order =", "continue # Handle max_holding_minutes if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-]", "prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins >= 0).all().all() assert (probability_bins >= 0).all().all() self.positive_entry_bins =", "orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order exists, we skip it. if", "= self._build_features(pricing=pricing) inputs, ids = self._build_inputs(features=features) pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] )", "self.positive_probability_bins = None self.negative_probability_bins = None if isinstance(self.positive_entry_threshold, str): if \"*\" in self.positive_entry_threshold:", "changed. pricing = self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on ) pricing = pd.concat( [ self.cached_pricing[ query_start_on", "is True: assert self.order_criterion == \"capital\" def _set_params(self): # Set params which has", "| Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" ) if self.compound_interest is False:", "exchange.custom_client import CustomClient from .utils import nan_to_zero from logging import getLogger from common_utils_svc", "= {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"), column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"] ]", "import time import ccxt import requests import urllib3 import joblib import pandas as", "order cache_dict = self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital:", "if ( self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side ) is True ): return entry_price =", "[ position for position in positions if position.is_exited is not True ] return", "+ (commission[\"entry\"] + commission[\"spread\"]) ) / (1 - (commission[\"exit\"] + commission[\"spread\"])) ) if", "0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if position.asset not in self.assets_to_limit_order:", "LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase = Usecase() possible_in_debt = False commission", "- 1) ) query_end_on = last_sync_on if self.cached_pricing is None: pricing = self.usecase.get_pricing(", "(pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self,", "import DatasetBuilder from trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\"", "initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase = 
Usecase() possible_in_debt = False", "for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if CFG.TEST_MODE is True: assert", "probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold ][index] if", "is not None: self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset] ) else: self.last_entry_at[position.asset] = position.entry_at", "\"short\") and (position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\")", "/ (1 + (commission[\"exit\"] + commission[\"spread\"])) ) return price_to_achieve def entry_order(self, positions, asset,", "if order_side == \"long\": opposite_side = \"short\" if order_side == \"short\": opposite_side =", "= DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR,", "dataset_builder.build_dataset import DatasetBuilder from trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH =", "# Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1 else: time.sleep(0.1) except Exception", ") # Limit order if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position", "nan_to_zero( value=(cache * self.entry_ratio) ) else: cache_to_order = 0 elif self.order_criterion == \"capital\":", "if self.skip_executable_order_check is True: assert self.order_criterion == \"capital\" def _set_params(self): # Set params", "self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset] ) else: self.last_entry_at[position.asset] = position.entry_at def _initialize_order_books(self): positions", "= 0 while True: # Handle relogin if n_traded == 60: self.custom_cli =", "\"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now, )", "probability_bins.loc[ self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins = (", ") else: # Get extra 1 candle, cause it has potential to be", "= np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs = np.stack(inputs, axis=0) ids = [ self.dataset_builder_params[\"asset_to_id\"][target_coin]", "(1 + (commission[\"exit\"] + commission[\"spread\"])) ) return price_to_achieve def entry_order(self, positions, asset, side,", "for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } self.dataset_builder_params[\"id_to_asset\"] = { value: key.replace(\"-\", \"/\") for", "0: return # if opposite position exists, we dont entry if ( self.check_if_opposite_position_exists(", "import joblib import pandas as pd import numpy as np from dataclasses import", "self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"] >= self.positive_probability_bins) ] negative_assets = self.tradable_coins[ 
(pred_dict[\"predictions\"]", "logger.info(f\"[-] Exit: {str(position)}, opposite\") continue # Delete exited positions positions = [ position", "= 0 commission[\"exit\"] = 0 commission[\"spread\"] = 0 if position.side == \"long\": assert", "gc import time import ccxt import requests import urllib3 import joblib import pandas", "possible_in_debt = False commission = {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check =", "Handle min_holding_minutes if passed_minutes <= self.min_holding_minutes: continue # Handle max_holding_minutes if passed_minutes >=", "= now if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def handle_entry( self,", "= pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books()", "= self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >= 0) is_enough_ammount = bool( position.qty", "\"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase = Usecase() possible_in_debt = False commission = {\"entry\":", "if \"*\" in self.exit_threshold: self.exit_bins = ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])] * float(self.exit_threshold.split(\"*\")[-1]) )[index] else: self.exit_bins", "exists, we dont entry if ( self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side ) is True", "self.entry_ratio else: if self.order_criterion == \"cache\": if cache > 0: cache_to_order = nan_to_zero(", "self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"), column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"]", "(position.side == \"short\") and (position.asset in negative_assets): continue position_entry_at = self.last_entry_at[position.asset] passed_minutes =", "to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs =", "class TraderV1: usecase = Usecase() possible_in_debt = False commission = {\"entry\": 0.0004, \"exit\":", "self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset] ) else: self.last_entry_at[position.asset] =", "cache = self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >= 0)", "self.last_entry_at[position.asset] = position.entry_at def _initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders", "// 60) if sync_min_delta == 1: last_trade_on = self.usecase.get_last_trade_on() if last_trade_on is None:", "<= 0 price_to_achieve = ( entry_price * ( (prediction * self.achieve_ratio) + 1", "order_asset, order_side): if order_side == \"long\": opposite_side = \"short\" if order_side == \"short\":", "self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now, ) long_positions = [ position for position in", "side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) if self.position_side in (\"short\", \"longshort\"): for order_asset 
in", "run(self): logger.info(f\"[O] Start: demon of trader\") n_traded = 0 while True: # Handle", "* self.entry_ratio) ) else: cache_to_order = 0 elif self.order_criterion == \"capital\": # Entry", "[ position for position in positions if position.side == \"short\" ] # Compute", "= self.build_positive_and_negative_assets(pred_dict=pred_dict) # Handle exit positions = self.custom_cli.get_position_objects( with_entry_at=False ) positions = self.handle_exit(", "CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] = { key.replace(\"-\", \"/\"): value for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items()", "entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start: demon of trader\") n_traded =", "position for position in positions if position.is_exited is not True ] return positions", "value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if CFG.TEST_MODE is True: assert self.custom_cli.test_mode is", "if isinstance(self.positive_entry_threshold, str): if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[ int(self.positive_entry_threshold.split(\"*\")[0]) ]", "== \"capital\" def _set_params(self): # Set params which has dependency on trader logic", "signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"] >= self.positive_probability_bins) ] negative_assets", "order_side=side ) is True ): return entry_price = pricing[asset] qty = cache_to_order /", "position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start: demon", "= bool((cache - cost) >= 0) is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] )", "pricing.unstack().swaplevel(0, 1, axis=1) features = self._build_features(pricing=pricing) inputs, ids = self._build_inputs(features=features) pred_dict = self.model.predict(", "CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for", "self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) if self.position_side in (\"short\", \"longshort\"):", "import nan_to_zero from logging import getLogger from common_utils_svc import initialize_trader_logger, Position from dataset_builder.build_dataset", "probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check is", "self.last_entry_at[position.asset] = now if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def handle_entry(", ") if self.position_side in (\"short\", \"longshort\"): for order_asset in negative_assets: self.entry_order( positions=positions, asset=order_asset,", "\"short\": assert prediction <= 0 price_to_achieve = ( entry_price * ( (prediction *", "not True: commission[\"entry\"] = 0 commission[\"exit\"] = 0 commission[\"spread\"] = 0 if position.side", "== \"long\") and (position.asset in negative_assets): 
self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)},", "True: is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"]", "import urllib3 import joblib import pandas as pd import numpy as np from", "None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get extra 1 candle,", ") is True ): return entry_price = pricing[asset] qty = cache_to_order / entry_price", "= CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder()", "{ key.replace(\"-\", \"/\"): value for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } self.dataset_builder_params[\"id_to_asset\"] = {", "= position.entry_at def _initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders =", "0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price ), ) logger.info(f\"[O] Info:", "Currently we accept only 0 assert self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold", "(\"short\", \"longshort\"): for order_asset in negative_assets: self.entry_order( positions=positions, asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing, now=now,", "\"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check = True # To prevent api limitation def", "positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) if self.position_side in (\"short\", \"longshort\"): for", "][index] if isinstance(self.exit_threshold, str): if \"*\" in self.exit_threshold: self.exit_bins = ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])] *", "is True return def handle_exit(self, positions, positive_assets, negative_assets, now): for position_idx, position in", "0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[", "key.replace(\"-\", \"/\") for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if CFG.TEST_MODE is", "= self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get extra 1 candle, cause it", "0 elif self.order_criterion == \"capital\": # Entry with capital base cache_to_order = nan_to_zero(", "self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side ) is True ): return entry_price = pricing[asset] qty", ") return cache_to_order + commission_to_order def check_if_already_have(self, positions, position): for exist_position in positions:", "CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ]", "\"long\" ] short_positions = [ 
position for position in positions if position.side ==", "return False def compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty commission_to_order = cache_to_order", "exist_position in positions: if (exist_position.asset == position.asset) and ( exist_position.side == position.side ):", "= self.check_if_executable_order(position=position) if executable_order is True: ordered = self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty,", "in self.exit_threshold: self.exit_bins = ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])] * float(self.exit_threshold.split(\"*\")[-1]) )[index] else: self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index]", "is not True ] return positions def check_if_opposite_position_exists(self, positions, order_asset, order_side): if order_side", "key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } self.dataset_builder_params[\"id_to_asset\"] = { value: key.replace(\"-\", \"/\") for key,", "continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price,", "ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None: assert", "long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" ) if self.compound_interest is False: cache_to_order =", "\"short\": prediction = self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission is not True: commission[\"entry\"]", "== 60: self.custom_cli = CustomClient() n_traded = 0 # Main try: # Use", "cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals:", "if ordered is None: assert CFG.TEST_MODE is True return def handle_exit(self, positions, positive_assets,", "if position.asset not in self.assets_to_limit_order: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\",", "self._build_inputs(features=features) pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return pred_dict def build_positive_and_negative_assets(self, pred_dict):", "\"long\": prediction = self.positive_entry_bins[position.asset] if position.side == \"short\": prediction = self.negative_entry_bins[position.asset] commission =", "self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check is True: assert self.order_criterion == \"capital\"", "{ value: key.replace(\"-\", \"/\") for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if", "self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order", "now = pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict =", "pricing = pd.concat( [ self.cached_pricing[ query_start_on : self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index() self.cached_pricing", "pricing = 
self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on ) pricing = pd.concat( [ self.cached_pricing[ query_start_on :", "return False sync_min_delta = int((now - last_sync_on).total_seconds() // 60) if sync_min_delta == 1:", "0 commission[\"exit\"] = 0 commission[\"spread\"] = 0 if position.side == \"long\": assert prediction", "now=now, ) # Limit order if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for", "Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1 else: time.sleep(0.1) except Exception as", "now): if cache_to_order == 0: return # if opposite position exists, we dont", "if position.side == \"short\" ] # Compute how much use cache to order", ") def run(self): logger.info(f\"[O] Start: demon of trader\") n_traded = 0 while True:", "Limit order if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions:", "is True: ordered = self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is", "self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\")", "True # To prevent api limitation def __post_init__(self): self.custom_cli = CustomClient() self.tradable_coins =", "base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] = { key.replace(\"-\", \"/\"): value", "True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def handle_entry( self, positions, cache_to_order, positive_assets, negative_assets, pricing,", "is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS # Set data builder params", "float(self.positive_entry_threshold.split(\"*\")[-1]) )[index] else: self.positive_entry_bins = prediction_abs_bins.loc[ self.positive_entry_threshold ][index] if isinstance(self.negative_entry_threshold, str): if \"*\"", "self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get extra 1 candle, cause it has", "False def compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty commission_to_order = cache_to_order *", "order_side == \"short\": opposite_side = \"long\" for exist_position in positions: if (exist_position.asset ==", "self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, max_holding\") continue # Handle exit", "if self.cached_pricing is None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get", "positions: if (exist_position.asset == order_asset) and ( exist_position.side == opposite_side ): return True", "exist_position.side == position.side ): return True return False def check_if_executable_order(self, position): if self.skip_executable_order_check", "self.assets_to_limit_order: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position,", "logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\") self.min_holding_minutes = 
CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes = CFG.REPORT_PARAMS[\"max_holding_minutes\"] self.compound_interest =", "True ] return positions def check_if_opposite_position_exists(self, positions, order_asset, order_side): if order_side == \"long\":", "pricing = pricing.unstack().swaplevel(0, 1, axis=1) features = self._build_features(pricing=pricing) inputs, ids = self._build_inputs(features=features) pred_dict", "False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, )", ") return is_enough_cache & is_enough_ammount def compute_price_to_achieve(self, position, entry_price, predictions=None): if predictions is", "- last_sync_on).total_seconds() // 60) if sync_min_delta == 1: last_trade_on = self.usecase.get_last_trade_on() if last_trade_on", "continue if (position.side == \"short\") and (position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited = True", "positions if position.side == \"short\" ] # Compute how much use cache to", "if position.side == \"short\": prediction = self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission is", "(prediction * self.achieve_ratio) + 1 + (commission[\"entry\"] + commission[\"spread\"]) ) / (1 -", "= self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"], ) return pd.concat([features, class_features], axis=1)[ self.dataset_builder_params[\"features_columns\"] ].sort_index() def _build_inputs(self,", "= self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ |", "] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset", ") return pd.concat([features, class_features], axis=1)[ self.dataset_builder_params[\"features_columns\"] ].sort_index() def _build_inputs(self, features): features, base_features =", "True return def handle_exit(self, positions, positive_assets, negative_assets, now): for position_idx, position in enumerate(positions):", "self.last_entry_at[position.asset] = now return executable_order = self.check_if_executable_order(position=position) if executable_order is True: ordered =", ">= 0).all().all() self.positive_entry_bins = None self.negative_entry_bins = None self.exit_bins = None self.positive_probability_bins =", "entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have is not supported. 
already_have = self.check_if_already_have( positions=positions,", "pandas as pd import numpy as np from dataclasses import dataclass from config", "isinstance(self.positive_entry_threshold, str): if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[ int(self.positive_entry_threshold.split(\"*\")[0]) ] *", "continue # Handle exit signal if (position.side == \"long\") and (position.asset in negative_assets):", "= CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept only 0 assert self.max_n_updated == 0 self.positive_entry_threshold", "= True logger.info(f\"[-] Exit: {str(position)}, max_holding\") continue # Handle exit signal if (position.side", "0.0002, \"spread\": 0.0004} skip_executable_order_check = True # To prevent api limitation def __post_init__(self):", "= ( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold", "exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered", "is not supported. already_have = self.check_if_already_have( positions=positions, position=position ) if already_have is True:", "pd.concat([features, class_features], axis=1)[ self.dataset_builder_params[\"features_columns\"] ].sort_index() def _build_inputs(self, features): features, base_features = build_X_and_BX( features=features.astype(\"float32\"),", "{CFG.LEVERAGE}\") self.min_holding_minutes = CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes = CFG.REPORT_PARAMS[\"max_holding_minutes\"] self.compound_interest = CFG.REPORT_PARAMS[\"compound_interest\"] self.order_criterion = CFG.REPORT_PARAMS[\"order_criterion\"]", "self.positive_entry_bins = prediction_abs_bins.loc[ self.positive_entry_threshold ][index] if isinstance(self.negative_entry_threshold, str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins", "CFG.REPORT_PARAMS[\"max_holding_minutes\"] self.compound_interest = CFG.REPORT_PARAMS[\"compound_interest\"] self.order_criterion = CFG.REPORT_PARAMS[\"order_criterion\"] self.exit_if_achieved = CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"]", "trader\") n_traded = 0 while True: # Handle relogin if n_traded == 60:", "is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >=", "0.0004, \"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check = True # To prevent api limitation", "cost = self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >= 0) is_enough_ammount = bool(", "* float(self.negative_probability_threshold.split(\"*\")[-1]) )[index] else: self.negative_probability_bins = probability_bins.loc[ self.negative_probability_threshold ][index] def _build_dataset_builder(self): feature_scaler =", "ordered = self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None: return", "# Delete exited positions positions = [ position for position in positions if", "!= 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, 
price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), )", "self.adjust_prediction is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS # Set data builder", "pricing=pricing, now=now, ) # Limit order if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False)", ") return price_to_achieve def entry_order(self, positions, asset, side, cache_to_order, pricing, now): if cache_to_order", "{str(position)}, max_holding\") continue # Handle exit signal if (position.side == \"long\") and (position.asset", "predictions is not None: prediction = predictions[position.asset] else: if position.side == \"long\": prediction", "position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position) is_enough_cache", "logger.error(\"[!] Error: \", exc_info=True) raise Exception if __name__ == \"__main__\": import fire fire.Fire(TraderV1)", "# Handle max_holding_minutes if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit:", "CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept only 0", "init to handle limit order self.assets_to_limit_order = [] # Entry order if self.position_side", "positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded", "int((now - last_trade_on).total_seconds() // 60) >= 1: return True return False def exit_order(self,", "query_start_on : self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index() self.cached_pricing = pricing pricing = pricing.unstack().swaplevel(0,", "self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >= 0) is_enough_ammount =", "* ( (prediction * self.achieve_ratio) + 1 + (commission[\"entry\"] + commission[\"spread\"]) ) /", "= self._build_inputs(features=features) pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return pred_dict def build_positive_and_negative_assets(self,", "traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1 else: time.sleep(0.1) except Exception as e:", "order_side): if order_side == \"long\": opposite_side = \"short\" if order_side == \"short\": opposite_side", "sync_min_delta = int((now - last_sync_on).total_seconds() // 60) if sync_min_delta == 1: last_trade_on =", "def check_if_executable_order(self, position): if self.skip_executable_order_check is True: is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset]", "dataclass from config import CFG from trainer.models import PredictorV1 from database.usecase import Usecase", "self.positive_probability_bins = ( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins = probability_bins.loc[", "def _build_dataset_builder(self): 
feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder(", "commission[\"spread\"]) ) / (1 + (commission[\"exit\"] + commission[\"spread\"])) ) return price_to_achieve def entry_order(self,", "<= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self, last_sync_on:", "we dont entry if ( self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side ) is True ):", "Set data builder params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"), column[1])", "Start: demon of trader\") n_traded = 0 while True: # Handle relogin if", "as pd import numpy as np from dataclasses import dataclass from config import", "None: return True else: if int((now - last_trade_on).total_seconds() // 60) >= 1: return", "== position.asset) and ( exist_position.side == position.side ): return True return False def", "logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" )", "label_scaler=label_scaler, ) def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", )", "== order_asset) and ( exist_position.side == opposite_side ): return True return False def", "\"long\": assert prediction >= 0 price_to_achieve = ( entry_price * ( (prediction *", "has dependency on trader logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio =", "features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\",", "positions, asset, side, cache_to_order, pricing, now): if cache_to_order == 0: return # if", "self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check is True: assert self.order_criterion ==", "assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price ),", "last_sync_on).total_seconds() // 60) if sync_min_delta == 1: last_trade_on = self.usecase.get_last_trade_on() if last_trade_on is", "str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins = -( prediction_abs_bins.loc[ int(self.negative_entry_threshold.split(\"*\")[0]) ] * float(self.negative_entry_threshold.split(\"*\")[-1])", "CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model()", "\"/\"), column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] 
self.dataset_builder_params[\"base_feature_assets\"]", "( positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict) # Handle exit positions = self.custom_cli.get_position_objects( with_entry_at=False", "\"short\": opposite_side = \"long\" for exist_position in positions: if (exist_position.asset == order_asset) and", "price_to_achieve = ( entry_price * ( (prediction * self.achieve_ratio) + 1 + (commission[\"entry\"]", "positive_assets): continue if (position.side == \"short\") and (position.asset in negative_assets): continue position_entry_at =", "leverage is {CFG.LEVERAGE}\") self.min_holding_minutes = CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes = CFG.REPORT_PARAMS[\"max_holding_minutes\"] self.compound_interest = CFG.REPORT_PARAMS[\"compound_interest\"] self.order_criterion", "opposite_side = \"short\" if order_side == \"short\": opposite_side = \"long\" for exist_position in", "last_trade_on).total_seconds() // 60) >= 1: return True return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset)", "second info now = pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True:", "(position.side == \"long\") and (position.asset in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit:", "] self.dataset_builder_params[\"asset_to_id\"] = { key.replace(\"-\", \"/\"): value for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() }", "prediction_abs_bins.loc[ int(self.negative_entry_threshold.split(\"*\")[0]) ] * float(self.negative_entry_threshold.split(\"*\")[-1]) )[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if", "isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins = ( probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ] *", "max_holding_minutes if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, max_holding\")", "if already_have is True: self.last_entry_at[position.asset] = now return executable_order = self.check_if_executable_order(position=position) if executable_order", "(position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue #", "== 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold =", "features, base_features = build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for target_coin in", "symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price ), ) logger.info(f\"[O] Info: initialized order", "= predictions[position.asset] else: if position.side == \"long\": prediction = self.positive_entry_bins[position.asset] if position.side ==", "requests import urllib3 import joblib import pandas as pd import numpy as np", 
"_set_params(self): # Set params which has dependency on trader logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"]", "[ (column[0].replace(\"-\", \"/\"), column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\"", "Currently we accept False adjust_prediction assert self.adjust_prediction is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins", "self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] = {", "0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins >= 0).all().all() assert (probability_bins >=", "\"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, )", "positive_assets=positive_assets, negative_assets=negative_assets, now=now, ) long_positions = [ position for position in positions if", "Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" ) if self.compound_interest", "we accept only 0 assert self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold =", "self.entry_ratio) ) else: cache_to_order = 0 elif self.order_criterion == \"capital\": # Entry with", "+ commission[\"spread\"])) ) return price_to_achieve def entry_order(self, positions, asset, side, cache_to_order, pricing, now):", "to order cache_dict = self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_]", "position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order exists, we", "order if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if", ") / (1 - (commission[\"exit\"] + commission[\"spread\"])) ) if position.side == \"short\": assert", "] short_positions = [ position for position in positions if position.side == \"short\"", "self.order_criterion == \"capital\": # Entry with capital base cache_to_order = nan_to_zero( value=(capital *", "self.skip_executable_order_check is True: is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache", "np from dataclasses import dataclass from config import CFG from trainer.models import PredictorV1", "exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", ) def _store_last_entry_at(self): joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH) def _load_last_entry_at(self): if", "__post_init__(self): self.custom_cli = CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins,", "self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] 
self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold", "self.negative_entry_threshold ][index] if isinstance(self.exit_threshold, str): if \"*\" in self.exit_threshold: self.exit_bins = ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])]", "len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if position.asset not", "in positive_assets): continue if (position.side == \"short\") and (position.asset in negative_assets): continue position_entry_at", "positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side == \"short\") and", "cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) |", "if \"*\" in self.negative_probability_threshold: self.negative_probability_bins = ( probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ] * float(self.negative_probability_threshold.split(\"*\")[-1]) )[index]", "True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict) # Handle exit", "self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[ int(self.positive_entry_threshold.split(\"*\")[0]) ] * float(self.positive_entry_threshold.split(\"*\")[-1]) )[index] else: self.positive_entry_bins =", "position): cache_to_order = position.entry_price * position.qty commission_to_order = cache_to_order * ( self.commission[\"entry\"] +", "enumerate(positions): # Keep position if matched if (position.side == \"long\") and (position.asset in", "* float(self.negative_entry_threshold.split(\"*\")[-1]) )[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if isinstance(self.exit_threshold, str): if", "index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check is True:", "last_sync_on is None: return False sync_min_delta = int((now - last_sync_on).total_seconds() // 60) if", "if last_sync_on is None: return False sync_min_delta = int((now - last_sync_on).total_seconds() // 60)", "info now = pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict", "pd.Timestamp): if last_sync_on is None: return False sync_min_delta = int((now - last_sync_on).total_seconds() //", "position in enumerate(positions): # Keep position if matched if (position.side == \"long\") and", "return self.last_entry_at[position.asset] = now if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def", "for target_coin in self.tradable_coins ] return inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on =", "self.probability_bins = CFG.PROBABILITY_BINS # Set data builder params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] =", "in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input)", 
"position for position in positions if position.side == \"long\" ] short_positions = [", "= self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >= 0) is_enough_ammount", "} self.dataset_builder_params[\"id_to_asset\"] = { value: key.replace(\"-\", \"/\") for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() }", "self.cached_pricing is None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get extra", "if self.achieved_with_commission is not True: commission[\"entry\"] = 0 commission[\"exit\"] = 0 commission[\"spread\"] =", ") logger.info(f\"[O] Info: initialized order books\") def _build_features(self, pricing): features, class_features = self.dataset_builder.build_features(rawdata=pricing)", "for position in positions: if position.asset not in self.assets_to_limit_order: continue assert position.entry_price !=", "= CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently", "Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now,", "bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_cache & is_enough_ammount def compute_price_to_achieve(self, position, entry_price,", "predictions[position.asset] else: if position.side == \"long\": prediction = self.positive_entry_bins[position.asset] if position.side == \"short\":", "Exception as e: logger.error(\"[!] 
Error: \", exc_info=True) raise Exception if __name__ == \"__main__\":", "bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position)", "0 if position.side == \"long\": assert prediction >= 0 price_to_achieve = ( entry_price", "cache_dict = self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$", "import Usecase from exchange.custom_client import CustomClient from .utils import nan_to_zero from logging import", "= -prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if isinstance(self.exit_threshold, str): if \"*\" in self.exit_threshold: self.exit_bins =", "0 assert self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold =", "== \"capital\": # Entry with capital base cache_to_order = nan_to_zero( value=(capital * self.entry_ratio)", "column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\",", "0: cache_to_order = nan_to_zero( value=(cache * self.entry_ratio) ) else: cache_to_order = 0 elif", "is True: is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache =", ">= self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp): if", "def run(self): logger.info(f\"[O] Start: demon of trader\") n_traded = 0 while True: #", "self.positive_entry_threshold ][index] if isinstance(self.negative_entry_threshold, str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins = -( prediction_abs_bins.loc[", "True logger.info(f\"[-] Exit: {str(position)}, max_holding\") continue # Handle exit signal if (position.side ==", "== 1: last_trade_on = self.usecase.get_last_trade_on() if last_trade_on is None: return True else: if", "# Set assets which has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) &", "\"/\"): value for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } self.dataset_builder_params[\"id_to_asset\"] = { value: key.replace(\"-\",", "= None if self.skip_executable_order_check is True: assert self.order_criterion == \"capital\" def _set_params(self): #", "now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1 else: time.sleep(0.1)", "import ccxt import requests import urllib3 import joblib import pandas as pd import", "trainer.models import PredictorV1 from database.usecase import Usecase from exchange.custom_client import CustomClient from .utils", "already_have = self.check_if_already_have( positions=positions, position=position ) if already_have is True: self.last_entry_at[position.asset] = now", "return pred_dict def build_positive_and_negative_assets(self, pred_dict): # Set assets which has signals positive_assets =", "== \"short\") and (position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)},", "Exit: {str(position)}, max_holding\") continue # 
Handle exit signal if (position.side == \"long\") and", "position = Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have is", "/ entry_price position = Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now ) # Currently", "update_position_if_already_have is not supported. already_have = self.check_if_already_have( positions=positions, position=position ) if already_have is", "def build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on - pd.Timedelta( minutes=(1320 + CFG.EXP_MODEL_PARAMS[\"lookback_window\"] - 1)", "asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have is not supported. already_have", "def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp): if last_sync_on is None: return False sync_min_delta", "positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp): if last_sync_on is None: return", "position.side == \"short\" ] # Compute how much use cache to order cache_dict", "long_positions = [ position for position in positions if position.side == \"long\" ]", "None if isinstance(self.positive_entry_threshold, str): if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[ int(self.positive_entry_threshold.split(\"*\")[0])", "def compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty commission_to_order = cache_to_order * (", "last_trade_on = self.usecase.get_last_trade_on() if last_trade_on is None: return True else: if int((now -", "] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] =", "0.0004} skip_executable_order_check = True # To prevent api limitation def __post_init__(self): self.custom_cli =", "cache_to_order == 0: return # if opposite position exists, we dont entry if", "60: self.custom_cli = CustomClient() n_traded = 0 # Main try: # Use timestamp", "except Exception as e: logger.error(\"[!] 
Error: \", exc_info=True) raise Exception if __name__ ==", "CFG.LEVERAGE logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\") self.min_holding_minutes = CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes = CFG.REPORT_PARAMS[\"max_holding_minutes\"] self.compound_interest", "n_traded == 60: self.custom_cli = CustomClient() n_traded = 0 # Main try: #", "commission_to_order def check_if_already_have(self, positions, position): for exist_position in positions: if (exist_position.asset == position.asset)", "dependency on trader logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"]", "Usecase() possible_in_debt = False commission = {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check", "\"negative_probability_threshold\" ] self.adjust_prediction = CFG.REPORT_PARAMS[\"adjust_prediction\"] # Currently we accept False adjust_prediction assert self.adjust_prediction", "True: assert self.custom_cli.test_mode is True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index):", "cache_to_order = nan_to_zero( value=(cache * self.entry_ratio) ) else: cache_to_order = 0 elif self.order_criterion", "self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins", ">= 0 price_to_achieve = ( entry_price * ( (prediction * self.achieve_ratio) + 1", "self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, )", "def entry_order(self, positions, asset, side, cache_to_order, pricing, now): if cache_to_order == 0: return", "self.assets_to_limit_order = [] # Entry order if self.position_side in (\"long\", \"longshort\"): for order_asset", "last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets,", "in (\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing,", "].sort_index() def _build_inputs(self, features): features, base_features = build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs =", "_initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When", "else: self.negative_probability_bins = probability_bins.loc[ self.negative_probability_threshold ][index] def _build_dataset_builder(self): feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler", "of trader\") n_traded = 0 while True: # Handle relogin if n_traded ==", "in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order exists, we skip", "for position_idx, position in enumerate(positions): # Keep position if matched if (position.side ==", "= Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have is not", 
"amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start: demon of", "def __post_init__(self): self.custom_cli = CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins,", "if (position.side == \"long\") and (position.asset in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-]", "[] # Entry order if self.position_side in (\"long\", \"longshort\"): for order_asset in positive_assets:", "inputs = np.stack(inputs, axis=0) ids = [ self.dataset_builder_params[\"asset_to_id\"][target_coin] for target_coin in self.tradable_coins ]", "is None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get extra 1", "def _set_bins(self, prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins >= 0).all().all() assert (probability_bins >= 0).all().all()", "in enumerate(positions): # Keep position if matched if (position.side == \"long\") and (position.asset", "Handle exit signal if (position.side == \"long\") and (position.asset in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited", "str): if \"*\" in self.positive_probability_threshold: self.positive_probability_bins = ( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1])", "from database.usecase import Usecase from exchange.custom_client import CustomClient from .utils import nan_to_zero from", "+ (commission[\"exit\"] + commission[\"spread\"])) ) return price_to_achieve def entry_order(self, positions, asset, side, cache_to_order,", "& (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now:", "position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price ), ) logger.info(f\"[O] Info: initialized order books\") def", "now=now, ) long_positions = [ position for position in positions if position.side ==", "None self.negative_entry_bins = None self.exit_bins = None self.positive_probability_bins = None self.negative_probability_bins = None", "continue if (position.side == \"short\") and (position.asset in negative_assets): continue position_entry_at = self.last_entry_at[position.asset]", "== \"short\") and (position.asset in negative_assets): continue position_entry_at = self.last_entry_at[position.asset] passed_minutes = (now", "joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler,", "None: return self.last_entry_at[position.asset] = now if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\")", "is_enough_cache = bool((cache - cost) >= 0) is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset]", ") # Handle entry pricing = self.custom_cli.get_last_pricing() 
self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing,", "self.achieve_ratio) + 1 - (commission[\"entry\"] + commission[\"spread\"]) ) / (1 + (commission[\"exit\"] +", "False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS # Set data builder params self.dataset_builder_params", "= now return executable_order = self.check_if_executable_order(position=position) if executable_order is True: ordered = self.custom_cli.entry_order(", "cache_to_order, positive_assets, negative_assets, pricing, predictions, now, ): # Set init to handle limit", ") def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", ) def", "check_if_already_have(self, positions, position): for exist_position in positions: if (exist_position.asset == position.asset) and (", "= cache_to_order / entry_price position = Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now )", "side, cache_to_order, pricing, now): if cache_to_order == 0: return # if opposite position", "symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None: return self.last_entry_at[position.asset] = now", "TraderV1: usecase = Usecase() possible_in_debt = False commission = {\"entry\": 0.0004, \"exit\": 0.0002,", "position=position ) if already_have is True: self.last_entry_at[position.asset] = now return executable_order = self.check_if_executable_order(position=position)", "in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side", "\"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"]", "joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model", "device=\"cpu\", mode=\"predict\", ) def _store_last_entry_at(self): joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH) def _load_last_entry_at(self): if os.path.exists(LAST_ENTRY_AT_FILE_PATH): self.last_entry_at =", "assert (prediction_abs_bins >= 0).all().all() assert (probability_bins >= 0).all().all() self.positive_entry_bins = None self.negative_entry_bins =", "entry_price position = Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have", "if order_side == \"short\": opposite_side = \"long\" for exist_position in positions: if (exist_position.asset", "assert (probability_bins >= 0).all().all() self.positive_entry_bins = None self.negative_entry_bins = None self.exit_bins = None", "def compute_price_to_achieve(self, position, entry_price, predictions=None): if predictions is not None: prediction = predictions[position.asset]", "self.dataset_builder_params[\"asset_to_id\"][target_coin] for target_coin in self.tradable_coins ] return inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on", "Use timestamp without second info now = 
pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on,", "ccxt import requests import urllib3 import joblib import pandas as pd import numpy", "qty = cache_to_order / entry_price position = Position( asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now", "positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now})", "* self.entry_ratio) ) # Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets,", "position.asset) and ( exist_position.side == position.side ): return True return False def check_if_executable_order(self,", "= CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\") self.min_holding_minutes", "ordered is None: assert CFG.TEST_MODE is True return def handle_exit(self, positions, positive_assets, negative_assets,", "now}) self._store_last_entry_at() n_traded += 1 else: time.sleep(0.1) except Exception as e: logger.error(\"[!] Error:", "position for position in positions if position.side == \"short\" ] # Compute how", "logger.info(f\"[O] Start: demon of trader\") n_traded = 0 while True: # Handle relogin", "position=position, entry_price=position.entry_price ), ) logger.info(f\"[O] Info: initialized order books\") def _build_features(self, pricing): features,", "= pricing pricing = pricing.unstack().swaplevel(0, 1, axis=1) features = self._build_features(pricing=pricing) inputs, ids =", "opposite\") continue # Delete exited positions positions = [ position for position in", "cache_to_order = position.entry_price * position.qty commission_to_order = cache_to_order * ( self.commission[\"entry\"] + self.commission[\"spread\"]", "position in positions: if self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset]", "logging import getLogger from common_utils_svc import initialize_trader_logger, Position from dataset_builder.build_dataset import DatasetBuilder from", "start_on=query_start_on, end_on=query_end_on ) else: # Get extra 1 candle, cause it has potential", "self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on ) pricing = pd.concat( [ self.cached_pricing[ query_start_on : self.cached_pricing.index.levels[0][-2] ],", "base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]],", "* self.achieve_ratio) + 1 + (commission[\"entry\"] + commission[\"spread\"]) ) / (1 - (commission[\"exit\"]", "asset, side, cache_to_order, pricing, now): if cache_to_order == 0: return # if opposite", "probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ] * float(self.negative_probability_threshold.split(\"*\")[-1]) )[index] else: self.negative_probability_bins = probability_bins.loc[ self.negative_probability_threshold ][index] def", "pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return pred_dict def build_positive_and_negative_assets(self, pred_dict): #", 
"self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if position.asset not in self.assets_to_limit_order: continue assert position.entry_price", "now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict) #", "( self.commission[\"entry\"] + self.commission[\"spread\"] ) return cache_to_order + commission_to_order def check_if_already_have(self, positions, position):", "self.tradable_coins ] return inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on - pd.Timedelta(", "we skip it. if len(orders) >= 1: continue assert position.entry_price != 0.0 self.custom_cli.exit_order(", "] * float(self.positive_entry_threshold.split(\"*\")[-1]) )[index] else: self.positive_entry_bins = prediction_abs_bins.loc[ self.positive_entry_threshold ][index] if isinstance(self.negative_entry_threshold, str):", "position.entry_at def _initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset)", "self.usecase.get_last_trade_on() if last_trade_on is None: return True else: if int((now - last_trade_on).total_seconds() //", "self.entry_ratio) ) # Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets,", "= CFG.REPORT_PARAMS[ \"negative_probability_threshold\" ] self.adjust_prediction = CFG.REPORT_PARAMS[\"adjust_prediction\"] # Currently we accept False adjust_prediction", "for position in positions: if self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset] = max( position.entry_at,", "= cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)})", "asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) # Limit order if len(self.assets_to_limit_order) > 0:", "initialize_trader_logger, Position from dataset_builder.build_dataset import DatasetBuilder from trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\")", "# Set params which has dependency on trader logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"] self.position_side", "= False commission = {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check = True", "(\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now,", "feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\",", "self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp): if last_sync_on", "accept only 0 assert self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"]", "0) is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_cache & 
is_enough_ammount def", "- last_trade_on).total_seconds() // 60) >= 1: return True return False def exit_order(self, position):", "( self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side ) is True ): return entry_price = pricing[asset]", "+ commission[\"spread\"]) ) / (1 + (commission[\"exit\"] + commission[\"spread\"])) ) return price_to_achieve def", "= \"long\" for exist_position in positions: if (exist_position.asset == order_asset) and ( exist_position.side", "self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return pred_dict def build_positive_and_negative_assets(self, pred_dict): # Set assets", "positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) if self.position_side in (\"short\",", "def _load_last_entry_at(self): if os.path.exists(LAST_ENTRY_AT_FILE_PATH): self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded last_entry_at\") else: self.last_entry_at", "= None if isinstance(self.positive_entry_threshold, str): if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[", "ids = [ self.dataset_builder_params[\"asset_to_id\"][target_coin] for target_coin in self.tradable_coins ] return inputs, ids def", "self.compound_interest = CFG.REPORT_PARAMS[\"compound_interest\"] self.order_criterion = CFG.REPORT_PARAMS[\"order_criterion\"] self.exit_if_achieved = CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission", "position in positions if position.side == \"long\" ] short_positions = [ position for", "joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH) def _load_last_entry_at(self): if os.path.exists(LAST_ENTRY_AT_FILE_PATH): self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded last_entry_at\")", "] negative_assets = self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return", "self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def handle_entry( self, positions, cache_to_order, positive_assets, negative_assets, pricing, predictions,", "self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets def", "= self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None: return self.last_entry_at[position.asset]", "cache_to_order=cache_to_order, pricing=pricing, now=now, ) if self.position_side in (\"short\", \"longshort\"): for order_asset in negative_assets:", "return True return False def compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty commission_to_order", ": self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index() self.cached_pricing = pricing pricing = pricing.unstack().swaplevel(0, 1,", "builder params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"), column[1]) for column", "# Currently we accept only 0 assert self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"]", ")[index] else: 
self.negative_probability_bins = probability_bins.loc[ self.negative_probability_threshold ][index] def _build_dataset_builder(self): feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\"))", "return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty,", "= CFG.REPORT_PARAMS[\"adjust_prediction\"] # Currently we accept False adjust_prediction assert self.adjust_prediction is False self.prediction_abs_bins", "] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str):", "self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept", "= self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if position.asset not in self.assets_to_limit_order: continue assert", "logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase = Usecase()", "exists, we skip it. if len(orders) >= 1: continue assert position.entry_price != 0.0", ") else: cache_to_order = 0 elif self.order_criterion == \"capital\": # Entry with capital", "inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on - pd.Timedelta( minutes=(1320 + CFG.EXP_MODEL_PARAMS[\"lookback_window\"]", "features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for target_coin in self.tradable_coins: to_input = pd.concat([base_features,", "= self.dataset_builder.build_features(rawdata=pricing) features = self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"], ) return pd.concat([features, class_features], axis=1)[ self.dataset_builder_params[\"features_columns\"]", "def check_if_opposite_position_exists(self, positions, order_asset, order_side): if order_side == \"long\": opposite_side = \"short\" if", "str): if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = ( prediction_abs_bins.loc[ int(self.positive_entry_threshold.split(\"*\")[0]) ] * float(self.positive_entry_threshold.split(\"*\")[-1])", "executable_order = self.check_if_executable_order(position=position) if executable_order is True: ordered = self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side,", "import dataclass from config import CFG from trainer.models import PredictorV1 from database.usecase import", "\"long\") and (position.asset in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\")", "last_entry_at\") else: self.last_entry_at = {key: None for key in self.tradable_coins} # Initialize positions", "positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict) # Handle exit positions = self.custom_cli.get_position_objects( with_entry_at=False )", "# Entry order if self.position_side in (\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order(", "Compute 
how much use cache to order cache_dict = self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"]", "params self.dataset_builder_params = {} self.dataset_builder_params[\"features_columns\"] = [ (column[0].replace(\"-\", \"/\"), column[1]) for column in", "in self.positive_probability_threshold: self.positive_probability_bins = ( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins", "exit positions = self.custom_cli.get_position_objects( with_entry_at=False ) positions = self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now,", "for position in positions if position.side == \"long\" ] short_positions = [ position", "has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"] >= self.positive_probability_bins) ]", "False adjust_prediction assert self.adjust_prediction is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS #", "_build_dataset_builder(self): feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins,", "for position in positions if position.is_exited is not True ] return positions def", "cache_to_order=cache_to_order, pricing=pricing, now=now, ) # Limit order if len(self.assets_to_limit_order) > 0: positions =", "symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O]", "is True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins >=", "else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if isinstance(self.exit_threshold, str): if \"*\" in self.exit_threshold:", "DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS,", "= CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept only", "if isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins = ( probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ]", "self.commission[\"entry\"] + self.commission[\"spread\"] ) return cache_to_order + commission_to_order def check_if_already_have(self, positions, position): for", "self.positive_probability_bins) ] negative_assets = self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ]", "Keep position if matched if (position.side == \"long\") and (position.asset in positive_assets): continue", "= self.positive_entry_bins[position.asset] if position.side == \"short\": prediction = self.negative_entry_bins[position.asset] commission = self.commission 
if", "( probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ] * float(self.negative_probability_threshold.split(\"*\")[-1]) )[index] else: self.negative_probability_bins = probability_bins.loc[ self.negative_probability_threshold ][index]", "] self.negative_probability_threshold = CFG.REPORT_PARAMS[ \"negative_probability_threshold\" ] self.adjust_prediction = CFG.REPORT_PARAMS[\"adjust_prediction\"] # Currently we accept", "position=position.side, amount=position.qty, ) if ordered is None: return self.last_entry_at[position.asset] = now if self.exit_if_achieved", "= CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ] self.negative_probability_threshold = CFG.REPORT_PARAMS[", "from config import CFG from trainer.models import PredictorV1 from database.usecase import Usecase from", "if sync_min_delta == 1: last_trade_on = self.usecase.get_last_trade_on() if last_trade_on is None: return True", "in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] = { key.replace(\"-\", \"/\"): value for key, value in", "= -( prediction_abs_bins.loc[ int(self.negative_entry_threshold.split(\"*\")[0]) ] * float(self.negative_entry_threshold.split(\"*\")[-1]) )[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold", "self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in", "\"spread\": 0.0004} skip_executable_order_check = True # To prevent api limitation def __post_init__(self): self.custom_cli", "cache_to_order, pricing, now): if cache_to_order == 0: return # if opposite position exists,", "positions, positive_assets, negative_assets, now): for position_idx, position in enumerate(positions): # Keep position if", "self.order_criterion = CFG.REPORT_PARAMS[\"order_criterion\"] self.exit_if_achieved = CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated", "Usecase from exchange.custom_client import CustomClient from .utils import nan_to_zero from logging import getLogger", "dataclasses import dataclass from config import CFG from trainer.models import PredictorV1 from database.usecase", "def check_if_already_have(self, positions, position): for exist_position in positions: if (exist_position.asset == position.asset) and", "in (\"short\", \"longshort\"): for order_asset in negative_assets: self.entry_order( positions=positions, asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing,", "if self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset] ) else: self.last_entry_at[position.asset]", "_build_inputs(self, features): features, base_features = build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = [] for", "if ordered is None: return self.last_entry_at[position.asset] = now if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset)", "position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, 
order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price ), )", "only 0 assert self.max_n_updated == 0 self.positive_entry_threshold = CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold", "if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if position.asset", "== opposite_side ): return True return False def compute_cost_to_order(self, position): cache_to_order = position.entry_price", "elif self.order_criterion == \"capital\": # Entry with capital base cache_to_order = nan_to_zero( value=(capital", "int(self.negative_entry_threshold.split(\"*\")[0]) ] * float(self.negative_entry_threshold.split(\"*\")[-1]) )[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if isinstance(self.exit_threshold,", "return positions def check_if_opposite_position_exists(self, positions, order_asset, order_side): if order_side == \"long\": opposite_side =", "= ( entry_price * ( (prediction * self.achieve_ratio) + 1 - (commission[\"entry\"] +", "-( prediction_abs_bins.loc[ int(self.negative_entry_threshold.split(\"*\")[0]) ] * float(self.negative_entry_threshold.split(\"*\")[-1]) )[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold ][index]", "use cache to order cache_dict = self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache = cache_dict[\"free\"]", "= [ (column[0].replace(\"-\", \"/\"), column[1]) for column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[", "and (position.asset in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue", "relogin if n_traded == 60: self.custom_cli = CustomClient() n_traded = 0 # Main", "position.side ): return True return False def check_if_executable_order(self, position): if self.skip_executable_order_check is True:", "self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded last_entry_at\") else: self.last_entry_at = {key: None for", "True ): return entry_price = pricing[asset] qty = cache_to_order / entry_price position =", "position_idx, position in enumerate(positions): # Keep position if matched if (position.side == \"long\")", "value=(capital * self.entry_ratio) ) # Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order,", "prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check", "Set assets which has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"]", "True return False def compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty commission_to_order =", "True: ordered = self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None:", "position.entry_price != 0.0 
self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ),", "joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded last_entry_at\") else: self.last_entry_at = {key: None for key in", "position): if self.skip_executable_order_check is True: is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return", "positions=positions, order_asset=asset, order_side=side ) is True ): return entry_price = pricing[asset] qty =", "self.exit_bins = None self.positive_probability_bins = None self.negative_probability_bins = None if isinstance(self.positive_entry_threshold, str): if", "class_features = self.dataset_builder.build_features(rawdata=pricing) features = self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"], ) return pd.concat([features, class_features], axis=1)[", "import requests import urllib3 import joblib import pandas as pd import numpy as", "order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price ), ) logger.info(f\"[O] Info: initialized order books\")", "1 else: time.sleep(0.1) except Exception as e: logger.error(\"[!] Error: \", exc_info=True) raise Exception", "for key in self.tradable_coins} # Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True) for position in positions:", "positions = self.custom_cli.get_position_objects(with_entry_at=True) for position in positions: if self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset]", "for exist_position in positions: if (exist_position.asset == position.asset) and ( exist_position.side == position.side", "Currently update_position_if_already_have is not supported. 
already_have = self.check_if_already_have( positions=positions, position=position ) if already_have", "pd import numpy as np from dataclasses import dataclass from config import CFG", "= self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission is not True: commission[\"entry\"] = 0", "self.cached_pricing = pricing pricing = pricing.unstack().swaplevel(0, 1, axis=1) features = self._build_features(pricing=pricing) inputs, ids", "joblib import pandas as pd import numpy as np from dataclasses import dataclass", "self.last_entry_at[position.asset] passed_minutes = (now - position_entry_at).total_seconds() // 60 # Handle min_holding_minutes if passed_minutes", "tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS,", "== \"short\" ] # Compute how much use cache to order cache_dict =", "return executable_order = self.check_if_executable_order(position=position) if executable_order is True: ordered = self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\",", "now=now, ) if self.position_side in (\"short\", \"longshort\"): for order_asset in negative_assets: self.entry_order( positions=positions,", "= nan_to_zero( value=(capital * self.entry_ratio) ) # Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry(", ") if self.compound_interest is False: cache_to_order = self.entry_ratio else: if self.order_criterion == \"cache\":", "self.exit_bins = ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])] * float(self.exit_threshold.split(\"*\")[-1]) )[index] else: self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index] if isinstance(self.positive_probability_threshold,", "def build_positive_and_negative_assets(self, pred_dict): # Set assets which has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"]", ") # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1 else: time.sleep(0.1) except", "positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: if position.asset not in self.assets_to_limit_order: continue", "= [ position for position in positions if position.side == \"short\" ] #", "if self.position_side in (\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset, side=\"long\",", "with capital base cache_to_order = nan_to_zero( value=(capital * self.entry_ratio) ) # Handle entry", "= CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ] self.negative_probability_threshold = CFG.REPORT_PARAMS[ \"negative_probability_threshold\" ] self.adjust_prediction", "None for key in self.tradable_coins} # Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True) for position in", "\"long\") and (position.asset in positive_assets): continue if (position.side == \"short\") and (position.asset in", "if matched if (position.side == \"long\") and (position.asset in positive_assets): continue if (position.side", "self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\") self.min_holding_minutes = 
import os
import gc
import time

import ccxt
import requests
import urllib3
import joblib
import pandas as pd
import numpy as np
from dataclasses import dataclass

from config import CFG
from trainer.models import PredictorV1
from database.usecase import Usecase
from exchange.custom_client import CustomClient
from .utils import nan_to_zero
from logging import getLogger
from common_utils_svc import initialize_trader_logger, Position
from dataset_builder.build_dataset import DatasetBuilder
from trainer.datasets.dataset import build_X_and_BX

logger = getLogger("trader")
initialize_trader_logger()

LAST_ENTRY_AT_FILE_PATH = "/app/storage/trader/last_entry_at.pkl"


@dataclass
class TraderV1:
    usecase = Usecase()
    possible_in_debt = False
    commission = {"entry": 0.0004, "exit": 0.0002, "spread": 0.0004}
    skip_executable_order_check = True  # To prevent api limitation

    def __post_init__(self):
        self.custom_cli = CustomClient()
        self.tradable_coins = pd.Index(self.custom_cli.tradable_coins)

        self._set_params()
        self._set_test_params()
        self._set_bins(
            prediction_abs_bins=self.prediction_abs_bins,
            probability_bins=self.probability_bins,
            index=self.tradable_coins,
        )
        self._build_dataset_builder()
        self._build_model()
        self._load_last_entry_at()
        self._initialize_order_books()

        self.cached_pricing = None

        if self.skip_executable_order_check is True:
            assert self.order_criterion == "capital"

    def _set_params(self):
        # Set params which depend on the trader logic
        self.base_currency = CFG.REPORT_PARAMS["base_currency"]
        self.position_side = CFG.REPORT_PARAMS["position_side"]
        self.entry_ratio = CFG.REPORT_PARAMS["entry_ratio"] * CFG.LEVERAGE
        logger.info(f"[O] Info: leverage is {CFG.LEVERAGE}")

        self.min_holding_minutes = CFG.REPORT_PARAMS["min_holding_minutes"]
        self.max_holding_minutes = CFG.REPORT_PARAMS["max_holding_minutes"]
        self.compound_interest = CFG.REPORT_PARAMS["compound_interest"]
        self.order_criterion = CFG.REPORT_PARAMS["order_criterion"]
        self.exit_if_achieved = CFG.REPORT_PARAMS["exit_if_achieved"]
        self.achieve_ratio = CFG.REPORT_PARAMS["achieve_ratio"]
        self.achieved_with_commission = CFG.REPORT_PARAMS["achieved_with_commission"]
        self.max_n_updated = CFG.REPORT_PARAMS["max_n_updated"]
        # Currently we accept only 0
        assert self.max_n_updated == 0

        self.positive_entry_threshold = CFG.REPORT_PARAMS["positive_entry_threshold"]
        self.negative_entry_threshold = CFG.REPORT_PARAMS["negative_entry_threshold"]
        self.exit_threshold = CFG.REPORT_PARAMS["exit_threshold"]
        self.positive_probability_threshold = CFG.REPORT_PARAMS[
            "positive_probability_threshold"
        ]
        self.negative_probability_threshold = CFG.REPORT_PARAMS[
            "negative_probability_threshold"
        ]
        self.adjust_prediction = CFG.REPORT_PARAMS["adjust_prediction"]
        # Currently we accept only False for adjust_prediction
        assert self.adjust_prediction is False

        self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS
        self.probability_bins = CFG.PROBABILITY_BINS

        # Set dataset builder params
        self.dataset_builder_params = {}
        self.dataset_builder_params["features_columns"] = [
            (column[0].replace("-", "/"), column[1])
            for column in CFG.DATASET_PARAMS["features_columns"]
        ]
        self.dataset_builder_params["winsorize_threshold"] = CFG.DATASET_PARAMS[
            "winsorize_threshold"
        ]
        self.dataset_builder_params["base_feature_assets"] = [
            base_feature_asset.replace("-", "/")
            for base_feature_asset in CFG.EXP_DATA_PARAMS["base_feature_assets"]
        ]
        self.dataset_builder_params["asset_to_id"] = {
            key.replace("-", "/"): value
            for key, value in CFG.EXP_PARAMS["asset_to_id"].items()
        }
        self.dataset_builder_params["id_to_asset"] = {
            value: key.replace("-", "/")
            for key, value in CFG.EXP_PARAMS["asset_to_id"].items()
        }

    def _set_test_params(self):
        if CFG.TEST_MODE is True:
            assert self.custom_cli.test_mode is True
            self.entry_ratio = 0.0001

    def _set_bins(self, prediction_abs_bins, probability_bins, index):
        assert (prediction_abs_bins >= 0).all().all()
        assert (probability_bins >= 0).all().all()

        self.positive_entry_bins = None
        self.negative_entry_bins = None
        self.exit_bins = None
        self.positive_probability_bins = None
        self.negative_probability_bins = None

        if isinstance(self.positive_entry_threshold, str):
            if "*" in self.positive_entry_threshold:
                self.positive_entry_bins = (
                    prediction_abs_bins.loc[
                        int(self.positive_entry_threshold.split("*")[0])
                    ]
                    * float(self.positive_entry_threshold.split("*")[-1])
                )[index]
        else:
            self.positive_entry_bins = prediction_abs_bins.loc[
                self.positive_entry_threshold
            ][index]

        if isinstance(self.negative_entry_threshold, str):
            if "*" in self.negative_entry_threshold:
                self.negative_entry_bins = -(
                    prediction_abs_bins.loc[
                        int(self.negative_entry_threshold.split("*")[0])
                    ]
                    * float(self.negative_entry_threshold.split("*")[-1])
                )[index]
        else:
            self.negative_entry_bins = -prediction_abs_bins.loc[
                self.negative_entry_threshold
            ][index]

        if isinstance(self.exit_threshold, str):
            if "*" in self.exit_threshold:
                self.exit_bins = (
                    prediction_abs_bins.loc[int(self.exit_threshold.split("*")[0])]
                    * float(self.exit_threshold.split("*")[-1])
                )[index]
        else:
            self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index]

        if isinstance(self.positive_probability_threshold, str):
            if "*" in self.positive_probability_threshold:
                self.positive_probability_bins = (
                    probability_bins.loc[
                        int(self.positive_probability_threshold.split("*")[0])
                    ]
                    * float(self.positive_probability_threshold.split("*")[-1])
                )[index]
        else:
            self.positive_probability_bins = probability_bins.loc[
                self.positive_probability_threshold
            ][index]

        if isinstance(self.negative_probability_threshold, str):
            if "*" in self.negative_probability_threshold:
                self.negative_probability_bins = (
                    probability_bins.loc[
                        int(self.negative_probability_threshold.split("*")[0])
                    ]
                    * float(self.negative_probability_threshold.split("*")[-1])
                )[index]
        else:
            self.negative_probability_bins = probability_bins.loc[
                self.negative_probability_threshold
            ][index]

    def _build_dataset_builder(self):
        feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, "feature_scaler.pkl"))
        label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, "label_scaler.pkl"))

        self.dataset_builder = DatasetBuilder(
            tradable_coins=self.tradable_coins,
            features_columns=self.dataset_builder_params["features_columns"],
            feature_scaler=feature_scaler,
            label_scaler=label_scaler,
        )

    def _build_model(self):
        self.model = PredictorV1(
            exp_dir=CFG.EXP_DIR,
            m_config=CFG.EXP_MODEL_PARAMS,
            d_config=CFG.EXP_DATA_PARAMS,
            device="cpu",
            mode="predict",
        )

    def _store_last_entry_at(self):
        joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH)

    def _load_last_entry_at(self):
        if os.path.exists(LAST_ENTRY_AT_FILE_PATH):
            self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH)
            logger.info(f"[O] Info: loaded last_entry_at")
        else:
            self.last_entry_at = {key: None for key in self.tradable_coins}

        # Initialize from the currently open positions
        positions = self.custom_cli.get_position_objects(with_entry_at=True)
        for position in positions:
            if self.last_entry_at[position.asset] is not None:
                self.last_entry_at[position.asset] = max(
                    position.entry_at, self.last_entry_at[position.asset]
                )
            else:
                self.last_entry_at[position.asset] = position.entry_at

    def _initialize_order_books(self):
        positions = self.custom_cli.get_position_objects(with_entry_at=False)
        for position in positions:
            orders = self.custom_cli.get_open_orders(symbol=position.asset)

            # When a limit order already exists, we skip it.
            if len(orders) >= 1:
                continue

            assert position.entry_price != 0.0
            self.custom_cli.exit_order(
                symbol=position.asset,
                order_type="limit",
                position=position.side,
                amount=position.qty,
                price=self.compute_price_to_achieve(
                    position=position, entry_price=position.entry_price
                ),
            )

        logger.info(f"[O] Info: initialized order books")

    def _build_features(self, pricing):
        features, class_features = self.dataset_builder.build_features(rawdata=pricing)
        features = self.dataset_builder.preprocess_features(
            features=features,
            winsorize_threshold=self.dataset_builder_params["winsorize_threshold"],
        )

        return pd.concat([features, class_features], axis=1)[
            self.dataset_builder_params["features_columns"]
        ].sort_index()

    def _build_inputs(self, features):
        features, base_features = build_X_and_BX(
            features=features.astype("float32"),
            base_feature_assets=self.dataset_builder_params["base_feature_assets"],
        )

        inputs = []
        for target_coin in self.tradable_coins:
            # NOTE: the exact construction of `to_input` could not be recovered from
            # the source; concatenating the shared base features with the target
            # coin's own features is an assumption.
            to_input = pd.concat([base_features, features[target_coin]], axis=1)
            to_input = np.swapaxes(to_input.values, 0, 1)
            inputs.append(to_input)

        inputs = np.stack(inputs, axis=0)
        ids = [
            self.dataset_builder_params["asset_to_id"][target_coin]
            for target_coin in self.tradable_coins
        ]

        return inputs, ids

    def build_prediction_dict(self, last_sync_on):
        query_start_on = last_sync_on - pd.Timedelta(
            minutes=(1320 + CFG.EXP_MODEL_PARAMS["lookback_window"] - 1)
        )
        query_end_on = last_sync_on

        if self.cached_pricing is None:
            pricing = self.usecase.get_pricing(
                start_on=query_start_on, end_on=query_end_on
            )
        else:
            # Get 1 extra candle, because it may still change.
            pricing = self.usecase.get_pricing(
                start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on
            )
            pricing = pd.concat(
                [
                    self.cached_pricing[
                        query_start_on : self.cached_pricing.index.levels[0][-2]
                    ],
                    pricing,
                ]
            ).sort_index()

        self.cached_pricing = pricing

        pricing = pricing.unstack().swaplevel(0, 1, axis=1)
        features = self._build_features(pricing=pricing)
        inputs, ids = self._build_inputs(features=features)
        pred_dict = self.model.predict(
            X=inputs, id=ids, id_to_asset=self.dataset_builder_params["id_to_asset"]
        )

        return pred_dict

    def build_positive_and_negative_assets(self, pred_dict):
        # Select the assets which have signals
        positive_assets = self.tradable_coins[
            (pred_dict["predictions"] >= self.positive_entry_bins)
            & (pred_dict["probabilities"] >= self.positive_probability_bins)
        ]
        negative_assets = self.tradable_coins[
            (pred_dict["predictions"] <= self.negative_entry_bins)
            & (pred_dict["probabilities"] >= self.negative_probability_bins)
        ]

        return positive_assets, negative_assets

    def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp):
        if last_sync_on is None:
            return False

        sync_min_delta = int((now - last_sync_on).total_seconds() // 60)
        if sync_min_delta == 1:
            last_trade_on = self.usecase.get_last_trade_on()
            if last_trade_on is None:
                return True
            else:
                if int((now - last_trade_on).total_seconds() // 60) >= 1:
                    return True

        return False

    def exit_order(self, position):
        self.custom_cli.cancel_orders(symbol=position.asset)
        ordered = self.custom_cli.exit_order(
            symbol=position.asset,
            order_type="market",
            position=position.side,
            amount=position.qty,
        )
        if ordered is None:
            assert CFG.TEST_MODE is True
            return

    def handle_exit(self, positions, positive_assets, negative_assets, now):
        for position_idx, position in enumerate(positions):
            # Keep the position if it still matches the signal
            if (position.side == "long") and (position.asset in positive_assets):
                continue
            if (position.side == "short") and (position.asset in negative_assets):
                continue

            position_entry_at = self.last_entry_at[position.asset]
            passed_minutes = (now - position_entry_at).total_seconds() // 60

            # Handle min_holding_minutes
            if passed_minutes <= self.min_holding_minutes:
                continue

            # Handle max_holding_minutes
            if passed_minutes >= self.max_holding_minutes:
                self.exit_order(position=position)
                positions[position_idx].is_exited = True
                logger.info(f"[-] Exit: {str(position)}, max_holding")
                continue

            # Handle exit signal
            if (position.side == "long") and (position.asset in negative_assets):
                self.exit_order(position=position)
                positions[position_idx].is_exited = True
                logger.info(f"[-] Exit: {str(position)}, opposite")
                continue

            if (position.side == "short") and (position.asset in positive_assets):
                self.exit_order(position=position)
                positions[position_idx].is_exited = True
                logger.info(f"[-] Exit: {str(position)}, opposite")
                continue

        # Delete exited positions
        positions = [
            position for position in positions if position.is_exited is not True
        ]

        return positions

    def check_if_opposite_position_exists(self, positions, order_asset, order_side):
        if order_side == "long":
            opposite_side = "short"
        if order_side == "short":
            opposite_side = "long"

        for exist_position in positions:
            if (exist_position.asset == order_asset) and (
                exist_position.side == opposite_side
            ):
                return True

        return False

    def compute_cost_to_order(self, position):
        cache_to_order = position.entry_price * position.qty
        commission_to_order = cache_to_order * (
            self.commission["entry"] + self.commission["spread"]
        )

        return cache_to_order + commission_to_order

    def check_if_already_have(self, positions, position):
        for exist_position in positions:
            if (exist_position.asset == position.asset) and (
                exist_position.side == position.side
            ):
                return True

        return False

    def check_if_executable_order(self, position):
        if self.skip_executable_order_check is True:
            is_enough_ammount = bool(
                position.qty >= self.custom_cli.ammount_constraints[position.asset]
            )
            return is_enough_ammount

        cache = self.custom_cli.get_cache_dict()["free"]
        cost = self.compute_cost_to_order(position=position)
        is_enough_cache = bool((cache - cost) >= 0)
        is_enough_ammount = bool(
            position.qty >= self.custom_cli.ammount_constraints[position.asset]
        )

        return is_enough_cache & is_enough_ammount

    def compute_price_to_achieve(self, position, entry_price, predictions=None):
        if predictions is not None:
            prediction = predictions[position.asset]
        else:
            if position.side == "long":
                prediction = self.positive_entry_bins[position.asset]
            if position.side == "short":
                prediction = self.negative_entry_bins[position.asset]

        commission = self.commission
        if self.achieved_with_commission is not True:
            commission["entry"] = 0
            commission["exit"] = 0
            commission["spread"] = 0

        if position.side == "long":
            assert prediction >= 0
            price_to_achieve = (
                entry_price
                * (
                    (prediction * self.achieve_ratio)
                    + 1
                    + (commission["entry"] + commission["spread"])
                )
                / (1 - (commission["exit"] + commission["spread"]))
            )

        if position.side == "short":
            assert prediction <= 0
            price_to_achieve = (
                entry_price
                * (
                    (prediction * self.achieve_ratio)
                    + 1
                    - (commission["entry"] + commission["spread"])
                )
                / (1 + (commission["exit"] + commission["spread"]))
            )

        return price_to_achieve

    def entry_order(self, positions, asset, side, cache_to_order, pricing, now):
        if cache_to_order == 0:
            return

        # If the opposite position exists, we don't entry
        if (
            self.check_if_opposite_position_exists(
                positions=positions, order_asset=asset, order_side=side
            )
            is True
        ):
            return

        entry_price = pricing[asset]
        qty = cache_to_order / entry_price
        position = Position(
            asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now
        )

        # Currently update_position_if_already_have is not supported.
        already_have = self.check_if_already_have(
            positions=positions, position=position
        )
        if already_have is True:
            self.last_entry_at[position.asset] = now
            return

        executable_order = self.check_if_executable_order(position=position)
        if executable_order is True:
            ordered = self.custom_cli.entry_order(
                symbol=position.asset,
                order_type="market",
                position=position.side,
                amount=position.qty,
            )
            if ordered is None:
                return

            self.last_entry_at[position.asset] = now

            if self.exit_if_achieved is True:
                self.assets_to_limit_order.append(position.asset)

            logger.info(f"[+] Entry: {str(position)}")

    def handle_entry(
        self,
        positions,
        cache_to_order,
        positive_assets,
        negative_assets,
        pricing,
        predictions,
        now,
    ):
        # Set init to handle limit orders
        self.assets_to_limit_order = []

        # Entry orders
        if self.position_side in ("long", "longshort"):
            for order_asset in positive_assets:
                self.entry_order(
                    positions=positions,
                    asset=order_asset,
                    side="long",
                    cache_to_order=cache_to_order,
                    pricing=pricing,
                    now=now,
                )

        if self.position_side in ("short", "longshort"):
            for order_asset in negative_assets:
                self.entry_order(
                    positions=positions,
                    asset=order_asset,
                    side="short",
                    cache_to_order=cache_to_order,
                    pricing=pricing,
                    now=now,
                )

        # Limit orders
        if len(self.assets_to_limit_order) > 0:
            positions = self.custom_cli.get_position_objects(with_entry_at=False)
            for position in positions:
                if position.asset not in self.assets_to_limit_order:
                    continue

                assert position.entry_price != 0.0
                self.custom_cli.exit_order(
                    symbol=position.asset,
                    order_type="limit",
                    position=position.side,
                    amount=position.qty,
                    price=self.compute_price_to_achieve(
                        position=position,
                        entry_price=position.entry_price,
                        predictions=predictions,
                    ),
                )

    def run(self):
        logger.info(f"[O] Start: demon of trader")
        n_traded = 0

        while True:
            # Handle relogin
            if n_traded == 60:
                self.custom_cli = CustomClient()
                n_traded = 0

            # Main
            try:
                # Use timestamp without second info
                now = pd.Timestamp.utcnow().floor("T")
                last_sync_on = self.usecase.get_last_sync_on()

                if self.is_executable(last_sync_on=last_sync_on, now=now) is True:
                    pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on)
                    (
                        positive_assets,
                        negative_assets,
                    ) = self.build_positive_and_negative_assets(pred_dict=pred_dict)

                    # Handle exit
                    positions = self.custom_cli.get_position_objects(
                        with_entry_at=False
                    )
                    positions = self.handle_exit(
                        positions=positions,
                        positive_assets=positive_assets,
                        negative_assets=negative_assets,
                        now=now,
                    )

                    long_positions = [
                        position for position in positions if position.side == "long"
                    ]
                    short_positions = [
                        position for position in positions if position.side == "short"
                    ]

                    # Compute how much cache to use per order
                    cache_dict = self.custom_cli.get_cache_dict()
                    capital = cache_dict["total"]
                    cache = cache_dict["free"]

                    logger.info(
                        f"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})"
                    )

                    if self.compound_interest is False:
                        cache_to_order = self.entry_ratio
                    else:
                        if self.order_criterion == "cache":
                            if cache > 0:
                                cache_to_order = nan_to_zero(
                                    value=(cache * self.entry_ratio)
                                )
                            else:
                                cache_to_order = 0
                        elif self.order_criterion == "capital":
                            # Entry with capital base
                            cache_to_order = nan_to_zero(
                                value=(capital * self.entry_ratio)
                            )

                    # Handle entry
                    pricing = self.custom_cli.get_last_pricing()
                    self.handle_entry(
                        positions=positions,
                        cache_to_order=cache_to_order,
                        positive_assets=positive_assets,
                        negative_assets=negative_assets,
                        pricing=pricing,
                        predictions=pred_dict["predictions"],
                        now=now,
                    )

                    # Record trade
                    self.usecase.insert_trade({"timestamp": now})
                    self._store_last_entry_at()

                    n_traded += 1
                else:
                    time.sleep(0.1)

            except Exception as e:
                logger.error("[!] Error: ", exc_info=True)
                raise Exception


if __name__ == "__main__":
    import fire
opposite position exists, we dont entry if ( self.check_if_opposite_position_exists( positions=positions, order_asset=asset,", "in positions: if (exist_position.asset == position.asset) and ( exist_position.side == position.side ): return", "self.custom_cli.get_position_objects( with_entry_at=False ) positions = self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now, ) long_positions =", "-prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if isinstance(self.exit_threshold, str): if \"*\" in self.exit_threshold: self.exit_bins = (", "predictions, now, ): # Set init to handle limit order self.assets_to_limit_order = []", "if self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, ) =", "PredictorV1 from database.usecase import Usecase from exchange.custom_client import CustomClient from .utils import nan_to_zero", "negative_assets=negative_assets, now=now, ) long_positions = [ position for position in positions if position.side", "CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage is", "// 60) >= 1: return True return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered", "= True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side == \"short\") and (position.asset", "= self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"] >= self.positive_probability_bins) ] negative_assets = self.tradable_coins[", "# Currently we accept False adjust_prediction assert self.adjust_prediction is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS", "= [ base_feature_asset.replace(\"-\", \"/\") for base_feature_asset in CFG.EXP_DATA_PARAMS[\"base_feature_assets\"] ] self.dataset_builder_params[\"asset_to_id\"] = { key.replace(\"-\",", "= CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\") self.min_holding_minutes = CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes", "it. 
if len(orders) >= 1: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\",", "position=position, entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start: demon of trader\") n_traded", "without second info now = pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if self.is_executable(last_sync_on=last_sync_on, now=now) is", "CFG.EXP_MODEL_PARAMS[\"lookback_window\"] - 1) ) query_end_on = last_sync_on if self.cached_pricing is None: pricing =", "# Handle relogin if n_traded == 60: self.custom_cli = CustomClient() n_traded = 0", "self.custom_cli.get_position_objects(with_entry_at=True) for position in positions: if self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset] = max(", "self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept only 0 assert", "is {CFG.LEVERAGE}\") self.min_holding_minutes = CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes = CFG.REPORT_PARAMS[\"max_holding_minutes\"] self.compound_interest = CFG.REPORT_PARAMS[\"compound_interest\"] self.order_criterion =", "CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage is {CFG.LEVERAGE}\") self.min_holding_minutes = CFG.REPORT_PARAMS[\"min_holding_minutes\"] self.max_holding_minutes =", "return True return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\",", "= cache_to_order * ( self.commission[\"entry\"] + self.commission[\"spread\"] ) return cache_to_order + commission_to_order def", "None: return False sync_min_delta = int((now - last_sync_on).total_seconds() // 60) if sync_min_delta ==", "(pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp):", "if opposite position exists, we dont entry if ( self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side", "positions = [ position for position in positions if position.is_exited is not True", "= self.custom_cli.get_position_objects( with_entry_at=False ) positions = self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now, ) long_positions", "exited positions positions = [ position for position in positions if position.is_exited is", "last_sync_on - pd.Timedelta( minutes=(1320 + CFG.EXP_MODEL_PARAMS[\"lookback_window\"] - 1) ) query_end_on = last_sync_on if", "demon of trader\") n_traded = 0 while True: # Handle relogin if n_traded", "last_sync_on: pd.Timestamp, now: pd.Timestamp): if last_sync_on is None: return False sync_min_delta = int((now", "prediction_abs_bins.loc[self.exit_threshold][index] if isinstance(self.positive_probability_threshold, str): if \"*\" in self.positive_probability_threshold: self.positive_probability_bins = ( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0])", "much use cache to order cache_dict = self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache =", "][index] if isinstance(self.negative_entry_threshold, str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins = -( prediction_abs_bins.loc[ 
int(self.negative_entry_threshold.split(\"*\")[0])", "def _build_inputs(self, features): features, base_features = build_X_and_BX( features=features.astype(\"float32\"), base_feature_assets=self.dataset_builder_params[\"base_feature_assets\"], ) inputs = []", "# Use timestamp without second info now = pd.Timestamp.utcnow().floor(\"T\") last_sync_on = self.usecase.get_last_sync_on() if", "# When already limit order exists, we skip it. if len(orders) >= 1:", "query_end_on = last_sync_on if self.cached_pricing is None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on )", "negative_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side ==", "assert self.adjust_prediction is False self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS # Set data", "self.achieved_with_commission is not True: commission[\"entry\"] = 0 commission[\"exit\"] = 0 commission[\"spread\"] = 0", "prevent api limitation def __post_init__(self): self.custom_cli = CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params()", "qty=qty, entry_price=entry_price, entry_at=now ) # Currently update_position_if_already_have is not supported. already_have = self.check_if_already_have(", "else: if self.order_criterion == \"cache\": if cache > 0: cache_to_order = nan_to_zero( value=(cache", "self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ] self.negative_probability_threshold = CFG.REPORT_PARAMS[ \"negative_probability_threshold\" ] self.adjust_prediction = CFG.REPORT_PARAMS[\"adjust_prediction\"]", "(position.asset in positive_assets): continue if (position.side == \"short\") and (position.asset in negative_assets): continue", "cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at()", "negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded +=", "position in positions: if position.asset not in self.assets_to_limit_order: continue assert position.entry_price != 0.0", "not None: prediction = predictions[position.asset] else: if position.side == \"long\": prediction = self.positive_entry_bins[position.asset]", "logger.info(f\"[O] Info: loaded last_entry_at\") else: self.last_entry_at = {key: None for key in self.tradable_coins}", ") query_end_on = last_sync_on if self.cached_pricing is None: pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on", "if position.side == \"long\": prediction = self.positive_entry_bins[position.asset] if position.side == \"short\": prediction =", "DatasetBuilder from trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass", "prediction = self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission is not True: commission[\"entry\"] =", "pred_dict def build_positive_and_negative_assets(self, pred_dict): # Set assets which has signals positive_assets = self.tradable_coins[", "- cost) >= 0) 
is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_cache", "inputs = [] for target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input", ") else: self.last_entry_at[position.asset] = position.entry_at def _initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in", "= True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue # Delete exited positions positions =", "short_positions = [ position for position in positions if position.side == \"short\" ]", "position, entry_price, predictions=None): if predictions is not None: prediction = predictions[position.asset] else: if", "assert prediction >= 0 price_to_achieve = ( entry_price * ( (prediction * self.achieve_ratio)", "Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})\" ) if self.compound_interest is False: cache_to_order", "ids = self._build_inputs(features=features) pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return pred_dict def", "in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if CFG.TEST_MODE is True: assert self.custom_cli.test_mode is True", "in positions if position.side == \"long\" ] short_positions = [ position for position", "for column in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [", "self._set_params() self._set_test_params() self._set_bins( prediction_abs_bins=self.prediction_abs_bins, probability_bins=self.probability_bins, index=self.tradable_coins, ) self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing =", "= CFG.REPORT_PARAMS[\"positive_entry_threshold\"] self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ]", "= self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict) # Handle exit positions =", "commission_to_order = cache_to_order * ( self.commission[\"entry\"] + self.commission[\"spread\"] ) return cache_to_order + commission_to_order", "self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side == \"short\")", "handle limit order self.assets_to_limit_order = [] # Entry order if self.position_side in (\"long\",", "matched if (position.side == \"long\") and (position.asset in positive_assets): continue if (position.side ==", "1: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position,", "assert CFG.TEST_MODE is True return def handle_exit(self, positions, positive_assets, negative_assets, now): for position_idx,", "else: self.last_entry_at = {key: None for key in self.tradable_coins} # Initialize positions =", "\"short\" if order_side == \"short\": opposite_side = \"long\" for exist_position in positions: if", 
"str): if \"*\" in self.negative_probability_threshold: self.negative_probability_bins = ( probability_bins.loc[ int(self.negative_probability_threshold.split(\"*\")[0]) ] * float(self.negative_probability_threshold.split(\"*\")[-1])", "commission = self.commission if self.achieved_with_commission is not True: commission[\"entry\"] = 0 commission[\"exit\"] =", "= CFG.REPORT_PARAMS[\"order_criterion\"] self.exit_if_achieved = CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated =", "self.negative_probability_threshold ][index] def _build_dataset_builder(self): feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"feature_scaler.pkl\")) label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder", "_set_test_params(self): if CFG.TEST_MODE is True: assert self.custom_cli.test_mode is True self.entry_ratio = 0.0001 def", "self.positive_entry_bins[position.asset] if position.side == \"short\": prediction = self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission", "= cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}),", "\"*\" in self.exit_threshold: self.exit_bins = ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])] * float(self.exit_threshold.split(\"*\")[-1]) )[index] else: self.exit_bins =", "self.negative_probability_bins = None if isinstance(self.positive_entry_threshold, str): if \"*\" in self.positive_entry_threshold: self.positive_entry_bins = (", "self._build_features(pricing=pricing) inputs, ids = self._build_inputs(features=features) pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return", "self.last_entry_at = {key: None for key in self.tradable_coins} # Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True)", "pricing = self.usecase.get_pricing( start_on=query_start_on, end_on=query_end_on ) else: # Get extra 1 candle, cause", "pricing=pricing, now=now, ) if self.position_side in (\"short\", \"longshort\"): for order_asset in negative_assets: self.entry_order(", "= ( prediction_abs_bins.loc[int(self.exit_threshold.split(\"*\")[0])] * float(self.exit_threshold.split(\"*\")[-1]) )[index] else: self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index] if isinstance(self.positive_probability_threshold, str):", "self.compute_cost_to_order(position=position) is_enough_cache = bool((cache - cost) >= 0) is_enough_ammount = bool( position.qty >=", "is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"] cost", "limit order exists, we skip it. 
if len(orders) >= 1: continue assert position.entry_price", "] return positions def check_if_opposite_position_exists(self, positions, order_asset, order_side): if order_side == \"long\": opposite_side", "= [] for target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input =", "_store_last_entry_at(self): joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH) def _load_last_entry_at(self): if os.path.exists(LAST_ENTRY_AT_FILE_PATH): self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded", "axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs = np.stack(inputs, axis=0) ids =", "entry_order(self, positions, asset, side, cache_to_order, pricing, now): if cache_to_order == 0: return #", "extra 1 candle, cause it has potential to be changed. pricing = self.usecase.get_pricing(", "self.exit_if_achieved = CFG.REPORT_PARAMS[\"exit_if_achieved\"] self.achieve_ratio = CFG.REPORT_PARAMS[\"achieve_ratio\"] self.achieved_with_commission = CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] #", "pricing, predictions, now, ): # Set init to handle limit order self.assets_to_limit_order =", "is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_cache & is_enough_ammount def compute_price_to_achieve(self,", ".utils import nan_to_zero from logging import getLogger from common_utils_svc import initialize_trader_logger, Position from", "X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"] ) return pred_dict def build_positive_and_negative_assets(self, pred_dict): # Set assets which", "if (exist_position.asset == order_asset) and ( exist_position.side == opposite_side ): return True return", "self.skip_executable_order_check is True: assert self.order_criterion == \"capital\" def _set_params(self): # Set params which", "0 # Main try: # Use timestamp without second info now = pd.Timestamp.utcnow().floor(\"T\")", ">= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"] cost = self.compute_cost_to_order(position=position) is_enough_cache =", "in positions: if self.last_entry_at[position.asset] is not None: self.last_entry_at[position.asset] = max( position.entry_at, self.last_entry_at[position.asset] )", "== position.side ): return True return False def check_if_executable_order(self, position): if self.skip_executable_order_check is", "features = self._build_features(pricing=pricing) inputs, ids = self._build_inputs(features=features) pred_dict = self.model.predict( X=inputs, id=ids, id_to_asset=self.dataset_builder_params[\"id_to_asset\"]", "(now - position_entry_at).total_seconds() // 60 # Handle min_holding_minutes if passed_minutes <= self.min_holding_minutes: continue", "pricing[asset] qty = cache_to_order / entry_price position = Position( asset=asset, side=side, qty=qty, entry_price=entry_price,", "[ self.cached_pricing[ query_start_on : self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index() self.cached_pricing = pricing pricing", "amount=position.qty, ) if ordered is None: return self.last_entry_at[position.asset] = now if self.exit_if_achieved is", "if self.compound_interest is False: cache_to_order = self.entry_ratio else: if self.order_criterion == \"cache\": if", "has potential to be changed. 
pricing = self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on ) pricing =", "+ commission[\"spread\"])) ) if position.side == \"short\": assert prediction <= 0 price_to_achieve =", "skip_executable_order_check = True # To prevent api limitation def __post_init__(self): self.custom_cli = CustomClient()", "position.is_exited is not True ] return positions def check_if_opposite_position_exists(self, positions, order_asset, order_side): if", "True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue if (position.side == \"short\") and (position.asset in", "= 0 elif self.order_criterion == \"capital\": # Entry with capital base cache_to_order =", "is True: assert self.custom_cli.test_mode is True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins,", "= 0 # Main try: # Use timestamp without second info now =", "self.is_executable(last_sync_on=last_sync_on, now=now) is True: pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict)", ">= self.positive_probability_bins) ] negative_assets = self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins)", "= \"short\" if order_side == \"short\": opposite_side = \"long\" for exist_position in positions:", "order_asset) and ( exist_position.side == opposite_side ): return True return False def compute_cost_to_order(self,", "if self.exit_if_achieved is True: self.assets_to_limit_order.append(position.asset) logger.info(f\"[+] Entry: {str(position)}\") def handle_entry( self, positions, cache_to_order,", "as np from dataclasses import dataclass from config import CFG from trainer.models import", "self.positive_entry_bins = None self.negative_entry_bins = None self.exit_bins = None self.positive_probability_bins = None self.negative_probability_bins", "( entry_price * ( (prediction * self.achieve_ratio) + 1 + (commission[\"entry\"] + commission[\"spread\"])", "in CFG.DATASET_PARAMS[\"features_columns\"] ] self.dataset_builder_params[\"winsorize_threshold\"] = CFG.DATASET_PARAMS[ \"winsorize_threshold\" ] self.dataset_builder_params[\"base_feature_assets\"] = [ base_feature_asset.replace(\"-\", \"/\")", "assets which has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins) & (pred_dict[\"probabilities\"] >=", "= self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >= self.negative_probability_bins) ] return positive_assets, negative_assets", "[] for target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values,", "= \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase = Usecase() possible_in_debt = False commission =", "entry_price = pricing[asset] qty = cache_to_order / entry_price position = Position( asset=asset, side=side,", "pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) # Record traded self.usecase.insert_trade({\"timestamp\": now}) self._store_last_entry_at() n_traded += 1", "Entry order if self.position_side in (\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions,", "compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty commission_to_order = cache_to_order * ( 
self.commission[\"entry\"]", "] # Compute how much use cache to order cache_dict = self.custom_cli.get_cache_dict() capital", "= position.entry_price * position.qty commission_to_order = cache_to_order * ( self.commission[\"entry\"] + self.commission[\"spread\"] )", "cache_to_order + commission_to_order def check_if_already_have(self, positions, position): for exist_position in positions: if (exist_position.asset", "trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1:", "order_side == \"long\": opposite_side = \"short\" if order_side == \"short\": opposite_side = \"long\"", "os.path.exists(LAST_ENTRY_AT_FILE_PATH): self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH) logger.info(f\"[O] Info: loaded last_entry_at\") else: self.last_entry_at = {key: None", "position.side == \"short\": assert prediction <= 0 price_to_achieve = ( entry_price * (", ") pricing = pd.concat( [ self.cached_pricing[ query_start_on : self.cached_pricing.index.levels[0][-2] ], pricing, ] ).sort_index()", "1: return True return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset,", "self.custom_cli.entry_order( symbol=position.asset, order_type=\"market\", position=position.side, amount=position.qty, ) if ordered is None: return self.last_entry_at[position.asset] =", "position in positions if position.side == \"short\" ] # Compute how much use", "1) ) query_end_on = last_sync_on if self.cached_pricing is None: pricing = self.usecase.get_pricing( start_on=query_start_on,", "- position_entry_at).total_seconds() // 60 # Handle min_holding_minutes if passed_minutes <= self.min_holding_minutes: continue #", "+ commission_to_order def check_if_already_have(self, positions, position): for exist_position in positions: if (exist_position.asset ==", "key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self): if CFG.TEST_MODE is True: assert self.custom_cli.test_mode", "== \"long\" ] short_positions = [ position for position in positions if position.side", "= {key: None for key in self.tradable_coins} # Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True) for", "initialized order books\") def _build_features(self, pricing): features, class_features = self.dataset_builder.build_features(rawdata=pricing) features = self.dataset_builder.preprocess_features(", "\"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self): self.model =", "price_to_achieve = ( entry_price * ( (prediction * self.achieve_ratio) + 1 - (commission[\"entry\"]", "self.negative_entry_threshold = CFG.REPORT_PARAMS[\"negative_entry_threshold\"] self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ] self.negative_probability_threshold =", "for position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order exists,", "to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs = np.stack(inputs, axis=0) ids = [", "CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] 
# Currently we accept only 0 assert self.max_n_updated ==", "in negative_assets): continue position_entry_at = self.last_entry_at[position.asset] passed_minutes = (now - position_entry_at).total_seconds() // 60", "api limitation def __post_init__(self): self.custom_cli = CustomClient() self.tradable_coins = pd.Index(self.custom_cli.tradable_coins) self._set_params() self._set_test_params() self._set_bins(", "usecase = Usecase() possible_in_debt = False commission = {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\":", "# Main try: # Use timestamp without second info now = pd.Timestamp.utcnow().floor(\"T\") last_sync_on", "] ).sort_index() self.cached_pricing = pricing pricing = pricing.unstack().swaplevel(0, 1, axis=1) features = self._build_features(pricing=pricing)", "CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept only 0 assert self.max_n_updated == 0 self.positive_entry_threshold =", "True return False def check_if_executable_order(self, position): if self.skip_executable_order_check is True: is_enough_ammount = bool(", "assert prediction <= 0 price_to_achieve = ( entry_price * ( (prediction * self.achieve_ratio)", "Delete exited positions positions = [ position for position in positions if position.is_exited", "pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, ) #", "= { value: key.replace(\"-\", \"/\") for key, value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } def _set_test_params(self):", "entry_price=position.entry_price ), ) logger.info(f\"[O] Info: initialized order books\") def _build_features(self, pricing): features, class_features", "& is_enough_ammount def compute_price_to_achieve(self, position, entry_price, predictions=None): if predictions is not None: prediction", "pd.Timedelta( minutes=(1320 + CFG.EXP_MODEL_PARAMS[\"lookback_window\"] - 1) ) query_end_on = last_sync_on if self.cached_pricing is", "positions: if position.asset not in self.assets_to_limit_order: continue assert position.entry_price != 0.0 self.custom_cli.exit_order( symbol=position.asset,", "== \"short\": prediction = self.negative_entry_bins[position.asset] commission = self.commission if self.achieved_with_commission is not True:", "pricing, now): if cache_to_order == 0: return # if opposite position exists, we", "key in self.tradable_coins} # Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True) for position in positions: if", "= CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O] Info: leverage", "self.custom_cli.test_mode is True self.entry_ratio = 0.0001 def _set_bins(self, prediction_abs_bins, probability_bins, index): assert (prediction_abs_bins", "positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already limit order exists, we skip it.", "False commission = {\"entry\": 0.0004, \"exit\": 0.0002, \"spread\": 0.0004} skip_executable_order_check = True #", "] return inputs, ids def build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on - pd.Timedelta( minutes=(1320", "features = self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"], ) return pd.concat([features, class_features], axis=1)[ 
self.dataset_builder_params[\"features_columns\"] ].sort_index() def", "= self.last_entry_at[position.asset] passed_minutes = (now - position_entry_at).total_seconds() // 60 # Handle min_holding_minutes if", "else: self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str): if \"*\" in self.negative_probability_threshold:", "from trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class", "self.min_holding_minutes: continue # Handle max_holding_minutes if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited = True", "_build_model(self): self.model = PredictorV1( exp_dir=CFG.EXP_DIR, m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", ) def _store_last_entry_at(self): joblib.dump(self.last_entry_at,", "position.entry_at, self.last_entry_at[position.asset] ) else: self.last_entry_at[position.asset] = position.entry_at def _initialize_order_books(self): positions = self.custom_cli.get_position_objects(with_entry_at=False) for", "(prediction * self.achieve_ratio) + 1 - (commission[\"entry\"] + commission[\"spread\"]) ) / (1 +", "{str(position)}, opposite\") continue if (position.side == \"short\") and (position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited", "value in CFG.EXP_PARAMS[\"asset_to_id\"].items() } self.dataset_builder_params[\"id_to_asset\"] = { value: key.replace(\"-\", \"/\") for key, value", "else: time.sleep(0.1) except Exception as e: logger.error(\"[!] Error: \", exc_info=True) raise Exception if", "): # Set init to handle limit order self.assets_to_limit_order = [] # Entry", "build_prediction_dict(self, last_sync_on): query_start_on = last_sync_on - pd.Timedelta( minutes=(1320 + CFG.EXP_MODEL_PARAMS[\"lookback_window\"] - 1) )", "True return False def exit_order(self, position): self.custom_cli.cancel_orders(symbol=position.asset) ordered = self.custom_cli.exit_order( symbol=position.asset, order_type=\"market\", position=position.side,", "<= self.min_holding_minutes: continue # Handle max_holding_minutes if passed_minutes >= self.max_holding_minutes: self.exit_order(position=position) positions[position_idx].is_exited =", "0.0 self.custom_cli.exit_order( symbol=position.asset, order_type=\"limit\", position=position.side, amount=position.qty, price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), ) def", "= prediction_abs_bins.loc[ self.positive_entry_threshold ][index] if isinstance(self.negative_entry_threshold, str): if \"*\" in self.negative_entry_threshold: self.negative_entry_bins =", "entry_price * ( (prediction * self.achieve_ratio) + 1 - (commission[\"entry\"] + commission[\"spread\"]) )", "# Limit order if len(self.assets_to_limit_order) > 0: positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in", "self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS self.probability_bins = CFG.PROBABILITY_BINS # Set data builder params self.dataset_builder_params =", ") long_positions = [ position for position in positions if position.side == \"long\"", "for order_asset in negative_assets: self.entry_order( positions=positions, asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing, 
now=now, ) #", "commission[\"entry\"] = 0 commission[\"exit\"] = 0 commission[\"spread\"] = 0 if position.side == \"long\":", "CustomClient() n_traded = 0 # Main try: # Use timestamp without second info", "now): for position_idx, position in enumerate(positions): # Keep position if matched if (position.side", "self.commission if self.achieved_with_commission is not True: commission[\"entry\"] = 0 commission[\"exit\"] = 0 commission[\"spread\"]", "price=self.compute_price_to_achieve( position=position, entry_price=position.entry_price, predictions=predictions, ), ) def run(self): logger.info(f\"[O] Start: demon of trader\")", "is None: assert CFG.TEST_MODE is True return def handle_exit(self, positions, positive_assets, negative_assets, now):", "bool((cache - cost) >= 0) is_enough_ammount = bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return", "cache_to_order * ( self.commission[\"entry\"] + self.commission[\"spread\"] ) return cache_to_order + commission_to_order def check_if_already_have(self,", "(1 - (commission[\"exit\"] + commission[\"spread\"])) ) if position.side == \"short\": assert prediction <=", "1 - (commission[\"entry\"] + commission[\"spread\"]) ) / (1 + (commission[\"exit\"] + commission[\"spread\"])) )", "books\") def _build_features(self, pricing): features, class_features = self.dataset_builder.build_features(rawdata=pricing) features = self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"],", "order if self.position_side in (\"long\", \"longshort\"): for order_asset in positive_assets: self.entry_order( positions=positions, asset=order_asset,", "capital = cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}),", "from dataset_builder.build_dataset import DatasetBuilder from trainer.datasets.dataset import build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH", "already_have is True: self.last_entry_at[position.asset] = now return executable_order = self.check_if_executable_order(position=position) if executable_order is", "[ self.dataset_builder_params[\"asset_to_id\"][target_coin] for target_coin in self.tradable_coins ] return inputs, ids def build_prediction_dict(self, last_sync_on):", "import gc import time import ccxt import requests import urllib3 import joblib import", "import PredictorV1 from database.usecase import Usecase from exchange.custom_client import CustomClient from .utils import", "min_holding_minutes if passed_minutes <= self.min_holding_minutes: continue # Handle max_holding_minutes if passed_minutes >= self.max_holding_minutes:", ") self._build_dataset_builder() self._build_model() self._load_last_entry_at() self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check is True: assert", ") inputs = [] for target_coin in self.tradable_coins: to_input = pd.concat([base_features, features[target_coin]], axis=1)", "else: self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index] if isinstance(self.positive_probability_threshold, str): if \"*\" in self.positive_probability_threshold: self.positive_probability_bins =", "positions = self.custom_cli.get_position_objects(with_entry_at=False) for position in positions: orders = self.custom_cli.get_open_orders(symbol=position.asset) # When already", "and ( exist_position.side == position.side ): return True return False def 
check_if_executable_order(self, position):", "self._initialize_order_books() self.cached_pricing = None if self.skip_executable_order_check is True: assert self.order_criterion == \"capital\" def", "in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited = True logger.info(f\"[-] Exit: {str(position)}, opposite\") continue # Delete", "= self.commission if self.achieved_with_commission is not True: commission[\"entry\"] = 0 commission[\"exit\"] = 0", "check_if_opposite_position_exists(self, positions, order_asset, order_side): if order_side == \"long\": opposite_side = \"short\" if order_side", "positions positions = [ position for position in positions if position.is_exited is not", "order books\") def _build_features(self, pricing): features, class_features = self.dataset_builder.build_features(rawdata=pricing) features = self.dataset_builder.preprocess_features( features=features,", "if position.side == \"long\": assert prediction >= 0 price_to_achieve = ( entry_price *", "handle_exit(self, positions, positive_assets, negative_assets, now): for position_idx, position in enumerate(positions): # Keep position", "nan_to_zero from logging import getLogger from common_utils_svc import initialize_trader_logger, Position from dataset_builder.build_dataset import", "0, 1) inputs.append(to_input) inputs = np.stack(inputs, axis=0) ids = [ self.dataset_builder_params[\"asset_to_id\"][target_coin] for target_coin", "entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"], now=now, )", "self, positions, cache_to_order, positive_assets, negative_assets, pricing, predictions, now, ): # Set init to", "_build_features(self, pricing): features, class_features = self.dataset_builder.build_features(rawdata=pricing) features = self.dataset_builder.preprocess_features( features=features, winsorize_threshold=self.dataset_builder_params[\"winsorize_threshold\"], ) return", "exit signal if (position.side == \"long\") and (position.asset in negative_assets): self.exit_order(position=position) positions[position_idx].is_exited =", "self.custom_cli = CustomClient() n_traded = 0 # Main try: # Use timestamp without", "None self.exit_bins = None self.positive_probability_bins = None self.negative_probability_bins = None if isinstance(self.positive_entry_threshold, str):", "Info: initialized order books\") def _build_features(self, pricing): features, class_features = self.dataset_builder.build_features(rawdata=pricing) features =", "be changed. 
pricing = self.usecase.get_pricing( start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on ) pricing = pd.concat( [ self.cached_pricing[", "positions if position.is_exited is not True ] return positions def check_if_opposite_position_exists(self, positions, order_asset,", ")[index] else: self.negative_entry_bins = -prediction_abs_bins.loc[ self.negative_entry_threshold ][index] if isinstance(self.exit_threshold, str): if \"*\" in", "positions = self.custom_cli.get_position_objects( with_entry_at=False ) positions = self.handle_exit( positions=positions, positive_assets=positive_assets, negative_assets=negative_assets, now=now, )", "pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on) ( positive_assets, negative_assets, ) = self.build_positive_and_negative_assets(pred_dict=pred_dict) # Handle exit positions", "self.tradable_coins} # Initialize positions = self.custom_cli.get_position_objects(with_entry_at=True) for position in positions: if self.last_entry_at[position.asset] is", "def handle_exit(self, positions, positive_assets, negative_assets, now): for position_idx, position in enumerate(positions): # Keep", "= None self.positive_probability_bins = None self.negative_probability_bins = None if isinstance(self.positive_entry_threshold, str): if \"*\"", "position): for exist_position in positions: if (exist_position.asset == position.asset) and ( exist_position.side ==", "): return True return False def compute_cost_to_order(self, position): cache_to_order = position.entry_price * position.qty", "build_X_and_BX logger = getLogger(\"trader\") initialize_trader_logger() LAST_ENTRY_AT_FILE_PATH = \"/app/storage/trader/last_entry_at.pkl\" @dataclass class TraderV1: usecase =", "entry if ( self.check_if_opposite_position_exists( positions=positions, order_asset=asset, order_side=side ) is True ): return entry_price", "m_config=CFG.EXP_MODEL_PARAMS, d_config=CFG.EXP_DATA_PARAMS, device=\"cpu\", mode=\"predict\", ) def _store_last_entry_at(self): joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH) def _load_last_entry_at(self): if os.path.exists(LAST_ENTRY_AT_FILE_PATH):", "self.entry_order( positions=positions, asset=order_asset, side=\"short\", cache_to_order=cache_to_order, pricing=pricing, now=now, ) # Limit order if len(self.assets_to_limit_order)", "\"*\" in self.positive_probability_threshold: self.positive_probability_bins = ( probability_bins.loc[ int(self.positive_probability_threshold.split(\"*\")[0]) ] * float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else:", "= CFG.REPORT_PARAMS[\"achieved_with_commission\"] self.max_n_updated = CFG.REPORT_PARAMS[\"max_n_updated\"] # Currently we accept only 0 assert self.max_n_updated", "int(self.negative_probability_threshold.split(\"*\")[0]) ] * float(self.negative_probability_threshold.split(\"*\")[-1]) )[index] else: self.negative_probability_bins = probability_bins.loc[ self.negative_probability_threshold ][index] def _build_dataset_builder(self):", "\"short\" ] # Compute how much use cache to order cache_dict = self.custom_cli.get_cache_dict()", "# Handle entry pricing = self.custom_cli.get_last_pricing() self.handle_entry( positions=positions, cache_to_order=cache_to_order, positive_assets=positive_assets, negative_assets=negative_assets, pricing=pricing, predictions=pred_dict[\"predictions\"],", "self.adjust_prediction = CFG.REPORT_PARAMS[\"adjust_prediction\"] # Currently we accept False adjust_prediction assert self.adjust_prediction is False", "position in positions if 
position.is_exited is not True ] return positions def check_if_opposite_position_exists(self,", "pred_dict): # Set assets which has signals positive_assets = self.tradable_coins[ (pred_dict[\"predictions\"] >= self.positive_entry_bins)", "opposite\") continue if (position.side == \"short\") and (position.asset in positive_assets): self.exit_order(position=position) positions[position_idx].is_exited =", "0 commission[\"spread\"] = 0 if position.side == \"long\": assert prediction >= 0 price_to_achieve", "self.exit_threshold = CFG.REPORT_PARAMS[\"exit_threshold\"] self.positive_probability_threshold = CFG.REPORT_PARAMS[ \"positive_probability_threshold\" ] self.negative_probability_threshold = CFG.REPORT_PARAMS[ \"negative_probability_threshold\" ]", "= joblib.load(os.path.join(CFG.EXP_DIR, \"label_scaler.pkl\")) self.dataset_builder = DatasetBuilder( tradable_coins=self.tradable_coins, features_columns=self.dataset_builder_params[\"features_columns\"], feature_scaler=feature_scaler, label_scaler=label_scaler, ) def _build_model(self):", "return price_to_achieve def entry_order(self, positions, asset, side, cache_to_order, pricing, now): if cache_to_order ==", "(pred_dict[\"probabilities\"] >= self.positive_probability_bins) ] negative_assets = self.tradable_coins[ (pred_dict[\"predictions\"] <= self.negative_entry_bins) & (pred_dict[\"probabilities\"] >=", "compute_price_to_achieve(self, position, entry_price, predictions=None): if predictions is not None: prediction = predictions[position.asset] else:", "index): assert (prediction_abs_bins >= 0).all().all() assert (probability_bins >= 0).all().all() self.positive_entry_bins = None self.negative_entry_bins", "logic self.base_currency = CFG.REPORT_PARAMS[\"base_currency\"] self.position_side = CFG.REPORT_PARAMS[\"position_side\"] self.entry_ratio = CFG.REPORT_PARAMS[\"entry_ratio\"] * CFG.LEVERAGE logger.info(f\"[O]", "= bool( position.qty >= self.custom_cli.ammount_constraints[position.asset] ) return is_enough_ammount cache = self.custom_cli.get_cache_dict()[\"free\"] cost =", "self.custom_cli.get_cache_dict() capital = cache_dict[\"total\"] cache = cache_dict[\"free\"] logger.info( f\"[_] Capital: {capital:.2f}$ | Holds:", "pd.concat([base_features, features[target_coin]], axis=1) to_input = np.swapaxes(to_input.values, 0, 1) inputs.append(to_input) inputs = np.stack(inputs, axis=0)", "* float(self.positive_probability_threshold.split(\"*\")[-1]) )[index] else: self.positive_probability_bins = probability_bins.loc[ self.positive_probability_threshold ][index] if isinstance(self.negative_probability_threshold, str): if", "True: self.last_entry_at[position.asset] = now return executable_order = self.check_if_executable_order(position=position) if executable_order is True: ordered", "= None self.exit_bins = None self.positive_probability_bins = None self.negative_probability_bins = None if isinstance(self.positive_entry_threshold,", "import CFG from trainer.models import PredictorV1 from database.usecase import Usecase from exchange.custom_client import" ]
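Each row in this dump stores that kind of overlapping word-level window list over one source file. As a rough illustration only (the window length of roughly a dozen words and the whitespace tokenization are assumptions here, not stated anywhere in the dump), such a list could be produced like this:

import re

def word_ngrams(text, n=12):
    # Overlapping n-word windows over `text`; n and the whitespace
    # tokenization are illustrative assumptions, not taken from the dump.
    words = re.split(r"\s+", text.strip())
    if not words or words == [""]:
        return []
    if len(words) <= n:
        return [" ".join(words)]
    return [" ".join(words[i:i + n]) for i in range(len(words) - n + 1)]

# Tiny usage example with 3-word windows:
print(word_ngrams("a b c d e", n=3))   # ['a b c', 'b c d', 'c d e']

Because consecutive windows overlap in all but one word, the underlying source text of a row can in principle be stitched back together from its entries.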
[ "-*- \"\"\"Tests for Truthcoin's consensus functions. Verifies that the consensus algorithm works as", "os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase):", "scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2,", "import numpy.ma as ma if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else: import", "0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del self.votes if __name__ == \"__main__\": suite", "self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def", "unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o): print(json.dumps(outcome, indent=3,", "0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1},", "[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0,", "consensus algorithm works as expected. Check test_answers.txt for expected results. \"\"\" from __future__", "consensus functions. Verifies that the consensus algorithm works as expected. Check test_answers.txt for", "0, 1, 1], [0, 0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def", "0, \"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\": 1}, ] outcome = consensus.Factory(self.votes,", "outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True,", "0, 0], [1, 1, 1, 0], [0, 0, 1, 1], [0, 0, 1,", "< \"2.7\": unittest = __import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE,", "]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11)", "[ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7},", "import numpy as np import numpy.ma as ma if platform.python_version() < \"2.7\": unittest", "0, \"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self):", "Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del self.votes if __name__ ==", "# -*- coding: utf-8 -*- \"\"\"Tests for Truthcoin's consensus functions. Verifies that the", "\"2.7\": unittest = __import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir))", "for Truthcoin's consensus functions. Verifies that the consensus algorithm works as expected. Check", "{\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\":", "as expected. Check test_answers.txt for expected results. 
\"\"\" from __future__ import division, unicode_literals,", "0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 1, 1],", "self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1, \"max\":", "False, \"min\": 0, \"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11)", "import platform import json import numpy as np import numpy.ma as ma if", "\"min\": 0, \"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def", "1, 1, 0], [0, 0, 1, 1], [0, 0, 1, 1], ]) self.votes", "= consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del self.votes if", "__future__ import division, unicode_literals, absolute_import import os import sys import platform import json", "\"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\":", "functions. Verifies that the consensus algorithm works as expected. Check test_answers.txt for expected", "import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o): print(json.dumps(outcome,", "= np.array([ [1, 1, 0, 0], [1, 0, 0, 0], [1, 1, 0,", "unicode_literals, absolute_import import os import sys import platform import json import numpy as", "places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\":", "works as expected. Check test_answers.txt for expected results. \"\"\" from __future__ import division,", "True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False,", "algorithm works as expected. Check test_answers.txt for expected results. \"\"\" from __future__ import", "0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\": 1},", "ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params", "import division, unicode_literals, absolute_import import os import sys import platform import json import", "0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0,", "sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def", "\"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del", "1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"],", "= [ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\":", "else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o):", "-*- coding: utf-8 -*- \"\"\"Tests for Truthcoin's consensus functions. 
Verifies that the consensus", "\"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params)", "1, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1,", "expected results. \"\"\" from __future__ import division, unicode_literals, absolute_import import os import sys", "python # -*- coding: utf-8 -*- \"\"\"Tests for Truthcoin's consensus functions. Verifies that", "{\"scaled\": False, \"min\": 0, \"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804,", "numpy.ma as ma if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else: import unittest", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"Tests for Truthcoin's consensus functions. Verifies", "0], [1, 1, 1, 0], [0, 0, 1, 1], [0, 0, 1, 1],", "results. \"\"\" from __future__ import division, unicode_literals, absolute_import import os import sys import", "1, 0, 0], [1, 1, 1, 0], [0, 0, 1, 1], [0, 0,", "[1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 1, 1], [0,", "1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked", "setUp(self): self.votes_unmasked = np.array([ [1, 1, 0, 0], [1, 0, 0, 0], [1,", "tearDown(self): del self.votes_unmasked del self.votes if __name__ == \"__main__\": suite = unittest.TestLoader().loadTestsFromTestCase(TestConsensus) unittest.TextTestRunner(verbosity=2).run(suite)", "consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del self.votes if __name__", "numpy as np import numpy.ma as ma if platform.python_version() < \"2.7\": unittest =", "\"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\": 1}, ] outcome =", "self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del self.votes if __name__ == \"__main__\":", "{\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\":", "\"\"\" from __future__ import division, unicode_literals, absolute_import import os import sys import platform", "def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1,", "os.pardir)) import consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked", "import sys import platform import json import numpy as np import numpy.ma as", "np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params =", "[0, 0, 1, 1], [0, 0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked))", "1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613,", "json import numpy as np import numpy.ma as ma if platform.python_version() < \"2.7\":", "= os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o): 
print(json.dumps(outcome, indent=3, sort_keys=True)) class", "] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del", "0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],", "from __future__ import division, unicode_literals, absolute_import import os import sys import platform import", "prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1, 1,", "sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1, 1, 0, 0], [1,", "1], [0, 0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome", "HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True))", "utf-8 -*- \"\"\"Tests for Truthcoin's consensus functions. Verifies that the consensus algorithm works", "[1, 1, 1, 0], [0, 0, 1, 1], [0, 0, 1, 1], ])", "absolute_import import os import sys import platform import json import numpy as np", "def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True,", "0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes)", "indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1, 1, 0, 0],", "outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"], 0.618113325804, places=11) def tearDown(self): del self.votes_unmasked del self.votes", "platform import json import numpy as np import numpy.ma as ma if platform.python_version()", "0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0,", "0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5},", "that the consensus algorithm works as expected. Check test_answers.txt for expected results. \"\"\"", "os import sys import platform import json import numpy as np import numpy.ma", "division, unicode_literals, absolute_import import os import sys import platform import json import numpy", "ma if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else: import unittest HERE =", "unittest = __import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import", "True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\": False,", "\"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\":", "sys import platform import json import numpy as np import numpy.ma as ma", "coding: utf-8 -*- \"\"\"Tests for Truthcoin's consensus functions. Verifies that the consensus algorithm", "Truthcoin's consensus functions. Verifies that the consensus algorithm works as expected. 
Check test_answers.txt", "1, 1], [0, 0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self):", "def tearDown(self): del self.votes_unmasked del self.votes if __name__ == \"__main__\": suite = unittest.TestLoader().loadTestsFromTestCase(TestConsensus)", "0, 0], [1, 1, 0, 0], [1, 1, 1, 0], [0, 0, 1,", "import consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked =", "test_answers.txt for expected results. \"\"\" from __future__ import division, unicode_literals, absolute_import import os", "the consensus algorithm works as expected. Check test_answers.txt for expected results. \"\"\" from", "self.votes_unmasked = np.array([ [1, 1, 0, 0], [1, 0, 0, 0], [1, 1,", "\"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\":", "print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1, 1, 0,", "class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1, 1, 0, 0], [1, 0,", "as ma if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else: import unittest HERE", "def setUp(self): self.votes_unmasked = np.array([ [1, 1, 0, 0], [1, 0, 0, 0],", "[1, 1, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1,", "False, \"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\": 1}, ] outcome", "expected. Check test_answers.txt for expected results. \"\"\" from __future__ import division, unicode_literals, absolute_import", "platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0,", "= ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self):", "1}, {\"scaled\": False, \"min\": 0, \"max\": 1}, ] outcome = consensus.Factory(self.votes, Scales=scalar_decision_params) self.assertAlmostEquals(outcome[\"Certainty\"],", "1, 0], [0, 0, 1, 1], [0, 0, 1, 1], ]) self.votes =", "0], [0, 0, 1, 1], [0, 0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked,", "consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1,", "Check test_answers.txt for expected results. 
\"\"\" from __future__ import division, unicode_literals, absolute_import import", "0, 0], [1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1,", "places=11) def tearDown(self): del self.votes_unmasked del self.votes if __name__ == \"__main__\": suite =", "__import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus def", "os.path.join(HERE, os.pardir)) import consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self):", "{\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\": 0, \"max\": 1}, ]", "consensus def prp(o): print(json.dumps(outcome, indent=3, sort_keys=True)) class TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([", "test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\":", "Verifies that the consensus algorithm works as expected. Check test_answers.txt for expected results.", "\"min\": 0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\":", "0.2, \"max\": 0.7}, {\"scaled\": False, \"min\": 0, \"max\": 1}, {\"scaled\": False, \"min\": 0,", "= consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\":", "\"\"\"Tests for Truthcoin's consensus functions. Verifies that the consensus algorithm works as expected.", "as np import numpy.ma as ma if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\")", "for expected results. \"\"\" from __future__ import division, unicode_literals, absolute_import import os import", "def test_Factory(self): outcome = consensus.Factory(self.votes) self.assertAlmostEquals(outcome[\"Certainty\"], 0.228237569613, places=11) def test_Factory_scaled(self): scalar_decision_params = [", "import json import numpy as np import numpy.ma as ma if platform.python_version() <", "TestConsensus(unittest.TestCase): def setUp(self): self.votes_unmasked = np.array([ [1, 1, 0, 0], [1, 0, 0,", "import os import sys import platform import json import numpy as np import", "= __import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(HERE, os.pardir)) import consensus", "[0, 0, 1, 1], ]) self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked)) def test_Factory(self): outcome =", "np import numpy.ma as ma if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else:", "if platform.python_version() < \"2.7\": unittest = __import__(\"unittest2\") else: import unittest HERE = os.path.dirname(os.path.realpath(__file__))", "test_Factory_scaled(self): scalar_decision_params = [ {\"scaled\": True, \"min\": 0.1, \"max\": 0.5}, {\"scaled\": True, \"min\":", "np.array([ [1, 1, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0]," ]
[ "\"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i = InterestFactory() i.save()", "\"\"\" We can search interests \"\"\" Interest.objects.delete() for i in range(3): interest =", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for", "for moderator response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id':", "prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data,", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "\"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u", "interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data =", "data in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def", "data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self):", "= LogEntry.objects.count() i = InterestFactory() i.save() auth_url = prepare_url('login') data = { 'username':", "for non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data", "url = prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': True,", "u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data =", "\"\"\" we can update interest \"\"\" cities = map(lambda x: str(CityFactory().id), range(3)) interest", "def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for non-staff users \"\"\" u =", "self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can activate interest through API \"\"\" u", "url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']),", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertFalse(i.is_active)", "\"\"\" Resourse is available with authentication only and for staff \"\"\" u =", "log_n = 
LogEntry.objects.count() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>'", "with api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) inter =", "= self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self):", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None)", "self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available with authentication only", "\"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u", "prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities': [],", "staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = {", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest", "self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for non-staff users \"\"\"", "self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can", "token = response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "= User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role", "= self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2)", "else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that we can get only children", "u.role = User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{}", "for data in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0)", "data=data, format='json') token = response.data['token'] i = 
InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)})", "self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can update interest \"\"\" cities = map(lambda", "str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that we can", "format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that we", "self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=True) url", "'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def", "self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root u.role = User.ROOT u.save()", "status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search interests \"\"\" Interest.objects.delete() for i in", "self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we", "response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest name', 'parent_id':", "i = InterestFactory() i.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password':", "name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can update interest \"\"\" cities =", "data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok", "kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i =", "UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data = { 'title':", "InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url =", "inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data =", "= Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list')", "( UserFactory, InterestFactory, 
CityFactory, ) from happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self):", "only and for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login')", "Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n)", "data = { 'username': u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json')", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self):", "self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that we can get only", "token = response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json')", "\"\"\" we can create interest \"\"\" n = Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data =", "'is_global': False, 'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "= self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role = User.ADMINISTRATOR", "'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can delete interest \"\"\" u", "token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json')", "rest_framework import status from rest_framework.test import APISimpleTestCase from rest_framework_jwt.settings import api_settings from happ.models", "token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest", "we can get only children with api \"\"\" Interest.objects.delete() for i in range(3):", "u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data = { 'username': u.username,", "self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that we can get", "InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data", "self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, 
status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def", "= self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search interests \"\"\"", "\"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username':", "prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\"", "name', 'parent_id': None, 'is_global': True, 'local_cities': [], } auth_url = prepare_url('login') data =", "we can get only categories with api \"\"\" Interest.objects.delete() for i in range(3):", "= InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url,", "APISimpleTestCase from rest_framework_jwt.settings import api_settings from happ.models import User, Interest, LogEntry from happ.factories", "= UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password':", "test_activate(self): \"\"\" we can activate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id)", "token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)", "'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url =", "prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'],", "format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can activate", "Ensure that we can get only children with api \"\"\" Interest.objects.delete() for i", "format='json') token = response.data['token'] i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{}", "n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can 
update interest", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)})", "we can activate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n", "import status from rest_framework.test import APISimpleTestCase from rest_framework_jwt.settings import api_settings from happ.models import", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id':", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertFalse(i.is_active) self.assertEqual(LogEntry.objects.count(),", "= User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "i in range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "= prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } # restricted for", "\"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username':", "1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that we can get only", "self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can", "str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def test_deactivate(self): \"\"\" we can deactivate interest through API \"\"\" u = UserFactory(role=User.MODERATOR)", "def test_get_without_auth(self): \"\"\" Resourse is not available without authentication \"\"\" url = prepare_url('admin-interests-list')", "happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not available without", "for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data =", "{ 'title': 'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities': cities, } n =", "prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': False,", "response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\"", "url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def", "= self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) 
self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure", "Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u =", "data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = {", "format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role = User.ADMINISTRATOR u.save() response =", "response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)", "i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "False, 'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url,", "= prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is", "\"\"\" we can delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n =", "LogEntry.objects.count() i = InterestFactory() i.save() auth_url = prepare_url('login') data = { 'username': u.username,", "url = prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse", "self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role = User.ADMINISTRATOR u.save()", "'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i =", "token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertFalse(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1)", "can deactivate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n =", "status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1)", "= prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3)", "format='json') token = response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "name', 'parent_id': None, 'is_global': False, 'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} 
{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{}", "response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role =", "Ensure that we can get only categories with api \"\"\" Interest.objects.delete() for i", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list',", "token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "can update interest \"\"\" cities = map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory()", "interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url =", "self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can create interest", "'username': u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token']", "prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not", "response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if", "{ 'username': u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token =", "prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json')", "= response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url,", "= prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url,", "for administrator u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, format='json') token =", "self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) #", "range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url", "None) def test_activate(self): \"\"\" we can activate interest through API \"\"\" u =", "1) 
self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can activate interest through API \"\"\"", "happ.factories import ( UserFactory, InterestFactory, CityFactory, ) from happ.tests import * class Tests(APISimpleTestCase):", "LogEntry.objects.count() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } response", "is not available without authentication \"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url, format='json')", "administrator u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token']", "token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available", "from rest_framework_jwt.settings import api_settings from happ.models import User, Interest, LogEntry from happ.factories import", "interest \"\"\" cities = map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory() u =", "interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data", "with authentication only and for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url", "data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\"", "u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i = InterestFactory() i.save() auth_url", "status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can", "map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n", "= response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code,", "InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = {", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n", "can activate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n =", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can activate interest", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=False) url = 
prepare_url('admin-interests-activate',", "data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json')", "self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\"", "\"\"\" Resourse is not available for non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>')", "status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available with authentication only and for staff", "interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i = InterestFactory()", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=False) url", "response.data['token'] i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "restricted for moderator response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "only children with api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None)", "self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if data['id'] ==", "response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1)", "User, Interest, LogEntry from happ.factories import ( UserFactory, InterestFactory, CityFactory, ) from happ.tests", "= UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i = InterestFactory() i.save() auth_url =", "prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } # restricted for moderator", "def test_delete_interest(self): \"\"\" we can delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self):", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def", "None, 'is_global': False, 'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} 
{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'],", "for root u.role = User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json') token =", "= prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1)", "interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url =", "authentication only and for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url =", "i.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } response", "get only categories with api \"\"\" Interest.objects.delete() for i in range(3): interest =", "we can deactivate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n", "} # restricted for moderator response = self.client.post(auth_url, data=data, format='json') token = response.data['token']", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id':", "data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role = User.ADMINISTRATOR u.save() response", "test_deactivate(self): \"\"\" we can deactivate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "x: str(CityFactory().id), range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n =", "n = Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url =", "} auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } #", "token = response.data['token'] i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\"", "= prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self):", "data = { 'username': u.username, 'password': '<PASSWORD>' } # restricted for moderator response", "\"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def 
test_get_with_auth_not_staff(self): \"\"\"", "i in range(3): interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u =", "from rest_framework import status from rest_framework.test import APISimpleTestCase from rest_framework_jwt.settings import api_settings from", "{ 'title': 'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities': [], } auth_url =", ") from happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not", "search interests \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(title='Hockey') interest.save() interest", "data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities': cities, }", "\"\"\" Ensure that we can get only children with api \"\"\" Interest.objects.delete() for", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\"", "= { 'title': 'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities': [], } auth_url", "interest_data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities': [], }", "'is_global': True, 'local_cities': [], } auth_url = prepare_url('login') data = { 'username': u.username,", "authentication \"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self):", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'],", "= prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url,", "is not available for non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url", "token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name')", "Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data", "def test_search_interests(self): \"\"\" We can search interests \"\"\" Interest.objects.delete() for i in range(3):", "url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code,", "ok for administrator u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, format='json') token", "api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest)", "0) def test_get_children(self): \"\"\" Ensure that we can get only children with api", 
"import ( UserFactory, InterestFactory, CityFactory, ) from happ.tests import * class Tests(APISimpleTestCase): def", "3) for data in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']),", "True, 'local_cities': [], } auth_url = prepare_url('login') data = { 'username': u.username, 'password':", "without authentication \"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def", "format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search interests \"\"\" Interest.objects.delete() for", "self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can deactivate interest through API \"\"\" u", "in range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url =", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-children')", "# ok for administrator u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, format='json')", "= response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) #", "range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url", "'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{}", "LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global':", "test_get_categories(self): \"\"\" Ensure that we can get only categories with api \"\"\" Interest.objects.delete()", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest name', 'parent_id': None,", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(),", "cities = map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data = { 'username':", "= prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global':", "interest \"\"\" n = Interest.objects.count() 
u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count()", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{}", "token)) response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name')", "available for non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login')", "for i in range(3): interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u", "moderator response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i", "we can delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count()", "url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code,", "# ok for root u.role = User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json')", "UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i = InterestFactory() i.save() auth_url = prepare_url('login')", "u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest", "Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can deactivate interest through API", "in range(3): interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR)", "[], } auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' }", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def", "None, 'is_global': True, 'local_cities': [], } auth_url = prepare_url('login') data = { 'username':", "LogEntry from happ.factories import ( UserFactory, InterestFactory, CityFactory, ) from happ.tests import *", "response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\"", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, 
status.HTTP_200_OK) def test_search_interests(self): \"\"\" We", "response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)", "root u.role = User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token']", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "is available with authentication only and for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-categories')", "data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count()", "from happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not available", "test_get_children(self): \"\"\" Ensure that we can get only children with api \"\"\" Interest.objects.delete()", "u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i = InterestFactory() i.save() auth_url = prepare_url('login') data", "delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i =", "u.username, 'password': '<PASSWORD>' } # restricted for moderator response = self.client.post(auth_url, data=data, format='json')", "u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i", "response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available", "self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root u.role = User.ROOT u.save() response = self.client.post(auth_url,", "get only children with api \"\"\" Interest.objects.delete() for i in range(3): interest =", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator", "n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that we can get only categories", "that we can get only categories with api \"\"\" Interest.objects.delete() for i in", "UserFactory, InterestFactory, CityFactory, ) from happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\"", "= response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(),", "\"\"\" cities = map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR)", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, 
status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest", "log_n+1) def test_delete_interest(self): \"\"\" we can delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)})", "format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can create interest \"\"\"", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def", "We can search interests \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(title='Hockey')", "url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def", "self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can create interest \"\"\" n", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'],", "= InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login')", "UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>'", "self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok for administrator u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url,", "Interest.objects.delete() for i in range(3): interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save()", "format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available with authentication only and", "format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we", "ok for root u.role = User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json') token", "format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\"", "response = self.client.post(auth_url, data=data, format='json') token = 
response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)})", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list', query={'search':", "token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self):", "from happ.models import User, Interest, LogEntry from happ.factories import ( UserFactory, InterestFactory, CityFactory,", "interest.save() interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login')", "'parent_id': None, 'is_global': True, 'local_cities': [], } auth_url = prepare_url('login') data = {", "i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can deactivate interest", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data", "= self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we", "= InterestFactory() i.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>'", "n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root u.role = User.ROOT", "status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that we can get", "UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>'", "self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can update interest \"\"\"", "response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for non-staff", "format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code,", "self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else:", "self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can delete interest \"\"\"", "= 
self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code,", "auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } response =", "log_n+2) def test_update_interest(self): \"\"\" we can update interest \"\"\" cities = map(lambda x:", "{ 'username': u.username, 'password': '<PASSWORD>' } # restricted for moderator response = self.client.post(auth_url,", "format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title':", "response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available with", "response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(),", "status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can create interest \"\"\" n =", "= prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "categories with api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) inter", "data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search interests", "available with authentication only and for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "update interest \"\"\" cities = map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory() u", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=False)", "Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u =", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "'<PASSWORD>' } # restricted for moderator response = self.client.post(auth_url, data=data, format='json') token =", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'})", "= Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) 
response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(),", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse", "token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json')", "i in range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "def test_activate(self): \"\"\" we can activate interest through API \"\"\" u = UserFactory(role=User.MODERATOR)", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail',", "children with api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) interest", "str(CityFactory().id), range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count()", "\"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer')", "self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can delete", "test_search_interests(self): \"\"\" We can search interests \"\"\" Interest.objects.delete() for i in range(3): interest", "= Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can deactivate interest through", "User.ROOT u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "we can create interest \"\"\" n = Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest)", "data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can delete interest \"\"\" u = UserFactory(role=User.MODERATOR)", "response.data['token'] i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data = { 'username': u.username, 'password':", "in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self):", "str(interest.id)}) data = { 
'title': 'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities': cities,", "interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url =", "\"\"\" we can deactivate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json')", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest", "= response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code,", "only categories with api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None)", "== str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that we", "can delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() i", "def test_update_interest(self): \"\"\" we can update interest \"\"\" cities = map(lambda x: str(CityFactory().id),", "we can update interest \"\"\" cities = map(lambda x: str(CityFactory().id), range(3)) interest =", "= InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url,", "CityFactory, ) from happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is", "status.HTTP_403_FORBIDDEN) # ok for administrator u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data,", "= UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data = {", "in range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "from happ.factories import ( UserFactory, InterestFactory, CityFactory, ) from happ.tests import * class", "token = response.data['token'] i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root u.role =", "InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json')", "u.role = User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, 
format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{}", "Resourse is not available for non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save()", "self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search interests \"\"\" Interest.objects.delete() for i", "# restricted for moderator response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{}", "format='json') token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url,", "def test_get_with_auth(self): \"\"\" Resourse is available with authentication only and for staff \"\"\"", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data,", "status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root u.role", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data =", "for i in range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR)", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active)", "= { 'username': u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token", "self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can create interest \"\"\" n = Interest.objects.count()", "= prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self):", "status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can", "= UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password':", "test_create_interest(self): \"\"\" we can create interest \"\"\" n = Interest.objects.count() u = UserFactory(role=User.MODERATOR)", "format='json') token = response.data['token'] url = prepare_url('admin-interests-list') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url,", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n =", "from rest_framework.test import APISimpleTestCase from rest_framework_jwt.settings import api_settings from happ.models import User, Interest,", "= 
response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "= Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1)", "u.save() log_n = LogEntry.objects.count() i = InterestFactory() i.save() auth_url = prepare_url('login') data =", "def test_get_children(self): \"\"\" Ensure that we can get only children with api \"\"\"", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "import APISimpleTestCase from rest_framework_jwt.settings import api_settings from happ.models import User, Interest, LogEntry from", "= self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available with authentication", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data", "'title': 'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities': [], } auth_url = prepare_url('login')", "import api_settings from happ.models import User, Interest, LogEntry from happ.factories import ( UserFactory,", "i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can", "format='json') token = response.data['token'] i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{}", "= self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if data['id']", "= self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for", "Resourse is not available without authentication \"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url,", "= LogEntry.objects.count() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' }", "response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\"", "status from rest_framework.test import APISimpleTestCase 
from rest_framework_jwt.settings import api_settings from happ.models import User,", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=False) url =", "not available for non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url =", "'parent_id': None, 'is_global': False, 'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if data['id'] == str(interest.id): self.assertEqual(len(data['children']),", "= InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login')", "self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "activate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count()", "= InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url", "can get only categories with api \"\"\" Interest.objects.delete() for i in range(3): interest", "prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i", "response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can", "can create interest \"\"\" n = Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n", "for i in range(3): interest = InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR)", "create interest \"\"\" n = Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n =", "InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username':", "url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code,", "'local_cities': [], } auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>'", "Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not available without authentication \"\"\" url =", "status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for non-staff users \"\"\" u", "= map(lambda x: str(CityFactory().id), range(3)) interest = InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "data=data, format='json') token = response.data['token'] url = 
prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response =", "can search interests \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(title='Hockey') interest.save()", "import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not available without authentication", "interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username':", "token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name')", "test_delete_interest(self): \"\"\" we can delete interest \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n", "format='json') token = response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{}", "u.username, 'password': '<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url", "u.save() log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name',", "= InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = {", "= response.data['token'] i = InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "User.ADMINISTRATOR u.save() response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for non-staff users \"\"\" u = UserFactory()", "'title': 'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities': cities, } n = Interest.objects.count()", "self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1)", "= prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "= response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest name',", "interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "= self.client.post(auth_url, data=data, format='json') token = 
response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)}) data", "format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_with_auth_not_staff(self): \"\"\" Resourse is not available for non-staff users", "log_n+1) def test_get_categories(self): \"\"\" Ensure that we can get only categories with api", "can get only children with api \"\"\" Interest.objects.delete() for i in range(3): interest", "range(3): interest = InterestFactory(title='Hockey') interest.save() interest = InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>')", "cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data, format='json')", "interests \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(title='Hockey') interest.save() interest =", "if data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure", "u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username,", "'password': '<PASSWORD>' } # restricted for moderator response = self.client.post(auth_url, data=data, format='json') token", "token)) response = self.client.post(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1)", "not available without authentication \"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code,", "def test_create_interest(self): \"\"\" we can create interest \"\"\" n = Interest.objects.count() u =", "format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for", "} response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{}", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX,", "InterestFactory(is_active=False) url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, format='json')", "log_n+1) # ok for root u.role = User.ROOT u.save() response = self.client.post(auth_url, data=data,", "response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=True) url =", "API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login')", "query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) 
self.assertEqual(response.data['count'], 3)", "= InterestFactory(title='Beer') interest.save() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data =", "available without authentication \"\"\" url = prepare_url('admin-interests-list') response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "\"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data", "non-staff users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data =", "auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } # restricted", "token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'],", "'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities': [], } auth_url = prepare_url('login') data", "= prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)", "response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2)", "3) def test_create_interest(self): \"\"\" we can create interest \"\"\" n = Interest.objects.count() u", "response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) # ok", "= self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1)", "n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=True)", "n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can delete interest", "= response.data['token'] i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)}) self.client.credentials(HTTP_AUTHORIZATION='{} 
{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))", "self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def", "api_settings from happ.models import User, Interest, LogEntry from happ.factories import ( UserFactory, InterestFactory,", "format='json') token = response.data['token'] url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url,", "deactivate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count()", "\"\"\" Ensure that we can get only categories with api \"\"\" Interest.objects.delete() for", "url = prepare_url('admin-interests-children') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'],", "Interest, LogEntry from happ.factories import ( UserFactory, InterestFactory, CityFactory, ) from happ.tests import", "test_get_without_auth(self): \"\"\" Resourse is not available without authentication \"\"\" url = prepare_url('admin-interests-list') response", "status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can activate interest through", "InterestFactory() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url = prepare_url('login') data", "rest_framework_jwt.settings import api_settings from happ.models import User, Interest, LogEntry from happ.factories import (", "Resourse is available with authentication only and for staff \"\"\" u = UserFactory(role=User.MODERATOR)", "test_get_with_auth(self): \"\"\" Resourse is available with authentication only and for staff \"\"\" u", "u.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' } response", "rest_framework.test import APISimpleTestCase from rest_framework_jwt.settings import api_settings from happ.models import User, Interest, LogEntry", "data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self):", "data['id'] == str(interest.id): self.assertEqual(len(data['children']), 1) else: self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that", "= prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': True, 'local_cities':", "def test_get_categories(self): \"\"\" Ensure that we can get only categories with api \"\"\"", "log_n = LogEntry.objects.count() i = InterestFactory() i.save() auth_url = prepare_url('login') data = {", "= self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 3) def test_create_interest(self): \"\"\" we can create", "name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root 
u.role = User.ROOT u.save() response =", "InterestFactory() i.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' }", "that we can get only children with api \"\"\" Interest.objects.delete() for i in", "class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not available without authentication \"\"\" url", "'<PASSWORD>' } response = self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list')", "self.assertEqual(len(data['children']), 0) def test_get_children(self): \"\"\" Ensure that we can get only children with", "self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is available with authentication only and for", "= LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name', 'parent_id': None,", "self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that we can", "self.assertEqual(LogEntry.objects.count(), log_n+1) def test_get_categories(self): \"\"\" Ensure that we can get only categories with", "self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can update", "kwargs={'id': str(interest.id)}) data = { 'title': 'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities':", "log_n+1) def test_deactivate(self): \"\"\" we can deactivate interest through API \"\"\" u =", "interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() auth_url", "status.HTTP_204_NO_CONTENT) i = Interest.objects.get(id=i.id) self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can deactivate", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] url = prepare_url('admin-interests-list', query={'search': 'hoc'}) self.client.credentials(HTTP_AUTHORIZATION='{}", "import User, Interest, LogEntry from happ.factories import ( UserFactory, InterestFactory, CityFactory, ) from", "Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(), n-1) self.assertEqual(LogEntry.objects.count(),", "response.data['token'] url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)}) n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response", "prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for", "\"\"\" we can activate interest through API \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save()", "= UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url 
= prepare_url('admin-interests-list') interest_data = {", "and for staff \"\"\" u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data", "* class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse is not available without authentication \"\"\"", "u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username,", "response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(),", "n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.delete(url, format='json') self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Interest.objects.count(),", "users \"\"\" u = UserFactory() u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = {", "log_n = LogEntry.objects.count() url = prepare_url('admin-interests-list') interest_data = { 'title': 'NewInterest name', 'parent_id':", "range(3): interest = InterestFactory(parent=None) interest = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url", "test_update_interest(self): \"\"\" we can update interest \"\"\" cities = map(lambda x: str(CityFactory().id), range(3))", "format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']: if data['id'] == str(interest.id):", "} n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data, format='json') self.assertEqual(response.status_code,", "InterestFactory, CityFactory, ) from happ.tests import * class Tests(APISimpleTestCase): def test_get_without_auth(self): \"\"\" Resourse", "= self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+1) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1)", "= response.data['token'] url = prepare_url('admin-interests-categories') self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code,", "name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we can delete interest \"\"\" u =", "= InterestFactory(parent=None) inter = InterestFactory(parent=interest) u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login')", "self.assertEqual(response.data['count'], 1) self.assertNotEqual(response.data['results'][0]['parent'], None) def test_activate(self): \"\"\" we can activate interest through API", "self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search interests \"\"\" Interest.objects.delete()", "self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(Interest.objects.count(), n) 
self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) def test_delete_interest(self): \"\"\" we", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in", "u.set_password('<PASSWORD>') u.save() auth_url = prepare_url('login') data = { 'username': u.username, 'password': '<PASSWORD>' }", "response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(Interest.objects.count(), n+2) self.assertEqual(response.data['title'], 'NewInterest name') self.assertEqual(LogEntry.objects.count(),", "= { 'username': u.username, 'password': '<PASSWORD>' } # restricted for moderator response =", "'local_cities': cities, } n = Interest.objects.count() self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.patch(url, data=data,", "token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.data['results']), 3) for data in response.data['results']:", "'username': u.username, 'password': '<PASSWORD>' } # restricted for moderator response = self.client.post(auth_url, data=data,", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] i = InterestFactory(is_active=True) url = prepare_url('admin-interests-deactivate',", "happ.models import User, Interest, LogEntry from happ.factories import ( UserFactory, InterestFactory, CityFactory, )", "with api \"\"\" Interest.objects.delete() for i in range(3): interest = InterestFactory(parent=None) interest =", "self.assertTrue(i.is_active) self.assertEqual(LogEntry.objects.count(), log_n+1) def test_deactivate(self): \"\"\" we can deactivate interest through API \"\"\"", "= { 'title': 'NewInterest name', 'parent_id': None, 'is_global': False, 'local_cities': cities, } n", "= self.client.post(auth_url, data=data, format='json') token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url,", "token = response.data['token'] self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.post(url, data=interest_data, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+1) # ok for root u.role = User.ROOT u.save() response", "'NewInterest name') self.assertEqual(LogEntry.objects.count(), log_n+2) def test_update_interest(self): \"\"\" we can update interest \"\"\" cities", "token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK) def test_search_interests(self): \"\"\" We can search", "\"\"\" n = Interest.objects.count() u = UserFactory(role=User.MODERATOR) u.set_password('<PASSWORD>') u.save() log_n = LogEntry.objects.count() url", "\"\"\" Resourse is not available without authentication \"\"\" url = prepare_url('admin-interests-list') response =", "{}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token)) response = self.client.get(url, format='json') self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_get_with_auth(self): \"\"\" Resourse is" ]
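The record above consists of n-grams from a Django REST framework test module for the happ project's admin-interests endpoints. The pattern the fragments keep repeating is: create a user with UserFactory, obtain a JWT through the 'login' route, attach it with self.client.credentials, then exercise an admin-interests endpoint and assert the status code and LogEntry count. The sketch below reassembles only that authentication step; prepare_url, UserFactory, api_settings and APISimpleTestCase are assumed to be provided by the project's happ.tests / happ.factories modules exactly as the fragments suggest, and the password is a placeholder (the original value is redacted in the data).

from rest_framework import status

from happ.models import User
from happ.factories import UserFactory
from happ.tests import *  # assumed to expose APISimpleTestCase, prepare_url, api_settings


class AuthPatternSketch(APISimpleTestCase):
    def test_list_with_jwt(self):
        # Create a staff user and log in through the 'login' route to get a JWT.
        u = UserFactory(role=User.MODERATOR)
        u.set_password("placeholder-password")
        u.save()
        data = {"username": u.username, "password": "placeholder-password"}
        token = self.client.post(prepare_url("login"), data=data, format="json").data["token"]

        # Attach the token as an Authorization header and hit a protected endpoint.
        self.client.credentials(
            HTTP_AUTHORIZATION="{} {}".format(api_settings.JWT_AUTH_HEADER_PREFIX, token)
        )
        response = self.client.get(prepare_url("admin-interests-list"), format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)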
[ "self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con", "qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico", "Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\"))", "640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje", "de fuente apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_() # También se", "Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de fuente", "con PyQt.\") # Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y", "self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip, puede usar RTF", "es un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\",", "__init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es", "fuente apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_() # También se puede", "300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\")", "self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip, puede usar", "hecho con PyQt.\") # Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente", "QtGui import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700,", "usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de fuente apli = qw.QApplication(sys.argv)", "qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de fuente apli = qw.QApplication(sys.argv) tip =", "apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_() # También se puede poner", "# Fuente y tamaño de fuente apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show()", "PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640,", "sys from PyQt5 import QtGui import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self,", "03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip, puede", "Fuente y tamaño de fuente apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_()", "PyQt.\") # Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño", "tamaño de fuente apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_() # También", "parent=None): 
qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un", "tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de fuente apli", "puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de fuente apli =", "11)) # Fuente y tamaño de fuente apli = qw.QApplication(sys.argv) tip = Mensaje()", "y tamaño de fuente apli = qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_() #", "PyQt5 import QtGui import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self,", "parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho", "as qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640)", "un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11))", "= qw.QApplication(sys.argv) tip = Mensaje() tip.show() apli.exec_() # También se puede poner así.", "qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b>", "class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\")", "640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\") #", "import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300,", "import QtGui import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self, parent=None): qw.QWidget.__init__(self, parent)", "self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto es un <b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip,", "<b><i>Widget</i></b> hecho con PyQt.\") # Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) #", "# Mensaje tooltip, puede usar RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de", "from PyQt5 import QtGui import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def __init__(self, parent=None):", "def __init__(self, parent=None): qw.QWidget.__init__(self, parent) self.setGeometry(700, 300, 640, 640) self.setWindowTitle(\"Basico 03\") self.setWindowIcon(QtGui.QIcon(\"Recursos/Icon-Python_PyQt5.png\")) self.setToolTip(\"Esto", "import sys from PyQt5 import QtGui import PyQt5.QtWidgets as qw class Mensaje(qw.QWidget): def", "RTF qw.QToolTip.setFont(QtGui.QFont(\"OldEnglish\", 11)) # Fuente y tamaño de fuente apli = qw.QApplication(sys.argv) tip" ]
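The fragments in the record above come from a small PyQt5 example (comments in Spanish) that sets a rich-text tooltip on a QWidget and changes the tooltip font through QToolTip.setFont. A minimal self-contained sketch of the same technique, with a placeholder window title and font, would look like this:

import sys

from PyQt5 import QtGui
import PyQt5.QtWidgets as qw


class TooltipDemo(qw.QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle("Tooltip demo")  # placeholder title
        # Tooltips accept a subset of rich text, e.g. <b> and <i> tags.
        self.setToolTip("This is a <b><i>widget</i></b> built with PyQt.")


if __name__ == "__main__":
    app = qw.QApplication(sys.argv)
    # Font applied to every tooltip in the application.
    qw.QToolTip.setFont(QtGui.QFont("Sans Serif", 11))
    widget = TooltipDemo()
    widget.show()
    sys.exit(app.exec_())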
[ "coding: utf-8 -*- from __future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers.", "utf-8 -*- from __future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes", "division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it possible to assign a", "Makes it possible to assign a keyboard shortcut to this command via Preferences", "assign a keyboard shortcut to this command via Preferences > Shortcuts (in Glyphs", "(in Glyphs 3) or System Preferences > Keyboard > Shortcuts > App Shortcuts", "__doc__=\"\"\" Re-interpolates selected layers. Makes it possible to assign a keyboard shortcut to", "via Preferences > Shortcuts (in Glyphs 3) or System Preferences > Keyboard >", "selected layers. Makes it possible to assign a keyboard shortcut to this command", "shortcut to this command via Preferences > Shortcuts (in Glyphs 3) or System", "#MenuTitle: Re-interpolate # -*- coding: utf-8 -*- from __future__ import division, print_function, unicode_literals", "# -*- coding: utf-8 -*- from __future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates", "import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it possible to assign", "> Shortcuts > App Shortcuts (in Glyphs 2). \"\"\" thisFont = Glyphs.font for", "Glyphs 3) or System Preferences > Keyboard > Shortcuts > App Shortcuts (in", "Re-interpolate # -*- coding: utf-8 -*- from __future__ import division, print_function, unicode_literals __doc__=\"\"\"", "-*- from __future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it", "Keyboard > Shortcuts > App Shortcuts (in Glyphs 2). \"\"\" thisFont = Glyphs.font", "this command via Preferences > Shortcuts (in Glyphs 3) or System Preferences >", "it possible to assign a keyboard shortcut to this command via Preferences >", "keyboard shortcut to this command via Preferences > Shortcuts (in Glyphs 3) or", "print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it possible to assign a keyboard", "Preferences > Shortcuts (in Glyphs 3) or System Preferences > Keyboard > Shortcuts", "Preferences > Keyboard > Shortcuts > App Shortcuts (in Glyphs 2). \"\"\" thisFont", "from __future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it possible", "> Shortcuts (in Glyphs 3) or System Preferences > Keyboard > Shortcuts >", "a keyboard shortcut to this command via Preferences > Shortcuts (in Glyphs 3)", "3) or System Preferences > Keyboard > Shortcuts > App Shortcuts (in Glyphs", "to assign a keyboard shortcut to this command via Preferences > Shortcuts (in", "command via Preferences > Shortcuts (in Glyphs 3) or System Preferences > Keyboard", "possible to assign a keyboard shortcut to this command via Preferences > Shortcuts", "App Shortcuts (in Glyphs 2). \"\"\" thisFont = Glyphs.font for thisLayer in thisFont.selectedLayers:", "Re-interpolates selected layers. Makes it possible to assign a keyboard shortcut to this", "System Preferences > Keyboard > Shortcuts > App Shortcuts (in Glyphs 2). \"\"\"", "> App Shortcuts (in Glyphs 2). 
\"\"\" thisFont = Glyphs.font for thisLayer in", "-*- coding: utf-8 -*- from __future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected", "to this command via Preferences > Shortcuts (in Glyphs 3) or System Preferences", "<filename>Paths/Re-interpolate.py<gh_stars>10-100 #MenuTitle: Re-interpolate # -*- coding: utf-8 -*- from __future__ import division, print_function,", "Shortcuts (in Glyphs 3) or System Preferences > Keyboard > Shortcuts > App", "Shortcuts > App Shortcuts (in Glyphs 2). \"\"\" thisFont = Glyphs.font for thisLayer", "Shortcuts (in Glyphs 2). \"\"\" thisFont = Glyphs.font for thisLayer in thisFont.selectedLayers: thisLayer.reinterpolate()", "> Keyboard > Shortcuts > App Shortcuts (in Glyphs 2). \"\"\" thisFont =", "or System Preferences > Keyboard > Shortcuts > App Shortcuts (in Glyphs 2).", "layers. Makes it possible to assign a keyboard shortcut to this command via", "unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it possible to assign a keyboard shortcut", "__future__ import division, print_function, unicode_literals __doc__=\"\"\" Re-interpolates selected layers. Makes it possible to" ]
[ "= int(psutil.virtual_memory().total / 1024 / 1024) self.vmav = int(psutil.virtual_memory().available / 1024 / 1024)", "class. ''' self.os = os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine self.threads =", "the class. ''' self.os = os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine self.threads", "import os import psutil class InfoOS: ''' This class retrieves and prints information", "available virtual memory (in MB). ''' def __init__(self): ''' Constructor of the class.", "of the class. ''' self.os = os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine", "= psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024) self.vmav = int(psutil.virtual_memory().available /", "''' def __init__(self): ''' Constructor of the class. ''' self.os = os.uname().sysname self.kernel", "freq: the current CPU frequency, freqm: the maximum CPU frequency, vmtot: the total", "total virtual memory (in MB), vmav: the available virtual memory (in MB). '''", "CPU frequency, freqm: the maximum CPU frequency, vmtot: the total virtual memory (in", "of available CPU threads, freq: the current CPU frequency, freqm: the maximum CPU", "InfoOS: ''' This class retrieves and prints information on the current OS. Public", "the current release, arch: the current architecture, threads: the number of available CPU", "OS, kernel: the current release, arch: the current architecture, threads: the number of", "psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024) self.vmav =", "retrieves and prints information on the current OS. Public methods: Attributes: os: the", "Constructor of the class. ''' self.os = os.uname().sysname self.kernel = os.uname().release self.arch =", "memory (in MB), vmav: the available virtual memory (in MB). ''' def __init__(self):", "class InfoOS: ''' This class retrieves and prints information on the current OS.", "This class retrieves and prints information on the current OS. Public methods: Attributes:", "the current CPU frequency, freqm: the maximum CPU frequency, vmtot: the total virtual", "MB), vmav: the available virtual memory (in MB). ''' def __init__(self): ''' Constructor", "frequency, freqm: the maximum CPU frequency, vmtot: the total virtual memory (in MB),", "''' This class retrieves and prints information on the current OS. Public methods:", "CPU threads, freq: the current CPU frequency, freqm: the maximum CPU frequency, vmtot:", "OS. Public methods: Attributes: os: the current OS, kernel: the current release, arch:", "self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot", "vmtot: the total virtual memory (in MB), vmav: the available virtual memory (in", "threads, freq: the current CPU frequency, freqm: the maximum CPU frequency, vmtot: the", "= os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm =", "current architecture, threads: the number of available CPU threads, freq: the current CPU", "frequency, vmtot: the total virtual memory (in MB), vmav: the available virtual memory", "on the current OS. 
Public methods: Attributes: os: the current OS, kernel: the", "Public methods: Attributes: os: the current OS, kernel: the current release, arch: the", "self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total /", "current release, arch: the current architecture, threads: the number of available CPU threads,", "''' Constructor of the class. ''' self.os = os.uname().sysname self.kernel = os.uname().release self.arch", "available CPU threads, freq: the current CPU frequency, freqm: the maximum CPU frequency,", "and prints information on the current OS. Public methods: Attributes: os: the current", "__init__(self): ''' Constructor of the class. ''' self.os = os.uname().sysname self.kernel = os.uname().release", "architecture, threads: the number of available CPU threads, freq: the current CPU frequency,", "threads: the number of available CPU threads, freq: the current CPU frequency, freqm:", "self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024)", "the current architecture, threads: the number of available CPU threads, freq: the current", "current OS, kernel: the current release, arch: the current architecture, threads: the number", "= psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024) self.vmav", "Attributes: os: the current OS, kernel: the current release, arch: the current architecture,", "os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current", "def __init__(self): ''' Constructor of the class. ''' self.os = os.uname().sysname self.kernel =", "psutil class InfoOS: ''' This class retrieves and prints information on the current", "psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024) self.vmav = int(psutil.virtual_memory().available / 1024", "maximum CPU frequency, vmtot: the total virtual memory (in MB), vmav: the available", "os: the current OS, kernel: the current release, arch: the current architecture, threads:", "= os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq =", "methods: Attributes: os: the current OS, kernel: the current release, arch: the current", "virtual memory (in MB). ''' def __init__(self): ''' Constructor of the class. '''", "self.kernel = os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm", "prints information on the current OS. Public methods: Attributes: os: the current OS,", "psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 /", "the current OS, kernel: the current release, arch: the current architecture, threads: the", "CPU frequency, vmtot: the total virtual memory (in MB), vmav: the available virtual", "release, arch: the current architecture, threads: the number of available CPU threads, freq:", "os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total", "(in MB), vmav: the available virtual memory (in MB). ''' def __init__(self): '''", "class retrieves and prints information on the current OS. 
Public methods: Attributes: os:", "the current OS. Public methods: Attributes: os: the current OS, kernel: the current", "MB). ''' def __init__(self): ''' Constructor of the class. ''' self.os = os.uname().sysname", "virtual memory (in MB), vmav: the available virtual memory (in MB). ''' def", "self.os = os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq", "freqm: the maximum CPU frequency, vmtot: the total virtual memory (in MB), vmav:", "kernel: the current release, arch: the current architecture, threads: the number of available", "= os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot =", "vmav: the available virtual memory (in MB). ''' def __init__(self): ''' Constructor of", "self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024) self.vmav = int(psutil.virtual_memory().available / 1024 /", "os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max", "information on the current OS. Public methods: Attributes: os: the current OS, kernel:", "import psutil class InfoOS: ''' This class retrieves and prints information on the", "the maximum CPU frequency, vmtot: the total virtual memory (in MB), vmav: the", "the number of available CPU threads, freq: the current CPU frequency, freqm: the", "current CPU frequency, freqm: the maximum CPU frequency, vmtot: the total virtual memory", "the total virtual memory (in MB), vmav: the available virtual memory (in MB).", "(in MB). ''' def __init__(self): ''' Constructor of the class. ''' self.os =", "= psutil.cpu_count() self.freq = psutil.cpu_freq().current self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024", "self.freqm = psutil.cpu_freq().max self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024) self.vmav = int(psutil.virtual_memory().available", "the available virtual memory (in MB). ''' def __init__(self): ''' Constructor of the", "os import psutil class InfoOS: ''' This class retrieves and prints information on", "arch: the current architecture, threads: the number of available CPU threads, freq: the", "memory (in MB). ''' def __init__(self): ''' Constructor of the class. ''' self.os", "current OS. Public methods: Attributes: os: the current OS, kernel: the current release,", "number of available CPU threads, freq: the current CPU frequency, freqm: the maximum", "''' self.os = os.uname().sysname self.kernel = os.uname().release self.arch = os.uname().machine self.threads = psutil.cpu_count()" ]
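The record above is built from an InfoOS helper class whose docstring lists the attributes it gathers: OS name, kernel release, architecture, CPU thread count, current and maximum CPU frequency, and total/available virtual memory in MB. A minimal sketch of the same collection step using os.uname() and psutil (POSIX-only, since os.uname is unavailable on Windows, and psutil.cpu_freq() may return None on some platforms):

import os
import psutil


def collect_info():
    uname = os.uname()
    vm = psutil.virtual_memory()
    freq = psutil.cpu_freq()
    return {
        "os": uname.sysname,
        "kernel": uname.release,
        "arch": uname.machine,
        "threads": psutil.cpu_count(),
        "freq": freq.current,                      # current CPU frequency (MHz)
        "freqm": freq.max,                         # maximum CPU frequency (MHz)
        "vmtot": int(vm.total / 1024 / 1024),      # total virtual memory (MB)
        "vmav": int(vm.available / 1024 / 1024),   # available virtual memory (MB)
    }


if __name__ == "__main__":
    print(collect_info())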
[ "not look like a fourier analysis file. File contents follow...\\n{contents}\".format(filename=filename, contents=buffer)) self.n =", "= f.read().splitlines() combo_list = [(i, i) for i in diode_list] combo_list.extend(itertools.combinations(diode_list, 2)) fa", "open(out_file, \"w\") as four_table: four_table.write(\"Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\\n\") for (dp, dn), v in close_2_3.items(): flatten(dp, dn,", "= pd, negative = nd) if not os.path.exists(fourier_file): continue fa[(pd, nd)] = FourierAnalysis", "is defined as (larger / smaller) < ratio \"\"\" if m not in", "four_folder = four_folder, positive = positive, negative = negative) if not os.path.exists(fourier_file): return", "r\"E:\\eda\\fourier\" # where all the .four files are diode_list_file = r\"E:\\eda\\diodes\\diode-list.txt\" #diode_list_file =", "{2:0<12.10f} {3:0<12.10f}\".format(i, fa.harmonics[i].Magnitude, thd, thd - last)) last = thd def flatten(positive, negative,", "e: print (fourier_file) print (e) return h.append(\"{0:.4f}\".format(thd - last)) last = thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format(", "float(m.group('thd')) self.harmonics = collections.OrderedDict() for m in self.re_harmonic.finditer(buffer): self.harmonics[int(m.group('Harmonic'))] = Harmonic(m) self.distortion =", "0 or fa.distortion[n] == 0: return False biggest = fa.distortion[m] / fa.distortion[n] if", "filename): self.filename = filename with open(filename, \"r\") as f: buffer = f.read() m", "fa[(pd, nd)] = FourierAnalysis (fourier_file) close_2_3 = {k : v for k, v", "not in fa.distortion: return False if fa.distortion[m] == fa.distortion[n]: return True if fa.distortion[m]", "= {k : v for k, v in fa.items() if is_close(v, 2, 3)}", "negative, thd = fa.thd, harmonics = \",\".join(h) )) return def is_close(fa, m, n,", "for (dp, dn), v in close_2_3.items(): flatten(dp, dn, four_folder, four_table) if __name__ ==", "= FourierAnalysis (fourier_file) h = [] last = 0 for i in range(2,", "#diode_list_file = r\"E:\\eda\\diodes\\diode-list-test.txt\" out_file = r\"four_table.txt\" with open(diode_list_file) as f: diode_list = f.read().splitlines()", "<filename>four_table.py #! python3 r\"\"\" THD = sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1]", "dn), v in close_2_3.items(): flatten(dp, dn, four_folder, four_table) if __name__ == '__main__': main()", "self.filename = filename with open(filename, \"r\") as f: buffer = f.read() m =", "if fa.distortion[m] == fa.distortion[n]: return True if fa.distortion[m] == 0 or fa.distortion[n] ==", "a fourier analysis file. 
File contents follow...\\n{contents}\".format(filename=filename, contents=buffer)) self.n = int(m.group('n')) self.thd =", "{Magnitude}, {Phase}, {NormMag}, {NormPhase}\".format(**self.__dict__) class FourierAnalysis(): fp = \"[-+]?(?:(?:\\d*\\.\\d+)|(?:\\d+\\.?))(?:[Ee][+-]?\\d+)?\" re_fourier_analysis = re.compile(r\"Fourier analysis", "fa.distortion[n] / fa.distortion[m] return biggest < ratio def main(): #show_thds(\"testfile.four\") four_folder = r\"E:\\eda\\fourier\"", "for.') class Harmonic(): def __init__(self, m): self.Harmonic = int(m.group('Harmonic')) self.Frequency = int(m.group('Frequency')) self.Magnitude", "(fourier_file) print (e) return h.append(\"{0:.4f}\".format(thd - last)) last = thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive =", "range(2, fa.n): thd = fa.harmonic_distortion(i) print (\"{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}\".format(i, fa.harmonics[i].Magnitude, thd, thd", "n)(Mag^2[n]) ) / Mag[1] \"\"\" import argparse import re import collections import math", "buffer = f.read() m = self.re_fourier_analysis.search(buffer) if not m: raise ValueError(\"{filename} does not", "itertools import os parser = argparse.ArgumentParser(description='Create fourier tables out of a collection of", "fa.thd, harmonics = \",\".join(h) )) return def is_close(fa, m, n, ratio=2): \"\"\" Is", "import collections import math import itertools import os parser = argparse.ArgumentParser(description='Create fourier tables", "not m: raise ValueError(\"{filename} does not look like a fourier analysis file. File", "i in range(fa.n): SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last = 0", "print (e) return h.append(\"{0:.4f}\".format(thd - last)) last = thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive = positive,", "fourier analysis file. File contents follow...\\n{contents}\".format(filename=filename, contents=buffer)) self.n = int(m.group('n')) self.thd = float(m.group('thd'))", "distortion at m close to the percent harmonic distortion at harmonic n? Close", "class FourierAnalysis(): fp = \"[-+]?(?:(?:\\d*\\.\\d+)|(?:\\d+\\.?))(?:[Ee][+-]?\\d+)?\" re_fourier_analysis = re.compile(r\"Fourier analysis for.*No. Harmonics:\\s*(?P<n>\\d+),\\s*THD:\\s*(?P<thd>{fp})\\s*%\".format(fp=fp), re.DOTALL) re_harmonic", "thd - last last = thd def __repr__(self): return \"FourierAnalysis({filename}): n={n} thd={thd}\".format(**self.__dict__) def", "- last)) last = thd def flatten(positive, negative, four_folder, out_file): fourier_file = r\"{four_folder}\\{positive}__{negative}.four\".format(", "of a collection of fourier files') #parser.add_argument('models', nargs='+', help='The name(s) of the diode", "- last)) last = thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive = positive, negative = negative, thd", "at harmonic n? 
Close is defined as (larger / smaller) < ratio \"\"\"", "= thd def __repr__(self): return \"FourierAnalysis({filename}): n={n} thd={thd}\".format(**self.__dict__) def harmonic_distortion(self, k=0): if k", "collections import math import itertools import os parser = argparse.ArgumentParser(description='Create fourier tables out", "fa.distortion[m] / fa.distortion[n] if fa.distortion[m] > fa.distortion[n] else fa.distortion[n] / fa.distortion[m] return biggest", "r\"E:\\eda\\diodes\\diode-list-test.txt\" out_file = r\"four_table.txt\" with open(diode_list_file) as f: diode_list = f.read().splitlines() combo_list =", "python3 r\"\"\" THD = sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1] \"\"\" import", "self.re_fourier_analysis.search(buffer) if not m: raise ValueError(\"{filename} does not look like a fourier analysis", "sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1] \"\"\" import argparse import re import", "1 # because the arrary is zero based SumMag2 = 0 for i", "not os.path.exists(fourier_file): return fa = FourierAnalysis (fourier_file) h = [] last = 0", "(e) continue self.distortion[i] = thd - last last = thd def __repr__(self): return", "of the diode model(s) to make a test circuit for.') class Harmonic(): def", "for i in range(2, k): SumMag2 += math.pow(self.harmonics[i].Magnitude, 2) return 100 * math.sqrt(SumMag2)", "re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE) def __init__(self, filename): self.filename = filename with open(filename, \"r\") as f:", "re_harmonic = re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE) def __init__(self, filename): self.filename = filename with open(filename, \"r\")", "= int(m.group('Harmonic')) self.Frequency = int(m.group('Frequency')) self.Magnitude = float(m.group('Magnitude')) self.Phase = float(m.group('Phase')) self.NormMag =", "harmonic n? Close is defined as (larger / smaller) < ratio \"\"\" if", ": v for k, v in fa.items() if is_close(v, 2, 3)} with open(out_file,", "\",\".join(h) )) return def is_close(fa, m, n, ratio=2): \"\"\" Is the percent harmonic", "if fa.distortion[m] > fa.distortion[n] else fa.distortion[n] / fa.distortion[m] return biggest < ratio def", "return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude def show_thds(filename): fa = FourierAnalysis(filename) print (fa)", "= filename with open(filename, \"r\") as f: buffer = f.read() m = self.re_fourier_analysis.search(buffer)", "k + 1 # because the arrary is zero based SumMag2 = 0", "= \"[-+]?(?:(?:\\d*\\.\\d+)|(?:\\d+\\.?))(?:[Ee][+-]?\\d+)?\" re_fourier_analysis = re.compile(r\"Fourier analysis for.*No. 
Harmonics:\\s*(?P<n>\\d+),\\s*THD:\\s*(?P<thd>{fp})\\s*%\".format(fp=fp), re.DOTALL) re_harmonic = re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE)", "( Sum(2, n)(Mag^2[n]) ) / Mag[1] \"\"\" import argparse import re import collections", "fa.harmonic_distortion(i) except KeyError as e: print (fourier_file) print (e) return h.append(\"{0:.4f}\".format(thd - last))", "(fa) SumNormMag = 0 for i in range(fa.n): SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag", "diode_list] combo_list.extend(itertools.combinations(diode_list, 2)) fa = {} for pd, nd in combo_list: fourier_file =", "i) for i in diode_list] combo_list.extend(itertools.combinations(diode_list, 2)) fa = {} for pd, nd", "f: buffer = f.read() m = self.re_fourier_analysis.search(buffer) if not m: raise ValueError(\"{filename} does", "thd = self.harmonic_distortion(i) except KeyError as e: print (self.filename) print (e) continue self.distortion[i]", "to the percent harmonic distortion at harmonic n? Close is defined as (larger", "ratio def main(): #show_thds(\"testfile.four\") four_folder = r\"E:\\eda\\fourier\" # where all the .four files", "fa.distortion[m] > fa.distortion[n] else fa.distortion[n] / fa.distortion[m] return biggest < ratio def main():", "= int(m.group('Frequency')) self.Magnitude = float(m.group('Magnitude')) self.Phase = float(m.group('Phase')) self.NormMag = float(m.group('NormMag')) self.NormPhase =", "h = [] last = 0 for i in range(2, fa.n): try: thd", "= 0 for i in range(2, self.n): try: thd = self.harmonic_distortion(i) except KeyError", "diode_list_file = r\"E:\\eda\\diodes\\diode-list.txt\" #diode_list_file = r\"E:\\eda\\diodes\\diode-list-test.txt\" out_file = r\"four_table.txt\" with open(diode_list_file) as f:", "SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last = 0 for i in", "ratio=2): \"\"\" Is the percent harmonic distortion at m close to the percent", "THD = sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1] \"\"\" import argparse import", "== 0: return False biggest = fa.distortion[m] / fa.distortion[n] if fa.distortion[m] > fa.distortion[n]", "\"\"\" Is the percent harmonic distortion at m close to the percent harmonic", "not in fa.distortion or n not in fa.distortion: return False if fa.distortion[m] ==", "self.NormPhase = float(m.group('NormPhase')) def __str__(self): return \"{Harmonic}, {Frequency}, {Magnitude}, {Phase}, {NormMag}, {NormPhase}\".format(**self.__dict__) class", "arrary is zero based SumMag2 = 0 for i in range(2, k): SumMag2", "Harmonics:\\s*(?P<n>\\d+),\\s*THD:\\s*(?P<thd>{fp})\\s*%\".format(fp=fp), re.DOTALL) re_harmonic = re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE) def __init__(self, filename): self.filename = filename with", "__str__(self): return \"{Harmonic}, {Frequency}, {Magnitude}, {Phase}, {NormMag}, {NormPhase}\".format(**self.__dict__) class FourierAnalysis(): fp = \"[-+]?(?:(?:\\d*\\.\\d+)|(?:\\d+\\.?))(?:[Ee][+-]?\\d+)?\"", "test circuit for.') class Harmonic(): def __init__(self, m): self.Harmonic = int(m.group('Harmonic')) self.Frequency =", "self.distortion = {} last = 0 for i in range(2, self.n): try: thd", "{SumNormMag}\".format(SumNormMag=SumNormMag)) last = 0 for i in range(2, fa.n): thd = fa.harmonic_distortion(i) print", 
"float(m.group('Magnitude')) self.Phase = float(m.group('Phase')) self.NormMag = float(m.group('NormMag')) self.NormPhase = float(m.group('NormPhase')) def __str__(self): return", "fa.distortion: return False if fa.distortion[m] == fa.distortion[n]: return True if fa.distortion[m] == 0", "else fa.distortion[n] / fa.distortion[m] return biggest < ratio def main(): #show_thds(\"testfile.four\") four_folder =", "i in range(2, k): SumMag2 += math.pow(self.harmonics[i].Magnitude, 2) return 100 * math.sqrt(SumMag2) /", "the diode model(s) to make a test circuit for.') class Harmonic(): def __init__(self,", "range(2, self.n): try: thd = self.harmonic_distortion(i) except KeyError as e: print (self.filename) print", "0 for i in range(fa.n): SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last", "fa.distortion[m] return biggest < ratio def main(): #show_thds(\"testfile.four\") four_folder = r\"E:\\eda\\fourier\" # where", "a collection of fourier files') #parser.add_argument('models', nargs='+', help='The name(s) of the diode model(s)", "in diode_list] combo_list.extend(itertools.combinations(diode_list, 2)) fa = {} for pd, nd in combo_list: fourier_file", "negative) if not os.path.exists(fourier_file): return fa = FourierAnalysis (fourier_file) h = [] last", "if fa.distortion[m] == 0 or fa.distortion[n] == 0: return False biggest = fa.distortion[m]", "def main(): #show_thds(\"testfile.four\") four_folder = r\"E:\\eda\\fourier\" # where all the .four files are", "FourierAnalysis (fourier_file) close_2_3 = {k : v for k, v in fa.items() if", "in fa.distortion or n not in fa.distortion: return False if fa.distortion[m] == fa.distortion[n]:", "for i in range(2, fa.n): try: thd = fa.harmonic_distortion(i) except KeyError as e:", "import os parser = argparse.ArgumentParser(description='Create fourier tables out of a collection of fourier", "= r\"{four_folder}\\{positive}__{negative}.four\".format( four_folder = four_folder, positive = pd, negative = nd) if not", "3)} with open(out_file, \"w\") as four_table: four_table.write(\"Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\\n\") for (dp, dn), v in close_2_3.items():", "in range(fa.n): SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last = 0 for", "= float(m.group('NormPhase')) def __str__(self): return \"{Harmonic}, {Frequency}, {Magnitude}, {Phase}, {NormMag}, {NormPhase}\".format(**self.__dict__) class FourierAnalysis():", "False biggest = fa.distortion[m] / fa.distortion[n] if fa.distortion[m] > fa.distortion[n] else fa.distortion[n] /", "name(s) of the diode model(s) to make a test circuit for.') class Harmonic():", "continue self.distortion[i] = thd - last last = thd def __repr__(self): return \"FourierAnalysis({filename}):", "def __init__(self, filename): self.filename = filename with open(filename, \"r\") as f: buffer =", "def is_close(fa, m, n, ratio=2): \"\"\" Is the percent harmonic distortion at m", "= thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive = positive, negative = negative, thd = fa.thd, harmonics", "or fa.distortion[n] == 0: return False biggest = fa.distortion[m] / fa.distortion[n] if fa.distortion[m]", "+= fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last = 0 for i in range(2,", "continue fa[(pd, nd)] = FourierAnalysis (fourier_file) close_2_3 = {k : v for k,", "else: k = k + 1 # because the arrary is zero based", 
"in range(2, k): SumMag2 += math.pow(self.harmonics[i].Magnitude, 2) return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude", "four_folder, positive = positive, negative = negative) if not os.path.exists(fourier_file): return fa =", "#! python3 r\"\"\" THD = sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1] \"\"\"", "at m close to the percent harmonic distortion at harmonic n? Close is", "SumMag2 += math.pow(self.harmonics[i].Magnitude, 2) return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude def show_thds(filename): fa", "zero based SumMag2 = 0 for i in range(2, k): SumMag2 += math.pow(self.harmonics[i].Magnitude,", "for k, v in fa.items() if is_close(v, 2, 3)} with open(out_file, \"w\") as", "does not look like a fourier analysis file. File contents follow...\\n{contents}\".format(filename=filename, contents=buffer)) self.n", "\"FourierAnalysis({filename}): n={n} thd={thd}\".format(**self.__dict__) def harmonic_distortion(self, k=0): if k == 0: k = self.n", "m, n, ratio=2): \"\"\" Is the percent harmonic distortion at m close to", "collection of fourier files') #parser.add_argument('models', nargs='+', help='The name(s) of the diode model(s) to", "for i in diode_list] combo_list.extend(itertools.combinations(diode_list, 2)) fa = {} for pd, nd in", "self.harmonics[1].Magnitude def show_thds(filename): fa = FourierAnalysis(filename) print (fa) SumNormMag = 0 for i", "Is the percent harmonic distortion at m close to the percent harmonic distortion", "harmonic_distortion(self, k=0): if k == 0: k = self.n elif k >= self.n:", "k = self.n elif k >= self.n: k = self.n else: k =", "out_file): fourier_file = r\"{four_folder}\\{positive}__{negative}.four\".format( four_folder = four_folder, positive = positive, negative = negative)", "fa.n): try: thd = fa.harmonic_distortion(i) except KeyError as e: print (fourier_file) print (e)", "e: print (self.filename) print (e) continue self.distortion[i] = thd - last last =", "for.*No. 
Harmonics:\\s*(?P<n>\\d+),\\s*THD:\\s*(?P<thd>{fp})\\s*%\".format(fp=fp), re.DOTALL) re_harmonic = re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE) def __init__(self, filename): self.filename = filename", "r\"four_table.txt\" with open(diode_list_file) as f: diode_list = f.read().splitlines() combo_list = [(i, i) for", "harmonics = \",\".join(h) )) return def is_close(fa, m, n, ratio=2): \"\"\" Is the", "four_table.write(\"Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\\n\") for (dp, dn), v in close_2_3.items(): flatten(dp, dn, four_folder, four_table) if __name__", "{} last = 0 for i in range(2, self.n): try: thd = self.harmonic_distortion(i)", "fa = {} for pd, nd in combo_list: fourier_file = r\"{four_folder}\\{positive}__{negative}.four\".format( four_folder =", "self.Phase = float(m.group('Phase')) self.NormMag = float(m.group('NormMag')) self.NormPhase = float(m.group('NormPhase')) def __str__(self): return \"{Harmonic},", "import argparse import re import collections import math import itertools import os parser", "* math.sqrt(SumMag2) / self.harmonics[1].Magnitude def show_thds(filename): fa = FourierAnalysis(filename) print (fa) SumNormMag =", "out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive = positive, negative = negative, thd = fa.thd, harmonics = \",\".join(h)", "0 for i in range(2, self.n): try: thd = self.harmonic_distortion(i) except KeyError as", "= FourierAnalysis (fourier_file) close_2_3 = {k : v for k, v in fa.items()", "class Harmonic(): def __init__(self, m): self.Harmonic = int(m.group('Harmonic')) self.Frequency = int(m.group('Frequency')) self.Magnitude =", "0: k = self.n elif k >= self.n: k = self.n else: k", "as f: buffer = f.read() m = self.re_fourier_analysis.search(buffer) if not m: raise ValueError(\"{filename}", "out_file = r\"four_table.txt\" with open(diode_list_file) as f: diode_list = f.read().splitlines() combo_list = [(i,", "= 0 for i in range(2, fa.n): thd = fa.harmonic_distortion(i) print (\"{0:<3} {1:0<12.10f}", "file. 
File contents follow...\\n{contents}\".format(filename=filename, contents=buffer)) self.n = int(m.group('n')) self.thd = float(m.group('thd')) self.harmonics =", "four_folder = four_folder, positive = pd, negative = nd) if not os.path.exists(fourier_file): continue", "k == 0: k = self.n elif k >= self.n: k = self.n", "pd, nd in combo_list: fourier_file = r\"{four_folder}\\{positive}__{negative}.four\".format( four_folder = four_folder, positive = pd,", "\"\"\" if m not in fa.distortion or n not in fa.distortion: return False", "(e) return h.append(\"{0:.4f}\".format(thd - last)) last = thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive = positive, negative", "k, v in fa.items() if is_close(v, 2, 3)} with open(out_file, \"w\") as four_table:", "to make a test circuit for.') class Harmonic(): def __init__(self, m): self.Harmonic =", "float(m.group('NormPhase')) def __str__(self): return \"{Harmonic}, {Frequency}, {Magnitude}, {Phase}, {NormMag}, {NormPhase}\".format(**self.__dict__) class FourierAnalysis(): fp", "fa = FourierAnalysis(filename) print (fa) SumNormMag = 0 for i in range(fa.n): SumNormMag", "ratio \"\"\" if m not in fa.distortion or n not in fa.distortion: return", "2)) fa = {} for pd, nd in combo_list: fourier_file = r\"{four_folder}\\{positive}__{negative}.four\".format( four_folder", "fa.distortion[n] == 0: return False biggest = fa.distortion[m] / fa.distortion[n] if fa.distortion[m] >", "# where all the .four files are diode_list_file = r\"E:\\eda\\diodes\\diode-list.txt\" #diode_list_file = r\"E:\\eda\\diodes\\diode-list-test.txt\"", "self.n = int(m.group('n')) self.thd = float(m.group('thd')) self.harmonics = collections.OrderedDict() for m in self.re_harmonic.finditer(buffer):", "/ fa.distortion[n] if fa.distortion[m] > fa.distortion[n] else fa.distortion[n] / fa.distortion[m] return biggest <", "as f: diode_list = f.read().splitlines() combo_list = [(i, i) for i in diode_list]", "(fourier_file) h = [] last = 0 for i in range(2, fa.n): try:", "thd - last)) last = thd def flatten(positive, negative, four_folder, out_file): fourier_file =", "(\"{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}\".format(i, fa.harmonics[i].Magnitude, thd, thd - last)) last = thd def", "= collections.OrderedDict() for m in self.re_harmonic.finditer(buffer): self.harmonics[int(m.group('Harmonic'))] = Harmonic(m) self.distortion = {} last", "fa.distortion[n]: return True if fa.distortion[m] == 0 or fa.distortion[n] == 0: return False", "is_close(fa, m, n, ratio=2): \"\"\" Is the percent harmonic distortion at m close", "h.append(\"{0:.4f}\".format(thd - last)) last = thd out_file.write(\"{positive},{negative},{thd},{harmonics}\\n\".format( positive = positive, negative = negative,", "(\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last = 0 for i in range(2, fa.n): thd =", "< ratio def main(): #show_thds(\"testfile.four\") four_folder = r\"E:\\eda\\fourier\" # where all the .four", "\"r\") as f: buffer = f.read() m = self.re_fourier_analysis.search(buffer) if not m: raise", "#show_thds(\"testfile.four\") four_folder = r\"E:\\eda\\fourier\" # where all the .four files are diode_list_file =", "= four_folder, positive = positive, negative = negative) if not os.path.exists(fourier_file): return fa", "return def is_close(fa, m, n, ratio=2): \"\"\" Is the percent harmonic distortion at", "self.NormMag = float(m.group('NormMag')) self.NormPhase = float(m.group('NormPhase')) def __str__(self): return \"{Harmonic}, {Frequency}, {Magnitude}, {Phase},", "= 0 for i 
in range(fa.n): SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag))", "smaller) < ratio \"\"\" if m not in fa.distortion or n not in", "negative = nd) if not os.path.exists(fourier_file): continue fa[(pd, nd)] = FourierAnalysis (fourier_file) close_2_3", "in fa.items() if is_close(v, 2, 3)} with open(out_file, \"w\") as four_table: four_table.write(\"Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\\n\") for", "ValueError(\"{filename} does not look like a fourier analysis file. File contents follow...\\n{contents}\".format(filename=filename, contents=buffer))", "return fa = FourierAnalysis (fourier_file) h = [] last = 0 for i", "math.pow(self.harmonics[i].Magnitude, 2) return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude def show_thds(filename): fa = FourierAnalysis(filename)", "= re.compile(r\"Fourier analysis for.*No. Harmonics:\\s*(?P<n>\\d+),\\s*THD:\\s*(?P<thd>{fp})\\s*%\".format(fp=fp), re.DOTALL) re_harmonic = re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE) def __init__(self, filename):", "- last last = thd def __repr__(self): return \"FourierAnalysis({filename}): n={n} thd={thd}\".format(**self.__dict__) def harmonic_distortion(self,", "thd = fa.thd, harmonics = \",\".join(h) )) return def is_close(fa, m, n, ratio=2):", "+= math.pow(self.harmonics[i].Magnitude, 2) return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude def show_thds(filename): fa =", "in range(2, fa.n): thd = fa.harmonic_distortion(i) print (\"{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}\".format(i, fa.harmonics[i].Magnitude, thd,", "the percent harmonic distortion at m close to the percent harmonic distortion at", "m: raise ValueError(\"{filename} does not look like a fourier analysis file. File contents", "re.compile(r\"Fourier analysis for.*No. 
Harmonics:\\s*(?P<n>\\d+),\\s*THD:\\s*(?P<thd>{fp})\\s*%\".format(fp=fp), re.DOTALL) re_harmonic = re.compile(r\"^\\s*(?P<Harmonic>\\d+)\\s+(?P<Frequency>\\d+)\\s+(?P<Magnitude>{fp})\\s+(?P<Phase>{fp})\\s+(?P<NormMag>{fp})\\s+(?P<NormPhase>{fp})\\s+$\".format(fp=fp), re.MULTILINE) def __init__(self, filename): self.filename", "flatten(positive, negative, four_folder, out_file): fourier_file = r\"{four_folder}\\{positive}__{negative}.four\".format( four_folder = four_folder, positive = positive,", "= float(m.group('Magnitude')) self.Phase = float(m.group('Phase')) self.NormMag = float(m.group('NormMag')) self.NormPhase = float(m.group('NormPhase')) def __str__(self):", "= self.re_fourier_analysis.search(buffer) if not m: raise ValueError(\"{filename} does not look like a fourier", "v for k, v in fa.items() if is_close(v, 2, 3)} with open(out_file, \"w\")", "print (\"{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}\".format(i, fa.harmonics[i].Magnitude, thd, thd - last)) last = thd", "in range(2, self.n): try: thd = self.harmonic_distortion(i) except KeyError as e: print (self.filename)", "FourierAnalysis(filename) print (fa) SumNormMag = 0 for i in range(fa.n): SumNormMag += fa.harmonics[i].NormMag", "four_table: four_table.write(\"Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\\n\") for (dp, dn), v in close_2_3.items(): flatten(dp, dn, four_folder, four_table) if", "fa.distortion[m] == fa.distortion[n]: return True if fa.distortion[m] == 0 or fa.distortion[n] == 0:", "def __init__(self, m): self.Harmonic = int(m.group('Harmonic')) self.Frequency = int(m.group('Frequency')) self.Magnitude = float(m.group('Magnitude')) self.Phase", "files') #parser.add_argument('models', nargs='+', help='The name(s) of the diode model(s) to make a test", "for i in range(fa.n): SumNormMag += fa.harmonics[i].NormMag print (\"SumNormMag = {SumNormMag}\".format(SumNormMag=SumNormMag)) last =", "r\"\"\" THD = sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1] \"\"\" import argparse", "i in range(2, fa.n): thd = fa.harmonic_distortion(i) print (\"{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}\".format(i, fa.harmonics[i].Magnitude,", "fa.distortion[m] == 0 or fa.distortion[n] == 0: return False biggest = fa.distortion[m] /", "Harmonic(): def __init__(self, m): self.Harmonic = int(m.group('Harmonic')) self.Frequency = int(m.group('Frequency')) self.Magnitude = float(m.group('Magnitude'))", "self.distortion[i] = thd - last last = thd def __repr__(self): return \"FourierAnalysis({filename}): n={n}", "show_thds(filename): fa = FourierAnalysis(filename) print (fa) SumNormMag = 0 for i in range(fa.n):", "= r\"E:\\eda\\fourier\" # where all the .four files are diode_list_file = r\"E:\\eda\\diodes\\diode-list.txt\" #diode_list_file", "= fa.harmonic_distortion(i) except KeyError as e: print (fourier_file) print (e) return h.append(\"{0:.4f}\".format(thd -", "pd, negative = nd) if not os.path.exists(fourier_file): continue fa[(pd, nd)] = FourierAnalysis (fourier_file)", "self.harmonics[int(m.group('Harmonic'))] = Harmonic(m) self.distortion = {} last = 0 for i in range(2,", "m = self.re_fourier_analysis.search(buffer) if not m: raise ValueError(\"{filename} does not look like a", "/ self.harmonics[1].Magnitude def show_thds(filename): fa = FourierAnalysis(filename) print (fa) SumNormMag = 0 for", "as e: print (fourier_file) print (e) return h.append(\"{0:.4f}\".format(thd - last)) last = thd", "Harmonic(m) self.distortion = {} last = 0 for i in range(2, self.n): try:", "raise ValueError(\"{filename} does not look like 
    THD = sqrt( Sum(2, n)(Mag^2[n]) ) / Mag[1]
"""
import argparse
import re
import collections
import math
import itertools
import os

parser = argparse.ArgumentParser(
    description='Create fourier tables out of a collection of fourier files')
#parser.add_argument('models', nargs='+',
#                    help='The name(s) of the diode model(s) to make a test circuit for.')


class Harmonic():
    # One row of the harmonic table in a .four file.
    def __init__(self, m):
        self.Harmonic = int(m.group('Harmonic'))
        self.Frequency = int(m.group('Frequency'))
        self.Magnitude = float(m.group('Magnitude'))
        self.Phase = float(m.group('Phase'))
        self.NormMag = float(m.group('NormMag'))
        self.NormPhase = float(m.group('NormPhase'))

    def __str__(self):
        return "{Harmonic}, {Frequency}, {Magnitude}, {Phase}, {NormMag}, {NormPhase}".format(**self.__dict__)


class FourierAnalysis():
    # Floating point number pattern, reused by both regexes below.
    fp = "[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?"
    re_fourier_analysis = re.compile(
        r"Fourier analysis for.*No. Harmonics:\s*(?P<n>\d+),\s*THD:\s*(?P<thd>{fp})\s*%".format(fp=fp),
        re.DOTALL)
    re_harmonic = re.compile(
        r"^\s*(?P<Harmonic>\d+)\s+(?P<Frequency>\d+)\s+(?P<Magnitude>{fp})\s+(?P<Phase>{fp})\s+(?P<NormMag>{fp})\s+(?P<NormPhase>{fp})\s+$".format(fp=fp),
        re.MULTILINE)

    def __init__(self, filename):
        self.filename = filename
        with open(filename, "r") as f:
            buffer = f.read()
        m = self.re_fourier_analysis.search(buffer)
        if not m:
            raise ValueError(
                "{filename} does not look like a fourier analysis file. "
                "File contents follow...\n{contents}".format(
                    filename=filename, contents=buffer))
        self.n = int(m.group('n'))
        self.thd = float(m.group('thd'))
        self.harmonics = collections.OrderedDict()
        for m in self.re_harmonic.finditer(buffer):
            self.harmonics[int(m.group('Harmonic'))] = Harmonic(m)
        # Incremental contribution of each harmonic to the cumulative distortion.
        self.distortion = {}
        last = 0
        for i in range(2, self.n):
            try:
                thd = self.harmonic_distortion(i)
            except KeyError as e:
                print (self.filename)
                print (e)
                continue
            self.distortion[i] = thd - last
            last = thd

    def __repr__(self):
        return "FourierAnalysis({filename}): n={n} thd={thd}".format(**self.__dict__)

    def harmonic_distortion(self, k=0):
        if k == 0:
            k = self.n
        elif k >= self.n:
            k = self.n
        else:
            k = k + 1  # because the array is zero based
        SumMag2 = 0
        for i in range(2, k):
            SumMag2 += math.pow(self.harmonics[i].Magnitude, 2)
        return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude


def show_thds(filename):
    fa = FourierAnalysis(filename)
    print (fa)
    SumNormMag = 0
    for i in range(fa.n):
        SumNormMag += fa.harmonics[i].NormMag
    print ("SumNormMag = {SumNormMag}".format(SumNormMag=SumNormMag))
    last = 0
    for i in range(2, fa.n):
        thd = fa.harmonic_distortion(i)
        print ("{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}".format(
            i, fa.harmonics[i].Magnitude, thd, thd - last))
        last = thd


def flatten(positive, negative, four_folder, out_file):
    fourier_file = r"{four_folder}\{positive}__{negative}.four".format(
        four_folder=four_folder, positive=positive, negative=negative)
    if not os.path.exists(fourier_file):
        return
    fa = FourierAnalysis(fourier_file)
    h = []
    last = 0
    for i in range(2, fa.n):
        try:
            thd = fa.harmonic_distortion(i)
        except KeyError as e:
            print (fourier_file)
            print (e)
            return
        h.append("{0:.4f}".format(thd - last))
        last = thd
    out_file.write("{positive},{negative},{thd},{harmonics}\n".format(
        positive=positive, negative=negative, thd=fa.thd, harmonics=",".join(h)))
    return


def is_close(fa, m, n, ratio=2):
    """ Is the percent harmonic distortion at m close to the percent harmonic
        distortion at harmonic n?
        Close is defined as (larger / smaller) < ratio
    """
    if m not in fa.distortion or n not in fa.distortion:
        return False
    if fa.distortion[m] == fa.distortion[n]:
        return True
    if fa.distortion[m] == 0 or fa.distortion[n] == 0:
        return False
    biggest = fa.distortion[m] / fa.distortion[n] if fa.distortion[m] > fa.distortion[n] else fa.distortion[n] / fa.distortion[m]
    return biggest < ratio


def main():
    #show_thds("testfile.four")
    four_folder = r"E:\eda\fourier"  # where all the .four files are
    diode_list_file = r"E:\eda\diodes\diode-list.txt"
    #diode_list_file = r"E:\eda\diodes\diode-list-test.txt"
    out_file = r"four_table.txt"
    with open(diode_list_file) as f:
        diode_list = f.read().splitlines()
    combo_list = [(i, i) for i in diode_list]
    combo_list.extend(itertools.combinations(diode_list, 2))
    fa = {}
    for pd, nd in combo_list:
        fourier_file = r"{four_folder}\{positive}__{negative}.four".format(
            four_folder=four_folder, positive=pd, negative=nd)
        if not os.path.exists(fourier_file):
            continue
        fa[(pd, nd)] = FourierAnalysis(fourier_file)
    close_2_3 = {k: v for k, v in fa.items() if is_close(v, 2, 3)}
    with open(out_file, "w") as four_table:
        four_table.write("Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\n")
        for (dp, dn), v in close_2_3.items():
            flatten(dp, dn, four_folder, four_table)


if __name__ == '__main__':
    main()
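The docstring's formula, THD = sqrt( Sum(2, n)(Mag^2[n]) ) / Mag[1] reported as a percentage, is what harmonic_distortion computes from the parsed magnitudes. A minimal self-contained sketch of that arithmetic, using made-up magnitudes rather than values from any real .four file:

import math

def thd_percent(magnitudes):
    # magnitudes[0] is the fundamental Mag[1]; the rest are Mag[2]..Mag[n].
    # THD% = 100 * sqrt(sum of squared harmonic magnitudes) / fundamental.
    return 100 * math.sqrt(sum(m * m for m in magnitudes[1:])) / magnitudes[0]

# Hypothetical magnitudes for harmonics 1..4 (illustrative only).
print(thd_percent([1.0, 0.10, 0.05, 0.02]))  # about 11.4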
[ "[], [], []]}, { 10: [ [], [], [], [], [], [], [],", "[ [], [], [], [], [], [], [], [], [], [78217], [], [],", "'no data') def test_podium(self): \"\"\" Test five best months of work time. \"\"\"", "[]), (49, []), (54, []), (58, []), ] sorted_dict = OrderedDict( [ (141,", "[], [], [], [], [], [], [], [], [], [], [], []]}, {", "[632015], [505118], [499105], [486939], [624356], [455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, [])", "user grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data", "= self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon',", "data caching. \"\"\" @memoize(age_cache=20) def short_calculation(): data = 2 + 2 data =", "[], [], [], [], [], [], []]}, {11: [[], [], [], [], [],", "[ (141, [612478]), (176, [606888]), (170, [576346]), (26, [560624]), (165, [555037]), (36, [546225]),", "Presence analyzer unit tests. \"\"\" from __future__ import unicode_literals import os.path import json", "json.loads(resp.data) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data', 0],", "{ 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21,", "data[0], { 'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict", "(165, [555037]), (170, [576346]), (23, [514312]), (24, [235634]), (141, [612478]), (26, [508050]), (26,", "[555037]), (170, [576346]), (23, [514312]), (24, [235634]), (141, [612478]), (26, [508050]), (26, [560624]),", "0], ['Sat', 0, 0], ['Sun', 0, 0] ] ) resp = self.client.get('/api/v1/podium/9999') data", "# pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def setUp(self): \"\"\"", "tearDown(self): \"\"\" Get rid of unused objects after each test. \"\"\" pass def", "def test_mean(self): \"\"\" Test of mean and if empty list returns 0. \"\"\"", "30549], ['Sat', 6426], ['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data)", "checking if user exist. \"\"\" months_sum = [ [], [], [], [], [],", "test_xml_translator(self): \"\"\" Test user data from XML file extraction. \"\"\" data = utils.xml_translator()", "[], [], [], [], [], [], [], [], [], [], [], []]}, {62:", "data, OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [508050]), (165, [555037]),", "time the objects. \"\"\" start_example = datetime.time(13, 59, 59) end_example = datetime.time(23, 59,", "[606888]), (170, [576346]), (26, [508050]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]),", "grouped by weekday of given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type,", "38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ]) def", "[], [], [], [], [], [], [], [], []]}, {10: [[], [], [],", "[], [], [], []]}, {141: [[], [], [], [], [], [], [], [],", "utf-8 -*- \"\"\" Presence analyzer unit tests. \"\"\" from __future__ import unicode_literals import", "\"\"\" Test data caching. 
\"\"\" @memoize(age_cache=20) def short_calculation(): data = 2 + 2", "] } ) def suite(): \"\"\" Default test suite. \"\"\" base_suite = unittest.TestSuite()", "[]) data = utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [ [], [], [],", "0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0,", "self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test five best", "= utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self):", "data) def test_day_start_end(self): \"\"\" Test start and end work times sorted by weekday.", "given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data)", "['no data', 0], ['no data', 0], ['no data', 0], ['April', 1], ['July', 4],", "best months of work time. \"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json')", "render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users", "0], ['no data', 0], ['no data', 0], ['no data', 0], ['June', 76], ['July',", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\" Test top 5 user data.", "] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self):", "{ 10: [ [], [], [], [], [], [], [], [], [], [78217],", "{ 'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict =", "[[], [], [], [], [], [], [], [], [], [], [], [], []]}", "def test_user_validate(self): \"\"\" Test checking if user exist. 
\"\"\" months_sum = [ [],", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data,", "[], [19852], [], [], [] ] ) def test_user_validate(self): \"\"\" Test checking if", "(30, []), (31, []), (33, [306667]), (36, [546225]), (48, []), (49, []), (54,", "json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test of mean presence time grouped", "0], ['April', 1], ['July', 4], ['May', 6], ['August', 6], ['June', 7], ['September', 32]", "\"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], {", "months = [ [], [], [], [], [], [], [276890], [655139], [500730], [233576],", "utils # pylint: disable=relative-import import views # pylint: disable=unused-import, relative-import from .utils import", "= main.app.test_client() def tearDown(self): \"\"\" Get rid of unused objects after each test.", "['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ] ) resp", "'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50) } }, 179: { datetime.date(2013,", "[]]}, {11: [[], [], [], [], [], [], [], [], [], [], [],", "45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999') data", "[], [78217], [], [], [] ] }, { 11: [ [], [], [],", "20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] ) resp =", "mean presence time grouped by weekday of given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11')", "68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\" Test top", "test_day_start_end(self): \"\"\" Test start and end work times sorted by weekday. \"\"\" user", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' },", "self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0],", "pass def test_mainpage(self): \"\"\" Test main page render template. \"\"\" resp = self.client.get('/')", "[], [], [], []]}, {62: [[], [], [], [], [], [], [], [],", "utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start and end work times sorted", "[394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (30, []),", "of CSV file. \"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68,", "[]), (58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]),", "= self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday',", "utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62]) sample_date", "} ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. 
\"\"\" def setUp(self): \"\"\"", "json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri',", "] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self):", "\"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00))", "suming time for every month. \"\"\" items = { 178: { datetime.date(2013, 9,", "grouping presence entries by month. \"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [", "68, 49, 176, 141, 26, 62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10])", "(176, [606888]), (170, [576346]), (26, [560624]), (165, [555037]), (36, [546225]), (23, [514312]), (16,", "template. \"\"\" months = [ [], [], [], [], [], [], [276890], [655139],", "and medium time of leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json')", "<reponame>stxnext-kindergarten/presence-analyzer-asierhej # -*- coding: utf-8 -*- \"\"\" Presence analyzer unit tests. \"\"\" from", "Default test suite. \"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return base_suite if __name__", "[], [], [], [], [], [], [], [], [], [], []]}, {62: [[],", "0) def test_interval(self): \"\"\" Test calculation of seconds between the time the objects.", "unittest from collections import OrderedDict import main # pylint: disable=relative-import import utils #", "data) def test_mean(self): \"\"\" Test of mean and if empty list returns 0.", "data. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]),", "['Sat', 0, 0], ['Sun', 0, 0] ] ) resp = self.client.get('/api/v1/podium/9999') data =", "= time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2", "\"\"\" Views tests. \"\"\" def setUp(self): \"\"\" Before each test, set up a", "[], [], []]}, {26: [[], [], [], [], [], [], [], [], [],", "[] ] ) def test_user_validate(self): \"\"\" Test checking if user exist. \"\"\" months_sum", "setUp(self): \"\"\" Before each test, set up a environment. \"\"\" main.app.config.update( { 'XML_DATA':", "start_example = datetime.time(13, 59, 59) end_example = datetime.time(23, 59, 59) data = utils.interval(start_example,", "[371559]), (33, [306667]), (11, [263049]), (24, [235634]), (101, []) ] ) data =", "presence entries by month. 
\"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68:", "2013) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11'", "[], [], [], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] data", "10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions", "(48, []), (49, []), (54, []), (58, []), ] sorted_dict = OrderedDict( [", "[], [], [], [], [], [], [276890], [655139], [500730], [233576], [], [], []", "[], [], [], [], []]}, {62: [[], [], [], [], [], [], [],", "test_presence_weekday_view(self): \"\"\" Test mean presence time of given user grouped by weekday. \"\"\"", "[], [], [], [], [], []]}, {176: [[], [], [], [], [], [],", "XML file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170])", "TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member,", "def test_xml_translator(self): \"\"\" Test user data from XML file extraction. \"\"\" data =", "data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[], [], [], [], [],", "[]), (49, []), (54, []), (58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data,", "\"\"\" def setUp(self): \"\"\" Before each test, set up a environment. \"\"\" main.app.config.update(", "[], [], [], [], [], [], [], []]}, { 10: [ [], [],", "(101, []) ] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170,", "62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ] ) resp", "{ 'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def", "medium time to come to the office and medium time of leave. \"\"\"", "15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141,", "'no data') def test_mean_time_weekday_view(self): \"\"\" Test of mean presence time grouped by weekday", "[], [], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0],", "[263049]), (24, [235634]), (101, []) ] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0],", "['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999')", "[], [], [], [], [], [], [], [], [], [], []]} ] )", "['September', 64], ['no data', 0], ['no data', 0], ['no data', 0] ] )", "Test mean presence time of given user grouped by weekday. \"\"\" resp =", "resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users listing. \"\"\" resp", "[], [], [], [], [], []]} ] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual(", "Test of mean presence time grouped by weekday of given user. 
\"\"\" resp", "data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since", "data = utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name':", "data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\"", "[], [], []]}, {62: [[], [], [], [], [], [], [], [], [],", "['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ] ) resp =", "end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test", "of given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data =", "tests. \"\"\" def setUp(self): \"\"\" Before each test, set up a environment. \"\"\"", "data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0,", "them. \"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual(", "[]]}, {176: [[], [], [], [], [], [], [], [], [], [], [],", "'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, {", "32] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def", "[], [] ] }, {141: [[], [], [], [], [], [], [], [],", "178, months) self.assertEqual( data, [ [], [], [], [], [], [], [], [],", "36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test mean presence", "(49, []), (54, []), (58, []) ] ) ) def test_months_sum_dict(self): \"\"\" Test", "item = datetime.date(2013, 9, 9) months = [[] for month in xrange(13)] data", "['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation", "]) def test_xml_translator(self): \"\"\" Test user data from XML file extraction. \"\"\" data", "self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5],", "data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data =", "\"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[], [], [], [],", "'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\" Test top 5 user data. \"\"\"", "} ] ) def test_five_top_user_data(self): \"\"\" Test top 5 user data. \"\"\" dict_months", "[ [], [], [], [], [], [], [], [], [], [19852], [], [],", "(165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (10, [455386]), (19, [434499]), (15,", "[ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0],", "(24, [235634]), (30, []), (31, []), (48, []), (49, []), (54, []), (58,", "suite(): \"\"\" Default test suite. 
\"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return base_suite", "data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start and end work", "data, [ {68: [[], [], [], [], [], [], [], [], [], [],", "if empty list returns 0. \"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100, data)", "rid of unused objects after each test. \"\"\" pass def test_mainpage(self): \"\"\" Test", "{ 'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50) } }, 179: {", "in xrange(13)] data = utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data, [ [],", "returns 0. \"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100, data) data = utils.mean([0.5,", "data', 0], ['no data', 0], ['no data', 0], ['April', 1], ['July', 4], ['May',", "[], [], [], [], []]}, {141: [[], [], [], [], [], [], [],", "up a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client", "[], [], [], [], []]} ] ) def test_five_top_workers(self): \"\"\" Test top 5", "[], [], [], [], [], [], [], [], []]}, {11: [[], [], [],", "json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885],", "data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of mean and", "\"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return base_suite if __name__ == '__main__': unittest.main()", "self.assertEqual( data, [ {68: [[], [], [], [], [], [], [], [], [],", "test suite. \"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return base_suite if __name__ ==", "0], ['Sun', 0, 0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data,", "per months in year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data", "[], [], [], [], [], [], []]}, { 10: [ [], [], [],", "time grouped by weekday of given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200)", "time of leave. 
\"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data =", "import datetime import time import unittest from collections import OrderedDict import main #", "self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar':", "+ 2 data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation():", "} } item = datetime.date(2013, 9, 9) months = [[] for month in", "(23, [514312]), (16, [513180]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29,", "34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0,", "[ [], [], [], [], [], [], [550395], [632015], [505118], [499105], [486939], [624356],", "self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of mean and if empty list returns", "[576346]), (23, [514312]), (24, [235634]), (141, [612478]), (26, [508050]), (26, [560624]), (29, [385973]),", "['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ]", "['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri',", "[], [], [], [], [], []]}, {11: [[], [], [], [], [], [],", "64], ['no data', 0], ['no data', 0], ['no data', 0] ] ) def", "(24, [235634]), (101, []) ] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], {", "[606888]), (170, [576346]), (26, [560624]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]),", "['Sat', 0, 0], ['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\" Test user data", "\"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client() def", "176, 141, 26, 62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start',", "sorted by weekday. \"\"\" user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [", "self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test of mean", "test_five_top(self): \"\"\" Test top 5 workers per months in year. \"\"\" resp =", "[513180]), (26, [508050]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]),", "unit tests. 
\"\"\" from __future__ import unicode_literals import os.path import json import datetime", "0], ['no data', 0], ['April', 1], ['July', 4], ['May', 6], ['August', 6], ['June',", "main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get rid", "} ) sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def", "[16564], [24123], [118402], [], [], [] ] }, {141: [[], [], [], [],", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar':", "{68: [[], [], [], [], [], [], [], [], [], [], [], [],", "(15, [432795]), (16, [513180]), (176, [606888]), (19, [434499]), (165, [555037]), (170, [576346]), (23,", "= json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test the medium time to", "00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation of seconds between the", "\"\"\" Get rid of unused objects after each test. \"\"\" pass def test_get_data(self):", "\"\"\" start_example = datetime.time(13, 59, 59) end_example = datetime.time(23, 59, 59) data =", "'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49, 'name':", "test_interval(self): \"\"\" Test calculation of seconds between the time the objects. \"\"\" start_example", "[], [], []]}, {176: [[], [], [], [], [], [], [], [], [],", "= utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.',", "(26, [560624]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (26, [508050]), (10,", "= utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar':", ") def test_five_top_user_data(self): \"\"\" Test top 5 user data. \"\"\" dict_months = [", "months = [[] for month in xrange(13)] data = utils.months_sum_dict(2013, items, item, 178,", "['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549],", "short_calculation(): data = 2 + 2 data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(),", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months,", "of work time. \"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data =", "} ) def test_presence_weekday_view(self): \"\"\" Test mean presence time of given user grouped", "test_get_data(self): \"\"\" Test parsing of CSV file. 
\"\"\" data = utils.get_data() self.assertIsInstance(data, dict)", "(33, [306667]), (11, [263049]), (24, [235634]), (30, []), (31, []), (48, []), (49,", "= OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test", "+ 3 data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\"", "json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def", "{49: [[], [], [], [], [], [], [], [], [], [], [], [],", "5 workers per months in year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type,", "10: [ [], [], [], [], [], [], [], [], [], [78217], [],", "[], [], [], [], [], [], [], [], [], [], [], []]}, {11:", "4], ['May', 6], ['August', 6], ['June', 7], ['September', 32] ] ) resp =", "[612478]), (176, [606888]), (170, [576346]), (26, [560624]), (165, [555037]), (36, [546225]), (23, [514312]),", "24), 'start': datetime.time(16, 55, 24) } } } item = datetime.date(2013, 9, 9)", "months) self.assertEqual( data, [ [], [], [], [], [], [], [], [], [],", "(16, [513180]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12,", "\"\"\" Test start and end work times sorted by weekday. \"\"\" user =", "= self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test the", "data', 0], ['no data', 0], ['April', 1], ['July', 4], ['May', 6], ['August', 6],", "self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu',", "@memoize(age_cache=20) def short_calculation(): data = 2 + 2 data = time.time() time.sleep(1) return", "[]), (54, []), (58, []), ] sorted_dict = OrderedDict( [ (141, [612478]), (176,", "data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test data", "{ 178: { datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14, 42), 'start': datetime.time(11,", "['Sat', 6426], ['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data,", "data = utils.mean([100, 100, 100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3, 234])", "[432795]), (16, [513180]), (176, [606888]), (19, [434499]), (165, [555037]), (170, [576346]), (23, [514312]),", "= datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39,", "['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999')", "self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test the medium time to come to", "def short_calculation(): data = 2 + 2 data = time.time() time.sleep(1) return data", "[455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual(", "[514312]), (24, [235634]), (141, [612478]), (26, [508050]), (26, [560624]), (29, [385973]), (30, []),", "}, {141: [[], [], [], [], [], [], [], [], [], [], [],", "55, 24) } } } item = datetime.date(2013, 9, 9) months = 
[[]", "41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ] )", "caching. \"\"\" @memoize(age_cache=20) def short_calculation(): data = 2 + 2 data = time.time()", "[], [], [], [], [], [], [], [], [78217], [], [], [] ]", "Get rid of unused objects after each test. \"\"\" pass def test_get_data(self): \"\"\"", "OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [508050]), (165, [555037]), (36,", "[], [], [], [], [], [], [], [], []]}, {49: [[], [], [],", "month. \"\"\" items = { 178: { datetime.date(2013, 9, 9): { 'end': datetime.time(17,", "pass def test_get_data(self): \"\"\" Test parsing of CSV file. \"\"\" data = utils.get_data()", "import views # pylint: disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV = os.path.join(", "[513180]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]),", "test, set up a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV }", "[], []]}, {141: [[], [], [], [], [], [], [], [], [], [],", "month in xrange(13)] data = utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data, [", "'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141,", "data = utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data, [ [], [], [],", ") def test_podium_data_maker(self): \"\"\" Test groups presence entries as podium data. \"\"\" data", "] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\" def setUp(self): \"\"\" Before", "= self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test five", "from collections import OrderedDict import main # pylint: disable=relative-import import utils # pylint:", "{26: [[], [], [], [], [], [], [], [], [], [], [], [],", "\"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data,", "utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data', 0],", "[], [], [], [], [], [], [], [], [], [], [], []]}, {49:", "170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\"", "(58, []), ] sorted_dict = OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]),", "[], [], [], [], [], [], [], []]}, {176: [[], [], [], [],", "[]]}, {26: [[], [], [], [], [], [], [], [], [], [], [],", "[[], [], [], [], [], [], [], [], [], [], [], [], []]},", "0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0],", "7], ['September', 32] ] ) def test_group_by_month(self): \"\"\" Test grouping presence entries by", "00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation of seconds between the time", "def test_presence_start_end(self): \"\"\" Test the medium time to come to the office and", "work time. 
\"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data)", "22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data", "'start': datetime.time(11, 43, 50) } }, 179: { datetime.date(2013, 9, 12): { 'end':", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9, 2015) self.assertEqual( data, [ {", "] ) def test_group_by_month(self): \"\"\" Test grouping presence entries by month. \"\"\" data", "the time the objects. \"\"\" start_example = datetime.time(13, 59, 59) end_example = datetime.time(23,", "= datetime.time(13, 59, 59) end_example = datetime.time(23, 59, 59) data = utils.interval(start_example, end_example)", ") def test_group_by_month(self): \"\"\" Test grouping presence entries by month. \"\"\" data =", "{ 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class", "[], [], [19852], [], [], [] ] ) def test_user_validate(self): \"\"\" Test checking", "rid of unused objects after each test. \"\"\" pass def test_get_data(self): \"\"\" Test", "data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat',", "def test_mainpage(self): \"\"\" Test main page render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code,", "of unused objects after each test. \"\"\" pass def test_mainpage(self): \"\"\" Test main", "[508050]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (10, [455386]), (19, [434499]),", "[], []]}, {11: [[], [], [], [], [], [], [], [], [], [],", "test_mean_time_weekday_view(self): \"\"\" Test of mean presence time grouped by weekday of given user.", "[], [], [], [], [], []]}, { 10: [ [], [], [], [],", "data', 0], ['no data', 0], ['no data', 0], ['no data', 0], ['no data',", "'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])]) data", "= self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ {", "[], [], [], [], [], [], [], [], [], [], []]}, {49: [[],", "\"\"\" Before each test, set up a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA,", "] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id': 141,", "[], [], [], [276890], [655139], [500730], [233576], [], [], [] ] data =", "= json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed',", "= utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual( data, [ {", "(33, [306667]), (36, [546225]), (48, []), (49, []), (54, []), (58, []), ]", "\"\"\" Test calculation of secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23))", "presence time of given user grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code,", "[118402], [], [], [] ] }, {141: [[], [], [], [], [], [],", "test_sorted_months_dict(self): \"\"\" Test sorting of months dict. 
\"\"\" dict_months = [ (10, [455386]),", "[]), (54, []), (58, []) ] ) ) def test_months_sum_dict(self): \"\"\" Test appending", "test_group_by_month(self): \"\"\" Test grouping presence entries by month. \"\"\" data = utils.group_by_month(utils.get_data(), 2013)", "11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49,", "'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id':", "34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [ [],", "(176, [606888]), (19, [434499]), (165, [555037]), (170, [576346]), (23, [514312]), (24, [235634]), (141,", "data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test top 5 workers", "[]]} ] ) def test_five_top_workers(self): \"\"\" Test top 5 presence users with information", ") self.client = main.app.test_client() def tearDown(self): \"\"\" Get rid of unused objects after", "\"\"\" Test top 5 user data. \"\"\" dict_months = [ (10, [455386]), (11,", "(48, []), (49, []), (54, []), (58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual(", "6], ['June', 7], ['September', 32] ] ) def test_group_by_month(self): \"\"\" Test grouping presence", "= utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting of months dict.", "] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0], ['no data', 0],", "data) data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0,", "59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000,", "[371559]), (13, [394007]), (15, [432795]), (16, [513180]), (176, [606888]), (19, [434499]), (165, [555037]),", "[434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]),", "['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\" Test user data from XML file", "200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.',", "[], [], [], [], [], [], [], [], [], [], []]}, {141: [[],", "178: { datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43,", "8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\"", "[], [], [], [], [], []]}, {141: [[], [], [], [], [], [],", "mean presence time of given user grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11')", "'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase):", "} ) def tearDown(self): \"\"\" Get rid of unused objects after each test.", "appending and suming time for every month. 
\"\"\" items = { 178: {", ") data = utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours': 15, 'user_id': 62,", "] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual( data,", "[], []]} ] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[],", "data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting of months", "['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat',", "def test_months_sum_dict(self): \"\"\" Test appending and suming time for every month. \"\"\" items", "[235634]), (101, []) ] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours':", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar':", "(24, [235634]), (141, [612478]), (26, [508050]), (26, [560624]), (29, [385973]), (30, []), (31,", "[500730], [233576], [], [], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no", "[], []]}, { 10: [ [], [], [], [], [], [], [], [],", "50) } }, 179: { datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5, 24),", "def test_cache(self): \"\"\" Test data caching. \"\"\" @memoize(age_cache=20) def short_calculation(): data = 2", "= utils.mean([100, 100, 100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75,", "['no data', 0], ['no data', 0], ['no data', 0], ['no data', 0], ['April',", "office and medium time of leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type,", "[], [], [], [], [], [], [], []]}, {26: [[], [], [], [],", "by month. \"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[], [],", "] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[], [], [],", "self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([])", "['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun',", "[], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] } ) def suite():", "\"\"\" Test users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data", "[263049]), (12, [371559]), (13, [394007]), (15, [432795]), (16, [513180]), (176, [606888]), (19, [434499]),", "workers per months in year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json')", "[], [], [], [], [], [], [], []]}, {141: [[], [], [], [],", "tests. 
\"\"\" from __future__ import unicode_literals import os.path import json import datetime import", "data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id': 141, 'name': '<NAME>.',", "datetime.time(13, 59, 59) end_example = datetime.time(23, 59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000,", "'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation of", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict)", "import time import unittest from collections import OrderedDict import main # pylint: disable=relative-import", "Test the medium time to come to the office and medium time of", "['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ]", "[6426], [22969], [25321], [16564], [24123], [118402], [], [], [] ] }, {141: [[],", "data = json.loads(resp.data) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no", "def test_five_top_workers(self): \"\"\" Test top 5 presence users with information about them. \"\"\"", "[], [], [], [], [], [], [], [], [], [], []]}, {26: [[],", "0], ['no data', 0] ] ) def test_podium_data_maker(self): \"\"\" Test groups presence entries", "} } } item = datetime.date(2013, 9, 9) months = [[] for month", "(23, [514312]), (16, [513180]), (26, [508050]), (10, [455386]), (19, [434499]), (15, [432795]), (13,", "utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data', 0],", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests.", "\"\"\" Test appending and suming time for every month. \"\"\" items = {", "0] ] ) def test_podium_data_maker(self): \"\"\" Test groups presence entries as podium data.", "11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10, 'name': '<NAME>.',", "'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' )", "self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } )", "[555037]), (36, [546225]), (23, [514312]), (16, [513180]), (26, [508050]), (10, [455386]), (19, [434499]),", "} ) def suite(): \"\"\" Default test suite. \"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))", "\"\"\" from __future__ import unicode_literals import os.path import json import datetime import time", "179: { datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55,", "] ) ) def test_months_sum_dict(self): \"\"\" Test appending and suming time for every", "200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5],", "def suite(): \"\"\" Default test suite. 
\"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return", "[], [], [], [], [], [], [78217], [], [], [] ] }, {", ") class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\" def setUp(self): \"\"\" Before each", "[624356], [455386] ] } ) def suite(): \"\"\" Default test suite. \"\"\" base_suite", ") resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\"", "items = { 178: { datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14, 42),", "self.assertEqual( data, [ [], [], [], [], [], [], [], [], [], [19852],", "0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test", "['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data =", "200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue',", "[ ['no data', 0], ['no data', 0], ['no data', 0], ['no data', 0],", "}, { 'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours':", "= [ (10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]), (15, [432795]), (16,", "[306667]), (11, [263049]), (24, [235634]), (30, []), (31, []), (48, []), (49, []),", "and if empty list returns 0. \"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100,", "leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual(", "165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self):", "utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[], [], [], [], [], [], [],", "24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969]", "os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase):", "32] ] ) def test_group_by_month(self): \"\"\" Test grouping presence entries by month. \"\"\"", "100, 100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data", "\"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users listing. \"\"\"", "data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], { 'name':", ") resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\"", "62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141, 'name': '<NAME>.',", "['no data', 0], ['no data', 0], ['no data', 0], ['no data', 0], ['June',", "podium data. 
\"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0], ['no", "datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24) } } } item = datetime.date(2013,", "(13, [394007]), (15, [432795]), (16, [513180]), (176, [606888]), (19, [434499]), (165, [555037]), (170,", "datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5)", "data', 0], ['June', 76], ['July', 181], ['August', 139], ['September', 64], ['no data', 0],", "\"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual( data,", "59, 59) end_example = datetime.time(23, 59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data)", ") ) def test_months_sum_dict(self): \"\"\" Test appending and suming time for every month.", "[306667]), (36, [546225]), (48, []), (49, []), (54, []), (58, []), ] sorted_dict", "[], [], [], [], [19852], [], [], [] ] ) def test_user_validate(self): \"\"\"", "200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['no data', 0], ['no", "self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0,", "TEST_DATA_CSV } ) self.client = main.app.test_client() def tearDown(self): \"\"\" Get rid of unused", "49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68, 'name': '<NAME>.',", "['Sat', 6426.0], ['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data,", "[]), (49, []), (54, []), (58, []) ] ) ) def test_months_sum_dict(self): \"\"\"", "}, { 'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours':", "[263049]), (24, [235634]), (30, []), (31, []), (48, []), (49, []), (54, []),", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9, 2015) self.assertEqual( data, [", "list returns 0. \"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100, data) data =", "utils.mean([100, 100, 100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data)", "Views tests. \"\"\" def setUp(self): \"\"\" Before each test, set up a environment.", "def test_podium_data_maker(self): \"\"\" Test groups presence entries as podium data. \"\"\" data =", "with information about them. \"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data =", "sorted_dict = OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [560624]), (165,", "after each test. \"\"\" pass def test_mainpage(self): \"\"\" Test main page render template.", "['May', 6], ['August', 6], ['June', 7], ['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999')", "self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0,", "Before each test, set up a environment. 
\"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV':", "[], [], [], [], [], [], [], [], [], [], [], []]} ]", "self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result for podium template. \"\"\" months", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68'", "[], [], [], [], [], [], [], [], []]}, {26: [[], [], [],", "data') def test_presence_start_end(self): \"\"\" Test the medium time to come to the office", ") data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id': 141, 'name':", "# pylint: disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..',", "[], [], []]}, {141: [[], [], [], [], [], [], [], [], [],", "2 + 3 data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self):", "[], [], [], [], [], [], [], [], [], []]}, {10: [[], [],", "resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [", "[233576], [], [], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data',", "a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self):", "time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result for podium", "(54, []), (58, []), ] sorted_dict = OrderedDict( [ (141, [612478]), (176, [606888]),", "21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9,", "{ 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data", "(10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]), (15, [432795]), (16, [513180]), (176,", "[514312]), (16, [513180]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]),", "['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ]", "[], [], [], [], [], [], [19852], [], [], [] ] ) def", "def test_mean_time_weekday_view(self): \"\"\" Test of mean presence time grouped by weekday of given", "[], [], [], [], [], [], []]} ] ) def test_five_top_workers(self): \"\"\" Test", "the objects. \"\"\" start_example = datetime.time(13, 59, 59) end_example = datetime.time(23, 59, 59)", "0], ['no data', 0], ['June', 76], ['July', 181], ['August', 139], ['September', 64], ['no", "5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since midnight. 
\"\"\" data", "{141: [[], [], [], [], [], [], [], [], [], [], [], [],", "'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ]", "import unittest from collections import OrderedDict import main # pylint: disable=relative-import import utils", "= utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[], [], [], [], [], [],", "self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code,", "self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['no data', 0],", "secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data =", "of months dict. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]),", "import OrderedDict import main # pylint: disable=relative-import import utils # pylint: disable=relative-import import", "since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00,", "(23, [514312]), (24, [235634]), (141, [612478]), (26, [508050]), (26, [560624]), (29, [385973]), (30,", "Test users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data =", "test. \"\"\" pass def test_get_data(self): \"\"\" Test parsing of CSV file. \"\"\" data", "data') def test_mean_time_weekday_view(self): \"\"\" Test of mean presence time grouped by weekday of", "datetime.time(16, 55, 24) } } } item = datetime.date(2013, 9, 9) months =", "0. \"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2,", "[]]}, {62: [[], [], [], [], [], [], [], [], [], [], [],", "seconds between the time the objects. \"\"\" start_example = datetime.time(13, 59, 59) end_example", "[], [], [], [], [], []]}, {62: [[], [], [], [], [], [],", "# pylint: disable=relative-import import views # pylint: disable=unused-import, relative-import from .utils import memoize", "utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[], [], [], [], [], [], [],", "[], [], [], [], [], []]}, {10: [[], [], [], [], [], [],", "[], [], [], [], [], [], [], [], []]} ] ) def test_five_top_workers(self):", "self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2 + 3 data = time.time()", "data = utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [ [], [], [], [],", "'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get rid of unused objects after", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' },", "[]), ] sorted_dict = OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26,", "self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start and end work times sorted by", "'no data') def test_five_top(self): \"\"\" Test top 5 workers per months in year.", "6], ['August', 6], ['June', 7], ['September', 32] ] ) def test_group_by_month(self): \"\"\" Test", "\"\"\" Test top 5 presence users with information about them. 
\"\"\" data =", "[ (141, [612478]), (176, [606888]), (170, [576346]), (26, [508050]), (165, [555037]), (36, [546225]),", "Test top 5 presence users with information about them. \"\"\" data = utils.five_top_workers(9,", "[], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] } ) def", "of mean and if empty list returns 0. \"\"\" data = utils.mean([100, 100,", "data') def test_five_top(self): \"\"\" Test top 5 workers per months in year. \"\"\"", "'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\" def setUp(self):", "about them. \"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013)", "[24123], [118402], [], [], [] ] }, {141: [[], [], [], [], [],", "self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['no data', 0], ['no data',", "2011) self.assertEqual( data, [ {68: [[], [], [], [], [], [], [], [],", "utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [", "[], []]}, {62: [[], [], [], [], [], [], [], [], [], [],", "grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data =", "5, 24), 'start': datetime.time(16, 55, 24) } } } item = datetime.date(2013, 9,", "[]) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]), (176, [606888]),", "'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68, 'name':", "os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..',", "Test five best months of work time. \"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200)", "(49, []), (54, []), (58, []), ] sorted_dict = OrderedDict( [ (141, [612478]),", "['no data', 0], ['no data', 0] ] ) def test_podium_data_maker(self): \"\"\" Test groups", "['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun',", "OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [560624]), (165, [555037]), (36,", "23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\"", "test_podium_data_maker(self): \"\"\" Test groups presence entries as podium data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11])", "resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [", "self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123],", "[546225]), (23, [514312]), (16, [513180]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]),", "test_podium_result_structure_builder(self): \"\"\" Test building result for podium template. \"\"\" months = [ [],", "def tearDown(self): \"\"\" Get rid of unused objects after each test. 
\"\"\" pass", "12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176,", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test mean presence time", "= os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods", "(11, [263049]), (12, [371559]), (13, [394007]), (15, [432795]), (16, [513180]), (176, [606888]), (19,", "time of given user grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200)", "weekday of given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data", "information about them. \"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9,", "'..', 'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views", "] ) def test_five_top_user_data(self): \"\"\" Test top 5 user data. \"\"\" dict_months =", "data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual( data, {", "[], [], [], [], []]}, {49: [[], [], [], [], [], [], [],", "(30, []), (31, []), (48, []), (49, []), (54, []), (58, []) ]", "def test_podium_result_structure_builder(self): \"\"\" Test building result for podium template. \"\"\" months = [", "[]) ] ) ) def test_months_sum_dict(self): \"\"\" Test appending and suming time for", "[ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri',", "data, [ ['no data', 0], ['no data', 0], ['no data', 0], ['no data',", "[], [], [], [], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ]", "[], [], []]}, {11: [[], [], [], [], [], [], [], [], [],", "15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data)", "mean and if empty list returns 0. \"\"\" data = utils.mean([100, 100, 100])", "= utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.',", "[], [], [], [], [], [], [], []]} ] ) def test_five_top_workers(self): \"\"\"", "[612478]), (26, [508050]), (26, [560624]), (29, [385973]), (30, []), (31, []), (33, [306667]),", "(33, [306667]), (36, [546225]), (48, []), (49, []), (54, []), (58, []) ]", "= json.loads(resp.data) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data',", "11, 68, 49, 176, 141, 26, 62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date,", "sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting of months dict. \"\"\" dict_months", "[394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (101, [])", "'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id':", "test. \"\"\" pass def test_mainpage(self): \"\"\" Test main page render template. 
\"\"\" resp", "'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def", "9): { 'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50) } }, 179:", "utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0],", "self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0],", "Get rid of unused objects after each test. \"\"\" pass def test_mainpage(self): \"\"\"", "__future__ import unicode_literals import os.path import json import datetime import time import unittest", "time for every month. \"\"\" items = { 178: { datetime.date(2013, 9, 9):", "def test_five_top(self): \"\"\" Test top 5 workers per months in year. \"\"\" resp", "def test_podium(self): \"\"\" Test five best months of work time. \"\"\" resp =", "(15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24,", "= json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } )", "top 5 workers per months in year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200)", "entries as podium data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data',", "(54, []), (58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141,", "= self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users listing. \"\"\" resp =", "objects after each test. \"\"\" pass def test_get_data(self): \"\"\" Test parsing of CSV", "[508050]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]),", "data', 0], ['April', 1], ['July', 4], ['May', 6], ['August', 6], ['June', 7], ['September',", "[505118], [499105], [486939], [624356], [455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data", "data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test five best months", "\"\"\" Get rid of unused objects after each test. \"\"\" pass def test_mainpage(self):", "data', 0], ['no data', 0], ['no data', 0], ['no data', 0], ['June', 76],", "(11, [263049]), (24, [235634]), (30, []), (31, []), (48, []), (49, []), (54,", "of unused objects after each test. \"\"\" pass def test_get_data(self): \"\"\" Test parsing", "[], [], [], [], [], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386]", "['August', 6], ['June', 7], ['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999') data =", "(58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]), (176,", "[499105], [486939], [624356], [455386] ] } ) def suite(): \"\"\" Default test suite.", "\"\"\" Test of mean presence time grouped by weekday of given user. 
\"\"\"", "self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test of mean presence time grouped by", "49, 176, 141, 26, 62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(),", "import os.path import json import datetime import time import unittest from collections import", "[432795]), (13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]),", "to come to the office and medium time of leave. \"\"\" resp =", "return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result for podium template.", "file. \"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176,", "'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, {", "['July', 181], ['August', 139], ['September', 64], ['no data', 0], ['no data', 0], ['no", "self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start and", "data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result for podium template. \"\"\"", "self.client = main.app.test_client() def tearDown(self): \"\"\" Get rid of unused objects after each", "76], ['July', 181], ['August', 139], ['September', 64], ['no data', 0], ['no data', 0],", "'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0],", "[546225]), (48, []), (49, []), (54, []), (58, []), ] sorted_dict = OrderedDict(", "pylint: disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..',", "[], [], [], [], [], [], [], [78217], [], [], [] ] },", "\"\"\" Default test suite. \"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return base_suite if", "[]), (58, []), ] sorted_dict = OrderedDict( [ (141, [612478]), (176, [606888]), (170,", "dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62]) sample_date = datetime.date(2013,", "(s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426],", "= utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0,", "[], [], [], [], [], [], []]}, {49: [[], [], [], [], [],", "set up a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } )", "PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def setUp(self): \"\"\" Before each test, set up", "the medium time to come to the office and medium time of leave.", "[], [], [], [], [], [], [], []]}, {10: [[], [], [], [],", "medium time of leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data", "json import datetime import time import unittest from collections import OrderedDict import main", "to the office and medium time of leave. 
\"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code,", "podium template. \"\"\" months = [ [], [], [], [], [], [], [276890],", "data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no", "['June', 7], ['September', 32] ] ) def test_group_by_month(self): \"\"\" Test grouping presence entries", "\"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data,", "[]), (33, [306667]), (36, [546225]), (48, []), (49, []), (54, []), (58, [])", "'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id':", "[], [], []]} ] ) def test_five_top_workers(self): \"\"\" Test top 5 presence users", "analyzer unit tests. \"\"\" from __future__ import unicode_literals import os.path import json import", "[513180]), (176, [606888]), (19, [434499]), (165, [555037]), (170, [576346]), (23, [514312]), (24, [235634]),", "[555037]), (36, [546225]), (23, [514312]), (16, [513180]), (10, [455386]), (19, [434499]), (15, [432795]),", "presence users with information about them. \"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, [])", "= utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start and end work times", "[], [], [], [], [276890], [655139], [500730], [233576], [], [], [] ] data", "test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42,", "[25321], [16564], [24123], [118402], [], [], [] ] }, {141: [[], [], [],", ") resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\"", "0] ]) def test_xml_translator(self): \"\"\" Test user data from XML file extraction. \"\"\"", "start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of mean and if empty list", "utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self):", "by weekday. \"\"\" user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon',", "short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2 + 3 data = time.time() time.sleep(2)", "def test_get_data(self): \"\"\" Test parsing of CSV file. \"\"\" data = utils.get_data() self.assertIsInstance(data,", "json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11'", "user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual(", "empty list returns 0. \"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100, data) data", "['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data')", "[ [], [], [], [], [6426], [22969], [25321], [16564], [24123], [118402], [], [],", "\"\"\" Presence analyzer unit tests. \"\"\" from __future__ import unicode_literals import os.path import", "5 presence users with information about them. 
\"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data,", "data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed',", "[], [], [], [], [], []]} ] ) def test_five_top_workers(self): \"\"\" Test top", "self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue',", "-*- \"\"\" Presence analyzer unit tests. \"\"\" from __future__ import unicode_literals import os.path", "(170, [576346]), (23, [514312]), (24, [235634]), (141, [612478]), (26, [508050]), (26, [560624]), (29,", ".utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' )", "9, 9): { 'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50) } },", "['Sun', 0, 0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no", "[], [], [], [], [], [], [], [], []]}, {62: [[], [], [],", "[], [], [], [], [], [], [], []]} ] ) data = utils.group_by_month(utils.get_data(),", "OrderedDict import main # pylint: disable=relative-import import utils # pylint: disable=relative-import import views", "\"\"\" Test user data from XML file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data,", "[], [], [], []]}, {11: [[], [], [], [], [], [], [], [],", "= utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0,", "Test parsing of CSV file. \"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10,", "too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def setUp(self): \"\"\" Before each test,", "\"\"\" Utility functions tests. \"\"\" def setUp(self): \"\"\" Before each test, set up", "end work times sorted by weekday. \"\"\" user = utils.get_data() data = utils.day_start_end(user[10])", "= self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test top", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\" def", "data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed',", "data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of mean", "[36, 165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def", "[], [], [], [], [], [276890], [655139], [500730], [233576], [], [], [] ]", "[], [], [], [], []]}, { 10: [ [], [], [], [], [],", "[]), (58, []) ] ) ) def test_months_sum_dict(self): \"\"\" Test appending and suming", "[], [], [], []]}, {10: [[], [], [], [], [], [], [], [],", "(13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (101,", "def test_group_by_month(self): \"\"\" Test grouping presence entries by month. 
\"\"\" data = utils.group_by_month(utils.get_data(),", "'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id':", "41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ] ) resp =", "[]), (31, []), (33, [306667]), (36, [546225]), (48, []), (49, []), (54, []),", "Test of mean and if empty list returns 0. \"\"\" data = utils.mean([100,", "['no data', 0], ['no data', 0], ['June', 76], ['July', 181], ['August', 139], ['September',", "200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32, 'user_id':", "{ 'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8,", "\"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141,", "of given user grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type,", "(10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]), (33,", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test mean presence time of given", "'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176, 'name':", "[], [], [], [], [], [], []]} ] ) data = utils.group_by_month(utils.get_data(), 2011)", "= utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation of seconds", "self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test top 5 workers per months in", "disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. 
\"\"\" def setUp(self): \"\"\" Before each", "[], [], [], [], [], [], [], [], [], [], []]}, {10: [[],", "2015) self.assertEqual( data, [ { 'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62'", "['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data')", "['July', 4], ['May', 6], ['August', 6], ['June', 7], ['September', 32] ] ) def", "utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\"", "= 2 + 2 data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1)", "TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client() def tearDown(self): \"\"\" Get rid", "TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join(", "170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])])", "[434499]), (165, [555037]), (170, [576346]), (23, [514312]), (24, [235634]), (141, [612478]), (26, [508050]),", "[550395], [632015], [505118], [499105], [486939], [624356], [455386] ] } ) def suite(): \"\"\"", "json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test the medium time to come", "# -*- coding: utf-8 -*- \"\"\" Presence analyzer unit tests. \"\"\" from __future__", "\"\"\" pass def test_mainpage(self): \"\"\" Test main page render template. \"\"\" resp =", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test data caching. \"\"\"", "100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data =", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar':", "collections import OrderedDict import main # pylint: disable=relative-import import utils # pylint: disable=relative-import", "time import unittest from collections import OrderedDict import main # pylint: disable=relative-import import", "176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49, 'name': '<NAME>.',", "test_mean(self): \"\"\" Test of mean and if empty list returns 0. \"\"\" data", "[], [], [], [], [], []]}, {26: [[], [], [], [], [], [],", "coding: utf-8 -*- \"\"\" Presence analyzer unit tests. \"\"\" from __future__ import unicode_literals", "Test data caching. \"\"\" @memoize(age_cache=20) def short_calculation(): data = 2 + 2 data", "141) self.assertEqual( data, { 141: [ [], [], [], [], [], [], [550395],", "[], [], [], [], [], [], [], [], []]}, {176: [[], [], [],", "unused objects after each test. \"\"\" pass def test_mainpage(self): \"\"\" Test main page", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' },", "'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['no data', 0], ['no data', 0],", ") def suite(): \"\"\" Default test suite. 
\"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))", "[], [], [], [], [], [], [], [], [], [], [], []]}, {10:", "'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get rid of unused", "(29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (101, []) ]", "(26, [508050]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12,", "[486939], [624356], [455386] ] } ) def suite(): \"\"\" Default test suite. \"\"\"", "time. \"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual(", "environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client()", "= utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62])", "['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat', 6426], ['Sun',", "[]]}, {49: [[], [], [], [], [], [], [], [], [], [], [],", "[], [], [], [], [], [], [], [], [], [78217], [], [], []", "[], [] ] }, { 11: [ [], [], [], [], [6426], [22969],", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar':", "presence entries as podium data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no", "['no data', 0], ['no data', 0], ['no data', 0], ['June', 76], ['July', 181],", "[], [], [], [], [], [], [], [], [], []]}, {176: [[], [],", "data', 0], ['no data', 0], ['June', 76], ['July', 181], ['August', 139], ['September', 64],", "[]), (31, []), (48, []), (49, []), (54, []), (58, []) ] )", "64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0],", "os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class", "objects after each test. \"\"\" pass def test_mainpage(self): \"\"\" Test main page render", "[], [], [], []]}, { 10: [ [], [], [], [], [], [],", "0], ['no data', 0], ['no data', 0], ['April', 1], ['July', 4], ['May', 6],", "6], ['June', 7], ['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data)", "[], [], [], [19852], [], [], [] ] ) def test_user_validate(self): \"\"\" Test", "2 data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data", "[19852], [], [], [] ] ) def test_user_validate(self): \"\"\" Test checking if user", "6426], ['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no", "12): { 'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24) } } }", "['August', 139], ['September', 64], ['no data', 0], ['no data', 0], ['no data', 0]", "'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test data caching. 
\"\"\" @memoize(age_cache=20) def short_calculation():", "data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test the medium time", "data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building", "data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885],", "] ) def test_podium_data_maker(self): \"\"\" Test groups presence entries as podium data. \"\"\"", "data', 0], ['no data', 0], ['no data', 0], ['no data', 0], ['April', 1],", "'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue',", "exist. \"\"\" months_sum = [ [], [], [], [], [], [], [550395], [632015],", "6], ['August', 6], ['June', 7], ['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999') data", "[550395], [632015], [505118], [499105], [486939], [624356], [455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data,", "self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32,", "TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get rid of unused objects", ") def tearDown(self): \"\"\" Get rid of unused objects after each test. \"\"\"", "the office and medium time of leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200)", "(141, [612478]), (176, [606888]), (170, [576346]), (26, [508050]), (165, [555037]), (36, [546225]), (23,", "141, 26, 62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])", "0], ['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\" Test user data from XML", "utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting of months dict. \"\"\"", "test_api_users(self): \"\"\" Test users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json')", "datetime.time(11, 43, 50) } }, 179: { datetime.date(2013, 9, 12): { 'end': datetime.time(18,", "[]) ] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id':", "0], ['no data', 0], ['no data', 0], ['no data', 0], ['no data', 0],", "[]) data = utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11,", "{ 'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11,", "[ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu', 38926.0, 62631.0],", "\"\"\" Test of mean and if empty list returns 0. 
\"\"\" data =", "[], [], [], [], []]}, {26: [[], [], [], [], [], [], [],", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\" Test top 5 user", "(54, []), (58, []) ] ) ) def test_months_sum_dict(self): \"\"\" Test appending and", "data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, {", "[]]}, {10: [[], [], [], [], [], [], [], [], [], [], [],", "[], [], []]} ] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68:", "[499105], [486939], [624356], [455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data =", "} ) self.client = main.app.test_client() def tearDown(self): \"\"\" Get rid of unused objects", "\"\"\" Test mean presence time of given user grouped by weekday. \"\"\" resp", "def setUp(self): \"\"\" Before each test, set up a environment. \"\"\" main.app.config.update( {", "from XML file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165,", "utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def", "[], [], [], [], [], [78217], [], [], [] ] }, { 11:", "def test_day_start_end(self): \"\"\" Test start and end work times sorted by weekday. \"\"\"", "[ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours':", "{176: [[], [], [], [], [], [], [], [], [], [], [], [],", "(12, [371559]), (13, [394007]), (15, [432795]), (16, [513180]), (176, [606888]), (19, [434499]), (165,", "data, { 141: [ [], [], [], [], [], [], [550395], [632015], [505118],", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141'", "times sorted by weekday. \"\"\" user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data,", "{ 'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12,", "\"\"\" pass def test_get_data(self): \"\"\" Test parsing of CSV file. \"\"\" data =", "[612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting of", "come to the office and medium time of leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10')", "(11, [263049]), (24, [235634]), (101, []) ] ) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' }", "[], [], [], [], [], [], [], [], [], [], [], []]}, {26:", "template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users listing.", "months of work time. 
\"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data", "(16, [513180]), (26, [508050]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29,", ") def test_presence_weekday_view(self): \"\"\" Test mean presence time of given user grouped by", "[], [], [], [], [], [], []]}, {176: [[], [], [], [], [],", "['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\"", "[385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (101, []) ] )", "0, 0], ['Sun', 0, 0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data)", "utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0],", "time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2 + 3", "resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [", "[], [], [], [], [], [], []]}, {26: [[], [], [], [], [],", "32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10,", "141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176, 'name': '<NAME>.',", "self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' },", "141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])]) data =", "self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test five best months of work time.", "between the time the objects. \"\"\" start_example = datetime.time(13, 59, 59) end_example =", "data', 0], ['no data', 0], ['no data', 0] ] ) def test_podium_data_maker(self): \"\"\"", "['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ])", "midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00,", "['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] ) resp", "['no data', 0], ['June', 76], ['July', 181], ['August', 139], ['September', 64], ['no data',", "[], [], [], [], [6426], [22969], [25321], [16564], [24123], [118402], [], [], []", "test_user_validate(self): \"\"\" Test checking if user exist. \"\"\" months_sum = [ [], [],", "0, 0], ['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\" Test user data from", "= utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data',", "self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [ [], [],", "def other_calculation(): data = 2 + 3 data = time.time() time.sleep(2) return data", "calculation of secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743)", "resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test", "as podium data. 
\"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0],", "utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar':", "{11: [[], [], [], [], [], [], [], [], [], [], [], [],", "OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting", "top 5 user data. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12,", "[], [] ] ) def test_user_validate(self): \"\"\" Test checking if user exist. \"\"\"", "Test building result for podium template. \"\"\" months = [ [], [], [],", "[514312]), (16, [513180]), (26, [508050]), (10, [455386]), (19, [434499]), (15, [432795]), (13, [394007]),", "-*- coding: utf-8 -*- \"\"\" Presence analyzer unit tests. \"\"\" from __future__ import", "utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [ [], [], [], [], [], [],", "self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test top 5", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test data caching. \"\"\" @memoize(age_cache=20)", "}, { 'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours':", "utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141'", "}, { 11: [ [], [], [], [], [6426], [22969], [25321], [16564], [24123],", "data, [ [], [], [], [], [], [], [], [], [], [19852], [],", "dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' }", "9, 9) months = [[] for month in xrange(13)] data = utils.months_sum_dict(2013, items,", "} item = datetime.date(2013, 9, 9) months = [[] for month in xrange(13)]", "data, [ ['Weekday', 'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968],", "(13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (30,", "utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours':", "[235634]), (141, [612478]), (26, [508050]), (26, [560624]), (29, [385973]), (30, []), (31, []),", "json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test five best months of work", "months in year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data =", "each test. \"\"\" pass def test_mainpage(self): \"\"\" Test main page render template. \"\"\"", "1], ['July', 4], ['May', 6], ['August', 6], ['June', 7], ['September', 32] ] )", "by weekday of given user. 
\"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json')", "59) end_example = datetime.time(23, 59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data", "[], [], [], [], [], [], [], [], [], [], []]}, {11: [[],", "\"\"\" user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0],", "= json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0],", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9, 2015) self.assertEqual( data,", "disable=relative-import import utils # pylint: disable=relative-import import views # pylint: disable=unused-import, relative-import from", "of secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data", "'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\"", "'application/json') data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36'", "[508050]), (26, [560624]), (29, [385973]), (30, []), (31, []), (33, [306667]), (36, [546225]),", "[], [], [276890], [655139], [500730], [233576], [], [], [] ] data = utils.podium_result_structure_builder(months)", "20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0] ] )", "[], [], [], [], [], []]}, {49: [[], [], [], [], [], [],", "(33, [306667]), (11, [263049]), (24, [235634]), (101, []) ] ) data = utils.five_top_user_data(dict_months,", "= { 178: { datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14, 42), 'start':", "22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def", "[], [], [], [], [], [], [], [], [], []]} ] ) data", "= json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test five best months of", "Test sorting of months dict. \"\"\" dict_months = [ (10, [455386]), (11, [263049]),", "utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar':", "data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation of", "[ [], [], [], [], [], [], [276890], [655139], [500730], [233576], [], [],", "data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def", "Test calculation of seconds between the time the objects. 
\"\"\" start_example = datetime.time(13,", "data = utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data)", "= [ [], [], [], [], [], [], [276890], [655139], [500730], [233576], [],", "data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu',", "(26, [508050]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (10, [455386]), (19,", "user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue',", "[78217], [], [], [] ] }, { 11: [ [], [], [], [],", "'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests.", "[], [], [], [], [], [], [], [], []]}, {141: [[], [], [],", "datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24) }", "[], [], [], [], []]}, {10: [[], [], [], [], [], [], [],", "[], [], [], [], []]} ] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data,", "test_presence_start_end(self): \"\"\" Test the medium time to come to the office and medium", "data', 0], ['no data', 0], ['no data', 0], ['June', 76], ['July', 181], ['August',", "Test calculation of secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data,", "def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since midnight. \"\"\" data = utils.seconds_since_midnight(datetime.time(2,", "\"\"\" Test building result for podium template. \"\"\" months = [ [], [],", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\" Test top 5", "Test appending and suming time for every month. \"\"\" items = { 178:", "26, 62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual(", "Test grouping presence entries by month. \"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data,", "work times sorted by weekday. \"\"\" user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual(", "141: [ [], [], [], [], [], [], [550395], [632015], [505118], [499105], [486939],", "for podium template. 
\"\"\" months = [ [], [], [], [], [], [],", "disable=relative-import import views # pylint: disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV =", "[612478]), (176, [606888]), (170, [576346]), (26, [508050]), (165, [555037]), (36, [546225]), (23, [514312]),", "= 2 + 3 data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def", "[], [], [], [], [], [], [], [], [], [], []]}, { 10:", "= utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141) self.assertEqual( data, { 141:", "self.assertEqual( data[0], { 'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' } )", "\"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data,", "(49, []), (54, []), (58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict(", "[], [], [], []]}, {49: [[], [], [], [], [], [], [], [],", "1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours': 32,", "[], [], [6426], [22969], [25321], [16564], [24123], [118402], [], [], [] ] },", "self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62]) sample_date =", "{ 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get rid of", "Test top 5 user data. \"\"\" dict_months = [ (10, [455386]), (11, [263049]),", "(170, [576346]), (26, [560624]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (26,", "data = utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours': 15, 'user_id': 62, 'name':", "= [ [], [], [], [], [], [], [550395], [632015], [505118], [499105], [486939],", "[], [], [], [], [], [], [], []]}, {11: [[], [], [], [],", "if user exist. \"\"\" months_sum = [ [], [], [], [], [], [],", "'..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime',", "time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2 +", "['Fri', 30549], ['Sat', 6426], ['Sun', 22969] ] ) resp = self.client.get('/api/v1/podium/9999') data =", "0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\" Test", "11: [ [], [], [], [], [6426], [22969], [25321], [16564], [24123], [118402], [],", "[], [], [], [78217], [], [], [] ] }, { 11: [ [],", "suite. 
\"\"\" base_suite = unittest.TestSuite() base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase)) base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase)) return base_suite if __name__ == '__main__':", "(19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]), (11,", "self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed',", "[385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (30, []), (31, []),", "self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds", "views # pylint: disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__),", "[ { 'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours':", "by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data)", ") # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def setUp(self):", "main # pylint: disable=relative-import import utils # pylint: disable=relative-import import views # pylint:", "= utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours': 15, 'user_id': 62, 'name': '<NAME>.',", "[], [], [], [], [], [], [], [], [], []]} ] ) def", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\"", "'https://intranet.stxnext.pl:443/api/images/users/141' } ) sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, [])", "139], ['September', 64], ['no data', 0], ['no data', 0], ['no data', 0] ]", "38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ] )", "self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\" Test sorting of months dict. \"\"\" dict_months =", "= [[] for month in xrange(13)] data = utils.months_sum_dict(2013, items, item, 178, months)", ") def test_user_validate(self): \"\"\" Test checking if user exist. 
\"\"\" months_sum = [", "'..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data',", "memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA =", "[385973]), (30, []), (31, []), (33, [306667]), (36, [546225]), (48, []), (49, []),", "data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test of mean presence", "[371559]), (33, [306667]), (11, [263049]), (24, [235634]), (30, []), (31, []), (48, []),", "[], [], [], [], [], [], [], [], [], []]}, {26: [[], [],", "time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result for", "\"\"\" data = utils.mean([100, 100, 100]) self.assertEqual(100, data) data = utils.mean([0.5, 0.2, 0.3,", "0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def", "= utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[], [], [], [], [], [],", "= utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26,", "datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50) } }, 179: { datetime.date(2013, 9,", "= utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data, [ [], [], [], [],", "return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2 + 3 data", "(141, [612478]), (176, [606888]), (170, [576346]), (26, [560624]), (165, [555037]), (36, [546225]), (23,", "os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..',", "self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ { 'hours':", "4], ['May', 6], ['August', 6], ['June', 7], ['September', 32] ] ) def test_group_by_month(self):", "= datetime.time(23, 59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example,", "months dict. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]), (13,", "(36, [546225]), (23, [514312]), (16, [513180]), (10, [455386]), (19, [434499]), (15, [432795]), (13,", "(48, []), (49, []), (54, []), (58, []) ] ) ) def test_months_sum_dict(self):", "and suming time for every month. \"\"\" items = { 178: { datetime.date(2013,", "Utility functions tests. \"\"\" def setUp(self): \"\"\" Before each test, set up a", "data, [ { 'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, {", "33592.0, 58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0,", "(29, [385973]), (12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (30, []), (31,", "users with information about them. 
\"\"\" data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data", "relative-import from .utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data',", "{ datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24)", "= json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test of mean presence time", "[486939], [624356], [455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum,", "self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62]) sample_date = datetime.date(2013, 9,", "\"\"\" Test grouping presence entries by month. \"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual(", "self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation of seconds between the time the", "test_five_top_user_data(self): \"\"\" Test top 5 user data. \"\"\" dict_months = [ (10, [455386]),", "months_sum = [ [], [], [], [], [], [], [550395], [632015], [505118], [499105],", "'user_id': 11, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10, 'name':", "Test start and end work times sorted by weekday. \"\"\" user = utils.get_data()", "39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since midnight. \"\"\"", "\"\"\" months = [ [], [], [], [], [], [], [276890], [655139], [500730],", "data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[], [], [], [], [],", "[235634]), (30, []), (31, []), (48, []), (49, []), (54, []), (58, [])", "43, 50) } }, 179: { datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5,", "= OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [560624]), (165, [555037]),", "14, 42), 'start': datetime.time(11, 43, 50) } }, 179: { datetime.date(2013, 9, 12):", "data = json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.',", "'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml'", "[], [276890], [655139], [500730], [233576], [], [], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual(", "of leave. \"\"\" resp = self.client.get('/api/v1/presence_start_end/10') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data)", "['no data', 0] ] ) def test_podium_data_maker(self): \"\"\" Test groups presence entries as", "datetime import time import unittest from collections import OrderedDict import main # pylint:", "for month in xrange(13)] data = utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data,", "200) def test_api_users(self): \"\"\" Test users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200)", "class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def setUp(self): \"\"\" Before each test, set", "= utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0)", "pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\" Views tests. \"\"\" def setUp(self): \"\"\" Before", "user data. 
\"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]), (13,", "'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, {", "end_example = datetime.time(23, 59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data =", "[]), (54, []), (58, []) ] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [", "user data from XML file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3],", "'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self): \"\"\" Test", "test_mainpage(self): \"\"\" Test main page render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200)", "[576346]), (26, [508050]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (10, [455386]),", "24) } } } item = datetime.date(2013, 9, 9) months = [[] for", "[546225]), (23, [514312]), (16, [513180]), (26, [508050]), (10, [455386]), (19, [434499]), (15, [432795]),", "[], [], [], [], [], [], []]}, {10: [[], [], [], [], [],", "[], [], [], [], [], [], [], [19852], [], [], [] ] )", "utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data, [ [], [], [], [], [],", "import main # pylint: disable=relative-import import utils # pylint: disable=relative-import import views #", "sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9,", "} ) def test_cache(self): \"\"\" Test data caching. \"\"\" @memoize(age_cache=20) def short_calculation(): data", "22969] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def", "[], []]}, {49: [[], [], [], [], [], [], [], [], [], [],", "[576346]), (26, [560624]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (26, [508050]),", "[], [], [], [], [], [], [], [], [], [], [], []]}, {141:", "'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] ) def test_five_top_user_data(self):", "0], ['no data', 0], ['no data', 0], ['no data', 0], ['April', 1], ['July',", "from .utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'", "24123.0], ['Tue', 20942.5], ['Wed', 20942.5], ['Thu', 22984.0], ['Fri', 15274.5], ['Sat', 6426.0], ['Sun', 22969.0]", "PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\" def setUp(self): \"\"\" Before each test, set", "[], [], [], [], [], [], [], [], [], []]}, {141: [[], [],", "(12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (30, []), (31, []), (48,", "'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility", "\"\"\" Test calculation of seconds between the time the objects. 
\"\"\" start_example =", "xrange(13)] data = utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual( data, [ [], [],", "data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\"", "@memoize(age_cache=1) def other_calculation(): data = 2 + 3 data = time.time() time.sleep(2) return", "file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual(", "9, 12): { 'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24) } }", "[], [], [], [], [], [], [], [], [], []]}, { 10: [", "class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\" Utility functions tests. \"\"\" def setUp(self): \"\"\" Before each test,", "self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self):", ") sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self):", "data = utils.five_top_workers(9, 1997) self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual( data, [", "self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['no data',", "[], [], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] data =", "'no data') def test_presence_start_end(self): \"\"\" Test the medium time to come to the", "'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' }, { 'hours': 12, 'user_id': 141, 'name':", "unused objects after each test. \"\"\" pass def test_get_data(self): \"\"\" Test parsing of", "'application/json') data = json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name':", "= utils.user_validate(months_sum, 141) self.assertEqual( data, { 141: [ [], [], [], [], [],", "self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence", "0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ] ) resp = self.client.get('/api/v1/podium/9999')", "2 + 2 data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def", "[]), (48, []), (49, []), (54, []), (58, []) ] ) ) def", "items, item, 178, months) self.assertEqual( data, [ [], [], [], [], [], [],", "CSV file. \"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49,", "'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24) } } } item =", "utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation of seconds between", "\"\"\" Test checking if user exist. \"\"\" months_sum = [ [], [], [],", "[], [], [], [], [], [], [], []]}, {49: [[], [], [], [],", ") def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since midnight. \"\"\" data =", "and end work times sorted by weekday. 
\"\"\" user = utils.get_data() data =", "data) data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start and end", "\"\"\" items = { 178: { datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14,", "[], [], [], [], [], [19852], [], [], [] ] ) def test_user_validate(self):", "'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9, 2015)", "{ 'end': datetime.time(18, 5, 24), 'start': datetime.time(16, 55, 24) } } } item", "[], [], [], [], [], [], [], [], []]}, { 10: [ [],", "21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) class PresenceAnalyzerUtilsTestCase(unittest.TestCase): \"\"\"", "after each test. \"\"\" pass def test_get_data(self): \"\"\" Test parsing of CSV file.", "7], ['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no", "0], ['Sat', 0, 0], ['Sun', 0, 0] ]) def test_xml_translator(self): \"\"\" Test user", "json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0],", "9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) )", ") data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[], [], [], [],", "] sorted_dict = OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [560624]),", "import utils # pylint: disable=relative-import import views # pylint: disable=unused-import, relative-import from .utils", "[], [], [] ] }, {141: [[], [], [], [], [], [], [],", "datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test calculation of secounds since midnight.", "11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68,", "[], [], [], [], []]}, {176: [[], [], [], [], [], [], [],", "} ] ) data = utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours': 15,", "['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data')", "[624356], [455386] ] data = utils.user_validate(months_sum, 34654) self.assertEqual(data, []) data = utils.user_validate(months_sum, 141)", "['April', 1], ['July', 4], ['May', 6], ['August', 6], ['June', 7], ['September', 32] ]", "\"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get", "self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36,", "= utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of mean and if", "(170, [576346]), (26, [508050]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (10,", "}, { 'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ] )", "[], [], [], [], [], [], [], [], [], [], []]}, {176: [[],", "[], [], [], [], [], [], []]}, {141: [[], [], [], [], [],", "month. 
\"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[], [], [],", "] data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]), (176, [606888]), (170,", "\"\"\" Test groups presence entries as podium data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual(", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id': 176, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176'", "Test top 5 workers per months in year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code,", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test data caching. \"\"\" @memoize(age_cache=20) def", "self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test", "five best months of work time. \"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type,", "] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self):", "[655139], [500730], [233576], [], [], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [", "[], [], [], [], [], [], [], [], [], []]}, {62: [[], [],", "'start': datetime.time(16, 55, 24) } } } item = datetime.date(2013, 9, 9) months", "[], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] data = utils.user_validate(months_sum, 34654)", "data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26,", "[], [], [], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] }", "[546225]), (48, []), (49, []), (54, []), (58, []) ] data = utils.sorted_months_dict(dict_months)", "data self.assertEqual(short_calculation(), short_calculation()) @memoize(age_cache=1) def other_calculation(): data = 2 + 3 data =", "utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar':", "[]]}, {141: [[], [], [], [], [], [], [], [], [], [], [],", "0], ['no data', 0], ['no data', 0] ] ) def test_podium_data_maker(self): \"\"\" Test", "[], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] data = utils.user_validate(months_sum,", "given user grouped by weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json')", "= self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon',", "= os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__),", "in year. 
\"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data)", "] ) def test_five_top_workers(self): \"\"\" Test top 5 presence users with information about", "= self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test of", "building result for podium template. \"\"\" months = [ [], [], [], [],", "[]), (33, [306667]), (36, [546225]), (48, []), (49, []), (54, []), (58, []),", ") def test_five_top_workers(self): \"\"\" Test top 5 presence users with information about them.", "resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0], {", "[], [], [], [], [], [], [], [], [], [], [], []]}, {176:", "= json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11, 'name': '<NAME>.', 'avatar':", "'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' }", "weekday. \"\"\" resp = self.client.get('/api/v1/presence_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual(", "self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ { 'hours': 32, 'user_id': 11,", "\"\"\" @memoize(age_cache=20) def short_calculation(): data = 2 + 2 data = time.time() time.sleep(1)", "[], [], [], []]} ] ) def test_five_top_workers(self): \"\"\" Test top 5 presence", "{ 'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11,", "result for podium template. \"\"\" months = [ [], [], [], [], [],", "['August', 6], ['June', 7], ['September', 32] ] ) def test_group_by_month(self): \"\"\" Test grouping", "Test groups presence entries as podium data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data,", "{ 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_cache(self): \"\"\" Test data caching.", "{ datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50)", "\"\"\" Test five best months of work time. \"\"\" resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code,", "(19, [434499]), (165, [555037]), (170, [576346]), (23, [514312]), (24, [235634]), (141, [612478]), (26,", "{10: [[], [], [], [], [], [], [], [], [], [], [], [],", "(165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (26, [508050]), (10, [455386]), (19,", "[455386] ] } ) def suite(): \"\"\" Default test suite. \"\"\" base_suite =", "[10, 11, 68, 49, 176, 141, 26, 62]) sample_date = datetime.date(2013, 9, 10)", "[], [], [], [], [78217], [], [], [] ] }, { 11: [", "data = utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]),", "test_cache(self): \"\"\" Test data caching. 
\"\"\" @memoize(age_cache=20) def short_calculation(): data = 2 +", "[], [], [], [], [], [], [], [], [], []]}, {49: [[], [],", "\"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0], ['no data', 0],", "10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9, 2015) self.assertEqual(", "data from XML file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36,", "self.assertEqual( data, OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [508050]), (165,", "self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data', 0], ['no", "def test_interval(self): \"\"\" Test calculation of seconds between the time the objects. \"\"\"", "self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'],", "[], [], [], [550395], [632015], [505118], [499105], [486939], [624356], [455386] ] } )", "json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test top 5 workers per months", "[22969], [25321], [16564], [24123], [118402], [], [], [] ] }, {141: [[], [],", "] }, {141: [[], [], [], [], [], [], [], [], [], [],", "200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Weekday', 'Presence (s)'], ['Mon',", "os.path import json import datetime import time import unittest from collections import OrderedDict", "234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\" Test start", "weekday. \"\"\" user = utils.get_data() data = utils.day_start_end(user[10]) self.assertEqual( data, [ ['Mon', 0,", "[] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0], ['no data',", "other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result for podium template. \"\"\" months =", "[]]} ] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [ {68: [[], [],", "[], [], [] ] ) def test_user_validate(self): \"\"\" Test checking if user exist.", "[] ] }, { 11: [ [], [], [], [], [6426], [22969], [25321],", "every month. \"\"\" items = { 178: { datetime.date(2013, 9, 9): { 'end':", "# pylint: disable=relative-import import utils # pylint: disable=relative-import import views # pylint: disable=unused-import,", "'..', '..', 'runtime', 'data', 'export_test.xml' ) # pylint: disable=maybe-no-member, too-many-public-methods class PresenceAnalyzerViewsTestCase(unittest.TestCase): \"\"\"", "'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['Mon', 24123.0], ['Tue', 20942.5], ['Wed', 20942.5],", "extraction. 
\"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0],", "(36, [546225]), (23, [514312]), (16, [513180]), (26, [508050]), (10, [455386]), (19, [434499]), (15,", "58057.0], ['Thu', 38926.0, 62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0]", "\"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data,", "[], [], []]}, {49: [[], [], [], [], [], [], [], [], [],", "['July', 4], ['May', 6], ['August', 6], ['June', 7], ['September', 32] ] ) resp", "[]]}, { 10: [ [], [], [], [], [], [], [], [], [],", "def test_five_top_user_data(self): \"\"\" Test top 5 user data. \"\"\" dict_months = [ (10,", "main.app.test_client() def tearDown(self): \"\"\" Get rid of unused objects after each test. \"\"\"", "dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]), (15, [432795]),", "2013) self.assertEqual( data, [ {68: [[], [], [], [], [], [], [], [],", "'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test mean presence time of given user", "(16, [513180]), (176, [606888]), (19, [434499]), (165, [555037]), (170, [576346]), (23, [514312]), (24,", "{ 141: [ [], [], [], [], [], [], [550395], [632015], [505118], [499105],", "['June', 7], ['September', 32] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data,", "['no data', 0], ['April', 1], ['July', 4], ['May', 6], ['August', 6], ['June', 7],", "{ 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test", "[], [], [], [6426], [22969], [25321], [16564], [24123], [118402], [], [], [] ]", "import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv' ) TEST_XML_DATA", ") def test_months_sum_dict(self): \"\"\" Test appending and suming time for every month. \"\"\"", "= utils.mean([0.5, 0.2, 0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data) def", "disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime',", "resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test", "data') def test_podium(self): \"\"\" Test five best months of work time. \"\"\" resp", "import unicode_literals import os.path import json import datetime import time import unittest from", "main page render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\"", "self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test users listing. 
\"\"\" resp = self.client.get('/api/v1/users')", "= time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test building result", "(31, []), (48, []), (49, []), (54, []), (58, []) ] ) )", "self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys()[:3], [36, 165, 170]) self.assertEqual( data.values()[0], { 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36'", "['June', 76], ['July', 181], ['August', 139], ['September', 64], ['no data', 0], ['no data',", "unicode_literals import os.path import json import datetime import time import unittest from collections", "\"\"\" Test sorting of months dict. \"\"\" dict_months = [ (10, [455386]), (11,", "data = utils.seconds_since_midnight(datetime.time(2, 42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data,", "resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_podium(self): \"\"\" Test", "utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of mean and if empty", "data', 0], ['no data', 0] ] ) def test_podium_data_maker(self): \"\"\" Test groups presence", "pylint: disable=relative-import import views # pylint: disable=unused-import, relative-import from .utils import memoize TEST_DATA_CSV", "3 data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation()) def test_podium_result_structure_builder(self): \"\"\" Test", "datetime.date(2013, 9, 9): { 'end': datetime.time(17, 14, 42), 'start': datetime.time(11, 43, 50) }", "data = 2 + 3 data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(), other_calculation())", "\"\"\" months_sum = [ [], [], [], [], [], [], [550395], [632015], [505118],", "presence time grouped by weekday of given user. \"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code,", "main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client() def tearDown(self):", "[], [], [78217], [], [], [] ] }, { 11: [ [], [],", "[], [], [], [], [], [], [], [], []]} ] ) data =", "(58, []) ] ) ) def test_months_sum_dict(self): \"\"\" Test appending and suming time", ") def test_cache(self): \"\"\" Test data caching. \"\"\" @memoize(age_cache=20) def short_calculation(): data =", "self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0, 58057.0], ['Thu',", "} }, 179: { datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5, 24), 'start':", "data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/49'", "[306667]), (36, [546225]), (48, []), (49, []), (54, []), (58, []) ] data", "start and end work times sorted by weekday. 
\"\"\" user = utils.get_data() data", "\"\"\" Test the medium time to come to the office and medium time", "9) months = [[] for month in xrange(13)] data = utils.months_sum_dict(2013, items, item,", "utils.sorted_months_dict(dict_months) self.assertEqual( data, OrderedDict( [ (141, [612478]), (176, [606888]), (170, [576346]), (26, [508050]),", "{62: [[], [], [], [], [], [], [], [], [], [], [], [],", "10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def", "objects. \"\"\" start_example = datetime.time(13, 59, 59) end_example = datetime.time(23, 59, 59) data", "[], []]} ] ) def test_five_top_workers(self): \"\"\" Test top 5 presence users with", "'test_data.csv' ) TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' ) #", "[ {68: [[], [], [], [], [], [], [], [], [], [], [],", "'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client() def tearDown(self): \"\"\" Get rid of", "[632015], [505118], [499105], [486939], [624356], [455386] ] } ) def suite(): \"\"\" Default", "item, 178, months) self.assertEqual( data, [ [], [], [], [], [], [], [],", "self.assertEqual(data, []) data = utils.five_top_workers(9, 2013) self.assertEqual( data, [ { 'hours': 32, 'user_id':", "resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [", "[], [], [], []]} ] ) data = utils.group_by_month(utils.get_data(), 2011) self.assertEqual( data, [", "from __future__ import unicode_literals import os.path import json import datetime import time import", ") resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\"", "test_months_sum_dict(self): \"\"\" Test appending and suming time for every month. \"\"\" items =", "] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self):", "self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_presence_start_end(self): \"\"\" Test the medium", "[306667]), (11, [263049]), (24, [235634]), (101, []) ] ) data = utils.five_top_user_data(dict_months, sorted_dict)", "'Presence (s)'], ['Mon', 24123], ['Tue', 41885], ['Wed', 41885], ['Thu', 45968], ['Fri', 30549], ['Sat',", "{ 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client() def tearDown(self): \"\"\"", "resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data') def test_mean_time_weekday_view(self): \"\"\" Test", "[], [], []]}, {10: [[], [], [], [], [], [], [], [], [],", "self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self): \"\"\" Test", "0, 0] ]) def test_xml_translator(self): \"\"\" Test user data from XML file extraction.", "[606888]), (19, [434499]), (165, [555037]), (170, [576346]), (23, [514312]), (24, [235634]), (141, [612478]),", "listing. 
\"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual(", "self.assertEqual( data, [ { 'hours': 15, 'user_id': 62, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/62' },", "= utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0], ['no data', 0], ['no data',", "'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client = main.app.test_client() def tearDown(self): \"\"\" Get", "(26, [560624]), (29, [385973]), (30, []), (31, []), (33, [306667]), (36, [546225]), (48,", "functions tests. \"\"\" def setUp(self): \"\"\" Before each test, set up a environment.", "parsing of CSV file. \"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(), [10, 11,", "0], ['no data', 0], ['no data', 0], ['June', 76], ['July', 181], ['August', 139],", "= self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id':", "resp = self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [", "datetime.time(23, 59, 59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example)", "def test_api_users(self): \"\"\" Test users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type,", "import json import datetime import time import unittest from collections import OrderedDict import", "}, 179: { datetime.date(2013, 9, 12): { 'end': datetime.time(18, 5, 24), 'start': datetime.time(16,", "each test. \"\"\" pass def test_get_data(self): \"\"\" Test parsing of CSV file. \"\"\"", "sorted_dict) self.assertEqual( data[0], { 'hours': 170, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }", "5 user data. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]),", "Test checking if user exist. \"\"\" months_sum = [ [], [], [], [],", ") TEST_XML_DATA = os.path.join( os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml' ) # pylint:", "181], ['August', 139], ['September', 64], ['no data', 0], ['no data', 0], ['no data',", "[], [6426], [22969], [25321], [16564], [24123], [118402], [], [], [] ] }, {141:", "def test_presence_weekday_view(self): \"\"\" Test mean presence time of given user grouped by weekday.", "user exist. \"\"\" months_sum = [ [], [], [], [], [], [], [550395],", "(141, [612478]), (26, [508050]), (26, [560624]), (29, [385973]), (30, []), (31, []), (33,", "= datetime.date(2013, 9, 9) months = [[] for month in xrange(13)] data =", "\"\"\" Test parsing of CSV file. 
\"\"\" data = utils.get_data() self.assertIsInstance(data, dict) self.assertItemsEqual(data.keys(),", "9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test calculation", "self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self): \"\"\" Test", "[ (10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]), (15, [432795]), (16, [513180]),", "[394007]), (15, [432795]), (16, [513180]), (176, [606888]), (19, [434499]), (165, [555037]), (170, [576346]),", "42), 'start': datetime.time(11, 43, 50) } }, 179: { datetime.date(2013, 9, 12): {", "[]) def test_sorted_months_dict(self): \"\"\" Test sorting of months dict. \"\"\" dict_months = [", "[], [], [], [], [], [], [], [], [19852], [], [], [] ]", "[], [], [], []]}, {176: [[], [], [], [], [], [], [], [],", "[], []]}, {26: [[], [], [], [], [], [], [], [], [], [],", "[276890], [655139], [500730], [233576], [], [], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data,", "Test main page render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self):", "[[] for month in xrange(13)] data = utils.months_sum_dict(2013, items, item, 178, months) self.assertEqual(", "[], [], [], [], [], [], [], []]}, {62: [[], [], [], [],", "[455386]), (11, [263049]), (12, [371559]), (13, [394007]), (15, [432795]), (16, [513180]), (176, [606888]),", "'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/176' }, { 'hours': 11, 'user_id': 49, 'name': '<NAME>.', 'avatar':", "\"\"\" resp = self.client.get('/api/v1/mean_time_weekday/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data,", "(12, [371559]), (33, [306667]), (11, [263049]), (24, [235634]), (101, []) ] ) data", "datetime.date(2013, 9, 9) months = [[] for month in xrange(13)] data = utils.months_sum_dict(2013,", "other_calculation(): data = 2 + 3 data = time.time() time.sleep(2) return data self.assertNotEqual(other_calculation(),", "\"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0],", "data', 0] ] ) def test_podium_data_maker(self): \"\"\" Test groups presence entries as podium", "'hours': 12, 'user_id': 141, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/141' }, { 'hours': 11, 'user_id':", "'https://intranet.stxnext.pl:443/api/images/users/49' }, { 'hours': 8, 'user_id': 68, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/68' } ]", "groups presence entries as podium data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [", "users listing. \"\"\" resp = self.client.get('/api/v1/users') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data)", "test_podium(self): \"\"\" Test five best months of work time. 
\"\"\" resp = self.client.get('/api/v1/podium/11')", "0], ['June', 76], ['July', 181], ['August', 139], ['September', 64], ['no data', 0], ['no", "['no data', 0], ['no data', 0], ['April', 1], ['July', 4], ['May', 6], ['August',", "= json.loads(resp.data) self.assertEqual(data, 'no data') def test_five_top(self): \"\"\" Test top 5 workers per", "['no data', 0], ['no data', 0], ['no data', 0] ] ) def test_podium_data_maker(self):", "environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def tearDown(self): \"\"\"", "[] ] }, {141: [[], [], [], [], [], [], [], [], [],", "62631.0], ['Fri', 0, 0], ['Sat', 0, 0], ['Sun', 0, 0] ]) def test_xml_translator(self):", "[], [], [], []]}, {26: [[], [], [], [], [], [], [], [],", "pylint: disable=relative-import import utils # pylint: disable=relative-import import views # pylint: disable=unused-import, relative-import", "] ) def test_user_validate(self): \"\"\" Test checking if user exist. \"\"\" months_sum =", "self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data) def test_mean(self): \"\"\" Test of", "time to come to the office and medium time of leave. \"\"\" resp", "entries by month. \"\"\" data = utils.group_by_month(utils.get_data(), 2013) self.assertEqual( data, [ {68: [[],", "62]) sample_date = datetime.date(2013, 9, 10) self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'],", "(176, [606888]), (170, [576346]), (26, [508050]), (165, [555037]), (36, [546225]), (23, [514312]), (16,", "['no data', 0], ['no data', 0], ['no data', 0], ['no data', 0], ['no", "a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) self.client =", "dict. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]),", "data = 2 + 2 data = time.time() time.sleep(1) return data self.assertEqual(short_calculation(), short_calculation())", "[560624]), (165, [555037]), (36, [546225]), (23, [514312]), (16, [513180]), (26, [508050]), (10, [455386]),", "{ 11: [ [], [], [], [], [6426], [22969], [25321], [16564], [24123], [118402],", "42, 23)) self.assertEqual(data, 9743) data = utils.seconds_since_midnight(datetime.time(00, 00, 00)) self.assertEqual(data, 0) def test_interval(self):", "[560624]), (29, [385973]), (30, []), (31, []), (33, [306667]), (36, [546225]), (48, []),", "sorted_dict = OrderedDict([(141, [612478])]) data = utils.five_top_user_data(dict_months, sorted_dict) self.assertEqual(data, []) def test_sorted_months_dict(self): \"\"\"", "'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data =", "59) data = utils.interval(start_example, end_example) self.assertEqual(36000, data) data = utils.interval(end_example, start_example) self.assertEqual(-36000, data)", "data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' }", "top 5 presence users with information about them. \"\"\" data = utils.five_top_workers(9, 1997)", "def test_sorted_months_dict(self): \"\"\" Test sorting of months dict. \"\"\" dict_months = [ (10,", "of mean presence time grouped by weekday of given user. 
\"\"\" resp =", "[], [], [] ] }, { 11: [ [], [], [], [], [6426],", "[], [], [], [], [], [], [], [], [], []]}, {11: [[], [],", "(26, [508050]), (26, [560624]), (29, [385973]), (30, []), (31, []), (33, [306667]), (36,", "year. \"\"\" resp = self.client.get('/api/v1/five_top/9,2013') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual(", "0, 0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no data')", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test mean presence time of", "= json.loads(resp.data) self.assertEqual( data, [ ['Mon', 0, 0], ['Tue', 34745.0, 64792.0], ['Wed', 33592.0,", "[455386]), (19, [434499]), (15, [432795]), (13, [394007]), (29, [385973]), (12, [371559]), (33, [306667]),", "data. \"\"\" data = utils.podium_data_maker(utils.get_data()[11]) self.assertEqual( data, [ ['no data', 0], ['no data',", "page render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def test_api_users(self): \"\"\" Test", "Test user data from XML file extraction. \"\"\" data = utils.xml_translator() self.assertIsInstance(data, dict)", "[], []]}, {10: [[], [], [], [], [], [], [], [], [], [],", "self.assertIn(sample_date, data[10]) self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end']) self.assertEqual( data[10][sample_date]['start'], datetime.time(9, 39, 5) ) def test_seconds_since_midnight(self):", "['September', 32] ] ) def test_group_by_month(self): \"\"\" Test grouping presence entries by month.", "each test, set up a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV", "(36, [546225]), (48, []), (49, []), (54, []), (58, []), ] sorted_dict =", "calculation of seconds between the time the objects. \"\"\" start_example = datetime.time(13, 59,", "[], [] ] data = utils.podium_result_structure_builder(months) self.assertEqual( data, [ ['no data', 0], ['no", "6426.0], ['Sun', 22969.0] ] ) resp = self.client.get('/api/v1/podium/9999') data = json.loads(resp.data) self.assertEqual(data, 'no", "\"\"\" Test main page render template. \"\"\" resp = self.client.get('/') self.assertEqual(resp.status_code, 200) def", "TEST_DATA_CSV } ) def tearDown(self): \"\"\" Get rid of unused objects after each", "[], []]}, {176: [[], [], [], [], [], [], [], [], [], [],", "of seconds between the time the objects. \"\"\" start_example = datetime.time(13, 59, 59)", "for every month. \"\"\" items = { 178: { datetime.date(2013, 9, 9): {", "up a environment. \"\"\" main.app.config.update( { 'XML_DATA': TEST_XML_DATA, 'DATA_CSV': TEST_DATA_CSV } ) def", "= self.client.get('/api/v1/podium/11') self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data, [ ['no", "[], [], [], [], []]}, {11: [[], [], [], [], [], [], [],", "self.assertEqual(resp.status_code, 200) self.assertEqual(resp.content_type, 'application/json') data = json.loads(resp.data) self.assertEqual( data[0], { 'user_id': 36, 'name':", "\"\"\" Test top 5 workers per months in year. 
\"\"\" resp = self.client.get('/api/v1/five_top/9,2013')", "(29, [385973]), (30, []), (31, []), (33, [306667]), (36, [546225]), (48, []), (49,", "\"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12, [371559]), (13, [394007]), (15,", "'user_id': 36, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36' } ) def test_presence_weekday_view(self): \"\"\" Test mean", "[505118], [499105], [486939], [624356], [455386] ] } ) def suite(): \"\"\" Default test", "sorting of months dict. \"\"\" dict_months = [ (10, [455386]), (11, [263049]), (12,", "(36, [546225]), (48, []), (49, []), (54, []), (58, []) ] data =", "(31, []), (33, [306667]), (36, [546225]), (48, []), (49, []), (54, []), (58,", "self.assertEqual( data, { 141: [ [], [], [], [], [], [], [550395], [632015],", "] }, { 11: [ [], [], [], [], [6426], [22969], [25321], [16564],", "'https://intranet.stxnext.pl:443/api/images/users/10' } ] ) data = utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours':", "test_five_top_workers(self): \"\"\" Test top 5 presence users with information about them. \"\"\" data", "[], [], [], [], [], [], []]}, {62: [[], [], [], [], [],", "'<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/11' }, { 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10'", "0.2, 0.3, 234]) self.assertEqual(58.75, data) data = utils.mean([]) self.assertEqual(0, data) def test_day_start_end(self): \"\"\"", "['May', 6], ['August', 6], ['June', 7], ['September', 32] ] ) def test_group_by_month(self): \"\"\"", "}, { 'hours': 21, 'user_id': 10, 'name': '<NAME>.', 'avatar': 'https://intranet.stxnext.pl:443/api/images/users/10' } ] )", "[], [], [], [], [], [], [], [], [], [19852], [], [], []", "] ) data = utils.five_top_workers(9, 2015) self.assertEqual( data, [ { 'hours': 15, 'user_id':" ]
# bag/tests/test_views.py
from django.test import TestCase, Client
from django.urls import reverse

from products.models import Category, Product


class TestBagViews(TestCase):
    def setUp(self):
        self.client = Client()
        self.home = reverse("home")
        self.view_bag = reverse("view_bag")
        self.category = Category.objects.create(
            name="test_category", friendly_name="Test Category"
        )
        self.product = Product.objects.create(
            category=self.category,
            sku="1",
            name="test product",
            description="test description",
            price="2.99",
            rating="4",
            image="testimage.jpg",
            has_sizes=False,
        )

    def test_view_bag_view_GET(self):
        ''' test the view bag page '''
        response = self.client.get(self.view_bag)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "bag/bag.html")
        self.assertTemplateUsed(response, "base.html")
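A natural companion test, not present in the original file, would exercise the home URL that setUp already resolves. The method below (which would sit inside TestBagViews) is only a sketch and assumes the project's home view returns a plain 200 response:

    def test_home_view_GET(self):
        ''' hypothetical extra test: the home page resolves and loads '''
        response = self.client.get(self.home)
        self.assertEqual(response.status_code, 200)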
# genestack/python-client: docs/source/sample_scripts/run_vc_pipeline.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil,
                              SpecialFolders, VariationCaller2Application, get_connection,
                              make_connection_parser)


# base class to create multiple files with a CLA
class BatchFilesCreator(object):
    def __init__(self, cla, base_folder, friendly_name, custom_args=None):
        """
        Constructor of the general batch files creator, to create multiple files from a CLA.

        :param cla: a ``CLApplication`` object, wrapper for the corresponding CLA
        :param base_folder: accession of the base folder where the pipeline files will be
            organised into subfolders
        :param friendly_name: user-friendly name of the files produced by the app; used in the
            on-screen statements and in the name of the project subfolders
        :param custom_args: list of custom command-line argument strings for the files.
            Default is ``None``
        """
        self._cla = cla
        self._files_util = FilesUtil(cla.connection)
        self._base_folder = base_folder
        self._friendly_name = friendly_name
        self._custom_args = custom_args

    def create_files(self, sources):
        print('Creating %s files...' % self._friendly_name)
        output_folder = self._files_util.create_folder(self._friendly_name, parent=self._base_folder)
        output_files = []
        for i, source in enumerate(sources, 1):
            output = self._create_output_file(source)
            self._files_util.link_file(output, output_folder)
            print('Created %s file %s (%d/%d)' % (self._friendly_name, output, i, len(sources)))
            output_files.append(output)
        return output_files

    # this method can be overridden in child classes to allow for more complex file creation logic
    def _create_output_file(self, source):
        output = self._cla.create_file(source)
        if self._custom_args:
            self._cla.change_command_line_arguments(output, self._custom_args)
        return output


# special class for Bowtie to replace the default reference genome
class BowtieBatchFilesCreator(BatchFilesCreator):
    def __init__(self, cla, base_folder, friendly_name, custom_args=None, ref_genome=None):
        BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args)
        self._ref_genome = ref_genome

    def _create_output_file(self, source):
        output = BatchFilesCreator._create_output_file(self, source)
        # replace reference genome
        if self._ref_genome:
            self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME)
            self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None, self._ref_genome)
        return output


# These CLA arguments correspond to all default options except the type of variants to look for (SNPs only).
# The easiest way to know the syntax of the command-line arguments for a specific app is to look at the "Parameters"
# metainfo field of a CLA file on Genestack that has the parameters you want.
VC_ARGUMENTS_NO_INDELS = ["--skip-indels -d 250 -m 1 -E --BCF --output-tags DP,DV,DP4,SP", "",
                          "--skip-variants indels --multiallelic-caller --variants-only"]

if __name__ == "__main__":
    # parse script arguments
    parser = make_connection_parser()
    parser.add_argument('raw_reads_folder',
                        help='Genestack accession of the folder containing the raw reads files to process')
    parser.add_argument('--name', default="New Project",
                        help='Name of the Genestack folder where to put the output files')
    parser.add_argument('--ref-genome',
                        help='Accession of the reference genome to use for the mapping step')
    args = parser.parse_args()
    project_name = args.name

    print('Connecting to Genestack...')

    # get connection and create output folder
    connection = get_connection(args)
    files_util = FilesUtil(connection)
    created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED)
    project_folder = files_util.create_folder(project_name, parent=created_files_folder)

    # create application wrappers and batch files creators
    bowtie_app = BowtieApplication(connection)
    mapped_qc_app = AlignedReadsQC(connection)
    variant_calling_app = VariationCaller2Application(connection)

    bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, "Mapped Reads", ref_genome=args.ref_genome)
    mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, "Mapped Reads QC")
    vc_creator = BatchFilesCreator(variant_calling_app, project_folder, "Variants", custom_args=VC_ARGUMENTS_NO_INDELS)

    # collect files
    print('Collecting raw reads...')
    raw_reads = files_util.get_file_children(args.raw_reads_folder)
    files_count = len(raw_reads)
    print('Found %d files to process' % files_count)

    # Create pipeline files
    mapped_reads = bowtie_creator.create_files(raw_reads)
    mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads)
    vc_creator.create_files(mapped_reads)

    print('All done! Your files are in the folder %s' % project_folder)
VC_ARGUMENTS_NO_INDELS = [\"--skip-indels -d 250 -m 1 -E --BCF", "-*- coding: utf-8 -*- from __future__ import print_function from __future__ import absolute_import from", "look at the \"Parameters\" # metainfo field of a CLA file on Genestack", "= parser.parse_args() project_name = args.name print('Connecting to Genestack...') # get connection and create", "script arguments parser = make_connection_parser() parser.add_argument('raw_reads_folder', help='Genestack accession of the folder containing the", "division from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from builtins import", "custom_args) self._ref_genome = ref_genome def _create_output_file(self, source): output = BatchFilesCreator._create_output_file(self, source) # replace", "reference genome class BowtieBatchFilesCreator(BatchFilesCreator): def __init__(self, cla, base_folder, friendly_name, custom_args=None, ref_genome=None): BatchFilesCreator.__init__(self, cla,", "BatchFilesCreator(mapped_qc_app, project_folder, \"Mapped Reads QC\") vc_creator = BatchFilesCreator(variant_calling_app, project_folder, \"Variants\", custom_args=VC_ARGUMENTS_NO_INDELS) # collect", "BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args) self._ref_genome = ref_genome def _create_output_file(self, source): output =", "the reference genome to use for the mapping step') args = parser.parse_args() project_name", "user-friendly name of the files produced by the app ; used in the", ":param cla: a ``CLApplication`` object, wrapper for the corresponding CLA :param base_folder: accession", "to create multiple files with a CLA class BatchFilesCreator(object): def __init__(self, cla, base_folder,", "be overridden in child classes to allow for more complex file creation logic", "output_folder = self._files_util.create_folder(self._friendly_name, parent=self._base_folder) output_files = [] for i, source in enumerate(sources, 1):", "self._create_output_file(source) self._files_util.link_file(output, output_folder) print('Created %s file %s (%d/%d)' % (self._friendly_name, output, i, len(output)))", "mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, \"Mapped Reads QC\") vc_creator = BatchFilesCreator(variant_calling_app, project_folder, \"Variants\", custom_args=VC_ARGUMENTS_NO_INDELS)", "project_folder = files_util.create_folder(project_name, parent=created_files_folder) # create application wrappers and batch files creators bowtie_app", "sources): print('Creating %s files...' % self._friendly_name) output_folder = self._files_util.create_folder(self._friendly_name, parent=self._base_folder) output_files = []", "for the files. 
Default is ``None`` \"\"\" self._cla = cla self._files_util = FilesUtil(cla.connection)", "__future__ import division from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from", "default reference genome class BowtieBatchFilesCreator(BatchFilesCreator): def __init__(self, cla, base_folder, friendly_name, custom_args=None, ref_genome=None): BatchFilesCreator.__init__(self,", "create output folder connection = get_connection(args) files_util = FilesUtil(connection) created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED) project_folder", "; used in the on-screen statements and in the name of the project", "variant_calling_app = VariationCaller2Application(connection) bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, \"Mapped Reads\", ref_genome=args.ref_genome) mapped_qc_creator = BatchFilesCreator(mapped_qc_app,", "mapped_reads = bowtie_creator.create_files(raw_reads) mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads) vc_creator.create_files(mapped_reads) print('All done! Your files are in", "allow for more complex file creation logic def _create_output_file(self, source): output = self._cla.create_file(source)", "command-line argument strings for the files. Default is ``None`` \"\"\" self._cla = cla", "special class for Bowtie to replace the default reference genome class BowtieBatchFilesCreator(BatchFilesCreator): def", "BatchFilesCreator(variant_calling_app, project_folder, \"Variants\", custom_args=VC_ARGUMENTS_NO_INDELS) # collect files print('Collecting raw reads...') raw_reads = files_util.get_file_children(args.raw_reads_folder)", "collect files print('Collecting raw reads...') raw_reads = files_util.get_file_children(args.raw_reads_folder) files_count = len(raw_reads) print('Found %d", "for the corresponding CLA :param base_folder: accession of the base folder where the", "accession of the base folder where the pipeline files will be organised into", "SpecialFolders, VariationCaller2Application, get_connection, make_connection_parser) # base class to create multiple files with a", "containing the raw reads files to process') parser.add_argument('--name', default=\"New Project\", help='Name of the", "VariationCaller2Application(connection) bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, \"Mapped Reads\", ref_genome=args.ref_genome) mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, \"Mapped", "project_folder, \"Mapped Reads QC\") vc_creator = BatchFilesCreator(variant_calling_app, project_folder, \"Variants\", custom_args=VC_ARGUMENTS_NO_INDELS) # collect files", "command-line arguments for a specific app is to look at the \"Parameters\" #", "reference genome if self._ref_genome: self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME) self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None, self._ref_genome) return output #", "classes to allow for more complex file creation logic def _create_output_file(self, source): output", "mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads) vc_creator.create_files(mapped_reads) print('All done! 
Your files are in the folder %s'", "= self._cla.create_file(source) if self._custom_args: self._cla.change_command_line_arguments(output, self._custom_args) return output # special class for Bowtie", "Bowtie to replace the default reference genome class BowtieBatchFilesCreator(BatchFilesCreator): def __init__(self, cla, base_folder,", "output files') parser.add_argument('--ref-genome', help='Accession of the reference genome to use for the mapping", "to look for (SNPs only). # The easiest way to know the syntax", "can be overridden in child classes to allow for more complex file creation", "from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders, VariationCaller2Application, get_connection, make_connection_parser) # base", "the files. Default is ``None`` \"\"\" self._cla = cla self._files_util = FilesUtil(cla.connection) self._base_folder", "files produced by the app ; used in the on-screen statements and in", "* from builtins import object from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders,", "= custom_args def create_files(self, sources): print('Creating %s files...' % self._friendly_name) output_folder = self._files_util.create_folder(self._friendly_name,", "app is to look at the \"Parameters\" # metainfo field of a CLA", "of custom command-line argument strings for the files. Default is ``None`` \"\"\" self._cla", "output_files = [] for i, source in enumerate(sources, 1): output = self._create_output_file(source) self._files_util.link_file(output,", "BatchFilesCreator._create_output_file(self, source) # replace reference genome if self._ref_genome: self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME) self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None,", "parameters you want. VC_ARGUMENTS_NO_INDELS = [\"--skip-indels -d 250 -m 1 -E --BCF --output-tags", "custom_args=VC_ARGUMENTS_NO_INDELS) # collect files print('Collecting raw reads...') raw_reads = files_util.get_file_children(args.raw_reads_folder) files_count = len(raw_reads)", "__init__(self, cla, base_folder, friendly_name, custom_args=None, ref_genome=None): BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args) self._ref_genome =", "name of the files produced by the app ; used in the on-screen", "files. 
Default is ``None`` \"\"\" self._cla = cla self._files_util = FilesUtil(cla.connection) self._base_folder =", "= FilesUtil(cla.connection) self._base_folder = base_folder self._friendly_name = friendly_name self._custom_args = custom_args def create_files(self,", "= files_util.create_folder(project_name, parent=created_files_folder) # create application wrappers and batch files creators bowtie_app =", "None, self._ref_genome) return output # These CLA arguments correspond to all default options", "= len(raw_reads) print('Found %d files to process' % files_count) # Create pipeline files", "self._files_util.link_file(output, output_folder) print('Created %s file %s (%d/%d)' % (self._friendly_name, output, i, len(output))) output_files.append(output)", "the output files') parser.add_argument('--ref-genome', help='Accession of the reference genome to use for the", "-*- from __future__ import print_function from __future__ import absolute_import from __future__ import division", "and in the name of the project subfolders :param custom_args: list of custom", "folder connection = get_connection(args) files_util = FilesUtil(connection) created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED) project_folder = files_util.create_folder(project_name,", "= base_folder self._friendly_name = friendly_name self._custom_args = custom_args def create_files(self, sources): print('Creating %s", "utf-8 -*- from __future__ import print_function from __future__ import absolute_import from __future__ import", "into subfolders :param friendly_name: user-friendly name of the files produced by the app", "bowtie_app = BowtieApplication(connection) mapped_qc_app = AlignedReadsQC(connection) variant_calling_app = VariationCaller2Application(connection) bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder,", "friendly_name self._custom_args = custom_args def create_files(self, sources): print('Creating %s files...' % self._friendly_name) output_folder", "Genestack that has the parameters you want. VC_ARGUMENTS_NO_INDELS = [\"--skip-indels -d 250 -m", "# base class to create multiple files with a CLA class BatchFilesCreator(object): def", "reads...') raw_reads = files_util.get_file_children(args.raw_reads_folder) files_count = len(raw_reads) print('Found %d files to process' %", "_create_output_file(self, source): output = self._cla.create_file(source) if self._custom_args: self._cla.change_command_line_arguments(output, self._custom_args) return output # special", "metainfo field of a CLA file on Genestack that has the parameters you", "of variants to look for (SNPs only). # The easiest way to know", "of a CLA file on Genestack that has the parameters you want. VC_ARGUMENTS_NO_INDELS", "= ref_genome def _create_output_file(self, source): output = BatchFilesCreator._create_output_file(self, source) # replace reference genome", "variants to look for (SNPs only). # The easiest way to know the", "builtins import object from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders, VariationCaller2Application, get_connection,", "__future__ import absolute_import from __future__ import division from __future__ import unicode_literals from future", "be organised into subfolders :param friendly_name: user-friendly name of the files produced by", "print_function from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals", "multiple files from a CLA. 
:param cla: a ``CLApplication`` object, wrapper for the", "get connection and create output folder connection = get_connection(args) files_util = FilesUtil(connection) created_files_folder", "= self._create_output_file(source) self._files_util.link_file(output, output_folder) print('Created %s file %s (%d/%d)' % (self._friendly_name, output, i,", "file %s (%d/%d)' % (self._friendly_name, output, i, len(output))) output_files.append(output) return output_files # this", "files_util.get_special_folder(SpecialFolders.CREATED) project_folder = files_util.create_folder(project_name, parent=created_files_folder) # create application wrappers and batch files creators", "from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from", "raw_reads = files_util.get_file_children(args.raw_reads_folder) files_count = len(raw_reads) print('Found %d files to process' % files_count)", "files to process' % files_count) # Create pipeline files mapped_reads = bowtie_creator.create_files(raw_reads) mapped_reads_qcs", "= friendly_name self._custom_args = custom_args def create_files(self, sources): print('Creating %s files...' % self._friendly_name)", "%d files to process' % files_count) # Create pipeline files mapped_reads = bowtie_creator.create_files(raw_reads)", "standard_library.install_aliases() from builtins import * from builtins import object from genestack_client import (AlignedReadsQC,", "files print('Collecting raw reads...') raw_reads = files_util.get_file_children(args.raw_reads_folder) files_count = len(raw_reads) print('Found %d files", "create application wrappers and batch files creators bowtie_app = BowtieApplication(connection) mapped_qc_app = AlignedReadsQC(connection)", "1 -E --BCF --output-tags DP,DV,DP4,SP\", \"\", \"--skip-variants indels --multiallelic-caller --variants-only\"] if __name__ ==", "import absolute_import from __future__ import division from __future__ import unicode_literals from future import", "subfolders :param custom_args: list of custom command-line argument strings for the files. Default", "__init__(self, cla, base_folder, friendly_name, custom_args=None): \"\"\" Constructor of the general batch files creator,", "import division from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from builtins", "where to put the output files') parser.add_argument('--ref-genome', help='Accession of the reference genome to", "(self._friendly_name, output, i, len(output))) output_files.append(output) return output_files # this method can be overridden", "ref_genome=None): BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args) self._ref_genome = ref_genome def _create_output_file(self, source): output", "genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders, VariationCaller2Application, get_connection, make_connection_parser) # base class", "general batch files creator, to create multiple files from a CLA. 
:param cla:", "= get_connection(args) files_util = FilesUtil(connection) created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED) project_folder = files_util.create_folder(project_name, parent=created_files_folder) #", "on-screen statements and in the name of the project subfolders :param custom_args: list", "cla, base_folder, friendly_name, custom_args=None, ref_genome=None): BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args) self._ref_genome = ref_genome", "files mapped_reads = bowtie_creator.create_files(raw_reads) mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads) vc_creator.create_files(mapped_reads) print('All done! Your files are", "replace the default reference genome class BowtieBatchFilesCreator(BatchFilesCreator): def __init__(self, cla, base_folder, friendly_name, custom_args=None,", "import standard_library standard_library.install_aliases() from builtins import * from builtins import object from genestack_client", "replace reference genome if self._ref_genome: self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME) self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None, self._ref_genome) return output", "__future__ import unicode_literals from future import standard_library standard_library.install_aliases() from builtins import * from", "of the base folder where the pipeline files will be organised into subfolders", "a specific app is to look at the \"Parameters\" # metainfo field of", "to process') parser.add_argument('--name', default=\"New Project\", help='Name of the Genestack folder where to put", "arguments for a specific app is to look at the \"Parameters\" # metainfo", "way to know the syntax of the command-line arguments for a specific app", "BowtieApplication(connection) mapped_qc_app = AlignedReadsQC(connection) variant_calling_app = VariationCaller2Application(connection) bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, \"Mapped Reads\",", "return output_files # this method can be overridden in child classes to allow", "source in enumerate(sources, 1): output = self._create_output_file(source) self._files_util.link_file(output, output_folder) print('Created %s file %s", "builtins import * from builtins import object from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication,", "custom_args=None, ref_genome=None): BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args) self._ref_genome = ref_genome def _create_output_file(self, source):", "files') parser.add_argument('--ref-genome', help='Accession of the reference genome to use for the mapping step')", "cla, base_folder, friendly_name, custom_args=None): \"\"\" Constructor of the general batch files creator, to", "easiest way to know the syntax of the command-line arguments for a specific", "= [\"--skip-indels -d 250 -m 1 -E --BCF --output-tags DP,DV,DP4,SP\", \"\", \"--skip-variants indels", "output_files.append(output) return output_files # this method can be overridden in child classes to", "has the parameters you want. 
VC_ARGUMENTS_NO_INDELS = [\"--skip-indels -d 250 -m 1 -E", "get_connection(args) files_util = FilesUtil(connection) created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED) project_folder = files_util.create_folder(project_name, parent=created_files_folder) # create", "= cla self._files_util = FilesUtil(cla.connection) self._base_folder = base_folder self._friendly_name = friendly_name self._custom_args =", "folder containing the raw reads files to process') parser.add_argument('--name', default=\"New Project\", help='Name of", "CLA :param base_folder: accession of the base folder where the pipeline files will", "return output # special class for Bowtie to replace the default reference genome", "for (SNPs only). # The easiest way to know the syntax of the", "Reads\", ref_genome=args.ref_genome) mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, \"Mapped Reads QC\") vc_creator = BatchFilesCreator(variant_calling_app, project_folder,", "output = self._create_output_file(source) self._files_util.link_file(output, output_folder) print('Created %s file %s (%d/%d)' % (self._friendly_name, output,", "= args.name print('Connecting to Genestack...') # get connection and create output folder connection", "= self._files_util.create_folder(self._friendly_name, parent=self._base_folder) output_files = [] for i, source in enumerate(sources, 1): output", "file creation logic def _create_output_file(self, source): output = self._cla.create_file(source) if self._custom_args: self._cla.change_command_line_arguments(output, self._custom_args)", "base_folder, friendly_name, custom_args=None, ref_genome=None): BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args) self._ref_genome = ref_genome def", "parse script arguments parser = make_connection_parser() parser.add_argument('raw_reads_folder', help='Genestack accession of the folder containing", "ref_genome=args.ref_genome) mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, \"Mapped Reads QC\") vc_creator = BatchFilesCreator(variant_calling_app, project_folder, \"Variants\",", "friendly_name, custom_args) self._ref_genome = ref_genome def _create_output_file(self, source): output = BatchFilesCreator._create_output_file(self, source) #", "# replace reference genome if self._ref_genome: self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME) self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None, self._ref_genome) return", "= bowtie_creator.create_files(raw_reads) mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads) vc_creator.create_files(mapped_reads) print('All done! 
Your files are in the", "#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function from __future__", "= BatchFilesCreator._create_output_file(self, source) # replace reference genome if self._ref_genome: self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME) self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME,", "the files produced by the app ; used in the on-screen statements and", "source): output = self._cla.create_file(source) if self._custom_args: self._cla.change_command_line_arguments(output, self._custom_args) return output # special class", "== \"__main__\": # parse script arguments parser = make_connection_parser() parser.add_argument('raw_reads_folder', help='Genestack accession of", "self._ref_genome) return output # These CLA arguments correspond to all default options except", "self._cla.change_command_line_arguments(output, self._custom_args) return output # special class for Bowtie to replace the default", "a CLA file on Genestack that has the parameters you want. VC_ARGUMENTS_NO_INDELS =", "object from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders, VariationCaller2Application, get_connection, make_connection_parser) #", "print('Creating %s files...' % self._friendly_name) output_folder = self._files_util.create_folder(self._friendly_name, parent=self._base_folder) output_files = [] for", "except the type of variants to look for (SNPs only). # The easiest", "base_folder self._friendly_name = friendly_name self._custom_args = custom_args def create_files(self, sources): print('Creating %s files...'", "from __future__ import print_function from __future__ import absolute_import from __future__ import division from", "file on Genestack that has the parameters you want. VC_ARGUMENTS_NO_INDELS = [\"--skip-indels -d", "of the files produced by the app ; used in the on-screen statements", "files_count = len(raw_reads) print('Found %d files to process' % files_count) # Create pipeline", "list of custom command-line argument strings for the files. Default is ``None`` \"\"\"", "# These CLA arguments correspond to all default options except the type of", "import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders, VariationCaller2Application, get_connection, make_connection_parser) # base class to", "CLA file on Genestack that has the parameters you want. VC_ARGUMENTS_NO_INDELS = [\"--skip-indels", "CLA. :param cla: a ``CLApplication`` object, wrapper for the corresponding CLA :param base_folder:", "connection = get_connection(args) files_util = FilesUtil(connection) created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED) project_folder = files_util.create_folder(project_name, parent=created_files_folder)", "for more complex file creation logic def _create_output_file(self, source): output = self._cla.create_file(source) if", "type of variants to look for (SNPs only). 
# The easiest way to", "will be organised into subfolders :param friendly_name: user-friendly name of the files produced", "if self._custom_args: self._cla.change_command_line_arguments(output, self._custom_args) return output # special class for Bowtie to replace", "from future import standard_library standard_library.install_aliases() from builtins import * from builtins import object", "cla self._files_util = FilesUtil(cla.connection) self._base_folder = base_folder self._friendly_name = friendly_name self._custom_args = custom_args", "-E --BCF --output-tags DP,DV,DP4,SP\", \"\", \"--skip-variants indels --multiallelic-caller --variants-only\"] if __name__ == \"__main__\":", "self._cla.create_file(source) if self._custom_args: self._cla.change_command_line_arguments(output, self._custom_args) return output # special class for Bowtie to", "len(output))) output_files.append(output) return output_files # this method can be overridden in child classes", "AlignedReadsQC(connection) variant_calling_app = VariationCaller2Application(connection) bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, \"Mapped Reads\", ref_genome=args.ref_genome) mapped_qc_creator =", "to process' % files_count) # Create pipeline files mapped_reads = bowtie_creator.create_files(raw_reads) mapped_reads_qcs =", "of the command-line arguments for a specific app is to look at the", "(AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil, SpecialFolders, VariationCaller2Application, get_connection, make_connection_parser) # base class to create", "= BatchFilesCreator(variant_calling_app, project_folder, \"Variants\", custom_args=VC_ARGUMENTS_NO_INDELS) # collect files print('Collecting raw reads...') raw_reads =", "reference genome to use for the mapping step') args = parser.parse_args() project_name =", "custom_args: list of custom command-line argument strings for the files. Default is ``None``", "only). # The easiest way to know the syntax of the command-line arguments", "strings for the files. Default is ``None`` \"\"\" self._cla = cla self._files_util =", "to all default options except the type of variants to look for (SNPs" ]
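# Illustrative extension sketch (not part of the original script): the comment on
# _create_output_file above says it can be overridden for more complex creation logic.
# The class below shows the minimal shape of such an override; the name
# "AnnotatedBatchFilesCreator" and the extra print are assumptions added for illustration.
class AnnotatedBatchFilesCreator(BatchFilesCreator):
    def _create_output_file(self, source):
        # reuse the generic creation logic, then hook in any per-file post-processing
        output = BatchFilesCreator._create_output_file(self, source)
        print('Post-processing %s file %s' % (self._friendly_name, output))
        return output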
[ "-> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str): if is_s3_path(path):", "False def bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def", "bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str):", "LocalFileSystem from s3fs import S3FileSystem def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return", "return False def bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0)", "from s3fs import S3FileSystem def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return True", "str) -> bool: if path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path: str) ->", "is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path: str)", "S3FileSystem def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return True return False def", "= path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str): if is_s3_path(path): return S3FileSystem() return", "import LocalFileSystem from s3fs import S3FileSystem def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"):", "fsspec.implementations.local import LocalFileSystem from s3fs import S3FileSystem def is_s3_path(path: str) -> bool: if", "True return False def bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return", "if path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path: str) -> str: path_parts =", "def bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path:", "path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str): if is_s3_path(path): return S3FileSystem()", "return True return False def bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\")", "bool: if path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path: str) -> str: path_parts", "str) -> str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str): if", "s3fs import S3FileSystem def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return True return", "import S3FileSystem def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return True return False", "path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str): if is_s3_path(path): return S3FileSystem() return LocalFileSystem(auto_mkdir=\"True\")", "def is_s3_path(path: str) -> bool: if path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path:", "str: path_parts = path.replace(\"s3://\", \"\").split(\"/\") return path_parts.pop(0) def get_fs(path: str): if is_s3_path(path): return", "path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path: str) -> str: path_parts = path.replace(\"s3://\",", "from fsspec.implementations.local import LocalFileSystem from s3fs import S3FileSystem def is_s3_path(path: str) -> bool:", "-> bool: if path.startswith(\"s3://\"): return True return False def bucket_name_from_path(path: str) -> str:" ]
[ "!= 0 else s[(length // 2)-1:(length // 2)+1] # Test # s =", "0 else s[(length // 2)-1:(length // 2)+1] # Test # s = 'abcde'", "Test # s = 'abcde' s = 'qwer' print(solution(s)) #5 -> 2 #4", "s = 'qwer' print(solution(s)) #5 -> 2 #4 -> [1:3] # 1, 2", "length % 2 != 0 else s[(length // 2)-1:(length // 2)+1] # Test", "% 2 != 0 else s[(length // 2)-1:(length // 2)+1] # Test #", "length = len(s) return s[length // 2] if length % 2 != 0", "// 2)+1] # Test # s = 'abcde' s = 'qwer' print(solution(s)) #5", "# Test # s = 'abcde' s = 'qwer' print(solution(s)) #5 -> 2", "def solution(s): length = len(s) return s[length // 2] if length % 2", "2)+1] # Test # s = 'abcde' s = 'qwer' print(solution(s)) #5 ->", "s[length // 2] if length % 2 != 0 else s[(length // 2)-1:(length", "// 2)-1:(length // 2)+1] # Test # s = 'abcde' s = 'qwer'", "2] if length % 2 != 0 else s[(length // 2)-1:(length // 2)+1]", "else s[(length // 2)-1:(length // 2)+1] # Test # s = 'abcde' s", "= 'abcde' s = 'qwer' print(solution(s)) #5 -> 2 #4 -> [1:3] #", "= len(s) return s[length // 2] if length % 2 != 0 else", "len(s) return s[length // 2] if length % 2 != 0 else s[(length", "'abcde' s = 'qwer' print(solution(s)) #5 -> 2 #4 -> [1:3] # 1,", "2)-1:(length // 2)+1] # Test # s = 'abcde' s = 'qwer' print(solution(s))", "if length % 2 != 0 else s[(length // 2)-1:(length // 2)+1] #", "2 != 0 else s[(length // 2)-1:(length // 2)+1] # Test # s", "# s = 'abcde' s = 'qwer' print(solution(s)) #5 -> 2 #4 ->", "return s[length // 2] if length % 2 != 0 else s[(length //", "<filename>programmers/skill-test-lv1/get_middle_char.py def solution(s): length = len(s) return s[length // 2] if length %", "s = 'abcde' s = 'qwer' print(solution(s)) #5 -> 2 #4 -> [1:3]", "solution(s): length = len(s) return s[length // 2] if length % 2 !=", "s[(length // 2)-1:(length // 2)+1] # Test # s = 'abcde' s =", "// 2] if length % 2 != 0 else s[(length // 2)-1:(length //" ]
[ "import models from django.contrib.auth.models import User class Book(models.Model): title = models.CharField(max_length=100) author =", "models from django.contrib.auth.models import User class Book(models.Model): title = models.CharField(max_length=100) author = models.ForeignKey(User)", "django.db import models from django.contrib.auth.models import User class Book(models.Model): title = models.CharField(max_length=100) author", "from django.db import models from django.contrib.auth.models import User class Book(models.Model): title = models.CharField(max_length=100)" ]
[ "давай ага супер пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло :)')", "# Вариант с инпутом. def podkat(): a = str(input(\"Привет, солнце, посветишь для меня", "\"гоу хочу хотела красава мечтаю давай ага супер пойдём\") if a.lower() in right_answers.lower():", ":)') else: while a.lower() not in right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите попытку:", "a = str(input(\"Привет, солнце, посветишь для меня сегодня ?): \")) right_answers = (\"да", "хотела красава мечтаю давай ага супер пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня", "a = str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько времени потеряли зря :)", "с инпутом. def podkat(): a = str(input(\"Привет, солнце, посветишь для меня сегодня ?):", "ответ. Повторите попытку: \")) return print(\"Столько времени потеряли зря :) Могли бы уже", "return print(\"Столько времени потеряли зря :) Могли бы уже зажигать!\") if __name__ ==", "красава мечтаю давай ага супер пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня кому-то", "print('Сегодня кому-то повезло :)') else: while a.lower() not in right_answers.lower(): a = str(input(\"Некорректный", "#! Подкат к программистке. # Вариант с инпутом. def podkat(): a = str(input(\"Привет,", "мечтаю давай ага супер пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло", "def podkat(): a = str(input(\"Привет, солнце, посветишь для меня сегодня ?): \")) right_answers", "кому-то повезло :)') else: while a.lower() not in right_answers.lower(): a = str(input(\"Некорректный ответ.", "podkat(): a = str(input(\"Привет, солнце, посветишь для меня сегодня ?): \")) right_answers =", "str(input(\"Привет, солнце, посветишь для меня сегодня ?): \")) right_answers = (\"да конечно посвечу", "= str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько времени потеряли зря :) Могли", "посвечу афк пошли мяф го игого\" \"гоу хочу хотела красава мечтаю давай ага", "ага супер пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло :)') else:", "str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько времени потеряли зря :) Могли бы", "конечно посвечу афк пошли мяф го игого\" \"гоу хочу хотела красава мечтаю давай", "супер пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло :)') else: while", "print(\"Столько времени потеряли зря :) Могли бы уже зажигать!\") if __name__ == \"__main__\":", "else: while a.lower() not in right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите попытку: \"))", "инпутом. def podkat(): a = str(input(\"Привет, солнце, посветишь для меня сегодня ?): \"))", "right_answers = (\"да конечно посвечу афк пошли мяф го игого\" \"гоу хочу хотела", "повезло :)') else: while a.lower() not in right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите", "\")) return print(\"Столько времени потеряли зря :) Могли бы уже зажигать!\") if __name__", "к программистке. # Вариант с инпутом. def podkat(): a = str(input(\"Привет, солнце, посветишь", "посветишь для меня сегодня ?): \")) right_answers = (\"да конечно посвечу афк пошли", "right_answers.lower(): return print('Сегодня кому-то повезло :)') else: while a.lower() not in right_answers.lower(): a", "while a.lower() not in right_answers.lower(): a = str(input(\"Некорректный ответ. 
Повторите попытку: \")) return", "игого\" \"гоу хочу хотела красава мечтаю давай ага супер пойдём\") if a.lower() in", "= str(input(\"Привет, солнце, посветишь для меня сегодня ?): \")) right_answers = (\"да конечно", "для меня сегодня ?): \")) right_answers = (\"да конечно посвечу афк пошли мяф", "попытку: \")) return print(\"Столько времени потеряли зря :) Могли бы уже зажигать!\") if", "сегодня ?): \")) right_answers = (\"да конечно посвечу афк пошли мяф го игого\"", "программистке. # Вариант с инпутом. def podkat(): a = str(input(\"Привет, солнце, посветишь для", "пошли мяф го игого\" \"гоу хочу хотела красава мечтаю давай ага супер пойдём\")", "in right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько времени потеряли", "хочу хотела красава мечтаю давай ага супер пойдём\") if a.lower() in right_answers.lower(): return", "меня сегодня ?): \")) right_answers = (\"да конечно посвечу афк пошли мяф го", "?): \")) right_answers = (\"да конечно посвечу афк пошли мяф го игого\" \"гоу", "return print('Сегодня кому-то повезло :)') else: while a.lower() not in right_answers.lower(): a =", "<reponame>only-romano/junkyard #! Подкат к программистке. # Вариант с инпутом. def podkat(): a =", "афк пошли мяф го игого\" \"гоу хочу хотела красава мечтаю давай ага супер", "го игого\" \"гоу хочу хотела красава мечтаю давай ага супер пойдём\") if a.lower()", "\")) right_answers = (\"да конечно посвечу афк пошли мяф го игого\" \"гоу хочу", "in right_answers.lower(): return print('Сегодня кому-то повезло :)') else: while a.lower() not in right_answers.lower():", "пойдём\") if a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло :)') else: while a.lower()", "a.lower() not in right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько", "right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько времени потеряли зря", "времени потеряли зря :) Могли бы уже зажигать!\") if __name__ == \"__main__\": podkat()", "Подкат к программистке. # Вариант с инпутом. def podkat(): a = str(input(\"Привет, солнце,", "солнце, посветишь для меня сегодня ?): \")) right_answers = (\"да конечно посвечу афк", "мяф го игого\" \"гоу хочу хотела красава мечтаю давай ага супер пойдём\") if", "Вариант с инпутом. def podkat(): a = str(input(\"Привет, солнце, посветишь для меня сегодня", "Повторите попытку: \")) return print(\"Столько времени потеряли зря :) Могли бы уже зажигать!\")", "= (\"да конечно посвечу афк пошли мяф го игого\" \"гоу хочу хотела красава", "(\"да конечно посвечу афк пошли мяф го игого\" \"гоу хочу хотела красава мечтаю", "if a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло :)') else: while a.lower() not", "not in right_answers.lower(): a = str(input(\"Некорректный ответ. Повторите попытку: \")) return print(\"Столько времени", "a.lower() in right_answers.lower(): return print('Сегодня кому-то повезло :)') else: while a.lower() not in" ]
[ "on it! # @app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") #", "ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") #", "# def user(name): # return f\"Hello {name}!\" # # Working on it! #", "@app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\", name=\"Admin!\")) if __name__ == \"__main__\": app.run()", "return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name): # return f\"Hello {name}!\" #", "@app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") # def admin(): #", "Working on it! # @app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\")", "url_for, render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name)", "f\"{ipF}\" # @app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\", name=\"Admin!\")) if __name__ ==", "# return f\"Hello {name}!\" # # Working on it! # @app.route(\"/<ipF>\") # def", "{name}!\" # # Working on it! # @app.route(\"/<ipF>\") # def ip(ipF): # return", "def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\",", "# def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") # def admin(): # return", "return f\"{ipF}\" # @app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\", name=\"Admin!\")) if __name__", "flask import Flask, redirect, url_for, render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def", "render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name): # return f\"Hello {name}!\" # #", "# return f\"{ipF}\" # @app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\", name=\"Admin!\")) if", "# Working on it! # @app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\" #", "ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\", name=\"Admin!\"))", "it! # @app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") # def", "render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) #", "content=name) # @app.route(\"/<name>\") # def user(name): # return f\"Hello {name}!\" # # Working", "home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name): # return f\"Hello {name}!\"", "# @app.route(\"/<name>\") # def user(name): # return f\"Hello {name}!\" # # Working on", "@app.route(\"/<name>\") # def user(name): # return f\"Hello {name}!\" # # Working on it!", "Flask, redirect, url_for, render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name): return", "= Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name):", "user(name): # return f\"Hello {name}!\" # # Working on it! # @app.route(\"/<ipF>\") #", "# # Working on it! 
# @app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\"", "def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name): # return f\"Hello", "# @app.route(\"/<ipF>\") # def ip(ipF): # return f\"{ipF}\" # @app.route(\"/admin\") # def admin():", "from flask import Flask, redirect, url_for, render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\")", "app = Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def", "# @app.route(\"/admin\") # def admin(): # return redirect(url_for(\"user\", name=\"Admin!\")) if __name__ == \"__main__\":", "#import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\")", "import Flask, redirect, url_for, render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name):", "@app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name): # return", "def user(name): # return f\"Hello {name}!\" # # Working on it! # @app.route(\"/<ipF>\")", "f\"Hello {name}!\" # # Working on it! # @app.route(\"/<ipF>\") # def ip(ipF): #", "Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\", content=name) # @app.route(\"/<name>\") # def user(name): #", "redirect, url_for, render_template #import ip_finder app = Flask(__name__) @app.route(\"/<name>\") def home(name): return render_template(\"index.html\",", "return f\"Hello {name}!\" # # Working on it! # @app.route(\"/<ipF>\") # def ip(ipF):" ]
[ "@Time : 2021/1/9 17:28 @Author : s_jing @File : __init__.py.py @Software: PyCharm \"\"\"", "-*- coding: utf-8 -*- \"\"\" @Time : 2021/1/9 17:28 @Author : s_jing @File", "coding: utf-8 -*- \"\"\" @Time : 2021/1/9 17:28 @Author : s_jing @File :", "-*- \"\"\" @Time : 2021/1/9 17:28 @Author : s_jing @File : __init__.py.py @Software:", "\"\"\" @Time : 2021/1/9 17:28 @Author : s_jing @File : __init__.py.py @Software: PyCharm", "# -*- coding: utf-8 -*- \"\"\" @Time : 2021/1/9 17:28 @Author : s_jing", "utf-8 -*- \"\"\" @Time : 2021/1/9 17:28 @Author : s_jing @File : __init__.py.py" ]
[ "throw for errors detected at runtime. .. warning:: All functions in the Config", "Exception: \"\"\" An exception class to throw for errors detected at runtime. ..", "at runtime. .. warning:: All functions in the Config class can potentially throw", "warning:: All functions in the Config class can potentially throw this exception. \"\"\"", "in the Config class can potentially throw this exception. \"\"\" def __init__(self): pass", "class to throw for errors detected at runtime. .. warning:: All functions in", "\"\"\" An exception class to throw for errors detected at runtime. .. warning::", "detected at runtime. .. warning:: All functions in the Config class can potentially", ".. warning:: All functions in the Config class can potentially throw this exception.", "to throw for errors detected at runtime. .. warning:: All functions in the", "class Exception: \"\"\" An exception class to throw for errors detected at runtime.", "An exception class to throw for errors detected at runtime. .. warning:: All", "for errors detected at runtime. .. warning:: All functions in the Config class", "errors detected at runtime. .. warning:: All functions in the Config class can", "All functions in the Config class can potentially throw this exception. \"\"\" def", "exception class to throw for errors detected at runtime. .. warning:: All functions", "runtime. .. warning:: All functions in the Config class can potentially throw this", "functions in the Config class can potentially throw this exception. \"\"\" def __init__(self):" ]
[ "projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$',", "Uncomment the next two lines to enable the admin: from django.contrib import admin", "projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently currently", "(r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently currently points to", "app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently currently points", "include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration view is in projdb at the", "in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),", "the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$',", "import views as authviews # Uncomment the next two lines to enable the", "urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb", "patterns, include, url from django.contrib.auth import views as authviews # Uncomment the next", "(r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently currently points to /login #", "# workspace (r'^workspace/', include('workspace.urls')), # registration view is in projdb at the moment", "view is in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login',", "two lines to enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns =", "include('workspace.urls')), # registration view is in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), #", "item currently currently points to /login # projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/',", "'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), # admin (r'^admin/', include(admin.site.urls)), (r'^admin/doc/', include('django.contrib.admindocs.urls')),", "'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), # admin (r'^admin/', include(admin.site.urls)),", "# registration view is in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication", "default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item", "as authviews # Uncomment the next two lines to enable the admin: from", "the next two lines to enable the admin: from django.contrib import admin 
admin.autodiscover()", "favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$',", "# default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu", "to /login # projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration view", "at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'),", "url from django.contrib.auth import views as authviews # Uncomment the next two lines", "lines to enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns = patterns('',", "admin admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default", "import admin admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), #", "views as authviews # Uncomment the next two lines to enable the admin:", "to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently", "# Workspace Logout menu item currently currently points to /login # projects (r'^projects/',", "/login # projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration view is", "the admin: from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$',", "(r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$',", "# projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration view is in", "{'url': '/static/images/favicon.ico'}), # default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace", "# Uncomment the next two lines to enable the admin: from django.contrib import", "import patterns, include, url from django.contrib.auth import views as authviews # Uncomment the", "from django.contrib.auth import views as authviews # Uncomment the next two lines to", "django.contrib.auth import views as authviews # Uncomment the next two lines to enable", "currently currently points to /login # projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')),", "is in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name':", "next two lines to enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns", "workspace (r'^workspace/', include('workspace.urls')), # registration view is in projdb at the moment (r'^accounts/register/$',", "Logout menu item currently currently points to /login # projects (r'^projects/', include('projdb.urls')), #", "= patterns('', # favicon 
(r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb app", "enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # favicon", "admin: from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to',", "patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb app (r'^$',", "(r'^workspace/', include('workspace.urls')), # registration view is in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'),", "'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), #", "projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration view is in projdb", "registration view is in projdb at the moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$',", "(r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'),", "to enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', #", "{'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), # admin (r'^admin/',", "(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), #", "django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}),", "'/static/images/favicon.ico'}), # default to projdb app (r'^$', 'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout", "authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),", "'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'),", "django.conf.urls.defaults import patterns, include, url from django.contrib.auth import views as authviews # Uncomment", "authviews # Uncomment the 
next two lines to enable the admin: from django.contrib", "(r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration view is in projdb at", "menu item currently currently points to /login # projects (r'^projects/', include('projdb.urls')), # workspace", "# favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to projdb app (r'^$', 'projdb.views.index'),", "include, url from django.contrib.auth import views as authviews # Uncomment the next two", "(r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), # admin (r'^admin/', include(admin.site.urls)), (r'^admin/doc/', include('django.contrib.admindocs.urls')), )", "Workspace Logout menu item currently currently points to /login # projects (r'^projects/', include('projdb.urls')),", "'workspace.views.Exit'), # Workspace Logout menu item currently currently points to /login # projects", "'projdb.views.index'), (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently currently points to /login", "# authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$',", "admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}), # default to", "moment (r'^accounts/register/$', 'projdb.views.register'), # authentication (r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'),", "from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', # favicon (r'^favicon\\.ico$', 'django.views.generic.simple.redirect_to', {'url':", "from django.conf.urls.defaults import patterns, include, url from django.contrib.auth import views as authviews #", "(r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), # admin (r'^admin/', include(admin.site.urls)), (r'^admin/doc/',", "currently points to /login # projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), #", "'django.contrib.auth.views.login', {'template_name': 'login.html'}), (r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'), (r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'), (r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'), (r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'), # admin", "points to /login # projects (r'^projects/', include('projdb.urls')), # workspace (r'^workspace/', include('workspace.urls')), # registration" ]
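The URLconf above is written in the Django 1.x style: the patterns() helper, dotted-string view references, and the django.conf.urls.defaults module were all removed in later Django releases. As a rough, abridged sketch only (assuming the projdb and workspace view callables are importable, and a Django version that still ships logout_then_login), the same routes might look like this on a modern Django:

# Hypothetical modern equivalent of the URLconf above (abridged); imports of
# projdb views are assumed to resolve to the same callables the dotted strings named.
from django.urls import include, path, re_path
from django.views.generic import RedirectView
from django.contrib import admin
from django.contrib.auth import views as auth_views
from projdb import views as projdb_views

urlpatterns = [
    re_path(r'^favicon\.ico$', RedirectView.as_view(url='/static/images/favicon.ico')),
    path('', projdb_views.index),
    path('projects/', include('projdb.urls')),
    path('workspace/', include('workspace.urls')),
    path('accounts/register/', projdb_views.register),
    path('accounts/login/', auth_views.LoginView.as_view(template_name='login.html')),
    path('accounts/logout/', auth_views.logout_then_login),
    path('admin/', admin.site.urls),
]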
[ "'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm", "= { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 && \\", "\"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod", "'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl && \\ mv kubectl /usr/local/bin/'", "{ 'Darwin': 'brew install gnu-sed' } dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing", "\\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/ && \\ rm", "{ 'Darwin': 'brew update && brew install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass'", "mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform],", "unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if", "https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/ &&", "version in plugins.items(): install_cmd = \"helm plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing", "'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod", "install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin':", "\"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x", "os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform):", "{0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew update && brew", "\"helm plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm plugin with command: {0}\".format(install_cmd))", "logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs", "else: logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = {", "} dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault", "+x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not", "\"\"\"Install helm plugins. 
Requires helm to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0',", "{ 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\", "landscape CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform)", "install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds =", "+x kubectl && \\ mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not", "os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform):", "not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in {0}\".format(dst)) def", "plugin_url, version in plugins.items(): install_cmd = \"helm plugin install {0} --version={1}\".format( plugin_url, version)", "https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst", "else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = {", "rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True)", "'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip'", "terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing", "= { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in plugins.items(): install_cmd = \"helm", "minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not os.path.isfile(dst):", "&& \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\")", "installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO", "\\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform],", "rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True)", "'/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in", "--with-pinentry' } dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") 
sp.call(install_cmds[os_platform], shell=True) else:", "platform import os.path import logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape", "None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform):", "'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl && \\ mv kubectl", "zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x helm && \\ mv helm", "= '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed", "'Darwin': 'brew install gnu-sed' } dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\")", "install gnu-sed' } dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True)", "{0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 &&", "installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl", "'/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in", "install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew", "not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst)) def", "} dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper", "to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in", "{0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz", "install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\", "else: logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. 
Requires helm", "already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew", "/usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing", "'brew install gnu-sed' } dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform],", "Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf", "-LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/", "landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst", "be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in plugins.items():", "install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\"", "def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl &&", "if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in {0}\".format(dst))", "plugins.items(): install_cmd = \"helm plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm plugin", "chmod +x helm && \\ mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' }", "terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm", "} dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass", "\\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform],", "'0.1.0', } for plugin_url, version in plugins.items(): install_cmd = \"helm plugin install {0}", "install_cmds = { 'Darwin': 'brew install gnu-sed' } dst = '/usr/local/bin/gsed' if not", "\"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew install gnu-sed' } dst = '/usr/local/bin/gsed'", "'/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in", "= { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper &&", "logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin':", "dst = '/usr/local/bin/helm' if not os.path.isfile(dst): 
logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already", "\\ chmod +x helm && \\ mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz'", "+x helm && \\ mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst", "def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew install gnu-sed' } dst", "not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def", "lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True)", "= '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed", "update && brew install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if not os.path.isfile(dst):", "{ 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 && \\ mv", "os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins():", "'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip'", "helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not os.path.isfile(dst):", "&& \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst =", "logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = {", "\\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\")", "/usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not os.path.isfile(dst):", "installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl", "} for plugin_url, version in plugins.items(): install_cmd = \"helm plugin install {0} --version={1}\".format(", "def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz &&", "if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst))", "shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds =", "chmod +x kubectl && \\ mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if", "sp import platform import os.path import logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites for", "already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl", "dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): 
logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already", "&& \\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\")", "install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip", "&& \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst =", "dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already", "logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs", "install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew install gnu-sed'", "if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst))", "not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst)) def", "= '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed", "darwin-amd64/helm && \\ chmod +x helm && \\ mv helm /usr/local/bin/ && \\", "already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin':", "{ 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm &&", "Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin", "\"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install", "\\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault'", "already installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin':", "kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl", "Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz", "import os.path import logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape CLI", "= { 'Darwin': 'brew install gnu-sed' } dst = '/usr/local/bin/gsed' if not os.path.isfile(dst):", "def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO 
https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\", "tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x helm && \\ mv", "kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes", "logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = {", "os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform):", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins.", "\\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform],", "shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds", "dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already", "\"\"\" Installs prerequisites for the landscape CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform)", "&& \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/ && \\", "Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d", "shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. 
Requires", "if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst))", "in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip", "rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True)", "tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins()", "installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl", "&& \\ chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst =", "CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform)", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds", "plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm plugin with command: {0}\".format(install_cmd)) sp.call(install_cmd,", "install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip", "-LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl && \\ mv kubectl /usr/local/bin/' }", "} dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform", "logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. 
Requires helm to", "if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in {0}\".format(dst))", "brew install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\")", "logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs", "os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform):", "shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds", "install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod", "'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper", "{0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. Requires helm to be installed\"\"\" plugins =", "{ 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl && \\ mv", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\"", "not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst)) def", "\"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d", "in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. Requires helm to be installed\"\"\" plugins", "https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl && \\ mv kubectl /usr/local/bin/' } dst", "install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew update && brew install lastpass-cli", "subprocess as sp import platform import os.path import logging def install_prerequisites(os_platform): \"\"\" Installs", "dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already", "installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. 
Requires helm to be installed\"\"\"", "landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else:", "not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst)) def", "install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1", "else: logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = {", "logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin':", "dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already", "\"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar", "the landscape CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform)", "\\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform],", "https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst", "Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x", "minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True)", "install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds", "tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz'", "install_cmds = { 'Darwin': 'brew update && brew install lastpass-cli --with-pinentry' } dst", "for plugin_url, version in plugins.items(): install_cmd = \"helm plugin install {0} --version={1}\".format( plugin_url,", "landscaper && \\ mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst =", "import subprocess as sp import platform import os.path import logging def install_prerequisites(os_platform): \"\"\"", "&& \\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\")", "= { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl && \\", "install_helm_plugins(): \"\"\"Install helm plugins. 
Requires helm to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg':", "helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x helm && \\ mv helm /usr/local/bin/", "} dst = '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm", "Requires helm to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url,", "'/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in", "helm && \\ mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst =", "as sp import platform import os.path import logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites", "installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in plugins.items(): install_cmd", "else: logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds =", "} dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl", "\\ mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\")", "\\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform'", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\"", "-LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' }", "os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform):", "-d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not", "not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst)) def", "&& \\ chmod +x helm && \\ mv helm /usr/local/bin/ && \\ rm", "'/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in", "install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = {", "install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\", "\\ mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if", "unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if", "'/usr/local/bin/kubectl' if not 
os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"kubectl already installed in", "helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else:", "in plugins.items(): install_cmd = \"helm plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm", "/usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing", "'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64", "= '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed", "install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape CLI tool Returns: None \"\"\" install_gsed(os_platform)", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds", "'Darwin': 'brew update && brew install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if", "&& \\ mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm'", "'brew update && brew install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if not", "-LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' }", "/usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else:", "= '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed", "= { 'Darwin': 'brew update && brew install lastpass-cli --with-pinentry' } dst =", "chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if", "already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl", "def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew update && brew install", "&& \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\")", "\\ mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if", "landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst):", "plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in plugins.items(): install_cmd =", "install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew install", "in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO", 
"logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install", "os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform):", "minikube\"\"\" install_cmds = { 'Darwin': 'brew install gnu-sed' } dst = '/usr/local/bin/gsed' if", "= '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed", "logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs", "minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\"", "if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst))", "install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper", "def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\", "install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \\ chmod +x kubectl &&", "install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\"", "'/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in", "&& \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x helm &&", "else: logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds =", "LastPass\"\"\" install_cmds = { 'Darwin': 'brew update && brew install lastpass-cli --with-pinentry' }", "else: logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds =", "= { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip &&", "def install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape CLI tool Returns: None \"\"\"", "helm to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version", "'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube'", "import platform import os.path import logging def 
install_prerequisites(os_platform): \"\"\" Installs prerequisites for the", "shell=True) else: logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds", "landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\"", "not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in {0}\".format(dst)) def", "installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew update", "install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform],", "helm plugins. Requires helm to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', }", "= '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed", "logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape CLI tool Returns: None", "{0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz", "-LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' }", "} dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube", "already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin':", "'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv", "{ 'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in plugins.items(): install_cmd = \"helm plugin", "kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True)", "shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds =", "\\ chmod +x kubectl && \\ mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl'", "Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform) install_helm(os_platform) install_landscaper(os_platform) install_terraform(os_platform) install_helm_plugins() def", "gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed already installed in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\"", "Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz", 
"else: logging.info(\"vault already installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds =", "install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip && \\ unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip", "{0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.2/terraform_0.10.7_darwin_amd64.zip &&", "install_helm_plugins() def install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew install gnu-sed' }", "if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst))", "/usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing kubectl\") sp.call(install_cmds[os_platform], shell=True) else:", "'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\", "import logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape CLI tool Returns:", "helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm", "vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else:", "zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\ mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' }", "kubectl && \\ mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst):", "= { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip &&", "install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm plugin with command: {0}\".format(install_cmd)) sp.call(install_cmd, shell=True)", "= '/usr/local/bin/helm' if not os.path.isfile(dst): logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed", "installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl", "os.path import logging def install_prerequisites(os_platform): \"\"\" Installs prerequisites for the landscape CLI tool", "lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in {0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp", "\\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x helm && \\", "dst = '/usr/local/bin/landscaper' if not os.path.isfile(dst): logging.info(\"installing landscaper\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds", "} dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") 
sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"gnu-sed", "= { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm", "'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm", "for the landscape CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform) install_kubectl(os_platform)", "def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz &&", "gnu-sed' } dst = '/usr/local/bin/gsed' if not os.path.isfile(dst): logging.info(\"installing gnu-sed\") sp.call(install_cmds[os_platform], shell=True) else:", "rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True)", "dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already", "Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf", "&& brew install lastpass-cli --with-pinentry' } dst = '/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing", "prerequisites for the landscape CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform) install_vault(os_platform)", "/usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not os.path.isfile(dst):", "if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst))", "terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not os.path.isfile(dst): logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else:", "in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = { 'Darwin': 'curl -LO", "minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64", "in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO", "shell=True) else: logging.info(\"vault already installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds", "logging.info(\"kubectl already installed in {0}\".format(dst)) def install_helm(os_platform): \"\"\"Installs Kubernetes Helm\"\"\" install_cmds = {", "plugins. 
Requires helm to be installed\"\"\" plugins = { 'https://github.com/technosophos/helm-gpg': '0.1.0', } for", "os.path.isfile(dst): logging.info(\"installing vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in {0}\".format(dst)) def install_kubectl(os_platform):", "shell=True) else: logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds =", "logging.info(\"landscaper already installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin':", "&& \\ mv kubectl /usr/local/bin/' } dst = '/usr/local/bin/kubectl' if not os.path.isfile(dst): logging.info(\"installing", "mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper' if not", "def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip &&", "logging.info(\"installing minikube\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"minikube already installed in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install", "install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \\ chmod +x minikube-darwin-amd64 &&", "https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst", "'https://github.com/technosophos/helm-gpg': '0.1.0', } for plugin_url, version in plugins.items(): install_cmd = \"helm plugin install", "{0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl", "already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install helm plugins. 
Requires helm to be", "Installs prerequisites for the landscape CLI tool Returns: None \"\"\" install_gsed(os_platform) install_minikube(os_platform) install_lastpass(os_platform)", "{ 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \\", "&& \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing", "\\ chmod +x minikube-darwin-amd64 && \\ mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube'", "mv minikube-darwin-amd64 /usr/local/bin/minikube' } dst = '/usr/local/bin/minikube' if not os.path.isfile(dst): logging.info(\"installing minikube\") sp.call(install_cmds[os_platform],", "vault\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"vault already installed in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes", "{0}\".format(dst)) def install_vault(os_platform): \"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip", "in {0}\".format(dst)) def install_lastpass(os_platform): \"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew update &&", "-d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \\ rm terraform_0.10.7_darwin_amd64.zip' } dst = '/usr/local/bin/terraform' if not", "\"\"\"Install LastPass\"\"\" install_cmds = { 'Darwin': 'brew update && brew install lastpass-cli --with-pinentry'", "\"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar", "logging.info(\"installing terraform\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"terraform already installed in {0}\".format(dst)) def install_helm_plugins(): \"\"\"Install", "--strip-components=1 darwin-amd64/helm && \\ chmod +x helm && \\ mv helm /usr/local/bin/ &&", "&& \\ mv landscaper /usr/local/bin/ && \\ rm landscaper-1.0.11-darwin-amd64.tar.gz' } dst = '/usr/local/bin/landscaper'", "&& \\ chmod +x kubectl && \\ mv kubectl /usr/local/bin/' } dst =", "https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x helm", "install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\", "logging.info(\"installing helm\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs", "'/usr/local/bin/lpass' if not os.path.isfile(dst): logging.info(\"installing lastpass\") sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"lastpass already installed in", "Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip -d /usr/local/bin/", "-LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \\ tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \\ chmod +x", "install_gsed(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'brew install gnu-sed' } dst =", "logging.info(\"vault already installed in 
{0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = {", "mv helm /usr/local/bin/ && \\ rm helm-v2.7.2-darwin-amd64.tar.gz' } dst = '/usr/local/bin/helm' if not", "= \"helm plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm plugin with command:", "install_cmd = \"helm plugin install {0} --version={1}\".format( plugin_url, version) logging.info(\"installing helm plugin with", "def install_helm_plugins(): \"\"\"Install helm plugins. Requires helm to be installed\"\"\" plugins = {", "sp.call(install_cmds[os_platform], shell=True) else: logging.info(\"helm already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\"", "vault_0.8.3_darwin_amd64.zip && \\ rm vault_0.8.3_darwin_amd64.zip' } dst = '/usr/local/bin/vault' if not os.path.isfile(dst): logging.info(\"installing", "in {0}\".format(dst)) def install_minikube(os_platform): \"\"\"Install minikube\"\"\" install_cmds = { 'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64", "\"\"\"Installs Hashicorp Vault\"\"\" install_cmds = { 'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \\ unzip", "{ 'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \\ tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \\", "already installed in {0}\".format(dst)) def install_landscaper(os_platform): \"\"\"Installs Helm Landscaper\"\"\" install_cmds = { 'Darwin':", "installed in {0}\".format(dst)) def install_terraform(os_platform): \"\"\"Installs Terraform\"\"\" install_cmds = { 'Darwin': 'curl -LO", "in {0}\".format(dst)) def install_kubectl(os_platform): \"\"\"Installs Kubernetes kubectl\"\"\" install_cmds = { 'Darwin': 'curl -LO" ]
[ "print('Agora é sua vez de personalizar a contagem!') ini=int(input('Início: ')) fim=int(input('Fim: ')) pas=int(input('Passo:", "um programa que tenha uma função chamada contador(), que receba três parâmetros: início,", "<=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i while cont <=f: print(f'", "Seu programa tem que realizar três contagens através da função criada: a) de", "personalizada''' from time import sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f}", "contagens através da função criada: a) de 1 até 10, de 1 em", "{f} de {p} em {p}') sleep(2.5) if i < f: cont=i while cont", "print('-='*20) print(f'contagem de {i} até {f} de {p} em {p}') sleep(2.5) if i", "três contagens através da função criada: a) de 1 até 10, de 1", "1 até 10, de 1 em 1 b) de 10 até 0, de", "passo. Seu programa tem que realizar três contagens através da função criada: a)", "início, fim e passo. Seu programa tem que realizar três contagens através da", "1 b) de 10 até 0, de 2 em 2 c) uma contagem", "função chamada contador(), que receba três parâmetros: início, fim e passo. Seu programa", "realizar três contagens através da função criada: a) de 1 até 10, de", "while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2)", "parâmetros: início, fim e passo. Seu programa tem que realizar três contagens através", "print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez de personalizar a contagem!') ini=int(input('Início:", "em {p}') sleep(2.5) if i < f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True)", "print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua", "é sua vez de personalizar a contagem!') ini=int(input('Início: ')) fim=int(input('Fim: ')) pas=int(input('Passo: '))", "cont=i while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1)", "tem que realizar três contagens através da função criada: a) de 1 até", "contador(1,10,1) contador(10,0,2) print('Agora é sua vez de personalizar a contagem!') ini=int(input('Início: ')) fim=int(input('Fim:", "print('FIM!') else: cont=i while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!')", "até 0, de 2 em 2 c) uma contagem personalizada''' from time import", "',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i while cont <=f: print(f' {cont} ',end='',flush=True)", "de 1 até 10, de 1 em 1 b) de 10 até 0,", "e passo. Seu programa tem que realizar três contagens através da função criada:", "import sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f} de {p} em", "if i < f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p", "em 1 b) de 10 até 0, de 2 em 2 c) uma", "while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i while cont", "fim e passo. Seu programa tem que realizar três contagens através da função", "'''Faça um programa que tenha uma função chamada contador(), que receba três parâmetros:", "três parâmetros: início, fim e passo. 
Seu programa tem que realizar três contagens", "< f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else:", "+=p print('FIM!') else: cont=i while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p", "<=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é", "b) de 10 até 0, de 2 em 2 c) uma contagem personalizada'''", "sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f} de {p} em {p}')", "uma contagem personalizada''' from time import sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i}", "from time import sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f} de", "{p} em {p}') sleep(2.5) if i < f: cont=i while cont <=f: print(f'{cont}", "em 2 c) uma contagem personalizada''' from time import sleep def contador(i,f,p): print('-='*20)", "através da função criada: a) de 1 até 10, de 1 em 1", "2 em 2 c) uma contagem personalizada''' from time import sleep def contador(i,f,p):", "que realizar três contagens através da função criada: a) de 1 até 10,", "de {i} até {f} de {p} em {p}') sleep(2.5) if i < f:", "até {f} de {p} em {p}') sleep(2.5) if i < f: cont=i while", "contagem personalizada''' from time import sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i} até", "else: cont=i while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20)", "criada: a) de 1 até 10, de 1 em 1 b) de 10", "cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora", "cont +=p print('FIM!') else: cont=i while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5) cont", "10, de 1 em 1 b) de 10 até 0, de 2 em", "contador(10,0,2) print('Agora é sua vez de personalizar a contagem!') ini=int(input('Início: ')) fim=int(input('Fim: '))", "contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f} de {p} em {p}') sleep(2.5) if", "print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez de personalizar a contagem!') ini=int(input('Início: '))", "da função criada: a) de 1 até 10, de 1 em 1 b)", "uma função chamada contador(), que receba três parâmetros: início, fim e passo. Seu", "print(f'contagem de {i} até {f} de {p} em {p}') sleep(2.5) if i <", "cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i while cont <=f:", "1 em 1 b) de 10 até 0, de 2 em 2 c)", "{cont} ',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez", "cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i while", "programa que tenha uma função chamada contador(), que receba três parâmetros: início, fim", "chamada contador(), que receba três parâmetros: início, fim e passo. 
Seu programa tem", "print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i while cont <=f: print(f' {cont}", "sua vez de personalizar a contagem!') ini=int(input('Início: ')) fim=int(input('Fim: ')) pas=int(input('Passo: ')) contador(ini,fim,pas)", "sleep(0.5) cont +=p print('FIM!') else: cont=i while cont <=f: print(f' {cont} ',end='',flush=True) sleep(0.5)", "{p}') sleep(2.5) if i < f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5)", "f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!') else: cont=i", "contador(), que receba três parâmetros: início, fim e passo. Seu programa tem que", "receba três parâmetros: início, fim e passo. Seu programa tem que realizar três", "de 1 em 1 b) de 10 até 0, de 2 em 2", "que tenha uma função chamada contador(), que receba três parâmetros: início, fim e", "que receba três parâmetros: início, fim e passo. Seu programa tem que realizar", "de {p} em {p}') sleep(2.5) if i < f: cont=i while cont <=f:", "',end='',flush=True) sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez de", "<gh_stars>0 '''Faça um programa que tenha uma função chamada contador(), que receba três", "sleep(0.5) cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez de personalizar", "função criada: a) de 1 até 10, de 1 em 1 b) de", "programa tem que realizar três contagens através da função criada: a) de 1", "c) uma contagem personalizada''' from time import sleep def contador(i,f,p): print('-='*20) print(f'contagem de", "até 10, de 1 em 1 b) de 10 até 0, de 2", "10 até 0, de 2 em 2 c) uma contagem personalizada''' from time", "cont -=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez de personalizar a", "0, de 2 em 2 c) uma contagem personalizada''' from time import sleep", "de 2 em 2 c) uma contagem personalizada''' from time import sleep def", "2 c) uma contagem personalizada''' from time import sleep def contador(i,f,p): print('-='*20) print(f'contagem", "tenha uma função chamada contador(), que receba três parâmetros: início, fim e passo.", "{i} até {f} de {p} em {p}') sleep(2.5) if i < f: cont=i", "i < f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont +=p print('FIM!')", "time import sleep def contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f} de {p}", "def contador(i,f,p): print('-='*20) print(f'contagem de {i} até {f} de {p} em {p}') sleep(2.5)", "de 10 até 0, de 2 em 2 c) uma contagem personalizada''' from", "a) de 1 até 10, de 1 em 1 b) de 10 até", "sleep(2.5) if i < f: cont=i while cont <=f: print(f'{cont} ',end='',flush=True) sleep(0.5) cont", "-=p print('FIM!') print('-='*20) contador(1,10,1) contador(10,0,2) print('Agora é sua vez de personalizar a contagem!')" ]
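The row above fragments a short Portuguese practice exercise: write a contador() function that takes a start, an end and a step, use it to count from 1 to 10 by 1 and from 10 down to 0 by 2, then ask the user for a custom count. One detail worth flagging: in the fragments the descending branch keeps the ascending condition (while cont <= f together with cont -= p), so the 10-to-0 call exits immediately and prints nothing. The sketch below is a cleaned-up reading with the comments translated to English; it assumes the descending loop was meant to run while cont >= f, and the zero-step guard is my addition rather than part of the original.

from time import sleep


def contador(i, f, p):
    """Count from i to f in steps of p, printing one value every half second."""
    print('-=' * 20)
    print(f'Counting from {i} to {f} in steps of {p}')
    sleep(2.5)
    p = abs(p) if p != 0 else 1        # guard against a zero or negative step (my addition)
    cont = i
    if i < f:                          # ascending count
        while cont <= f:
            print(cont, end=' ', flush=True)
            sleep(0.5)
            cont += p
    else:                              # descending count (condition flipped to >=)
        while cont >= f:
            print(cont, end=' ', flush=True)
            sleep(0.5)
            cont -= p
    print('END!')
    print('-=' * 20)


contador(1, 10, 1)
contador(10, 0, 2)
print('Now it is your turn to customize the count!')
ini = int(input('Start: '))
fim = int(input('End: '))
pas = int(input('Step: '))
contador(ini, fim, pas)

With the flipped condition the second call prints 10 8 6 4 2 0, which is what the exercise statement asks for.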
[ "= False self.refresh_at_next = False self.latest_frame = None self.lock = threading.Lock() wx.Panel.__init__(self, *args,", "!= file_path def update_button_label(self): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path): self.button.SetLabel(_('Open')) else: self.button.SetLabel(_('Browse')) #", "_('HDMI Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input (OpenCV driver)'), 'screen': _('Realtime", "Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input (OpenCV driver)'), 'screen': _('Realtime Capture", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "PreviewPanel.source_message.get(source, '')) self.show_input_file((source == 'file')) def show_input_file(self, show): self.input_file_panel.Show(show) self.Layout() def draw_preview(self): frame_rgb", "\"__main__\": import sys import wx application = wx.App() frame = wx.Frame(None, wx.ID_ANY, 'Preview',", "if __name__ == \"__main__\": import sys import wx application = wx.App() frame =", "def on_ikalog_pause(self, event): self._pause = event.pause self.draw_preview() # wx event def on_preview_click(self, event):", "update_button_label(self): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path): self.button.SetLabel(_('Open')) else: self.button.SetLabel(_('Browse')) # wx event def", "orig_state = obj.GetEvtHandlerEnabled() obj.SetEvtHandlerEnabled(enable) return orig_state # IkaLog event def on_show_preview(self, context): img", "'screen': _('Realtime Capture from desktop'), 'file': _('Read from pre-recorded video file (for testing)'),", "wx.BoxSizer(wx.VERTICAL) self.video_input_sizer.Add(self.video_input_title_text) self.video_input_sizer.Add(self.video_input_source_sizer, flag=wx.EXPAND | wx.ALL, border=5) self.video_input_sizer.Add((640, 5)) # Top sizer self.top_sizer", "- 30), (ox - 20, oy + 30), (ox + 20, oy)]) else:", "class FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self) self.observer = observer def OnDropFiles(self, x, y,", "context['engine']['frame']) if img is None: return False try: self.lock.acquire() self.latest_frame = cv2.resize(img, self.preview_size)", "self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size) self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click) self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview) self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview) # Video", "event): # Propagate the event to the upper level. wx.PostEvent(self, event) source_message =", "Capture from desktop'), 'file': _('Read from pre-recorded video file (for testing)'), } def", "OnDropFiles(self, x, y, filenames): self.observer.on_drop_files(x, y, filenames) return True class InputFilePanel(wx.Panel): def __init__(self,", "self.observer.on_drop_files(x, y, filenames) return True class InputFilePanel(wx.Panel): def __init__(self, *args, **kwargs): wx.Panel.__init__(self, *args,", "specific language governing permissions and # limitations under the License. # import copy", "def on_preview_click(self, event): evt = IkalogPauseEvent(pause=(not self._pause)) wx.PostEvent(self, evt) # wx event def", "None: if self._prev_bmp: dc.DrawBitmap(self._prev_bmp, 0, 0) return False width, height = self.preview_size frame_rgb", "# Sizer to set the width of the text box to 640. 
self.video_input_sizer", "self.Layout() def draw_preview(self): frame_rgb = None try: self.lock.acquire() if self.latest_frame is None: if", "wx event def on_text_input(self, event): self.update_button_label() # wx event def on_button_click(self, event): file_path", "this file except in compliance with the License. # You may obtain a", "return True class InputFilePanel(wx.Panel): def __init__(self, *args, **kwargs): wx.Panel.__init__(self, *args, **kwargs) # This", "level. wx.PostEvent(self, event) source_message = { 'amarec': _('Capture through AmarecTV'), 'dshow_capture': _('HDMI Video", "# Top sizer self.top_sizer = wx.BoxSizer(wx.VERTICAL) self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5) self.top_sizer.Add(self.preview_panel) self.SetSizer(self.top_sizer) if __name__", "= Localization.gettext_translation('IkaUI', fallback=True).gettext class FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self) self.observer = observer def", "self.latest_frame = None self.lock = threading.Lock() wx.Panel.__init__(self, *args, **kwargs) self.timer = wx.Timer(self) self.timer.Start(100)", "= wx.FileDialog(self, _('Select a video file')) if file_dialog.ShowModal() != wx.ID_OK: return file_path =", "self.on_enter_preview) self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview) # Video Input self.video_input_title_text = wx.StaticText( self, wx.ID_ANY, _('Video Input'))", "<NAME> # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "False self._pause = False self.refresh_at_next = False self.latest_frame = None self.lock = threading.Lock()", "ANY KIND, either express or implied. # See the License for the specific", "open or not. self.prev_file_path = '' # Textbox for input file self.text_ctrl =", "wx event def on_leave_preview(self, event): self._enter = False self.draw_preview() # wx event def", "Video input (OpenCV driver)'), 'screen': _('Realtime Capture from desktop'), 'file': _('Read from pre-recorded", "====== # Copyright (C) 2015 <NAME> # # Licensed under the Apache License,", "(ox - 20, oy + 30), (ox + 20, oy)]) else: # Draw", "frame_rgb is None: return False bmp = wx.BitmapFromBuffer(width, height, frame_rgb) dc = wx.ClientDC(self.preview_panel)", "_('Video Input')) self.video_input_source_text = wx.StaticText(self, wx.ID_ANY, '') self.input_file_panel = InputFilePanel(self, wx.ID_ANY) self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED, self.on_input_file_added)", "True finally: self.lock.release() # wx event def on_input_initialized(self, event): self.show_header(event.source) # wx event", "= wx.BoxSizer(wx.HORIZONTAL) top_sizer.Add(self.text_ctrl, proportion=1) top_sizer.Add(self.button) self.SetSizer(top_sizer) def should_open_file(self, file_path): return os.path.isfile(file_path) and self.prev_file_path", "obj.SetEvtHandlerEnabled(enable) return orig_state # IkaLog event def on_show_preview(self, context): img = context['engine'].get('preview', context['engine']['frame'])", "FileDropTarget(self) self.text_ctrl.SetDropTarget(drop_target) top_sizer = wx.BoxSizer(wx.HORIZONTAL) top_sizer.Add(self.text_ctrl, proportion=1) top_sizer.Add(self.button) self.SetSizer(top_sizer) def should_open_file(self, file_path): return", "# import copy import os.path import threading import wx import cv2 from ikalog.utils", "Propagate the event to the upper level. 
wx.PostEvent(self, event) source_message = { 'amarec':", "wx.BoxSizer(wx.VERTICAL) self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5) self.top_sizer.Add(self.preview_panel) self.SetSizer(self.top_sizer) if __name__ == \"__main__\": import sys import", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "'opencv_capture': _('HDMI Video input (OpenCV driver)'), 'screen': _('Realtime Capture from desktop'), 'file': _('Read", "== 'file')) def show_input_file(self, show): self.input_file_panel.Show(show) self.Layout() def draw_preview(self): frame_rgb = None try:", "Localization.gettext_translation('IkaUI', fallback=True).gettext class FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self) self.observer = observer def OnDropFiles(self,", "class PreviewPanel(wx.Panel): def SetEventHandlerEnable(self, obj, enable): orig_state = obj.GetEvtHandlerEnabled() obj.SetEvtHandlerEnabled(enable) return orig_state #", "else: self.button.SetLabel(_('Browse')) # wx event def on_text_input(self, event): self.update_button_label() # wx event def", "_('Select a video file')) if file_dialog.ShowModal() != wx.ID_OK: return file_path = file_dialog.GetPath() self.text_ctrl.SetValue(file_path)", "to the upper level. wx.PostEvent(self, event) source_message = { 'amarec': _('Capture through AmarecTV'),", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "is used to determine if a file dialog is open or not. self.prev_file_path", "= InputFilePanel(self, wx.ID_ANY) self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED, self.on_input_file_added) self.show_input_file(False) self.video_input_source_sizer = wx.BoxSizer(wx.HORIZONTAL) self.video_input_source_sizer.Add( self.video_input_source_text, flag=wx.LEFT, border=10)", "- 20, oy - 30), (ox - 20, oy + 30), (ox +", "+ 20, oy)]) else: # Draw two rectangles representing 'pause'. dc.DrawRectangle(ox - 20,", "self.latest_frame = cv2.resize(img, self.preview_size) self.refresh_at_next = True finally: self.lock.release() # wx event def", "20, oy - 30), (ox - 20, oy + 30), (ox + 20,", "OF ANY KIND, either express or implied. # See the License for the", "import copy import os.path import threading import wx import cv2 from ikalog.utils import", "border=5) self.video_input_sizer.Add((640, 5)) # Top sizer self.top_sizer = wx.BoxSizer(wx.VERTICAL) self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5) self.top_sizer.Add(self.preview_panel)", "import threading import wx import cv2 from ikalog.utils import Localization from ikalog.ui.events import", "self.refresh_at_next = False self.latest_frame = None self.lock = threading.Lock() wx.Panel.__init__(self, *args, **kwargs) self.timer", "/ 2) oy = int(height / 2) if self._pause: # Draw a triangle", "int(width / 2) oy = int(height / 2) if self._pause: # Draw a", "wx.TextCtrl(self, wx.ID_ANY, '') self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input) self.button = wx.Button(self, wx.ID_ANY, _('Browse')) self.button.Bind(wx.EVT_BUTTON, self.on_button_click) #", "# Copyright (C) 2015 <NAME> # # Licensed under the Apache License, Version", "None: self.lock.release() return self.lock.release() if not self.refresh_at_next: return self.draw_preview() self.refresh_at_next = False def", "self.lock = threading.Lock() wx.Panel.__init__(self, *args, **kwargs) self.timer = wx.Timer(self) self.timer.Start(100) self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)", "(ox + 20, oy)]) else: # Draw two rectangles representing 'pause'. 
dc.DrawRectangle(ox -", "return False width, height = self.preview_size frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if", "dc.DrawRectangle(ox - 20, oy - 30, 15, 60) dc.DrawRectangle(ox + 10, oy -", "self.refresh_at_next = False def __init__(self, *args, **kwargs): self._prev_bmp = None self._enter = False", "= int(height / 2) if self._pause: # Draw a triangle representing 'play'. dc.DrawPolygon([(ox", "if self.latest_frame is None: self.lock.release() return self.lock.release() if not self.refresh_at_next: return self.draw_preview() self.refresh_at_next", "= self.text_ctrl.GetValue() if self.should_open_file(file_path): self.button.SetLabel(_('Open')) else: self.button.SetLabel(_('Browse')) # wx event def on_text_input(self, event):", "evt) # wx event def on_enter_preview(self, event): self._enter = True self.draw_preview() # wx", "threading.Lock() wx.Panel.__init__(self, *args, **kwargs) self.timer = wx.Timer(self) self.timer.Start(100) self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer) self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED, self.on_input_initialized)", "frame_rgb = None try: self.lock.acquire() if self.latest_frame is None: if self._prev_bmp: dc.DrawBitmap(self._prev_bmp, 0,", "None try: self.lock.acquire() if self.latest_frame is None: if self._prev_bmp: dc.DrawBitmap(self._prev_bmp, 0, 0) return", "AmarecTV'), 'dshow_capture': _('HDMI Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input (OpenCV driver)'),", "= wx.Timer(self) self.timer.Start(100) self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer) self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED, self.on_input_initialized) self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause) # Preview self.preview_size", "file')) if file_dialog.ShowModal() != wx.ID_OK: return file_path = file_dialog.GetPath() self.text_ctrl.SetValue(file_path) # Callback from", "upper level. wx.PostEvent(self, event) source_message = { 'amarec': _('Capture through AmarecTV'), 'dshow_capture': _('HDMI", "return os.path.isfile(file_path) and self.prev_file_path != file_path def update_button_label(self): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path):", "# Preview self.preview_size = (640, 360) # Preview image. self.preview_panel = wx.Panel(self, wx.ID_ANY,", "context): img = context['engine'].get('preview', context['engine']['frame']) if img is None: return False try: self.lock.acquire()", "= '' # Textbox for input file self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '') self.text_ctrl.Bind(wx.EVT_TEXT,", "(640, 360) # Preview image. self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size) self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click) self.preview_panel.Bind(wx.EVT_ENTER_WINDOW,", "border=10) self.video_input_source_sizer.Add(self.input_file_panel, proportion=1) # Sizer to set the width of the text box", "Draw two rectangles representing 'pause'. 
dc.DrawRectangle(ox - 20, oy - 30, 15, 60)", "self.draw_preview() self.refresh_at_next = False def __init__(self, *args, **kwargs): self._prev_bmp = None self._enter =", "_ = Localization.gettext_translation('IkaUI', fallback=True).gettext class FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self) self.observer = observer", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "height, frame_rgb) dc = wx.ClientDC(self.preview_panel) dc.DrawBitmap(bmp, 0, 0) self._prev_bmp = bmp if self._enter:", "size=(640, 360)) preview = PreviewPanel(frame, size=(640, 360)) layout = wx.BoxSizer(wx.VERTICAL) layout.Add(preview) frame.SetSizer(layout) frame.Show()", "wx.ID_ANY) self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED, self.on_input_file_added) self.show_input_file(False) self.video_input_source_sizer = wx.BoxSizer(wx.HORIZONTAL) self.video_input_source_sizer.Add( self.video_input_source_text, flag=wx.LEFT, border=10) self.video_input_source_sizer.Add(self.input_file_panel, proportion=1)", "FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self) self.observer = observer def OnDropFiles(self, x, y, filenames):", "self.video_input_source_sizer.Add( self.video_input_source_text, flag=wx.LEFT, border=10) self.video_input_source_sizer.Add(self.input_file_panel, proportion=1) # Sizer to set the width of", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= False self._pause = False self.refresh_at_next = False self.latest_frame = None self.lock =", "evt) self.prev_file_path = file_path self.update_button_label() return # file_path is invalid. Open a file", "= file_path self.update_button_label() return # file_path is invalid. Open a file dialog. file_dialog", "event def on_enter_preview(self, event): self._enter = True self.draw_preview() # wx event def on_leave_preview(self,", "# limitations under the License. # import copy import os.path import threading import", "self.timer.Start(100) self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer) self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED, self.on_input_initialized) self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause) # Preview self.preview_size = (640,", "# Textbox for input file self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '') self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input) self.button", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "height = self.preview_size frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if frame_rgb is None:", "/ 2) if self._pause: # Draw a triangle representing 'play'. 
dc.DrawPolygon([(ox - 20,", "self.button = wx.Button(self, wx.ID_ANY, _('Browse')) self.button.Bind(wx.EVT_BUTTON, self.on_button_click) # Drag and drop drop_target =", "drop drop_target = FileDropTarget(self) self.text_ctrl.SetDropTarget(drop_target) top_sizer = wx.BoxSizer(wx.HORIZONTAL) top_sizer.Add(self.text_ctrl, proportion=1) top_sizer.Add(self.button) self.SetSizer(top_sizer) def", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "event): self._enter = False self.draw_preview() # wx event def on_input_file_added(self, event): # Propagate", "draw_preview(self): frame_rgb = None try: self.lock.acquire() if self.latest_frame is None: if self._prev_bmp: dc.DrawBitmap(self._prev_bmp,", "60) # wx event def OnTimer(self, event): self.lock.acquire() if self.latest_frame is None: self.lock.release()", "through AmarecTV'), 'dshow_capture': _('HDMI Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input (OpenCV", "# wx event def on_button_click(self, event): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path): evt =", "if self._pause: # Draw a triangle representing 'play'. dc.DrawPolygon([(ox - 20, oy -", "self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '') self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input) self.button = wx.Button(self, wx.ID_ANY, _('Browse')) self.button.Bind(wx.EVT_BUTTON,", "= wx.BoxSizer(wx.VERTICAL) self.video_input_sizer.Add(self.video_input_title_text) self.video_input_sizer.Add(self.video_input_source_sizer, flag=wx.EXPAND | wx.ALL, border=5) self.video_input_sizer.Add((640, 5)) # Top sizer", "oy - 30, 15, 60) # wx event def OnTimer(self, event): self.lock.acquire() if", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# wx event def OnTimer(self, event): self.lock.acquire() if self.latest_frame is None: self.lock.release() return", "should_open_file(self, file_path): return os.path.isfile(file_path) and self.prev_file_path != file_path def update_button_label(self): file_path = self.text_ctrl.GetValue()", "'amarec': _('Capture through AmarecTV'), 'dshow_capture': _('HDMI Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video", "wx.ID_ANY, _('Browse')) self.button.Bind(wx.EVT_BUTTON, self.on_button_click) # Drag and drop drop_target = FileDropTarget(self) self.text_ctrl.SetDropTarget(drop_target) top_sizer", "image. self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size) self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click) self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview) self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview) #", "permissions and # limitations under the License. # import copy import os.path import", "frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if frame_rgb is None: return False bmp", "ox = int(width / 2) oy = int(height / 2) if self._pause: #", "'dshow_capture': _('HDMI Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input (OpenCV driver)'), 'screen':", "required by applicable law or agreed to in writing, software # distributed under", "file_path self.update_button_label() return # file_path is invalid. Open a file dialog. file_dialog =", "'pause'. 
dc.DrawRectangle(ox - 20, oy - 30, 15, 60) dc.DrawRectangle(ox + 10, oy", "None: return False try: self.lock.acquire() self.latest_frame = cv2.resize(img, self.preview_size) self.refresh_at_next = True finally:", "*args, **kwargs): wx.Panel.__init__(self, *args, **kwargs) # This is used to determine if a", "applicable law or agreed to in writing, software # distributed under the License", "wx.ID_OK: return file_path = file_dialog.GetPath() self.text_ctrl.SetValue(file_path) # Callback from wx.FileDropTarget.OnDropFiles def on_drop_files(self, x,", "Callback from wx.FileDropTarget.OnDropFiles def on_drop_files(self, x, y, filenames): if not filenames: return self.text_ctrl.SetValue(filenames[0])", "self.prev_file_path = file_path self.update_button_label() return # file_path is invalid. Open a file dialog.", "self.text_ctrl.GetValue() if self.should_open_file(file_path): self.button.SetLabel(_('Open')) else: self.button.SetLabel(_('Browse')) # wx event def on_text_input(self, event): self.update_button_label()", "self.refresh_at_next = True finally: self.lock.release() # wx event def on_input_initialized(self, event): self.show_header(event.source) #", "wx.Frame(None, wx.ID_ANY, 'Preview', size=(640, 360)) preview = PreviewPanel(frame, size=(640, 360)) layout = wx.BoxSizer(wx.VERTICAL)", "else: # Draw two rectangles representing 'pause'. dc.DrawRectangle(ox - 20, oy - 30,", "or agreed to in writing, software # distributed under the License is distributed", "def on_text_input(self, event): self.update_button_label() # wx event def on_button_click(self, event): file_path = self.text_ctrl.GetValue()", "os.path import threading import wx import cv2 from ikalog.utils import Localization from ikalog.ui.events", "determine if a file dialog is open or not. self.prev_file_path = '' #", "# Video Input self.video_input_title_text = wx.StaticText( self, wx.ID_ANY, _('Video Input')) self.video_input_source_text = wx.StaticText(self,", "return False bmp = wx.BitmapFromBuffer(width, height, frame_rgb) dc = wx.ClientDC(self.preview_panel) dc.DrawBitmap(bmp, 0, 0)", "False self.refresh_at_next = False self.latest_frame = None self.lock = threading.Lock() wx.Panel.__init__(self, *args, **kwargs)", "self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED, self.on_input_file_added) self.show_input_file(False) self.video_input_source_sizer = wx.BoxSizer(wx.HORIZONTAL) self.video_input_source_sizer.Add( self.video_input_source_text, flag=wx.LEFT, border=10) self.video_input_source_sizer.Add(self.input_file_panel, proportion=1) #", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "event.pause self.draw_preview() # wx event def on_preview_click(self, event): evt = IkalogPauseEvent(pause=(not self._pause)) wx.PostEvent(self,", "box to 640. self.video_input_sizer = wx.BoxSizer(wx.VERTICAL) self.video_input_sizer.Add(self.video_input_title_text) self.video_input_sizer.Add(self.video_input_source_sizer, flag=wx.EXPAND | wx.ALL, border=5) self.video_input_sizer.Add((640,", "False bmp = wx.BitmapFromBuffer(width, height, frame_rgb) dc = wx.ClientDC(self.preview_panel) dc.DrawBitmap(bmp, 0, 0) self._prev_bmp", "input (OpenCV driver)'), 'screen': _('Realtime Capture from desktop'), 'file': _('Read from pre-recorded video", "= self.preview_size frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if frame_rgb is None: return", "if a file dialog is open or not. 
self.prev_file_path = '' # Textbox", "source_message = { 'amarec': _('Capture through AmarecTV'), 'dshow_capture': _('HDMI Video input (DirectShow, recommended)'),", "used to determine if a file dialog is open or not. self.prev_file_path =", "on_button_click(self, event): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path): evt = InputFileAddedEvent(input_file=file_path) wx.PostEvent(self, evt) self.prev_file_path", "event def on_input_file_added(self, event): # Propagate the event to the upper level. wx.PostEvent(self,", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "__init__(self, observer): wx.FileDropTarget.__init__(self) self.observer = observer def OnDropFiles(self, x, y, filenames): self.observer.on_drop_files(x, y,", "writing, software # distributed under the License is distributed on an \"AS IS\"", "False self.latest_frame = None self.lock = threading.Lock() wx.Panel.__init__(self, *args, **kwargs) self.timer = wx.Timer(self)", "_('Capture through AmarecTV'), 'dshow_capture': _('HDMI Video input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input", "try: self.lock.acquire() if self.latest_frame is None: if self._prev_bmp: dc.DrawBitmap(self._prev_bmp, 0, 0) return False", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "not self.refresh_at_next: return self.draw_preview() self.refresh_at_next = False def __init__(self, *args, **kwargs): self._prev_bmp =", "self.SetSizer(self.top_sizer) if __name__ == \"__main__\": import sys import wx application = wx.App() frame", "License. # You may obtain a copy of the License at # #", "file_path = file_dialog.GetPath() self.text_ctrl.SetValue(file_path) # Callback from wx.FileDropTarget.OnDropFiles def on_drop_files(self, x, y, filenames):", "is invalid. Open a file dialog. file_dialog = wx.FileDialog(self, _('Select a video file'))", "Preview image. self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size) self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click) self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview) self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview)", "0, 0) self._prev_bmp = bmp if self._enter: ox = int(width / 2) oy", "dc.DrawPolygon([(ox - 20, oy - 30), (ox - 20, oy + 30), (ox", "self.video_input_source_text = wx.StaticText(self, wx.ID_ANY, '') self.input_file_panel = InputFilePanel(self, wx.ID_ANY) self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED, self.on_input_file_added) self.show_input_file(False) self.video_input_source_sizer", "* _ = Localization.gettext_translation('IkaUI', fallback=True).gettext class FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self) self.observer =", "OnTimer(self, event): self.lock.acquire() if self.latest_frame is None: self.lock.release() return self.lock.release() if not self.refresh_at_next:", "# wx event def on_text_input(self, event): self.update_button_label() # wx event def on_button_click(self, event):", "self._enter: ox = int(width / 2) oy = int(height / 2) if self._pause:", "evt = InputFileAddedEvent(input_file=file_path) wx.PostEvent(self, evt) self.prev_file_path = file_path self.update_button_label() return # file_path is", "y, filenames) return True class InputFilePanel(wx.Panel): def __init__(self, *args, **kwargs): wx.Panel.__init__(self, *args, **kwargs)", "oy - 30, 15, 60) dc.DrawRectangle(ox + 10, oy - 30, 15, 60)", "compliance with the License. 
# You may obtain a copy of the License", "'file')) def show_input_file(self, show): self.input_file_panel.Show(show) self.Layout() def draw_preview(self): frame_rgb = None try: self.lock.acquire()", "top_sizer = wx.BoxSizer(wx.HORIZONTAL) top_sizer.Add(self.text_ctrl, proportion=1) top_sizer.Add(self.button) self.SetSizer(top_sizer) def should_open_file(self, file_path): return os.path.isfile(file_path) and", "oy - 30), (ox - 20, oy + 30), (ox + 20, oy)])", "5)) # Top sizer self.top_sizer = wx.BoxSizer(wx.VERTICAL) self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5) self.top_sizer.Add(self.preview_panel) self.SetSizer(self.top_sizer) if", "input (DirectShow, recommended)'), 'opencv_capture': _('HDMI Video input (OpenCV driver)'), 'screen': _('Realtime Capture from", "python3 # -*- coding: utf-8 -*- # # IkaLog # ====== # Copyright", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "wx.BitmapFromBuffer(width, height, frame_rgb) dc = wx.ClientDC(self.preview_panel) dc.DrawBitmap(bmp, 0, 0) self._prev_bmp = bmp if", "def on_leave_preview(self, event): self._enter = False self.draw_preview() # wx event def on_input_file_added(self, event):", "Preview self.preview_size = (640, 360) # Preview image. self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size)", "event) source_message = { 'amarec': _('Capture through AmarecTV'), 'dshow_capture': _('HDMI Video input (DirectShow,", "size=self.preview_size) self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click) self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview) self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview) # Video Input self.video_input_title_text = wx.StaticText(", "self.observer = observer def OnDropFiles(self, x, y, filenames): self.observer.on_drop_files(x, y, filenames) return True", "15, 60) # wx event def OnTimer(self, event): self.lock.acquire() if self.latest_frame is None:", "filenames: return self.text_ctrl.SetValue(filenames[0]) class PreviewPanel(wx.Panel): def SetEventHandlerEnable(self, obj, enable): orig_state = obj.GetEvtHandlerEnabled() obj.SetEvtHandlerEnabled(enable)", "width, height = self.preview_size frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if frame_rgb is", "+ 30), (ox + 20, oy)]) else: # Draw two rectangles representing 'pause'.", "ikalog.ui.events import * _ = Localization.gettext_translation('IkaUI', fallback=True).gettext class FileDropTarget(wx.FileDropTarget): def __init__(self, observer): wx.FileDropTarget.__init__(self)", "ikalog.utils import Localization from ikalog.ui.events import * _ = Localization.gettext_translation('IkaUI', fallback=True).gettext class FileDropTarget(wx.FileDropTarget):", "# ====== # Copyright (C) 2015 <NAME> # # Licensed under the Apache", "wx.FileDropTarget.__init__(self) self.observer = observer def OnDropFiles(self, x, y, filenames): self.observer.on_drop_files(x, y, filenames) return", "= True self.draw_preview() # wx event def on_leave_preview(self, event): self._enter = False self.draw_preview()", "# Propagate the event to the upper level. 
wx.PostEvent(self, event) source_message = {", "self.preview_size) self.refresh_at_next = True finally: self.lock.release() # wx event def on_input_initialized(self, event): self.show_header(event.source)", "# wx event def on_input_initialized(self, event): self.show_header(event.source) # wx event def on_ikalog_pause(self, event):", "| wx.ALL, border=5) self.video_input_sizer.Add((640, 5)) # Top sizer self.top_sizer = wx.BoxSizer(wx.VERTICAL) self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL,", "not use this file except in compliance with the License. # You may", "a triangle representing 'play'. dc.DrawPolygon([(ox - 20, oy - 30), (ox - 20,", "sizer self.top_sizer = wx.BoxSizer(wx.VERTICAL) self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5) self.top_sizer.Add(self.preview_panel) self.SetSizer(self.top_sizer) if __name__ == \"__main__\":", "from wx.FileDropTarget.OnDropFiles def on_drop_files(self, x, y, filenames): if not filenames: return self.text_ctrl.SetValue(filenames[0]) class", "wx.PostEvent(self, evt) self.prev_file_path = file_path self.update_button_label() return # file_path is invalid. Open a", "License, Version 2.0 (the \"License\"); # you may not use this file except", "= cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if frame_rgb is None: return False bmp =", "30), (ox - 20, oy + 30), (ox + 20, oy)]) else: #", "IkaLog event def on_show_preview(self, context): img = context['engine'].get('preview', context['engine']['frame']) if img is None:", "self.on_leave_preview) # Video Input self.video_input_title_text = wx.StaticText( self, wx.ID_ANY, _('Video Input')) self.video_input_source_text =", "self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED, self.on_input_initialized) self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause) # Preview self.preview_size = (640, 360) # Preview image.", "# file_path is invalid. Open a file dialog. file_dialog = wx.FileDialog(self, _('Select a", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "on_input_file_added(self, event): # Propagate the event to the upper level. 
wx.PostEvent(self, event) source_message", "input file self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '') self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input) self.button = wx.Button(self, wx.ID_ANY,", "wx.ID_ANY, '') self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input) self.button = wx.Button(self, wx.ID_ANY, _('Browse')) self.button.Bind(wx.EVT_BUTTON, self.on_button_click) # Drag", "if file_dialog.ShowModal() != wx.ID_OK: return file_path = file_dialog.GetPath() self.text_ctrl.SetValue(file_path) # Callback from wx.FileDropTarget.OnDropFiles", "wx event def OnTimer(self, event): self.lock.acquire() if self.latest_frame is None: self.lock.release() return self.lock.release()", "filenames) return True class InputFilePanel(wx.Panel): def __init__(self, *args, **kwargs): wx.Panel.__init__(self, *args, **kwargs) #", "wx.Panel.__init__(self, *args, **kwargs) # This is used to determine if a file dialog", "wx event def on_ikalog_pause(self, event): self._pause = event.pause self.draw_preview() # wx event def", "= wx.BitmapFromBuffer(width, height, frame_rgb) dc = wx.ClientDC(self.preview_panel) dc.DrawBitmap(bmp, 0, 0) self._prev_bmp = bmp", "os.path.isfile(file_path) and self.prev_file_path != file_path def update_button_label(self): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path): self.button.SetLabel(_('Open'))", "# you may not use this file except in compliance with the License.", "self.lock.release() # wx event def on_input_initialized(self, event): self.show_header(event.source) # wx event def on_ikalog_pause(self,", "agreed to in writing, software # distributed under the License is distributed on", "import sys import wx application = wx.App() frame = wx.Frame(None, wx.ID_ANY, 'Preview', size=(640,", "the upper level. wx.PostEvent(self, event) source_message = { 'amarec': _('Capture through AmarecTV'), 'dshow_capture':", "event): self.update_button_label() # wx event def on_button_click(self, event): file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path):", "False width, height = self.preview_size frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB) finally: self.lock.release() if frame_rgb", "source): self.video_input_source_text.SetLabel( PreviewPanel.source_message.get(source, '')) self.show_input_file((source == 'file')) def show_input_file(self, show): self.input_file_panel.Show(show) self.Layout() def", "import cv2 from ikalog.utils import Localization from ikalog.ui.events import * _ = Localization.gettext_translation('IkaUI',", "# wx event def on_leave_preview(self, event): self._enter = False self.draw_preview() # wx event", "*args, **kwargs) self.timer = wx.Timer(self) self.timer.Start(100) self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer) self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED, self.on_input_initialized) self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause)", "(the \"License\"); # you may not use this file except in compliance with", "file_path = self.text_ctrl.GetValue() if self.should_open_file(file_path): evt = InputFileAddedEvent(input_file=file_path) wx.PostEvent(self, evt) self.prev_file_path = file_path", "of the text box to 640. self.video_input_sizer = wx.BoxSizer(wx.VERTICAL) self.video_input_sizer.Add(self.video_input_title_text) self.video_input_sizer.Add(self.video_input_source_sizer, flag=wx.EXPAND |", "language governing permissions and # limitations under the License. # import copy import", "dialog is open or not. 
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#  IkaLog
#  ======
#  Copyright (C) 2015 <NAME>
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import copy
import os.path
import threading

import wx
import cv2

from ikalog.utils import Localization
from ikalog.ui.events import *

_ = Localization.gettext_translation('IkaUI', fallback=True).gettext


class FileDropTarget(wx.FileDropTarget):

    def __init__(self, observer):
        wx.FileDropTarget.__init__(self)
        self.observer = observer

    def OnDropFiles(self, x, y, filenames):
        self.observer.on_drop_files(x, y, filenames)
        return True


class InputFilePanel(wx.Panel):

    def __init__(self, *args, **kwargs):
        wx.Panel.__init__(self, *args, **kwargs)

        # This is used to determine if a file dialog is open or not.
        self.prev_file_path = ''

        # Textbox for input file
        self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '')
        self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input)
        self.button = wx.Button(self, wx.ID_ANY, _('Browse'))
        self.button.Bind(wx.EVT_BUTTON, self.on_button_click)

        # Drag and drop
        drop_target = FileDropTarget(self)
        self.text_ctrl.SetDropTarget(drop_target)

        top_sizer = wx.BoxSizer(wx.HORIZONTAL)
        top_sizer.Add(self.text_ctrl, proportion=1)
        top_sizer.Add(self.button)
        self.SetSizer(top_sizer)

    def should_open_file(self, file_path):
        return os.path.isfile(file_path) and self.prev_file_path != file_path

    def update_button_label(self):
        file_path = self.text_ctrl.GetValue()
        if self.should_open_file(file_path):
            self.button.SetLabel(_('Open'))
        else:
            self.button.SetLabel(_('Browse'))

    # wx event
    def on_text_input(self, event):
        self.update_button_label()

    # wx event
    def on_button_click(self, event):
        file_path = self.text_ctrl.GetValue()
        if self.should_open_file(file_path):
            evt = InputFileAddedEvent(input_file=file_path)
            wx.PostEvent(self, evt)
            self.prev_file_path = file_path
            self.update_button_label()
            return

        # file_path is invalid. Open a file dialog.
        file_dialog = wx.FileDialog(self, _('Select a video file'))
        if file_dialog.ShowModal() != wx.ID_OK:
            return
        file_path = file_dialog.GetPath()
        self.text_ctrl.SetValue(file_path)

    # Callback from wx.FileDropTarget.OnDropFiles
    def on_drop_files(self, x, y, filenames):
        if not filenames:
            return
        self.text_ctrl.SetValue(filenames[0])


class PreviewPanel(wx.Panel):

    def SetEventHandlerEnable(self, obj, enable):
        orig_state = obj.GetEvtHandlerEnabled()
        obj.SetEvtHandlerEnabled(enable)
        return orig_state

    # IkaLog event
    def on_show_preview(self, context):
        img = context['engine'].get('preview', context['engine']['frame'])
        if img is None:
            return False

        try:
            self.lock.acquire()
            self.latest_frame = cv2.resize(img, self.preview_size)
            self.refresh_at_next = True
        finally:
            self.lock.release()

    # wx event
    def on_input_initialized(self, event):
        self.show_header(event.source)

    # wx event
    def on_ikalog_pause(self, event):
        self._pause = event.pause
        self.draw_preview()

    # wx event
    def on_preview_click(self, event):
        evt = IkalogPauseEvent(pause=(not self._pause))
        wx.PostEvent(self, evt)

    # wx event
    def on_enter_preview(self, event):
        self._enter = True
        self.draw_preview()

    # wx event
    def on_leave_preview(self, event):
        self._enter = False
        self.draw_preview()

    # wx event
    def on_input_file_added(self, event):
        # Propagate the event to the upper level.
        wx.PostEvent(self, event)

    source_message = {
        'amarec': _('Capture through AmarecTV'),
        'dshow_capture': _('HDMI Video input (DirectShow, recommended)'),
        'opencv_capture': _('HDMI Video input (OpenCV driver)'),
        'screen': _('Realtime Capture from desktop'),
        'file': _('Read from pre-recorded video file (for testing)'),
    }

    def show_header(self, source):
        self.video_input_source_text.SetLabel(
            PreviewPanel.source_message.get(source, ''))
        self.show_input_file(source == 'file')

    def show_input_file(self, show):
        self.input_file_panel.Show(show)
        self.Layout()

    def draw_preview(self):
        frame_rgb = None
        try:
            self.lock.acquire()
            if self.latest_frame is None:
                # No frame received yet: just redraw the previous bitmap, if any.
                if self._prev_bmp:
                    dc = wx.ClientDC(self.preview_panel)
                    dc.DrawBitmap(self._prev_bmp, 0, 0)
                return False

            width, height = self.preview_size
            frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB)
        finally:
            self.lock.release()

        if frame_rgb is None:
            return False

        bmp = wx.BitmapFromBuffer(width, height, frame_rgb)
        dc = wx.ClientDC(self.preview_panel)
        dc.DrawBitmap(bmp, 0, 0)
        self._prev_bmp = bmp

        if self._enter:
            ox = int(width / 2)
            oy = int(height / 2)
            if self._pause:
                # Draw a triangle representing 'play'.
                dc.DrawPolygon([(ox - 20, oy - 30), (ox - 20, oy + 30),
                                (ox + 20, oy)])
            else:
                # Draw two rectangles representing 'pause'.
                dc.DrawRectangle(ox - 20, oy - 30, 15, 60)
                dc.DrawRectangle(ox + 10, oy - 30, 15, 60)

    # wx event
    def OnTimer(self, event):
        self.lock.acquire()
        if self.latest_frame is None:
            self.lock.release()
            return
        self.lock.release()

        if not self.refresh_at_next:
            return

        self.draw_preview()
        self.refresh_at_next = False

    def __init__(self, *args, **kwargs):
        self._prev_bmp = None
        self._enter = False
        self._pause = False
        self.refresh_at_next = False
        self.latest_frame = None
        self.lock = threading.Lock()

        wx.Panel.__init__(self, *args, **kwargs)

        self.timer = wx.Timer(self)
        self.timer.Start(100)
        self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
        self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED, self.on_input_initialized)
        self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause)

        # Preview
        self.preview_size = (640, 360)
        # Preview image.
        self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size)
        self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click)
        self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview)
        self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview)

        # Video Input
        self.video_input_title_text = wx.StaticText(
            self, wx.ID_ANY, _('Video Input'))
        self.video_input_source_text = wx.StaticText(self, wx.ID_ANY, '')
        self.input_file_panel = InputFilePanel(self, wx.ID_ANY)
        self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED, self.on_input_file_added)
        self.show_input_file(False)

        self.video_input_source_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.video_input_source_sizer.Add(
            self.video_input_source_text, flag=wx.LEFT, border=10)
        self.video_input_source_sizer.Add(self.input_file_panel, proportion=1)

        # Sizer to set the width of the text box to 640.
        self.video_input_sizer = wx.BoxSizer(wx.VERTICAL)
        self.video_input_sizer.Add(self.video_input_title_text)
        self.video_input_sizer.Add(self.video_input_source_sizer,
                                   flag=wx.EXPAND | wx.ALL, border=5)
        self.video_input_sizer.Add((640, 5))

        # Top sizer
        self.top_sizer = wx.BoxSizer(wx.VERTICAL)
        self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5)
        self.top_sizer.Add(self.preview_panel)
        self.SetSizer(self.top_sizer)


if __name__ == "__main__":
    import sys
    import wx

    application = wx.App()
    frame = wx.Frame(None, wx.ID_ANY, 'Preview', size=(640, 360))
    preview = PreviewPanel(frame, size=(640, 360))
    layout = wx.BoxSizer(wx.VERTICAL)
    layout.Add(preview)
    frame.SetSizer(layout)
    frame.Show()
    application.MainLoop()
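The panel above depends on custom events imported from ikalog.ui.events (InputFileAddedEvent / EVT_INPUT_FILE_ADDED, IkalogPauseEvent / EVT_IKALOG_PAUSE, EVT_INPUT_INITIALIZED), and that module is not part of this fragment. The following is only a minimal sketch of what such a module could look like, assuming the events are plain wx.lib.newevent events whose keyword arguments (input_file, pause, source) become event attributes; the real IkaLog module may define them differently.

# Hypothetical sketch of ikalog.ui.events -- an assumption, not the real module.
import wx.lib.newevent

# NewEvent() returns an event class together with its binder object.
InputFileAddedEvent, EVT_INPUT_FILE_ADDED = wx.lib.newevent.NewEvent()
IkalogPauseEvent, EVT_IKALOG_PAUSE = wx.lib.newevent.NewEvent()
InputInitializedEvent, EVT_INPUT_INITIALIZED = wx.lib.newevent.NewEvent()

# Keyword arguments become attributes on the posted event, matching how the
# panel reads event.pause, event.source and event.input_file:
#     wx.PostEvent(window, IkalogPauseEvent(pause=True))

With definitions of this shape, the Bind and PostEvent calls in PreviewPanel and InputFilePanel work unchanged.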
[ "= run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if __name__", "not os.path.exists(challenge_name): return \"Can't download this repository\", True except git.GitCommandError: pass return '',", "bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill() else: output", "challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg return", "settings import git import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name):", "bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception as e:", "= msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if __name__ == '__main__': status", "try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download this repository\", True except git.GitCommandError:", "error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True)", "import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download", "make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process = subprocess.Popen(make_command, stdin=None,", "= os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name) if error:", "challenge_name) if error: status_json['clone_error'] = msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] =", "output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception as e: return \"Have", "\"Have a error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return", "subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception as e: return \"Have a error", "try: if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0:", "= clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg return msg, setup_error = run_make_setup(challenge_name)", "if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill()", "run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if __name__ ==", "run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json):", "if bg_process.returncode != 0: bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return 
output, False", "True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def", "'') msg, error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg return msg,", "import settings import git import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not", "if not os.path.exists(challenge_name): return \"Can't download this repository\", True except git.GitCommandError: pass return", "def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background:", "_run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json = dict()", "import subprocess import settings import git import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository)", "stderr=subprocess.STDOUT) return output, False except Exception as e: return \"Have a error in", "-*- encoding: utf-8 -*- import os import subprocess import settings import git import", "msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json)", "background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json = dict() challenge_repository =", "_run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json)", "status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if __name__ == '__main__':", "import os import subprocess import settings import git import requests def clone_challenge(challenge_repository, challenge_name):", "os.path.exists(challenge_name): return \"Can't download this repository\", True except git.GitCommandError: pass return '', False", "\"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json = dict() challenge_repository", "pass return '', False def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name),", "\"Can't download this repository\", True except git.GitCommandError: pass return '', False def _run_make_command(challenge_name,", "except git.GitCommandError: pass return '', False def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\",", "Exception as e: return \"Have a error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e),", "clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download this repository\", True", "def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\")", "return \"Can't download this repository\", True 
except git.GitCommandError: pass return '', False def", "if error: status_json['clone_error'] = msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg", "False except Exception as e: return \"Have a error in make {parameter} error:", "clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output']", "def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main():", "import git import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return", "except Exception as e: return \"Have a error in make {parameter} error: {error}\".format(parameter=make_parameter,", "return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name,", "download this repository\", True except git.GitCommandError: pass return '', False def _run_make_command(challenge_name, make_parameter,", "run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json", "e: return \"Have a error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def", "!= 0: bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception", "dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name)", "= dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository,", "= subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception as e: return \"Have a", "requests.post(settings.API_URL, status_json) def main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git',", "stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT)", "= subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill() else: output =", "def main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg,", "[\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None)", "make_parameter] try: if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode !=", "stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return", "stderr=None) if bg_process.returncode != 0: bg_process.kill() else: output = subprocess.check_output(make_command, 
stderr=subprocess.STDOUT) return output,", "send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name", "a error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name,", "error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\")", "background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill() else:", "msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if __name__ == '__main__': status =", "utf-8 -*- import os import subprocess import settings import git import requests def", "0: bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception as", "background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process = subprocess.Popen(make_command,", "error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg return msg, setup_error =", "return \"Have a error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name):", "make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process =", "= msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return", "return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def main(): status_json =", "msg, error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg return msg, setup_error", "os import subprocess import settings import git import requests def clone_challenge(challenge_repository, challenge_name): try:", "subprocess import settings import git import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if", "error: status_json['clone_error'] = msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if", "challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name) if", "this repository\", True except git.GitCommandError: pass return '', False def _run_make_command(challenge_name, make_parameter, background=False):", "encoding: utf-8 -*- import os import subprocess import settings import git import requests", "error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name,", "-*- import os import subprocess import settings import git import requests def clone_challenge(challenge_repository,", "make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return 
_run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name):", "status_json) def main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '')", "as e: return \"Have a error in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True", "challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] =", "in make {parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def", "status_json['clone_error'] = msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error:", "{error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\",", "\"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode", "= [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None,", "# -*- encoding: utf-8 -*- import os import subprocess import settings import git", "return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL,", "os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error']", "\"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if", "else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except Exception as e: return", "status_json): requests.post(settings.API_URL, status_json) def main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name =", "main(): status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error", "challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download this repository\", True except", "{parameter} error: {error}\".format(parameter=make_parameter, error=e), True def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return", "setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if", "def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download this repository\",", "'', False def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), 
make_parameter] try:", "repository\", True except git.GitCommandError: pass return '', False def _run_make_command(challenge_name, make_parameter, background=False): make_command", "status_json = dict() challenge_repository = os.environ.get(\"REPO\") challenge_name = challenge_repository.split('/')[-1].replace('.git', '') msg, error =", "subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None) if bg_process.returncode != 0: bg_process.kill() else: output = subprocess.check_output(make_command,", "git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download this repository\", True except git.GitCommandError: pass", "_run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if background: bg_process", "False def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter] try: if", "git.GitCommandError: pass return '', False def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\",", "git import requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't", "\"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name, status_json): requests.post(settings.API_URL, status_json) def", "bg_process.returncode != 0: bg_process.kill() else: output = subprocess.check_output(make_command, stderr=subprocess.STDOUT) return output, False except", "True except git.GitCommandError: pass return '', False def _run_make_command(challenge_name, make_parameter, background=False): make_command =", "return output, False except Exception as e: return \"Have a error in make", "requests def clone_challenge(challenge_repository, challenge_name): try: git.Git().clone(challenge_repository) if not os.path.exists(challenge_name): return \"Can't download this", "msg return msg, setup_error = run_make_setup(challenge_name) status_json['setup_output'] = msg if setup_error: return run_make_run(challenge_name)", "= challenge_repository.split('/')[-1].replace('.git', '') msg, error = clone_challenge(challenge_repository, challenge_name) if error: status_json['clone_error'] = msg", "return '', False def _run_make_command(challenge_name, make_parameter, background=False): make_command = [\"make\", \"-C\", \"{directory}\".format(directory=challenge_name), make_parameter]", "if setup_error: return run_make_run(challenge_name) send_status(challenge_name, status_json) if __name__ == '__main__': status = main()", "def run_make_setup(challenge_name): return _run_make_command(challenge_name, \"setup\") def run_make_run(challenge_name): return _run_make_command(challenge_name, \"run\", background=True) def send_status(challenge_name,", "output, False except Exception as e: return \"Have a error in make {parameter}" ]
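A minimal sketch of driving the runner end to end, assuming a local settings module that defines API_URL and a reachable Git URL in REPO; the repository address below is made up for illustration.

import os

os.environ["REPO"] = "https://example.com/some-team/some-challenge.git"  # hypothetical URL
main()  # clones the repo, runs `make setup`, launches `make run` in the background, then posts the status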
[ "{}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'), options.get('node_type'),", "registered instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def", "django.conf import settings from awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal tower command.", "settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type,", "100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)", "parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution',", "be used to register the current system, \" \"as well as the default", "type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid): if not hostname:", "Inc. # All Rights Reserved import os from django.core.management.base import BaseCommand, CommandError from", "hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only intended for", "the database for HA tracking. \"\"\" help = ( \"Add instance to the", "import os from django.core.management.base import BaseCommand, CommandError from django.db import transaction from django.conf", "@transaction.atomic def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid')) if self.changed: print(\"(changed:", "tower command. Register this instance with the database for HA tracking. \"\"\" help", "node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from", "from Django settings will be used to register the current system, \" \"as", "or enabled for Kubernetes installs). \" \"Override with `--hostname`.\" ) def add_arguments(self, parser):", "parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop',", "use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control',", "# All Rights Reserved import os from django.core.management.base import BaseCommand, CommandError from django.db", "import BaseCommand, CommandError from django.db import transaction from django.conf import settings from awx.main.models", "= ( \"Add instance to the database. \" \"When no options are provided,", "default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def", "if needed (only used or enabled for Kubernetes installs). 
\" \"Override with `--hostname`.\"", "uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed", "during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid',", "values from settings only intended for use in K8s installs') from awx.main.management.commands.register_queue import", "is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully", "2015 Ansible, Inc. # All Rights Reserved import os from django.core.management.base import BaseCommand,", "else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname))", "from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0,", "installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100,", "to register the current system, \" \"as well as the default queues if", "(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME,", "hostname, node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values", "'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type,", "not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only intended for use in", "for use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'),", "_register_hostname(self, hostname, node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with", "with the database for HA tracking. 
\"\"\" help = ( \"Add instance to", "uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings", "only intended for use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance)", "= Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already", "type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid): if not hostname: if not", "100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else:", "All Rights Reserved import os from django.core.management.base import BaseCommand, CommandError from django.db import", "\" \"as well as the default queues if needed (only used or enabled", "[], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance)", "= Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0,", "to the database. \" \"When no options are provided, values from Django settings", "for HA tracking. \"\"\" help = ( \"Add instance to the database. \"", "well as the default queues if needed (only used or enabled for Kubernetes", "import settings from awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal tower command. Register", "import Instance class Command(BaseCommand): \"\"\" Internal tower command. Register this instance with the", "from awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal tower command. Register this instance", "(c) 2015 Ansible, Inc. # All Rights Reserved import os from django.core.management.base import", "def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid')) if self.changed: print(\"(changed: True)\")", "enabled for Kubernetes installs). \" \"Override with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname',", "\"\"\" help = ( \"Add instance to the database. \" \"When no options", "if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only intended for use", "as the default queues if needed (only used or enabled for Kubernetes installs).", "intended for use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) =", "import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()", "tracking. \"\"\" help = ( \"Add instance to the database. \" \"When no", "awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal tower command. 
Register this instance with", "= changed @transaction.atomic def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid')) if", "not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only intended", "needed (only used or enabled for Kubernetes installs). \" \"Override with `--hostname`.\" )", "parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid): if not hostname: if", "help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES:", "'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname,", "{}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self, **options):", "Django settings will be used to register the current system, \" \"as well", "\" \"When no options are provided, values from Django settings will be used", "if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only", "self.changed = changed @transaction.atomic def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))", "pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered", "\" \"Override with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used", "settings will be used to register the current system, \" \"as well as", "0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed,", "instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100,", "parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance", "import transaction from django.conf import settings from awx.main.models import Instance class Command(BaseCommand): \"\"\"", "node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname))", "in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)", "\"When no options are provided, values from Django settings will be used to", "default queues if needed (only used or enabled for Kubernetes installs). 
\" \"Override", "are provided, values from Django settings will be used to register the current", "choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self,", ").register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance", "this instance with the database for HA tracking. \"\"\" help = ( \"Add", "with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\")", "will be used to register the current system, \" \"as well as the", "print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self, **options): self.changed =", "for Kubernetes installs). \" \"Override with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname',", "'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid):", "Command(BaseCommand): \"\"\" Internal tower command. Register this instance with the database for HA", "( \"Add instance to the database. \" \"When no options are provided, values", "0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if", "uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE", "print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic", "UUID\") def _register_hostname(self, hostname, node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise", "Rights Reserved import os from django.core.management.base import BaseCommand, CommandError from django.db import transaction", "\"as well as the default queues if needed (only used or enabled for", "help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid): if", "Kubernetes installs). \" \"Override with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str,", "\"Override with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during", "settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering with values from settings only intended for use in K8s", "with values from settings only intended for use in K8s installs') from awx.main.management.commands.register_queue", "K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME,", "(only used or enabled for Kubernetes installs). 
\" \"Override with `--hostname`.\" ) def", "already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self, **options): self.changed = False", "HA tracking. \"\"\" help = ( \"Add instance to the database. \" \"When", "used to register the current system, \" \"as well as the default queues", "Instance class Command(BaseCommand): \"\"\" Internal tower command. Register this instance with the database", "RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue(", "\"\"\" Internal tower command. Register this instance with the database for HA tracking.", "type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\")", "django.db import transaction from django.conf import settings from awx.main.models import Instance class Command(BaseCommand):", "settings only intended for use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue (changed,", "the database. \" \"When no options are provided, values from Django settings will", "register the current system, \" \"as well as the default queues if needed", "is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) =", "settings from awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal tower command. Register this", "options are provided, values from Django settings will be used to register the", "RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname,", "from django.db import transaction from django.conf import settings from awx.main.models import Instance class", "instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self,", "add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control',", "from django.core.management.base import BaseCommand, CommandError from django.db import transaction from django.conf import settings", "transaction from django.conf import settings from awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal", "used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\")", "raise CommandError('Registering with values from settings only intended for use in K8s installs')", "the current system, \" \"as well as the default queues if needed (only", "queues if needed (only used or enabled for Kubernetes installs). \" \"Override with", "Reserved import os from django.core.management.base import BaseCommand, CommandError from django.db import transaction from", "the default queues if needed (only used or enabled for Kubernetes installs). 
\"", "# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved import os from", "provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node type\") parser.add_argument('--uuid', type=str,", "instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance", "used or enabled for Kubernetes installs). \" \"Override with `--hostname`.\" ) def add_arguments(self,", "from settings only intended for use in K8s installs') from awx.main.management.commands.register_queue import RegisterQueue", "if changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed =", "no options are provided, values from Django settings will be used to register", "Ansible, Inc. # All Rights Reserved import os from django.core.management.base import BaseCommand, CommandError", "instance to the database. \" \"When no options are provided, values from Django", "current system, \" \"as well as the default queues if needed (only used", "BaseCommand, CommandError from django.db import transaction from django.conf import settings from awx.main.models import", ") def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str,", "\"Add instance to the database. \" \"When no options are provided, values from", "database for HA tracking. \"\"\" help = ( \"Add instance to the database.", "CommandError from django.db import transaction from django.conf import settings from awx.main.models import Instance", "help = ( \"Add instance to the database. \" \"When no options are", "RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register()", "(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname)) else:", "Node type\") parser.add_argument('--uuid', type=str, help=\"Instance UUID\") def _register_hostname(self, hostname, node_type, uuid): if not", "instance with the database for HA tracking. \"\"\" help = ( \"Add instance", "Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already registered", "installs). 
\" \"Override with `--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname", "def _register_hostname(self, hostname, node_type, uuid): if not hostname: if not settings.AWX_AUTO_DEPROVISION_INSTANCES: raise CommandError('Registering", "node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True,", "system, \" \"as well as the default queues if needed (only used or", "def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid',", "[], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE ).register() else: (changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid) if changed:", "command. Register this instance with the database for HA tracking. \"\"\" help =", "os from django.core.management.base import BaseCommand, CommandError from django.db import transaction from django.conf import", "class Command(BaseCommand): \"\"\" Internal tower command. Register this instance with the database for", "`--hostname`.\" ) def add_arguments(self, parser): parser.add_argument('--hostname', dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type',", "Copyright (c) 2015 Ansible, Inc. # All Rights Reserved import os from django.core.management.base", "values from Django settings will be used to register the current system, \"", "type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance", "Register this instance with the database for HA tracking. \"\"\" help = (", "help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help=\"Instance Node", "dest='hostname', type=str, help=\"Hostname used during provisioning\") parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'],", "changed @transaction.atomic def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid')) if self.changed:", "registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self, **options): self.changed = False self._register_hostname(options.get('hostname'),", "from django.conf import settings from awx.main.models import Instance class Command(BaseCommand): \"\"\" Internal tower", "django.core.management.base import BaseCommand, CommandError from django.db import transaction from django.conf import settings from", "awx.main.management.commands.register_queue import RegisterQueue (changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [],", "provided, values from Django settings will be used to register the current system,", "Internal tower command. Register this instance with the database for HA tracking. \"\"\"", "database. 
\" \"When no options are provided, values from Django settings will be", "Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID) RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register() RegisterQueue( settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [],", "changed: print(\"Successfully registered instance {}\".format(hostname)) else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed", "CommandError('Registering with values from settings only intended for use in K8s installs') from", "else: print(\"Instance already registered {}\".format(instance.hostname)) self.changed = changed @transaction.atomic def handle(self, **options): self.changed" ]
[ "k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '=' + v) return '&'.join(l)", "from micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not %s\"", "'=' + v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not", "bytes, not %s\" % s.__class__.__name__) # Strip off the trailing newline encoded =", "res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s): if ' ' in", "# reduced from https://github.com/blainegarrett/urequests2 import binascii always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def", "def urlencode(query): if isinstance(query, dict): query = query.items() l = [] for k,", "from https://github.com/blainegarrett/urequests2 import binascii always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res", "\"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not", "return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)):", "not isinstance(v, list): v = [v] for value in v: k = quote_plus(str(k))", "quote_plus(str(value)) l.append(k + '=' + v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython", "[v] for value in v: k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k +", "in s: if c in always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res)", "quote_plus(s): if ' ' in s: s = s.replace(' ', '+') return quote(s)", "(bytes, bytearray)): raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__) # Strip off the", "res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s): if ' ' in s: s", "continue res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s): if ' ' in s:", "'_.-') def quote(s): res = [] for c in s: if c in", "ord(c)) return ''.join(res) def quote_plus(s): if ' ' in s: s = s.replace('", "def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected", "bytearray)): raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__) # Strip off the trailing", "= s.replace(' ', '+') return quote(s) def urlencode(query): if isinstance(query, dict): query =", "[] for c in s: if c in always_safe: res.append(c) continue res.append('%%%x' %", "' in s: s = s.replace(' ', '+') return quote(s) def urlencode(query): if", "for k, v in query: if not isinstance(v, list): v = [v] for", "return ''.join(res) def quote_plus(s): if ' ' in s: s = s.replace(' ',", "dict): query = query.items() l = [] for k, v in query: if", "quote(s) def urlencode(query): if isinstance(query, dict): query = query.items() l = [] for", "return quote(s) def urlencode(query): if isinstance(query, dict): query = query.items() l = []", "def quote_plus(s): if ' ' in s: s = s.replace(' ', '+') return", "b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes,", "binascii always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res = [] for", "always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s): if ' '", "v: k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '=' + v) return", "query: if not isinstance(v, list): v = [v] for 
value in v: k", "def quote(s): res = [] for c in s: if c in always_safe:", "if not isinstance(v, list): v = [v] for value in v: k =", "% ord(c)) return ''.join(res) def quote_plus(s): if ' ' in s: s =", "c in always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s): if", "[] for k, v in query: if not isinstance(v, list): v = [v]", "s: if c in always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res) def", "TypeError(\"expected bytes, not %s\" % s.__class__.__name__) # Strip off the trailing newline encoded", "reduced from https://github.com/blainegarrett/urequests2 import binascii always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s):", "isinstance(query, dict): query = query.items() l = [] for k, v in query:", "= quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '=' + v) return '&'.join(l) def", "for value in v: k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '='", "import binascii always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res = []", "always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res = [] for c", "''.join(res) def quote_plus(s): if ' ' in s: s = s.replace(' ', '+')", "quote(s): res = [] for c in s: if c in always_safe: res.append(c)", "s.replace(' ', '+') return quote(s) def urlencode(query): if isinstance(query, dict): query = query.items()", "micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not %s\" %", "% s.__class__.__name__) # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] return encoded", "= quote_plus(str(value)) l.append(k + '=' + v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from", "= [v] for value in v: k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k", "+ '=' + v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if", "quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '=' + v) return '&'.join(l) def b64encode(s):", "v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s, (bytes,", "= [] for k, v in query: if not isinstance(v, list): v =", "urlencode(query): if isinstance(query, dict): query = query.items() l = [] for k, v", "raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__) # Strip off the trailing newline", "s: s = s.replace(' ', '+') return quote(s) def urlencode(query): if isinstance(query, dict):", "in query: if not isinstance(v, list): v = [v] for value in v:", "v = quote_plus(str(value)) l.append(k + '=' + v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced", "v in query: if not isinstance(v, list): v = [v] for value in", "if isinstance(query, dict): query = query.items() l = [] for k, v in", "in always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s): if '", "value in v: k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '=' +", "+ v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s,", "not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__) # Strip", "if ' ' in s: s = s.replace(' ', '+') return quote(s) def", "isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__) # Strip off", "s = 
s.replace(' ', '+') return quote(s) def urlencode(query): if isinstance(query, dict): query", "'+') return quote(s) def urlencode(query): if isinstance(query, dict): query = query.items() l =", "query = query.items() l = [] for k, v in query: if not", "if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__) #", "k, v in query: if not isinstance(v, list): v = [v] for value", "in s: s = s.replace(' ', '+') return quote(s) def urlencode(query): if isinstance(query,", "%s\" % s.__class__.__name__) # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1] return", "https://github.com/blainegarrett/urequests2 import binascii always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res =", "= query.items() l = [] for k, v in query: if not isinstance(v,", "in v: k = quote_plus(str(k)) v = quote_plus(str(value)) l.append(k + '=' + v)", "not %s\" % s.__class__.__name__) # Strip off the trailing newline encoded = binascii.b2a_base64(s)[:-1]", "', '+') return quote(s) def urlencode(query): if isinstance(query, dict): query = query.items() l", "l = [] for k, v in query: if not isinstance(v, list): v", "for c in s: if c in always_safe: res.append(c) continue res.append('%%%x' % ord(c))", "c in s: if c in always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return", "base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise TypeError(\"expected bytes, not %s\" % s.__class__.__name__)", "'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res = [] for c in s: if", "'0123456789' '_.-') def quote(s): res = [] for c in s: if c", "isinstance(v, list): v = [v] for value in v: k = quote_plus(str(k)) v", "' ' in s: s = s.replace(' ', '+') return quote(s) def urlencode(query):", "v = [v] for value in v: k = quote_plus(str(k)) v = quote_plus(str(value))", "res = [] for c in s: if c in always_safe: res.append(c) continue", "list): v = [v] for value in v: k = quote_plus(str(k)) v =", "l.append(k + '=' + v) return '&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\"", "('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res = [] for c in s:", "query.items() l = [] for k, v in query: if not isinstance(v, list):", "'&'.join(l) def b64encode(s): \"\"\"Reproduced from micropython base64\"\"\" if not isinstance(s, (bytes, bytearray)): raise", "= ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' '0123456789' '_.-') def quote(s): res = [] for c in", "= [] for c in s: if c in always_safe: res.append(c) continue res.append('%%%x'", "if c in always_safe: res.append(c) continue res.append('%%%x' % ord(c)) return ''.join(res) def quote_plus(s):" ]
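A quick illustration of the helpers above; the expected output follows directly from the definitions (key order mirrors the input dict).

params = urlencode({"q": "hello world", "page": 2})
print(params)             # q=hello+world&page=2
print(b64encode(b"abc"))  # b'YWJj'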
[ ".models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file = True def", "= textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going to", "@page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file = True def get_response(self, request, textfile,", "class TextFilePlugin(PageTypePlugin): model = TextFile is_file = True def get_response(self, request, textfile, **kwargs):", "PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file", "content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going to enforce this. return", "HttpResponse from fluent_pages.extensions import PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin):", "= TextFile is_file = True def get_response(self, request, textfile, **kwargs): content_type = textfile.content_type", "TextFile is_file = True def get_response(self, request, textfile, **kwargs): content_type = textfile.content_type if", "content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going", "import PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile", "from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file = True", "TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file = True def get_response(self, request,", "page_type_pool from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file =", "textfile, **kwargs): content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\"", "**kwargs): content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" #", "from django.http import HttpResponse from fluent_pages.extensions import PageTypePlugin, page_type_pool from .models import TextFile", "from fluent_pages.extensions import PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model", "fluent_pages.extensions import PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model =", "textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going to enforce", "TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going to enforce this. 
return HttpResponse(content=textfile.content, content_type=content_type)", "is_file = True def get_response(self, request, textfile, **kwargs): content_type = textfile.content_type if content_type", "= True def get_response(self, request, textfile, **kwargs): content_type = textfile.content_type if content_type in", "if content_type in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going to enforce this.", "django.http import HttpResponse from fluent_pages.extensions import PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register", "model = TextFile is_file = True def get_response(self, request, textfile, **kwargs): content_type =", "TextFilePlugin(PageTypePlugin): model = TextFile is_file = True def get_response(self, request, textfile, **kwargs): content_type", "in TextFile.UTF8_TYPES: content_type += \"; charset=utf-8\" # going to enforce this. return HttpResponse(content=textfile.content,", "True def get_response(self, request, textfile, **kwargs): content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES:", "get_response(self, request, textfile, **kwargs): content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type +=", "import TextFile @page_type_pool.register class TextFilePlugin(PageTypePlugin): model = TextFile is_file = True def get_response(self,", "def get_response(self, request, textfile, **kwargs): content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type", "import HttpResponse from fluent_pages.extensions import PageTypePlugin, page_type_pool from .models import TextFile @page_type_pool.register class", "request, textfile, **kwargs): content_type = textfile.content_type if content_type in TextFile.UTF8_TYPES: content_type += \";" ]
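The plugin relies on the TextFile model exposing a UTF8_TYPES collection of text-based MIME types. The actual values live in .models and are not shown here; the sketch below is only a hypothetical illustration of the shape that constant would take.

# Hypothetical sketch of the constant the plugin checks against; the real
# TextFile model in .models defines the actual values.
UTF8_TYPES = [
    'text/plain',
    'text/html',
    'application/json',
]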
[ "= pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music)", "__init__(self): \"\"\" Inicializamos el sonido y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3'", "<filename>Alien Invasion/sound_fx.py import pygame class Sound_fx: \"\"\"Clase que controla el sonido.\"\"\" def __init__(self):", "pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1)", "= pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init()", "Invasion/sound_fx.py import pygame class Sound_fx: \"\"\"Clase que controla el sonido.\"\"\" def __init__(self): \"\"\"", "'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn =", "Sound_fx: \"\"\"Clase que controla el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el sonido y", "self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def", "pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music)", "= pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self):", "self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav')", "class Sound_fx: \"\"\"Clase que controla el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el sonido", "controla el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el sonido y cargamos los recursos\"\"\"", "play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1) def play_shot(self): self.shot_cn.play(self.shot) def play_alien(self): self.alien_cn.play(self.alien)", "self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5)", "el sonido y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3'", "Inicializamos el sonido y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music =", "que controla el sonido.\"\"\" 
def __init__(self): \"\"\" Inicializamos el sonido y cargamos los", "pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1) def play_shot(self):", "el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el sonido y cargamos los recursos\"\"\" self.init_music", "y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot =", "pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def", "\"\"\"Clase que controla el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el sonido y cargamos", "recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien =", "pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1) def play_shot(self): self.shot_cn.play(self.shot) def", "self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self):", "cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav')", "\"\"\" Inicializamos el sonido y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music", "def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1) def play_shot(self): self.shot_cn.play(self.shot) def play_alien(self):", "= pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1) def", "los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien", "pygame class Sound_fx: \"\"\"Clase que controla el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el", "def __init__(self): \"\"\" Inicializamos el sonido y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title", "self.alien_cn = pygame.mixer.Channel(2) self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1)", "Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1)", "= 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = 
pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn", "import pygame class Sound_fx: \"\"\"Clase que controla el sonido.\"\"\" def __init__(self): \"\"\" Inicializamos", "= 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn =", "'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn = pygame.mixer.Channel(2)", "sonido.\"\"\" def __init__(self): \"\"\" Inicializamos el sonido y cargamos los recursos\"\"\" self.init_music =", "sonido y cargamos los recursos\"\"\" self.init_music = 'sound/01_Title Screen.mp3' self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot", "self.game_music = 'sound/12_Invader_Homeworld.mp3' self.shot = pygame.mixer.Sound('sound/shot.wav') self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav') self.shot_cn = pygame.mixer.Channel(1) self.alien_cn", "self.alien_cn.set_volume(0.5) pygame.mixer.init() def play_init_music(self): pygame.mixer.music.load(self.init_music) pygame.mixer.music.play(-1) def play_game_music(self): pygame.mixer.music.load(self.game_music) pygame.mixer.music.play(-1) def play_shot(self): self.shot_cn.play(self.shot)" ]
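A minimal usage sketch for the class above, assuming pygame is installed and the sound/ assets referenced in the constructor exist on disk; the game-loop wiring is illustrative, not taken from the project.

# Illustrative wiring only; the file paths come from the class above.
import pygame

pygame.init()            # initialises all pygame modules, including the mixer
sfx = Sound_fx()
sfx.play_init_music()    # loop the title-screen music
# ... later, once the game starts and events arrive:
sfx.play_game_music()    # switch to the in-game track
sfx.play_shot()          # fire sound on its own channel
sfx.play_alien()         # alien-hit sound at half volume

Note that the constructor creates Sound objects and channels before its own pygame.mixer.init() call, so the mixer must already be initialised (for example via pygame.init()) by the time Sound_fx() is constructed.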
[ "br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like", "+= \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token def __repr__(self): return", "\"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36", "like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url", "\"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11;", "(Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url", "+ \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" %", "Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\"", "\\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\"", "(KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url += \"|Content-Type=\"", "\"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token", "url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url", "self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url", "= playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url", "\"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\"", "self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token def __repr__(self):", "Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", 
\\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url +=", "self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] #", "\"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\"", "\"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google", "playback): self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"]", "self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url", "self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url +=", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\",", "Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url += \"|Content-Type=\" self.license_url +=", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url +=", "(X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\",", "Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url +=", "= url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\"", "self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url", "deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML,", "self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url", "self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url", "+= \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url +=", "\"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\"", "NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url =", "= self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36", "% self.license_token self.license_url = url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url +=", "PlayBack: def __init__(self, playback): self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"]", "+= \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url +=", "# self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like", "= playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url =", "= playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT", "\"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token def", "like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url += \"|Content-Type=\" self.license_url", "url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\"", "\"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url", "+= \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\"", "+= \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63", "playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0", "+= \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux x86_64)", "+= \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url +=", "self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows", "Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" 
Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\"", "Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url", "\"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url", "url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64)", "Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url", "class PlayBack: def __init__(self, playback): self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type =", "= playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url +", "self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url +=", "playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"]", "= url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0", "(KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\"", "\"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\"", "self.license_token self.license_url = url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip,", "self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\" %", "+= \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url +=", "self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url", "self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "__init__(self, playback): self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token =", "playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + 
\"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0;", "10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token self.license_url = url", "Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url +=", "Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url", "\"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token def __repr__(self): return self.playback.__repr__()", "<filename>resources/lib/api/models/playback.py class PlayBack: def __init__(self, playback): self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type", "def __init__(self, playback): self.playback = playback self.src = playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token", "self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0 (X11; Linux", "self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url +=", "self.license_url = url self.license_url += \"|Content-Type=\" self.license_url += \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate,", "+= \"&Accept=*/*\" self.license_url += \"&Accept-Encoding=gzip, deflate, br\" self.license_url += \"&Accept-Language=en-US,en;q=0.9,da;q=0.8\" self.license_url += \"&User-Agent=Mozilla/5.0", "+= \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\"", "+= \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url += \"&x-dt-auth-token=%s|R{SSM}|JBlicense\" % self.license_token", "self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url = url + \"|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,", "self.license_url += \"&Sec-Fetch-Dest=empty\" self.license_url += \"&Sec-Fetch-Mode=cors\" self.license_url += \"&Sec-Fetch-Site=cross-site\" self.license_url += \"&Host=lic.drmtoday.com\" self.license_url", "\\\" Not;A Brand\\\";v=\\\"99\\\", \\\"Chromium\\\";v=\\\"93\\\"\" self.license_url += \"&sec-ch-ua-mobile=?0\" self.license_url += \"&sec-ch-ua-platform=\\\"Linux\\\"\" self.license_url += \"&Sec-Fetch-Dest=empty\"", "playback[\"smil\"][\"video\"][\"src\"] self.mime_type = playback[\"smil\"][\"video\"][\"type\"] self.license_token = playback[\"smil\"][\"securityLicense\"][\"token\"] url = self.playback[\"smil\"][\"securityLicense\"][\"url\"] # self.license_url =", "Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += \"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\"", "x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\" self.license_url += 
\"&sec-ch-ua=\\\"Google Chrome\\\";v=\\\"93\\\", \\\" Not;A" ]
[ "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)),", "], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution', name='pokemon', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon'), ), ]", "models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon',", "('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True,", "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon',", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat',", "models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='',", "('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon',", "unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id',", "models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution', name='pokemon', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE,", "on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution', name='pokemon', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon'),", "migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', 
max_length=100)), ('stat', models.ForeignKey(default='',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ] operations", "('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution', name='pokemon', field=models.ForeignKey(default='',", "('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel(", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ],", "verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id',", "models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution',", "[ ('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution', name='pokemon', field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon'), ),", "name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)),", "), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort',", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ],", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel(", "<reponame>andresRah/PokemonDjango # Generated by Django 3.0.3 on 2020-02-24 05:12 from django.db import migrations,", "Generated by Django 3.0.3 on 2020-02-24 05:12 from django.db import migrations, models import", "operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='',", 
"Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution', fields=[", "models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)),", "max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='',", "('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100,", "3.0.3 on 2020-02-24 05:12 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)),", "models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True,", "dependencies = [ ('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id',", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel(", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')),", "verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ),", "verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ),", "serialize=False, verbose_name='ID')), ('name', models.CharField(default='', 
max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ],", "max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField( model_name='evolution', name='pokemon',", "models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat',", "class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ),", "serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ],", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'),", "('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel(", "name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)),", "on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement',", "by Django 3.0.3 on 2020-02-24 05:12 from django.db import migrations, models import django.db.models.deletion", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ] operations =", "= [ ('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight',", "models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[", "('pokemon', 
models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "05:12 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "on 2020-02-24 05:12 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "[ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name',", "= [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)),", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ),", "max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='',", "), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height',", "] operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId',", "2020-02-24 05:12 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "'0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE,", "verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel(", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ]", "models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[", "('id', 
models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='',", "to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='',", "primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')),", "], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)),", "('pokemon', '0001_initial'), ] operations = [ migrations.CreateModel( name='Evolution', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')), ], ), migrations.CreateModel( name='StatsPokemon', fields=[ ('id',", "max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('baseStat', models.CharField(default='', max_length=100)), ('effort', models.CharField(default='', max_length=100)), ('pokemon', models.ForeignKey(default='',", "('height', models.CharField(default='', max_length=100)), ('weight', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='StatElement', fields=[ ('id', models.AutoField(auto_created=True,", "('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User', ), migrations.AddField(", "migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)), ('height', models.CharField(default='',", "models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100)), ('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')), ], ), migrations.DeleteModel( name='User',", "# Generated by Django 3.0.3 on 2020-02-24 05:12 from django.db import migrations, models", "Django 3.0.3 on 2020-02-24 05:12 from django.db import migrations, models import django.db.models.deletion class", "], ), migrations.CreateModel( name='Pokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100, unique=True)),", "primary_key=True, serialize=False, verbose_name='ID')), ('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon',", "serialize=False, verbose_name='ID')), 
('evolutionChainId', models.CharField(default='', max_length=100)), ('name', models.CharField(default='', max_length=100)), ], ), migrations.CreateModel( name='Pokemon', fields=[", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('pokemon', '0001_initial'), ] operations = [", "), migrations.CreateModel( name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url',", "name='StatsPokemon', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='', max_length=100)), ('url', models.URLField(default='', max_length=100))," ]
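For orientation, the model classes this migration implies would look roughly like the following. This is reconstructed from the field definitions above, not taken from the project's actual pokemon/models.py, which remains the source of truth.

# Rough equivalent of the schema the migration creates (illustrative only).
from django.db import models


class Pokemon(models.Model):
    name = models.CharField(max_length=100, unique=True)
    height = models.CharField(default='', max_length=100)
    weight = models.CharField(default='', max_length=100)


class Evolution(models.Model):
    evolutionChainId = models.CharField(default='', max_length=100)
    name = models.CharField(default='', max_length=100)
    pokemon = models.ForeignKey(Pokemon, default='', on_delete=models.CASCADE)


class StatElement(models.Model):
    baseStat = models.CharField(default='', max_length=100)
    effort = models.CharField(default='', max_length=100)
    pokemon = models.ForeignKey(Pokemon, default='', on_delete=models.CASCADE)


class StatsPokemon(models.Model):
    name = models.CharField(default='', max_length=100)
    url = models.URLField(default='', max_length=100)
    stat = models.ForeignKey(StatElement, default='', on_delete=models.CASCADE)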
[ "self._literal_value == 'true' else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def", "@staticmethod def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class", "get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n'", "self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self):", "NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value", "= 1 int = 2 double = 3 string = 4 class BaseLiteralNode(AbstractNode,", "def _run_scope_check(self): pass # no checks needed def _run_type_check(self): pass # no checks", "generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label()", "f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class", "raise NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return", "no checks needed def _run_type_check(self): pass # no checks needed def generate_code(self): raise", "= 1 if self._literal_value == 'true' else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n'", "_run_type_check(self): pass # no checks needed def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type:", "# aliases NULL = 0 BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL =", "$v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode):", ".utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0 Bool = 1 Int", "import AbstractNode from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0 Bool", "BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass # no checks needed def _run_type_check(self):", "def generate_code(self): value = 1 if self._literal_value == 'true' else 0 self.ctx.text_segment +=", "def __init__(self, ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type)", "1 int = 2 double = 3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin):", "= 2 Double = 3 String = 4 # aliases NULL = 0", "f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class", "aliases NULL = 0 BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL = 3", "NULL = 0 BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL", "import enum import struct from .abstract import AbstractNode from .utils import ValuedNodeMixin, NodeContext", "= value def _run_scope_check(self): pass # no checks needed def _run_type_check(self): pass #", "import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0 
Bool = 1 Int =", "self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment", "def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode):", "class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.double\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tl.d", "class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self):", "+= f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment +=", "STRINGLITERAL = 4 bool = 1 int = 2 double = 3 string", "bool = 1 int = 2 double = 3 string = 4 class", "f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.double\\t{self._literal_value}\\n'", "str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass #", "from .abstract import AbstractNode from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null =", "# no checks needed def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str) ->", "return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment", "value = 1 if self._literal_value == 'true' else 0 self.ctx.text_segment += f'\\tli $v0,", "= 3 STRINGLITERAL = 4 bool = 1 int = 2 double =", "Bool = 1 Int = 2 Double = 3 String = 4 #", "AbstractNode from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0 Bool =", "String = 4 # aliases NULL = 0 BOOLEANLITERAL = 1 INTLITERAL =", "class PrimitiveTypes(enum.Enum): Null = 0 Bool = 1 Int = 2 Double =", ".abstract import AbstractNode from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0", "1 INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL = 4 bool = 1", "self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if", "= 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str, value: str):", "needed def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type]", "generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1", "import struct from .abstract import AbstractNode from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum):", "PrimitiveTypes(enum.Enum): Null = 0 Bool = 1 Int = 2 Double = 3", "checks needed def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes: return", "Null = 
0 Bool = 1 Int = 2 Double = 3 String", "INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL = 4 bool = 1 int", "$v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.double\\t{self._literal_value}\\n' self.ctx.text_segment", "$zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.double\\t{self._literal_value}\\n' self.ctx.text_segment +=", "+= f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if self._literal_value", "== 'true' else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self):", "super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass # no", "ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0 Bool = 1 Int = 2", "if self._literal_value == 'true' else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode):", "label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode):", "no checks needed def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes:", "Int = 2 Double = 3 String = 4 # aliases NULL =", "Double = 3 String = 4 # aliases NULL = 0 BOOLEANLITERAL =", "self._literal_value = value def _run_scope_check(self): pass # no checks needed def _run_type_check(self): pass", "+= f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment +=", "IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value", "self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli $v0,", "get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def", "$v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if self._literal_value == 'true'", "NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value", "def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.double\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tl.d $f0, {label}\\n'", "= 4 bool = 1 int = 2 double = 3 string =", "str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self):", "string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str, value:", "= 1 INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL = 4 bool =", "class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if self._literal_value == 'true' else 0", "def 
generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n'", "generate_code(self): value = 1 if self._literal_value == 'true' else 0 self.ctx.text_segment += f'\\tli", "value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def", "'true' else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment", "= 1 Int = 2 Double = 3 String = 4 # aliases", "value def _run_scope_check(self): pass # no checks needed def _run_type_check(self): pass # no", "class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla", "= 0 BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL =", "3 String = 4 # aliases NULL = 0 BOOLEANLITERAL = 1 INTLITERAL", "+= f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n'", "else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment +=", "int = 2 double = 3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def", "0 BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL = 4", "-> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label", "BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type", "# no checks needed def _run_type_check(self): pass # no checks needed def generate_code(self):", "self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass # no checks needed", "1 if self._literal_value == 'true' else 0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class", "self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment +=", "__init__(self, ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value", "4 bool = 1 int = 2 double = 3 string = 4", "enum import struct from .abstract import AbstractNode from .utils import ValuedNodeMixin, NodeContext class", "= self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def", "= 3 String = 4 # aliases NULL = 0 BOOLEANLITERAL = 1", "{self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if self._literal_value == 'true' else", "PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label =", "= 0 Bool = 1 Int = 2 Double = 3 String =", "{label}\\n' class IntLiteralNode(BaseLiteralNode): def 
generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def", "PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment", "BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if self._literal_value == 'true' else 0 self.ctx.text_segment", "2 Double = 3 String = 4 # aliases NULL = 0 BOOLEANLITERAL", "= 2 DOUBLELITERAL = 3 STRINGLITERAL = 4 bool = 1 int =", "class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self):", "2 double = 3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx:", "= 2 double = 3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self,", "def generate_code(self): self.ctx.text_segment += f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value =", "def _run_type_check(self): pass # no checks needed def generate_code(self): raise NotImplementedError @staticmethod def", "generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def get_value(self):", "+= f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n'", "3 STRINGLITERAL = 4 bool = 1 int = 2 double = 3", "def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment +=", "4 # aliases NULL = 0 BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL", "f'\\tli $v0, {self._literal_value}\\n' class BoolLiteralNode(BaseLiteralNode): def generate_code(self): value = 1 if self._literal_value ==", "from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null = 0 Bool = 1", "3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str,", "checks needed def _run_type_check(self): pass # no checks needed def generate_code(self): raise NotImplementedError", "class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx)", "self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass # no checks", "= BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass # no checks needed def", "0 Bool = 1 Int = 2 Double = 3 String = 4", "pass # no checks needed def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str)", "NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label", "{value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def", "ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type =", "str, value: str): super(BaseLiteralNode, 
self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self):", "struct from .abstract import AbstractNode from .utils import ValuedNodeMixin, NodeContext class PrimitiveTypes(enum.Enum): Null", "2 DOUBLELITERAL = 3 STRINGLITERAL = 4 bool = 1 int = 2", "NodeContext class PrimitiveTypes(enum.Enum): Null = 0 Bool = 1 Int = 2 Double", "1 Int = 2 Double = 3 String = 4 # aliases NULL", "self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment", "StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0,", "DOUBLELITERAL = 3 STRINGLITERAL = 4 bool = 1 int = 2 double", "ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value =", "f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class IntLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tli", "def generate_code(self): raise NotImplementedError @staticmethod def get_type(value_type: str) -> PrimitiveTypes: return PrimitiveTypes[value_type] def", "BOOLEANLITERAL = 1 INTLITERAL = 2 DOUBLELITERAL = 3 STRINGLITERAL = 4 bool", "return PrimitiveTypes[value_type] def get_value(self): return self._literal_value class StringLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label()", "def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label =", "$v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0, $zero\\n' class DoubleLiteralNode(BaseLiteralNode):", "= 4 # aliases NULL = 0 BOOLEANLITERAL = 1 INTLITERAL = 2", "self.ctx.text_segment += f'\\tli $v0, {value}\\n' class NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove $v0,", "double = 3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext,", "= 3 string = 4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type:", "4 class BaseLiteralNode(AbstractNode, ValuedNodeMixin): def __init__(self, ctx: NodeContext, value_type: str, value: str): super(BaseLiteralNode,", "DoubleLiteralNode(BaseLiteralNode): def generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.double\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tl.d $f0,", "pass # no checks needed def _run_type_check(self): pass # no checks needed def", "value: str): super(BaseLiteralNode, self).__init__(ctx) self._value_type = BaseLiteralNode.get_type(value_type) self._literal_value = value def _run_scope_check(self): pass", "generate_code(self): label = self.ctx.label_generator.get_label() self.ctx.data_segment += f'{label}:\\t.asciiz\\t{self._literal_value}\\n' self.ctx.text_segment += f'\\tla $v0, {label}\\n' class", "needed def _run_type_check(self): pass # no checks needed def generate_code(self): raise NotImplementedError @staticmethod", "0 self.ctx.text_segment += f'\\tli $v0, {value}\\n' class 
NullLiteralNode(BaseLiteralNode): def generate_code(self): self.ctx.text_segment += f'\\tmove", "_run_scope_check(self): pass # no checks needed def _run_type_check(self): pass # no checks needed" ]
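To make the emission pattern above concrete, here is a small, self-contained sketch of the MIPS text a context would accumulate for a string and an int literal. The _Ctx and _Labels classes are hypothetical stand-ins for the project's NodeContext and label generator (which live in .utils and are not shown here), and the label format is an assumption.

# Hypothetical stand-ins: only the attributes touched by generate_code() are modelled.
class _Labels:
    def __init__(self):
        self._counter = 0

    def get_label(self):
        self._counter += 1
        return f'_L{self._counter}'  # assumed label format


class _Ctx:
    def __init__(self):
        self.data_segment = '.data\n'
        self.text_segment = '.text\n'
        self.label_generator = _Labels()


ctx = _Ctx()

# What StringLiteralNode would emit for the literal "hi":
label = ctx.label_generator.get_label()
ctx.data_segment += f'{label}:\t.asciiz\t"hi"\n'
ctx.text_segment += f'\tla $v0, {label}\n'

# What IntLiteralNode would emit for the literal 42:
ctx.text_segment += '\tli $v0, 42\n'

print(ctx.data_segment)
print(ctx.text_segment)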
[ "kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 =", "for sharpening: kernel_sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])", "[4, 16, 24, 16, 4], [6, 24, -476, 24, 6], [4, 16, 24,", "image) kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) #", "cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3)", "kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image =", "0], [-1, -2, -1]]) outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1,", "kernel_blur = 1 / 9 * np.array([[1, 1, 1], [1, 1, 1], [1,", "original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image,", "using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original image: image = cv2.imread('cat-face.png') #", "[1, 2, 1]]) # Try a kernel for embossing: kernel_emboss = np.array([[-2, -1,", "using cv2.filter2D() \"\"\" # Import required packages: import cv2 import numpy as np", "title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the", "[6, 24, -476, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6,", "show_with_matplotlib(blur_image, \"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9)", "np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]]) kernel_unsharp_masking = -1 /", "0, -1], [0, 0, 0], [-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0, 1, 0],", "# Apply all the kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image,", "\"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline image\", 12)", "1], [1, 1, 1]]) gaussian_blur = 1 / 16 * np.array([[1, 2, 1],", "-1], [2, 0, -2], [1, 0, -1]]) sobel_y_kernel = np.array([[1, 2, 1], [0,", "kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image =", "the image) kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])", "\"edge detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur", "show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline image\",", "plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original", "= np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1,", "= cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1,", "= cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1,", "3) show_with_matplotlib(edge_image_3, \"edge detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) 
show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6)", "-1, -1], [-1, 8, -1], [-1, -1, -1]]) # Try different kernels for", "-2, -1]]) outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])", "= cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) # Show all the", "pos): \"\"\"Shows an image using matplotlib capabilities\"\"\" # Convert BGR image to RGB", "1], [0, 1, 2]]) # Try different kernels for edge detection: sobel_x_kernel =", "\"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image,", "-1, -1], [-1, 8, -1], [-1, -1, -1]]) # Apply all the kernels:", "np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1,", "show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\",", "emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image,", "kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3,", "= cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1,", "image = cv2.imread('cat-face.png') # We try different kernels # Identify kernel (does not", "8, -1], [-1, -1, -1]]) # Apply all the kernels: original_image = cv2.filter2D(image,", "0, 0]]) # Try different kernels for edge detection: kernel_edge_detection_1 = np.array([[1, 0,", "show_with_matplotlib(color_img, title, pos): \"\"\"Shows an image using matplotlib capabilities\"\"\" # Convert BGR image", "8, -1], [-1, -1, -1]]) # Try different kernels for sharpening: kernel_sharpen =", "0], [-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1], [0,", "2, 1]]) # Try a kernel for embossing: kernel_emboss = np.array([[-2, -1, 0],", "8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y", "cv2 import numpy as np import matplotlib.pyplot as plt def show_with_matplotlib(color_img, title, pos):", "= cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1,", "1, 0], [0, 0, 0]]) # Try different kernels for edge detection: kernel_edge_detection_1", "cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur)", "image using matplotlib capabilities\"\"\" # Convert BGR image to RGB img_RGB = color_img[:,", "\"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection 3\",", "-1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image", "-1, 0]]) kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6, 4, 1],", "\"\"\" # Import required packages: import cv2 import numpy as np import matplotlib.pyplot", "embossing: kernel_emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]]) #", "Comparing different kernels using cv2.filter2D() \"\"\" # Import 
required packages: import cv2 import", "kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original image: image = cv2.imread('cat-face.png')", "gaussian_blur = 1 / 16 * np.array([[1, 2, 1], [2, 4, 2], [1,", "= 1 / 9 * np.array([[1, 1, 1], [1, 1, 1], [1, 1,", "1], [1, 1, 1], [1, 1, 1]]) gaussian_blur = 1 / 16 *", "numpy as np import matplotlib.pyplot as plt def show_with_matplotlib(color_img, title, pos): \"\"\"Shows an", "-476, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]])", "outline_kernel) # Show all the images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection", "fontweight='bold') # Load the original image: image = cv2.imread('cat-face.png') # We try different", "4], [6, 24, -476, 24, 6], [4, 16, 24, 16, 4], [1, 4,", "of the figure and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\",", "different kernels using cv2.filter2D() \"\"\" # Import required packages: import cv2 import numpy", "1, 1], [1, 1, 1], [1, 1, 1]]) gaussian_blur = 1 / 16", "4, 2], [1, 2, 1]]) # Try a kernel for embossing: kernel_emboss =", "Apply all the kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1,", "-1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) # Show all the images: show_with_matplotlib(original_image,", "# Try different kernels for sharpening: kernel_sharpen = np.array([[0, -1, 0], [-1, 5,", "1, 0], [1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1],", "0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) #", "different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original image: image =", "kernels using cv2.filter2D() \"\"\" # Import required packages: import cv2 import numpy as", "kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3 =", "kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image =", "[0, 1, 2]]) # Try different kernels for edge detection: sobel_x_kernel = np.array([[1,", "-1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) #", "[-1, 5, -1], [0, -1, 0]]) kernel_unsharp_masking = -1 / 256 * np.array([[1,", "sharpening: kernel_sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]]) kernel_unsharp_masking", "edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image,", "1 / 16 * np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])", "9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline", "2], [1, 2, 1]]) # Try a kernel for embossing: kernel_emboss = np.array([[-2,", "kernels for sharpening: kernel_sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1,", "as plt def show_with_matplotlib(color_img, title, pos): \"\"\"Shows an image using matplotlib capabilities\"\"\" #", "-1]]) # Try different kernels for sharpening: kernel_sharpen = np.array([[0, -1, 0], [-1,", "the figure and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14,", "= cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = 
cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1,", "4, 1], [4, 16, 24, 16, 4], [6, 24, -476, 24, 6], [4,", "Load the original image: image = cv2.imread('cat-face.png') # We try different kernels #", "figure and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold')", "-1]]) # Apply all the kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 =", "-1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image", "16, 4], [1, 4, 6, 4, 1]]) # Try different kernels for smoothing:", "np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]]) # Try different kernels", "for edge detection: sobel_x_kernel = np.array([[1, 0, -1], [2, 0, -2], [1, 0,", "kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image =", "img_RGB = color_img[:, :, ::-1] ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off')", "Show all the images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2)", "# We try different kernels # Identify kernel (does not modify the image)", "\"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11)", "# Show all the images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\",", "image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image,", "[-1, -1, -1]]) # Apply all the kernels: original_image = cv2.filter2D(image, -1, kernel_identity)", "np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) # Try a kernel", "image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline image\", 12) # Show", "0], [0, 0, 0]]) # Try different kernels for edge detection: kernel_edge_detection_1 =", "24, -476, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4,", "2, 1], [2, 4, 2], [1, 2, 1]]) # Try a kernel for", "0], [-1, 5, -1], [0, -1, 0]]) kernel_unsharp_masking = -1 / 256 *", "kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) # Try", "[-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1], [0, 1,", "all the kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1)", "# Import required packages: import cv2 import numpy as np import matplotlib.pyplot as", "detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection 3\", 4)", "sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) # Show all", "plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions of the figure", "np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) # Try different kernels", "Convert BGR image to RGB img_RGB = color_img[:, :, ::-1] ax = plt.subplot(3,", "-1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image", "Try different kernels for edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0,", 
"sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) outline_kernel =", "and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') #", "1, 1], [1, 1, 1]]) gaussian_blur = 1 / 16 * np.array([[1, 2,", "for embossing: kernel_emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])", "show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\",", "cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel)", "np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) # Apply all the", "sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image,", "kernels for edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0], [-1,", "= cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1,", "np import matplotlib.pyplot as plt def show_with_matplotlib(color_img, title, pos): \"\"\"Shows an image using", "/ 9 * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) gaussian_blur", "cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2)", "kernel_emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]]) # Try", "np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) outline_kernel = np.array([[-1, -1,", "kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6, 4, 1], [4, 16,", "0, -2], [1, 0, -1]]) sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0],", "We try different kernels # Identify kernel (does not modify the image) kernel_identity", "1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge", "= np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]]) # Try different", "kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 =", "modify the image) kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0,", "\"gaussian blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10)", "edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image,", "-1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3", "[0, -1, 0]]) kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6, 4,", "the dimensions of the figure and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels", "detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp", "-1, 0], [-1, 5, -1], [0, -1, 0]]) kernel_unsharp_masking = -1 / 256", "plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions of the figure and set title:", "plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original image: image", 
"different kernels for smoothing: kernel_blur = 1 / 9 * np.array([[1, 1, 1],", "[0, 0, 0], [-1, -2, -1]]) outline_kernel = np.array([[-1, -1, -1], [-1, 8,", "4, 6, 4, 1]]) # Try different kernels for smoothing: kernel_blur = 1", "# Try different kernels for smoothing: kernel_blur = 1 / 9 * np.array([[1,", "-1], [-1, -1, -1]]) # Try different kernels for sharpening: kernel_sharpen = np.array([[0,", "[-1, 1, 1], [0, 1, 2]]) # Try different kernels for edge detection:", "kernel_sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]]) kernel_unsharp_masking =", "1]]) # Try different kernels for smoothing: kernel_blur = 1 / 9 *", "image to RGB img_RGB = color_img[:, :, ::-1] ax = plt.subplot(3, 4, pos)", "[0, 0, 0]]) # Try different kernels for edge detection: kernel_edge_detection_1 = np.array([[1,", "detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\",", "= 1 / 16 * np.array([[1, 2, 1], [2, 4, 2], [1, 2,", "gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image,", "-1, outline_kernel) # Show all the images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge", "matplotlib capabilities\"\"\" # Convert BGR image to RGB img_RGB = color_img[:, :, ::-1]", "\"\"\"Shows an image using matplotlib capabilities\"\"\" # Convert BGR image to RGB img_RGB", "[1, 4, 6, 4, 1]]) # Try different kernels for smoothing: kernel_blur =", "for edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0], [-1, 0,", "2, 1], [0, 0, 0], [-1, -2, -1]]) outline_kernel = np.array([[-1, -1, -1],", "/ 16 * np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) #", "np.array([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, -476,", "a kernel for embossing: kernel_emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0,", "1], [0, 0, 0], [-1, -2, -1]]) outline_kernel = np.array([[-1, -1, -1], [-1,", "image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel", "1, 1], [0, 1, 2]]) # Try different kernels for edge detection: sobel_x_kernel", "edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image,", "original image: image = cv2.imread('cat-face.png') # We try different kernels # Identify kernel", "different kernels # Identify kernel (does not modify the image) kernel_identity = np.array([[0,", "kernels # Identify kernel (does not modify the image) kernel_identity = np.array([[0, 0,", ":, ::-1] ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the", "set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load", "kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image =", "image: image = cv2.imread('cat-face.png') # We try different kernels # Identify kernel (does", "[1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8,", "[-1, -2, -1]]) outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1,", "show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) 
show_with_matplotlib(outline_image, \"outline image\", 12) # Show the Figure:", "1 / 9 * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])", "cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel)", "16 * np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) # Try", "# Try different kernels for edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0,", "gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image =", "fontsize=14, fontweight='bold') # Load the original image: image = cv2.imread('cat-face.png') # We try", "0]]) # Try different kernels for edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1],", "-1, gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image", "= cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image, -1,", "0, -1], [2, 0, -2], [1, 0, -1]]) sobel_y_kernel = np.array([[1, 2, 1],", "-1], [-1, 8, -1], [-1, -1, -1]]) # Try different kernels for sharpening:", "outline_image = cv2.filter2D(image, -1, outline_kernel) # Show all the images: show_with_matplotlib(original_image, \"identity kernel\",", "* np.array([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24,", "sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) # Show all the images: show_with_matplotlib(original_image, \"identity", "2]]) # Try different kernels for edge detection: sobel_x_kernel = np.array([[1, 0, -1],", "0, 0], [-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1],", "required packages: import cv2 import numpy as np import matplotlib.pyplot as plt def", "16, 24, 16, 4], [6, 24, -476, 24, 6], [4, 16, 24, 16,", "\"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline image\", 12) # Show the Figure: plt.show()", "not modify the image) kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0,", "Import required packages: import cv2 import numpy as np import matplotlib.pyplot as plt", "the original image: image = cv2.imread('cat-face.png') # We try different kernels # Identify", "5, -1], [0, -1, 0]]) kernel_unsharp_masking = -1 / 256 * np.array([[1, 4,", "color_img[:, :, ::-1] ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create", "4, 1]]) # Try different kernels for smoothing: kernel_blur = 1 / 9", "x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline image\", 12) #", "different kernels for edge detection: sobel_x_kernel = np.array([[1, 0, -1], [2, 0, -2],", "\"\"\" Comparing different kernels using cv2.filter2D() \"\"\" # Import required packages: import cv2", "show_with_matplotlib(edge_image_3, \"edge detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image,", "kernel for embossing: kernel_emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1,", "\"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur", "sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image,", "1, 2]]) # Try 
different kernels for edge detection: sobel_x_kernel = np.array([[1, 0,", "0], [-1, 1, 1], [0, 1, 2]]) # Try different kernels for edge", "cv2.filter2D() \"\"\" # Import required packages: import cv2 import numpy as np import", "4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, -476, 24,", "image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image, \"sobel", "0, 0], [-1, -2, -1]]) outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1],", "\"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8) show_with_matplotlib(emboss_image,", "show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection", "3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\", 7)", "blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x image\", 10) show_with_matplotlib(sobel_y_image,", "-1]]) sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) outline_kernel", "-1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image", "# Try a kernel for embossing: kernel_emboss = np.array([[-2, -1, 0], [-1, 1,", "= cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1,", "capabilities\"\"\" # Convert BGR image to RGB img_RGB = color_img[:, :, ::-1] ax", "pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions of the figure and set", "1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])", "0], [0, 1, 0], [0, 0, 0]]) # Try different kernels for edge", "= plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions of the", "cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking)", "-4, 1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1],", "packages: import cv2 import numpy as np import matplotlib.pyplot as plt def show_with_matplotlib(color_img,", "7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8) show_with_matplotlib(emboss_image, \"emboss image\", 9) show_with_matplotlib(sobel_x_image, \"sobel x", "-1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image", "[2, 0, -2], [1, 0, -1]]) sobel_y_kernel = np.array([[1, 2, 1], [0, 0,", "-2], [1, 0, -1]]) sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1,", "ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions of", "* np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) gaussian_blur = 1", "blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image,", "1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection 3\", 4) show_with_matplotlib(sharpen_image,", "kernel_blur) 
gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur) emboss_image = cv2.filter2D(image, -1, kernel_emboss) sobel_x_image =", "kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]]) kernel_edge_detection_2 =", "16, 4], [6, 24, -476, 24, 6], [4, 16, 24, 16, 4], [1,", "kernels for edge detection: sobel_x_kernel = np.array([[1, 0, -1], [2, 0, -2], [1,", "1]]) gaussian_blur = 1 / 16 * np.array([[1, 2, 1], [2, 4, 2],", "= -1 / 256 * np.array([[1, 4, 6, 4, 1], [4, 16, 24,", "[1, 0, -1]]) sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2,", "unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image,", "6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) # Try", "plt.title(title) plt.axis('off') # Create the dimensions of the figure and set title: plt.figure(figsize=(12,", "detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]]) kernel_edge_detection_2", "-1], [0, 0, 0], [-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0, 1, 0], [1,", "kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) # Try", "-1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image", "\"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3)", "-1 / 256 * np.array([[1, 4, 6, 4, 1], [4, 16, 24, 16,", "= color_img[:, :, ::-1] ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') #", "/ 256 * np.array([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4],", "[-1, 8, -1], [-1, -1, -1]]) # Try different kernels for sharpening: kernel_sharpen", "= cv2.imread('cat-face.png') # We try different kernels # Identify kernel (does not modify", "# Identify kernel (does not modify the image) kernel_identity = np.array([[0, 0, 0],", "-1, -1]]) # Try different kernels for sharpening: kernel_sharpen = np.array([[0, -1, 0],", "an image using matplotlib capabilities\"\"\" # Convert BGR image to RGB img_RGB =", "2) show_with_matplotlib(edge_image_2, \"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\",", "different kernels for edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0],", "6, 4, 1], [4, 16, 24, 16, 4], [6, 24, -476, 24, 6],", "cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) # Show all the images:", "cv2.filter2D(image, -1, kernel_emboss) sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel)", "different kernels for sharpening: kernel_sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0,", "all the images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2,", "0, 1]]) kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])", "Try different kernels for smoothing: kernel_blur = 1 / 9 * np.array([[1, 1,", "Identify kernel (does not modify the image) kernel_identity = np.array([[0, 0, 0], [0,", "= cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1,", "24, 16, 4], [1, 4, 6, 4, 1]]) # Try different kernels for", "as np import matplotlib.pyplot as plt def 
show_with_matplotlib(color_img, title, pos): \"\"\"Shows an image", "[1, 1, 1]]) gaussian_blur = 1 / 16 * np.array([[1, 2, 1], [2,", "np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) sobel_y_kernel = np.array([[1, 2,", "cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original image: image = cv2.imread('cat-face.png') # We", "[4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) # Try different", "sobel_x_kernel) sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel) outline_image = cv2.filter2D(image, -1, outline_kernel) # Show", "show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection 2\",", "# Convert BGR image to RGB img_RGB = color_img[:, :, ::-1] ax =", "5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\",", "show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian blur image\", 8)", "1]]) kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3", "show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image, \"unsharp masking\", 6) show_with_matplotlib(blur_image, \"blur image\", 7) show_with_matplotlib(gaussian_blur_image, \"gaussian", "Try different kernels for edge detection: sobel_x_kernel = np.array([[1, 0, -1], [2, 0,", "-1, -1]]) # Apply all the kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1", "[0, 1, 0], [0, 0, 0]]) # Try different kernels for edge detection:", "title, pos): \"\"\"Shows an image using matplotlib capabilities\"\"\" # Convert BGR image to", "1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1,", "np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0, 1,", "0, -1]]) sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])", "6)) plt.suptitle(\"Comparing different kernels using cv2.filter2D()\", fontsize=14, fontweight='bold') # Load the original image:", "= np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) outline_kernel = np.array([[-1,", "\"edge detection 2\", 3) show_with_matplotlib(edge_image_3, \"edge detection 3\", 4) show_with_matplotlib(sharpen_image, \"sharpen\", 5) show_with_matplotlib(unsharp_masking_image,", "1], [4, 16, 24, 16, 4], [6, 24, -476, 24, 6], [4, 16,", "[1, 1, 1], [1, 1, 1]]) gaussian_blur = 1 / 16 * np.array([[1,", "# Create the dimensions of the figure and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing", "= np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) sobel_y_kernel = np.array([[1,", "the kernels: original_image = cv2.filter2D(image, -1, kernel_identity) edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1) edge_image_2", "cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen)", "::-1] ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions", "0], [1, -4, 1], [0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1,", "cv2.filter2D(image, -1, kernel_unsharp_masking) blur_image = cv2.filter2D(image, -1, kernel_blur) gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur)", "= cv2.filter2D(image, -1, 
kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image = cv2.filter2D(image, -1,", "kernel_edge_detection_1) edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image =", "1]]) # Try a kernel for embossing: kernel_emboss = np.array([[-2, -1, 0], [-1,", "[0, 1, 0]]) kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1,", "# Try different kernels for edge detection: sobel_x_kernel = np.array([[1, 0, -1], [2,", "for smoothing: kernel_blur = 1 / 9 * np.array([[1, 1, 1], [1, 1,", "16, 24, 16, 4], [1, 4, 6, 4, 1]]) # Try different kernels", "def show_with_matplotlib(color_img, title, pos): \"\"\"Shows an image using matplotlib capabilities\"\"\" # Convert BGR", "np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) gaussian_blur = 1 /", "= np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]]) kernel_edge_detection_2 = np.array([[0,", "4, pos) plt.imshow(img_RGB) plt.title(title) plt.axis('off') # Create the dimensions of the figure and", "edge detection: kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]])", "BGR image to RGB img_RGB = color_img[:, :, ::-1] ax = plt.subplot(3, 4,", "-1], [-1, -1, -1]]) # Apply all the kernels: original_image = cv2.filter2D(image, -1,", "kernel (does not modify the image) kernel_identity = np.array([[0, 0, 0], [0, 1,", "outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) # Apply", "-1], [0, -1, 0]]) kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6,", "to RGB img_RGB = color_img[:, :, ::-1] ax = plt.subplot(3, 4, pos) plt.imshow(img_RGB)", "matplotlib.pyplot as plt def show_with_matplotlib(color_img, title, pos): \"\"\"Shows an image using matplotlib capabilities\"\"\"", "-1, kernel_edge_detection_2) edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3) sharpen_image = cv2.filter2D(image, -1, kernel_sharpen) unsharp_masking_image", "dimensions of the figure and set title: plt.figure(figsize=(12, 6)) plt.suptitle(\"Comparing different kernels using", "import cv2 import numpy as np import matplotlib.pyplot as plt def show_with_matplotlib(color_img, title,", "# Load the original image: image = cv2.imread('cat-face.png') # We try different kernels", "cv2.filter2D(image, -1, outline_kernel) # Show all the images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1,", "-1]]) outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) #", "24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) #", "smoothing: kernel_blur = 1 / 9 * np.array([[1, 1, 1], [1, 1, 1],", "plt.axis('off') # Create the dimensions of the figure and set title: plt.figure(figsize=(12, 6))", "256 * np.array([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6,", "images: show_with_matplotlib(original_image, \"identity kernel\", 1) show_with_matplotlib(edge_image_1, \"edge detection 1\", 2) show_with_matplotlib(edge_image_2, \"edge detection", "[-1, 8, -1], [-1, -1, -1]]) # Apply all the kernels: original_image =", "edge detection: sobel_x_kernel = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])", "10) show_with_matplotlib(sobel_y_image, \"sobel y image\", 11) show_with_matplotlib(outline_image, \"outline image\", 12) # Show the", "= np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]) # Try different", "np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]) # Try different kernels", "[-1, -1, -1]]) # Try different kernels for sharpening: kernel_sharpen = np.array([[0, -1,", "Try different kernels for 
# Comparing different kernels with cv2.filter2D()
import cv2
import numpy as np
import matplotlib.pyplot as plt


def show_with_matplotlib(color_img, title, pos):
    """Shows an image using matplotlib capabilities"""
    # Convert BGR image to RGB
    img_RGB = color_img[:, :, ::-1]
    ax = plt.subplot(3, 4, pos)
    plt.imshow(img_RGB)
    plt.title(title)


# Create the dimensions of the figure and set title:
plt.figure(figsize=(12, 6))
plt.suptitle("Comparing different kernels")

# Load the image:
image = cv2.imread('cat-face.png')

# We try different kernels
# Identify kernel (does not modify the image)
kernel_identity = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])

# Try different kernels for edge detection:
kernel_edge_detection_1 = np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]])
kernel_edge_detection_2 = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
kernel_edge_detection_3 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])

# Try different kernels for sharpening:
kernel_sharpen = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6, 4, 1],
                                              [4, 16, 24, 16, 4],
                                              [6, 24, -476, 24, 6],
                                              [4, 16, 24, 16, 4],
                                              [1, 4, 6, 4, 1]])

# Try different kernels for smoothing:
kernel_blur = 1 / 9 * np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
gaussian_blur = 1 / 16 * np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])

# Try a kernel for embossing:
kernel_emboss = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])

# Try different kernels for edge detection:
sobel_x_kernel = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
sobel_y_kernel = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
outline_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])

# Apply all the kernels:
original_image = cv2.filter2D(image, -1, kernel_identity)
edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1)
edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2)
edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3)
sharpen_image = cv2.filter2D(image, -1, kernel_sharpen)
unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking)
blur_image = cv2.filter2D(image, -1, kernel_blur)
gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur)
emboss_image = cv2.filter2D(image, -1, kernel_emboss)
sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel)
sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel)
outline_image = cv2.filter2D(image, -1, outline_kernel)

# Show all the images:
show_with_matplotlib(original_image, "identity kernel", 1)
show_with_matplotlib(edge_image_1, "edge detection 1", 2)
show_with_matplotlib(edge_image_2, "edge detection 2", 3)
show_with_matplotlib(edge_image_3, "edge detection 3", 4)
show_with_matplotlib(sharpen_image, "sharpen", 5)
show_with_matplotlib(unsharp_masking_image, "unsharp masking", 6)
show_with_matplotlib(blur_image, "blur image", 7)
show_with_matplotlib(gaussian_blur_image, "gaussian blur image", 8)
show_with_matplotlib(emboss_image, "emboss image", 9)
show_with_matplotlib(sobel_x_image, "sobel x image", 10)
show_with_matplotlib(sobel_y_image, "sobel y image", 11)
show_with_matplotlib(outline_image, "outline image", 12)

# Show the figure:
plt.show()
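As a side note (not part of the script above), the hand-built smoothing kernels have close built-in OpenCV equivalents; a minimal sketch, assuming the same `image` variable is in scope:

# Rough equivalents of the smoothing kernels above, using OpenCV's built-in
# filters (assumes `image` from the script above is available).
builtin_blur = cv2.blur(image, (3, 3))                 # normalized 3x3 box filter, like kernel_blur
builtin_gaussian = cv2.GaussianBlur(image, (3, 3), 0)  # comparable to the gaussian_blur kernel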
[ "what transaction scope to use to start this MR job. If True, there", "from it. If False, MR will create an independent transaction to start the", "start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job. Args: job_config: an instance of map_job.MapJobConfig.", "db from mapreduce import util # pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None,", "If True, there has to be an already opened cross-group transaction scope. MR", "map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this MR job.", "when in_xg_transaction is True but no transaction scope is detected. \"\"\" if in_xg_transaction", "this MR job. If True, there has to be an already opened cross-group", "group from it. If False, MR will create an independent transaction to start", "circular dependency. # pylint: disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name, mapper_spec=job_config._get_mapper_spec(),", "to start the job regardless of any existing transaction scopes. Returns: the id", "raise ValueError(\"Expects an opened xg transaction to start mapreduce.\") # Break circular dependency.", "detected. \"\"\" if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction", "for controlling Map job execution.\"\"\" from google.appengine.ext import db from mapreduce import util", "job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to", "is detected. \"\"\" if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an opened xg", "in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction to start mapreduce.\")", "def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job. Args: job_config: an instance of", "to use to start this MR job. If True, there has to be", "ValueError(\"Expects an opened xg transaction to start mapreduce.\") # Break circular dependency. #", "will create an independent transaction to start the job regardless of any existing", "create an independent transaction to start the job regardless of any existing transaction", "to start this MR job. If True, there has to be an already", "# pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job. Args: job_config:", "# pylint: disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name, mapper_spec=job_config._get_mapper_spec(), mapreduce_params=job_config._get_mr_params(), queue_name=job_config.queue_name,", "import db from mapreduce import util # pylint: disable=g-bad-name # pylint: disable=protected-access def", "the job regardless of any existing transaction scopes. Returns: the id of this", "it. If False, MR will create an independent transaction to start the job", "will use one entity group from it. If False, MR will create an", "from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name, mapper_spec=job_config._get_mapper_spec(), mapreduce_params=job_config._get_mr_params(), queue_name=job_config.queue_name, hooks_class_name=util._obj_to_path(job_config._hooks_cls), _app=job_config._app, in_xg_transaction=in_xg_transaction)", "pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job. 
Args: job_config: an", "job. If True, there has to be an already opened cross-group transaction scope.", "in_xg_transaction=False): \"\"\"Start a new map job. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction:", "pylint: disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name, mapper_spec=job_config._get_mapper_spec(), mapreduce_params=job_config._get_mr_params(), queue_name=job_config.queue_name, hooks_class_name=util._obj_to_path(job_config._hooks_cls),", "use to start this MR job. If True, there has to be an", "already opened cross-group transaction scope. MR will use one entity group from it.", "google.appengine.ext import db from mapreduce import util # pylint: disable=g-bad-name # pylint: disable=protected-access", "If False, MR will create an independent transaction to start the job regardless", "import util # pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a", "start mapreduce.\") # Break circular dependency. # pylint: disable=g-import-not-at-top from mapreduce import handlers", "# pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map", "transaction scope to use to start this MR job. If True, there has", "instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this", "this map job. Raises: ValueError: when in_xg_transaction is True but no transaction scope", "Returns: the id of this map job. Raises: ValueError: when in_xg_transaction is True", "map job. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope", "in_xg_transaction is True but no transaction scope is detected. \"\"\" if in_xg_transaction and", "disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job. Args:", "db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction to start mapreduce.\") # Break circular", "disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job. Args: job_config: an instance", "of this map job. Raises: ValueError: when in_xg_transaction is True but no transaction", "Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use", "Break circular dependency. # pylint: disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name,", "<filename>python/src/mapreduce/api/map_job/map_job_control.py #!/usr/bin/env python \"\"\"User API for controlling Map job execution.\"\"\" from google.appengine.ext import", "an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start", "\"\"\"User API for controlling Map job execution.\"\"\" from google.appengine.ext import db from mapreduce", "MR job. If True, there has to be an already opened cross-group transaction", "controls what transaction scope to use to start this MR job. If True,", "if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction to start", "is True but no transaction scope is detected. \"\"\" if in_xg_transaction and not", "scope to use to start this MR job. If True, there has to", "an already opened cross-group transaction scope. 
MR will use one entity group from", "id of this map job. Raises: ValueError: when in_xg_transaction is True but no", "transaction scope. MR will use one entity group from it. If False, MR", "new map job. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction", "there has to be an already opened cross-group transaction scope. MR will use", "opened xg transaction to start mapreduce.\") # Break circular dependency. # pylint: disable=g-import-not-at-top", "dependency. # pylint: disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name, mapper_spec=job_config._get_mapper_spec(), mapreduce_params=job_config._get_mr_params(),", "start the job regardless of any existing transaction scopes. Returns: the id of", "of any existing transaction scopes. Returns: the id of this map job. Raises:", "MR will use one entity group from it. If False, MR will create", "opened cross-group transaction scope. MR will use one entity group from it. If", "#!/usr/bin/env python \"\"\"User API for controlling Map job execution.\"\"\" from google.appengine.ext import db", "an independent transaction to start the job regardless of any existing transaction scopes.", "transaction to start mapreduce.\") # Break circular dependency. # pylint: disable=g-import-not-at-top from mapreduce", "an opened xg transaction to start mapreduce.\") # Break circular dependency. # pylint:", "job. Raises: ValueError: when in_xg_transaction is True but no transaction scope is detected.", "be an already opened cross-group transaction scope. MR will use one entity group", "API for controlling Map job execution.\"\"\" from google.appengine.ext import db from mapreduce import", "not db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction to start mapreduce.\") # Break", "disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map( name=job_config.job_name, mapper_spec=job_config._get_mapper_spec(), mapreduce_params=job_config._get_mr_params(), queue_name=job_config.queue_name, hooks_class_name=util._obj_to_path(job_config._hooks_cls), _app=job_config._app,", "and not db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction to start mapreduce.\") #", "xg transaction to start mapreduce.\") # Break circular dependency. # pylint: disable=g-import-not-at-top from", "pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new map job.", "has to be an already opened cross-group transaction scope. MR will use one", "cross-group transaction scope. MR will use one entity group from it. If False,", "entity group from it. If False, MR will create an independent transaction to", "of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to use to start this MR", "# Break circular dependency. # pylint: disable=g-import-not-at-top from mapreduce import handlers return handlers.StartJobHandler._start_map(", "job execution.\"\"\" from google.appengine.ext import db from mapreduce import util # pylint: disable=g-bad-name", "controlling Map job execution.\"\"\" from google.appengine.ext import db from mapreduce import util #", "mapreduce import util # pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start", "True but no transaction scope is detected. 
\"\"\" if in_xg_transaction and not db.is_in_transaction():", "MR will create an independent transaction to start the job regardless of any", "scope. MR will use one entity group from it. If False, MR will", "use one entity group from it. If False, MR will create an independent", "in_xg_transaction: controls what transaction scope to use to start this MR job. If", "from google.appengine.ext import db from mapreduce import util # pylint: disable=g-bad-name # pylint:", "mapreduce.\") # Break circular dependency. # pylint: disable=g-import-not-at-top from mapreduce import handlers return", "but no transaction scope is detected. \"\"\" if in_xg_transaction and not db.is_in_transaction(): raise", "util # pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False): \"\"\"Start a new", "a new map job. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what", "\"\"\" if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an opened xg transaction to", "no transaction scope is detected. \"\"\" if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects", "job. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls what transaction scope to", "\"\"\"Start a new map job. Args: job_config: an instance of map_job.MapJobConfig. in_xg_transaction: controls", "transaction scopes. Returns: the id of this map job. Raises: ValueError: when in_xg_transaction", "False, MR will create an independent transaction to start the job regardless of", "to start mapreduce.\") # Break circular dependency. # pylint: disable=g-import-not-at-top from mapreduce import", "scopes. Returns: the id of this map job. Raises: ValueError: when in_xg_transaction is", "independent transaction to start the job regardless of any existing transaction scopes. Returns:", "transaction scope is detected. \"\"\" if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an", "Map job execution.\"\"\" from google.appengine.ext import db from mapreduce import util # pylint:", "Raises: ValueError: when in_xg_transaction is True but no transaction scope is detected. \"\"\"", "ValueError: when in_xg_transaction is True but no transaction scope is detected. \"\"\" if", "the id of this map job. Raises: ValueError: when in_xg_transaction is True but", "to be an already opened cross-group transaction scope. MR will use one entity", "from mapreduce import util # pylint: disable=g-bad-name # pylint: disable=protected-access def start(job_config=None, in_xg_transaction=False):", "job regardless of any existing transaction scopes. Returns: the id of this map", "transaction to start the job regardless of any existing transaction scopes. Returns: the", "python \"\"\"User API for controlling Map job execution.\"\"\" from google.appengine.ext import db from", "map job. Raises: ValueError: when in_xg_transaction is True but no transaction scope is", "existing transaction scopes. Returns: the id of this map job. Raises: ValueError: when", "regardless of any existing transaction scopes. Returns: the id of this map job.", "one entity group from it. If False, MR will create an independent transaction", "start this MR job. If True, there has to be an already opened", "True, there has to be an already opened cross-group transaction scope. MR will", "scope is detected. 
\"\"\" if in_xg_transaction and not db.is_in_transaction(): raise ValueError(\"Expects an opened", "execution.\"\"\" from google.appengine.ext import db from mapreduce import util # pylint: disable=g-bad-name #", "any existing transaction scopes. Returns: the id of this map job. Raises: ValueError:" ]
[ "<reponame>andrewp-as-is/django-admin-commands.py from django.apps import AppConfig class Config(AppConfig): name = 'django_command_admin' verbose_name = 'command-admin'" ]
[ "import Grid from mc2d.core.inventory import Inventory from mc2d.core.player import Player from mc2d.core.world import", "Grid from mc2d.core.inventory import Inventory from mc2d.core.player import Player from mc2d.core.world import World", "import Player from mc2d.core.world import World __all__ = ( 'Grid', 'Inventory', 'MapGenerator', 'Player',", "Player from mc2d.core.world import World __all__ = ( 'Grid', 'Inventory', 'MapGenerator', 'Player', 'World'", "import Inventory from mc2d.core.player import Player from mc2d.core.world import World __all__ = (", "from mc2d.core.inventory import Inventory from mc2d.core.player import Player from mc2d.core.world import World __all__", "mc2d.core.generators import MapGenerator from mc2d.core.grid import Grid from mc2d.core.inventory import Inventory from mc2d.core.player", "mc2d.core.grid import Grid from mc2d.core.inventory import Inventory from mc2d.core.player import Player from mc2d.core.world", "from mc2d.core.player import Player from mc2d.core.world import World __all__ = ( 'Grid', 'Inventory',", "from mc2d.core.grid import Grid from mc2d.core.inventory import Inventory from mc2d.core.player import Player from", "mc2d.core.player import Player from mc2d.core.world import World __all__ = ( 'Grid', 'Inventory', 'MapGenerator',", "from mc2d.core.world import World __all__ = ( 'Grid', 'Inventory', 'MapGenerator', 'Player', 'World' )", "<reponame>Den4200/mc2d from mc2d.core.generators import MapGenerator from mc2d.core.grid import Grid from mc2d.core.inventory import Inventory", "from mc2d.core.generators import MapGenerator from mc2d.core.grid import Grid from mc2d.core.inventory import Inventory from", "Inventory from mc2d.core.player import Player from mc2d.core.world import World __all__ = ( 'Grid',", "mc2d.core.inventory import Inventory from mc2d.core.player import Player from mc2d.core.world import World __all__ =", "MapGenerator from mc2d.core.grid import Grid from mc2d.core.inventory import Inventory from mc2d.core.player import Player", "import MapGenerator from mc2d.core.grid import Grid from mc2d.core.inventory import Inventory from mc2d.core.player import" ]
[ "and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not", "to see the posts up for election`') return post = matching_posts[0] author =", "post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing for {post}')", "post you are running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST>", "a private message to me.') return True @stand.error async def stand_error(self, context, error):", "author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises your name to be {name}')", "are running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return", "you\\'re not registered yet, ' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has", "from standing for {post}') await context.send(f'You have stood down from running for {post}')", "now standing for the position of {post}. If you no longer wish to", "return email = input[-1] post = ' '.join(input[:-1]) if not post: await context.send('Must", "return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change your", "' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not in email: await context.send('Must supply", "' f'standing for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author)", "def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error async def", "'Error' if author in helpers.registered_members: if [i for i in helpers.standing[post] if i", "can\\'t make it to the actual election call, you must get in touch", "'.join(name) if not name: await context.send(f'Must supply the name you are wanting to", "has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in helpers.standing:", "for the position of {post}. 
If you no longer wish to stand, you", "context.author.id members = helpers.get_members() output_str = 'Error' if author in helpers.registered_members: if [i", "# Error handling # async def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await", "stand for {post} because they are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand", "context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like you weren\\'t standing for", "import commands from cogs import helpers class Running(commands.Cog): # Initialisation # def __init__(self,", "try again in a private message to me.') return True @stand.error async def", "f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your name') return async with helpers.current_live_post_lock.reader_lock:", "looks like you, {members[helpers.registered_members[author]]} are already ' f'standing for the position of: {post}')", "<NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context, *name): name = ' '.join(name) if", "email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not in email: await", "<STUDENT NUMBER>` before you can update your name') return async with helpers.current_live_post_lock.reader_lock: if", "like you\\'re not registered yet, you must first register using ' f'`{helpers.PREFIX}register <STUDENT", "you cannot stand for this post') return author = context.author.id members = helpers.get_members()", "not matching_posts: await context.send('Looks like that post isn\\'t available for this election, '", "context, *post): post = ' '.join(post) if not post: await context.send(f'Must supply the", "if [i for i in helpers.standing[post] if i == helpers.registered_members[author]]: output_str = (f'It", "can\\'t change your name whilst a vote is ongoing, ' 'please wait until", "are now standing for the position of {post}. 
If you no longer wish", "usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name = name.strip('\\'') author = context.author.id if", "= (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing", "await context.send(f'I\\'m afraid voting for {post} has already begun, you cannot stand for", "DM only, please try again in a private message to me.') return True", "the post you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts =", "like you weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]]", "return if '@' not in email: await context.send('Must supply the post you are", "if not name: await context.send(f'Must supply the name you are wanting to change", "<EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context, *input): if not", "async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change your name", "wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name = name.strip('\\'')", "your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}') #", "@commands.command(name='standdown', help=f'Stand down from running for a post - DM Only. Usage: {helpers.PREFIX}standdown", "post - DM Only. Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only()", "= ' '.join(input[:-1]) if not post: await context.send('Must supply the post you are", "== helpers.registered_members[author]]: output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are already ' f'standing", "for running, and managing your run, in elections # import traceback from pyrankvote", "= (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises your name", "context.send('This command is DM only, please try again in a private message to", "bot # Commands # @commands.command(name='stand', help=f'Stand for a post - DM Only. Usage:", "f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n' 'If you can\\'t make it to", "failed to stand for {post} because they are not registered') helpers.save_standing() await context.send(output_str)", "of {post}. 
If you no longer wish to stand, you ' f'can send", "that post isn\\'t available for this election, ' f'use `{helpers.PREFIX}posts` to see the", "posts up for election`') return post = matching_posts[0] author = context.author.id if helpers.registered_members[author]", "elections # import traceback from pyrankvote import Candidate from discord.ext import commands from", "like you, {members[helpers.registered_members[author]]} are already ' f'standing for the position of: {post}') else:", "you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare a 2", "post = ' '.join(input[:-1]) if not post: await context.send('Must supply the post you", "you, {members[helpers.registered_members[author]]} are already ' f'standing for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]]", "not registered yet, you must first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before", "output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing for the position of", "- DM Only. Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context, *post):", "{name}') # Error handling # async def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly):", "post in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names()", "if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM only, please try again in", "{members[helpers.registered_members[author]]}, ' f'you are now standing for the position of {post}. If you", "context.send('Looks like that post isn\\'t available for this election, ' f'use `{helpers.PREFIX}posts` to", "' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1] post = ' '.join(input[:-1]) if", "see the posts up for election') return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock:", "to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}') # Error handling", "= context.author.id members = helpers.get_members() output_str = 'Error' if author in helpers.registered_members: if", "help=f'Stand down from running for a post - DM Only. Usage: {helpers.PREFIX}standdown <POST>',", "' f'use `{helpers.PREFIX}posts` to see the posts up for election') return post =", "helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises your", "election`') return post = matching_posts[0] author = context.author.id if helpers.registered_members[author] not in helpers.standing[post]:", "def standdown(self, context, *post): post = ' '.join(post) if not post: await context.send(f'Must", "stand(self, context, *input): if not input: await context.send('Must supply the post you are", "the bot - DM Only. ' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def", "await context.send('I\\'m afraid you can\\'t change your name whilst a vote is ongoing,", "isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM only, please try again in a", "from running for a post - DM Only. 
Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only()", "using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for {post} because they", "running for {post}') @commands.command(name='changename', help='Change your name as used by the bot -", "the posts up for election') return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if", "email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing for the", "f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1] post = ' '.join(input[:-1]) if not", "if author in helpers.registered_members: if [i for i in helpers.standing[post] if i ==", "= matching_posts[0] author = context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like", "NUMBER>` before you can update your name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post:", "usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context, *input): if not input: await", "{helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n' 'If you can\\'t make it", "arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks", "helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id})", "with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change your name whilst", "to the actual election call, you must get in touch with the '", "await context.send(f'You have stood down from running for {post}') @commands.command(name='changename', help='Change your name", "address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1] post = ' '.join(input[:-1])", "standing for {post}') await context.send(f'You have stood down from running for {post}') @commands.command(name='changename',", "not registered yet, ' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed", "f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not in email: await context.send('Must supply the", "helpers class Running(commands.Cog): # Initialisation # def __init__(self, bot): self.bot = bot #", "= helpers.get_members() output_str = 'Error' if author in helpers.registered_members: if [i for i", "Running(commands.Cog): # Initialisation # def __init__(self, bot): self.bot = bot # Commands #", "async def changename(self, context, *name): name = ' '.join(name) if not name: await", "isn\\'t available for this election, ' f'use `{helpers.PREFIX}posts` to see the posts up", "if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change your name whilst a vote", "to {name}') # Error handling # async def dm_error(self, context, error): if isinstance(error,", "wait until the vote has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name", "this election, ' f'use `{helpers.PREFIX}posts` to see the posts up for election`') return", "from cogs import helpers class Running(commands.Cog): # Initialisation # def __init__(self, bot): 
self.bot", "await context.send(f'The bot now recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed", "post) else: output_str = ('Looks like you\\'re not registered yet, ' f'please register", "commands from cogs import helpers class Running(commands.Cog): # Initialisation # def __init__(self, bot):", "a 2 minute speech to be given in the election call.\\n' f'If you", "looks like you\\'re not registered yet, you must first register using ' f'`{helpers.PREFIX}register", "are standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if not", "for {post}') await context.send(f'You have stood down from running for {post}') @commands.command(name='changename', help='Change", "bot - DM Only. ' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self,", "name.startswith('\\''): name = name.strip('\\'') author = context.author.id if author not in helpers.registered_members: await", "vote is ongoing, ' 'please wait until the vote has finished') return author_id", "you\\'ll need to prepare a 2 minute speech to be given in the", "author not in helpers.registered_members: await context.send('It looks like you\\'re not registered yet, you", "voting for {post} has already begun, you cannot stand for this post') return", "post') return author = context.author.id members = helpers.get_members() output_str = 'Error' if author", "post = matching_posts[0] author = context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks", "first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your name')", "address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if not matching_posts: await", "to prepare a 2 minute speech to be given in the election call.\\n'", "# def __init__(self, bot): self.bot = bot # Commands # @commands.command(name='stand', help=f'Stand for", "{name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}') # Error handling # async", "{post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you", "valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not in email:", "context.send('I\\'m afraid you can\\'t change your name whilst a vote is ongoing, '", "usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like", "helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like you\\'re not registered yet, ' f'please", "f'you are now standing for the position of {post}. If you no longer", "@stand.error async def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error", "questions please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n'", "whilst a vote is ongoing, ' 'please wait until the vote has finished')", "for {post}') @commands.command(name='changename', help='Change your name as used by the bot - DM", "DM Only. 
Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context, *post): post", "has already begun, you cannot stand for this post') return author = context.author.id", "{post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like you\\'re not registered yet, '", "async def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM", "valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1] post =", "`{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for {post} because they are", "' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for", "context.send('Must supply the post you are running for and a valid email address,", "post: await context.send('Must supply the post you are running for and a valid", "<EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context, *input): if not input: await context.send('Must", "you\\'re not registered yet, you must first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>`", "helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises", "*post): post = ' '.join(post) if not post: await context.send(f'Must supply the post", "for election') return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post", "down from running for {post}') @commands.command(name='changename', help='Change your name as used by the", "if i == helpers.registered_members[author]]: output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are already", "out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str", "helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like", "from discord.ext import commands from cogs import helpers class Running(commands.Cog): # Initialisation #", "' '.join(post) if not post: await context.send(f'Must supply the post you are standing", "to see the posts up for election') return post = matching_posts[0] async with", "in touch with the ' 'secretary ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]})", "standing for the position of {post}. 
If you no longer wish to stand,", "traceback from pyrankvote import Candidate from discord.ext import commands from cogs import helpers", "helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change your name whilst a vote is", "@commands.dm_only() async def stand(self, context, *input): if not input: await context.send('Must supply the", "if '@' not in email: await context.send('Must supply the post you are running", "context, *name): name = ' '.join(name) if not name: await context.send(f'Must supply the", "helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises your name to be", "in helpers.registered_members: if [i for i in helpers.standing[post] if i == helpers.registered_members[author]]: output_str", "error) @changename.error async def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error)", "A Cog for running, and managing your run, in elections # import traceback", "be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}') # Error handling #", "for {post} because they are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down", "registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for a post -", "context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error async def changename_error(self, context,", "if not post: await context.send(f'Must supply the post you are standing down from,", "f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for {post}", "' '.join(input[:-1]) if not post: await context.send('Must supply the post you are running", "supply the name you are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`') return", "author = context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like you weren\\'t", "a post - DM Only. Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self,", "('Looks like you\\'re not registered yet, ' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`')", "and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1]", "touch with the ' 'secretary ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is", "registered yet, ' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to", "context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for a post - DM Only. 
Usage:", "<EMAIL>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like that post", "to stand for {post} because they are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown',", "in helpers.registered_members: await context.send('It looks like you\\'re not registered yet, you must first", "del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing for {post}') await context.send(f'You", "<STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for {post} because they are not", "if not post: await context.send('Must supply the post you are running for and", "(Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing for", "has failed to stand for {post} because they are not registered') helpers.save_standing() await", "or someone else on the committee.\\n' 'If you can\\'t make it to the", "has changed their name to {name}') # Error handling # async def dm_error(self,", "me.') return True @stand.error async def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await", "the election call.\\n' f'If you have any questions please contact the secretary {helpers.SECRETARY_NAME}'", "output_str = ('Looks like you\\'re not registered yet, ' f'please register using `{helpers.PREFIX}register", "Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context, *post): post = '", "call, you must get in touch with the ' 'secretary ASAP to sort", "stood down from standing for {post}') await context.send(f'You have stood down from running", "stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing for {post}') await", "*name): name = ' '.join(name) if not name: await context.send(f'Must supply the name", "Candidate from discord.ext import commands from cogs import helpers class Running(commands.Cog): # Initialisation", "post - DM Only. Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context,", "yet, you must first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can", "Only. 
' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context, *name): name", "if not input: await context.send('Must supply the post you are running for and", "helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing for", "helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post} has already begun, you cannot stand", "await context.send(f'Must supply the post you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`')", "email = input[-1] post = ' '.join(input[:-1]) if not post: await context.send('Must supply", "2 minute speech to be given in the election call.\\n' f'If you have", "helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post}", "' 'secretary ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for", "for election`') return post = matching_posts[0] author = context.author.id if helpers.registered_members[author] not in", "i in helpers.standing[post] if i == helpers.registered_members[author]]: output_str = (f'It looks like you,", "`{helpers.PREFIX}posts` to see the posts up for election`') return post = matching_posts[0] author", "name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change", "standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like you\\'re not registered", "you must first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update", "using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your name') return async", "please try again in a private message to me.') return True @stand.error async", "context, *input): if not input: await context.send('Must supply the post you are running", "DM Only. 
Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def", "for {post} has already begun, you cannot stand for this post') return author", "f'standing for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str", "address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not in email: await context.send('Must", "available for this election, ' f'use `{helpers.PREFIX}posts` to see the posts up for", "helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now", "update your name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you", "standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error async def changename_error(self,", "await context.send('Looks like that post isn\\'t available for this election, ' f'use `{helpers.PREFIX}posts`", "helpers.preferred_names[author_id] = name for post in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] =", "supply the post you are running for and a valid email address, '", "this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down", "*input): if not input: await context.send('Must supply the post you are running for", "you are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name", "to me.') return True @stand.error async def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__)", "@commands.dm_only() async def standdown(self, context, *post): post = ' '.join(post) if not post:", "author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot", "minute speech to be given in the election call.\\n' f'If you have any", "have any questions please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on", "to stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare", "{post}') @commands.command(name='changename', help='Change your name as used by the bot - DM Only.", "await self.dm_error(context, error) @standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await", "<POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context, *input): if", "helpers.standing[post]: await context.send('Looks like you weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post,", "in the election call.\\n' f'If you have any questions please contact the secretary", "Only. 
Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context, *post): post =", "it to the actual election call, you must get in touch with the", "Initialisation # def __init__(self, bot): self.bot = bot # Commands # @commands.command(name='stand', help=f'Stand", "error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error async def changename_error(self, context, error):", "__init__(self, bot): self.bot = bot # Commands # @commands.command(name='stand', help=f'Stand for a post", "f'If you have any questions please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone", "only, please try again in a private message to me.') return True @stand.error", "this post') return author = context.author.id members = helpers.get_members() output_str = 'Error' if", "you have any questions please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else", "return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing", "make it to the actual election call, you must get in touch with", "helpers.get_members() output_str = 'Error' if author in helpers.registered_members: if [i for i in", "helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing for {post}') await context.send(f'You have stood", "self.dm_error(context, error) @standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context,", "standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has", "if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post} has already begun,", "# Commands # @commands.command(name='stand', help=f'Stand for a post - DM Only. Usage: {helpers.PREFIX}stand", "bot): self.bot = bot # Commands # @commands.command(name='stand', help=f'Stand for a post -", "context.send(f'You have stood down from running for {post}') @commands.command(name='changename', help='Change your name as", "name whilst a vote is ongoing, ' 'please wait until the vote has", "' f'you are now standing for the position of {post}. 
If you no", "committee.\\n' 'If you can\\'t make it to the actual election call, you must", "for this election, ' f'use `{helpers.PREFIX}posts` to see the posts up for election')", "in elections # import traceback from pyrankvote import Candidate from discord.ext import commands", "(Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now recognises your name to", "supply the post you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts", "def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM only,", "async def stand(self, context, *input): if not input: await context.send('Must supply the post", "vote has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in", "context.send(f'Must supply the post you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return", "already ' f'standing for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email,", "on the committee.\\n' 'If you can\\'t make it to the actual election call,", "(f'It looks like you, {members[helpers.registered_members[author]]} are already ' f'standing for the position of:", "not post: await context.send(f'Must supply the post you are standing down from, usage:", "f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context, *name): name = '", "post isn\\'t available for this election, ' f'use `{helpers.PREFIX}posts` to see the posts", "context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM only, please try", "the posts up for election`') return post = matching_posts[0] author = context.author.id if", "not in helpers.registered_members: await context.send('It looks like you\\'re not registered yet, you must", "name = ' '.join(name) if not name: await context.send(f'Must supply the name you", "= matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m", "'If you can\\'t make it to the actual election call, you must get", "author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in helpers.standing: if author_id in", "input: await context.send('Must supply the post you are running for and a valid", "is DM only, please try again in a private message to me.') return", "are already ' f'standing for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]),", "await self.dm_error(context, error) @changename.error async def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await", "error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error async def standdown_error(self, context, error):", "helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}') # Error handling # async def", "f'use `{helpers.PREFIX}posts` to see the posts up for election') return post = matching_posts[0]", "in helpers.standing[post] if i == helpers.registered_members[author]]: output_str = 
(f'It looks like you, {members[helpers.registered_members[author]]}", "NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for {post} because they are not registered')", "helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for a post - DM", "return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in helpers.standing: if author_id", "post: await context.send(f'Must supply the post you are standing down from, usage: `{helpers.PREFIX}standdown", "not post: await context.send('Must supply the post you are running for and a", "helpers.registered_members[author]]: output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are already ' f'standing for", "helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id]", "author = context.author.id members = helpers.get_members() output_str = 'Error' if author in helpers.registered_members:", "if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The", "output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are already ' f'standing for the", "await context.send('This command is DM only, please try again in a private message", "stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error async def standdown_error(self,", "not name: await context.send(f'Must supply the name you are wanting to change to,", "# A Cog for running, and managing your run, in elections # import", "usage='<NAME>') @commands.dm_only() async def changename(self, context, *name): name = ' '.join(name) if not", "email: await context.send('Must supply the post you are running for and a valid", "handling # async def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command", "f'use `{helpers.PREFIX}posts` to see the posts up for election`') return post = matching_posts[0]", "email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if not matching_posts:", "are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for a", "registered yet, you must first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you", "up for election') return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if", "if name.startswith('\\''): name = name.strip('\\'') author = context.author.id if author not in helpers.registered_members:", "error, error.__traceback__) await self.dm_error(context, error) @changename.error async def changename_error(self, context, error): traceback.print_exception(type(error), error,", "begun, you cannot stand for this post') return author = context.author.id members =", "- DM Only. ' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context,", "and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post)", "{post}. 
If you no longer wish to stand, you ' f'can send `{helpers.PREFIX}standdown", "helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await", "down from running for a post - DM Only. Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>')", "context.send('Looks like you weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del", "call.\\n' f'If you have any questions please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or", "post you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post)", "# import traceback from pyrankvote import Candidate from discord.ext import commands from cogs", "@standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error", "if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like you weren\\'t standing for this", "the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations", "changename(self, context, *name): name = ' '.join(name) if not name: await context.send(f'Must supply", "' f'use `{helpers.PREFIX}posts` to see the posts up for election`') return post =", "post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]: await", "[i for i in helpers.standing[post] if i == helpers.registered_members[author]]: output_str = (f'It looks", "async def standdown(self, context, *post): post = ' '.join(post) if not post: await", "= name for post in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name),", "(f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing for the position of {post}. If", "matching_posts[0] author = context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like you", "name: await context.send(f'Must supply the name you are wanting to change to, usage:", "cannot stand for this post') return author = context.author.id members = helpers.get_members() output_str", "async def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) def setup(bot):", "and managing your run, in elections # import traceback from pyrankvote import Candidate", "running for a post - DM Only. 
Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async", "self.bot = bot # Commands # @commands.command(name='stand', help=f'Stand for a post - DM", "name you are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''):", "'please wait until the vote has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] =", "import traceback from pyrankvote import Candidate from discord.ext import commands from cogs import", "' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks", "election, ' f'use `{helpers.PREFIX}posts` to see the posts up for election`') return post", "running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email", "not in helpers.standing[post]: await context.send('Looks like you weren\\'t standing for this post') return", "now recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to", "need to prepare a 2 minute speech to be given in the election", "to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name = name.strip('\\'') author = context.author.id", "error, error.__traceback__) await self.dm_error(context, error) @standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error), error,", "before you can update your name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await", "traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error),", "= (f'It looks like you, {members[helpers.registered_members[author]]} are already ' f'standing for the position", "you are running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`')", "Error handling # async def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This", "the committee.\\n' 'If you can\\'t make it to the actual election call, you", "await context.send('It looks like you\\'re not registered yet, you must first register using", "' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare a 2 minute", "name for post in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1],", "= (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing for the position of {post}.", "error.__traceback__) await self.dm_error(context, error) @changename.error async def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__)", "is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like you\\'re", "for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@'", "= context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like you weren\\'t standing", "= 'Error' if author in helpers.registered_members: if [i for i in helpers.standing[post] if", "= ' '.join(name) if not name: await context.send(f'Must supply the name you are", "managing your 
run, in elections # import traceback from pyrankvote import Candidate from", "return True @stand.error async def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context,", "<POST> <EMAIL>`') return if '@' not in email: await context.send('Must supply the post", "def stand(self, context, *input): if not input: await context.send('Must supply the post you", "async def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error async", "await context.send('Must supply the post you are running for and a valid email", "' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context, *name): name =", "else: output_str = ('Looks like you\\'re not registered yet, ' f'please register using", "your name whilst a vote is ongoing, ' 'please wait until the vote", "stood down from running for {post}') @commands.command(name='changename', help='Change your name as used by", "class Running(commands.Cog): # Initialisation # def __init__(self, bot): self.bot = bot # Commands", "contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n' 'If you", "{post}`\\n\\n' 'Now you\\'ll need to prepare a 2 minute speech to be given", "i == helpers.registered_members[author]]: output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are already '", "<NAME>`') return if name.startswith('\\''): name = name.strip('\\'') author = context.author.id if author not", "help=f'Stand for a post - DM Only. Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST>", "for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str =", "{members[helpers.registered_members[author]]} are already ' f'standing for the position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] =", "yet, ' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand", "register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your name') return", "ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context, *input): if not input:", "= ('Looks like you\\'re not registered yet, ' f'please register using `{helpers.PREFIX}register <STUDENT", "await context.send(f'Must supply the name you are wanting to change to, usage: `{helpers.PREFIX}changename", "election') return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post ==", "return post = matching_posts[0] author = context.author.id if helpers.registered_members[author] not in helpers.standing[post]: await", "a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if", "you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if", "{helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context, *post): post = ' '.join(post)", "def __init__(self, bot): self.bot = bot # Commands # @commands.command(name='stand', help=f'Stand for a", "helpers.log(f'{helpers.registered_members[author]} has stood down 
from standing for {post}') await context.send(f'You have stood down", "name.strip('\\'') author = context.author.id if author not in helpers.registered_members: await context.send('It looks like", "<POST>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like that post", "<EMAIL>`') return email = input[-1] post = ' '.join(input[:-1]) if not post: await", "for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like you\\'re not registered yet,", "you weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing()", "pyrankvote import Candidate from discord.ext import commands from cogs import helpers class Running(commands.Cog):", "up for election`') return post = matching_posts[0] author = context.author.id if helpers.registered_members[author] not", "from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks", "your name as used by the bot - DM Only. ' f'Usage: {helpers.PREFIX}changename", "usage='<POST>') @commands.dm_only() async def standdown(self, context, *post): post = ' '.join(post) if not", "speech to be given in the election call.\\n' f'If you have any questions", "from running for {post}') @commands.command(name='changename', help='Change your name as used by the bot", "@commands.command(name='changename', help='Change your name as used by the bot - DM Only. '", "dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM only, please", "you no longer wish to stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now", "afraid voting for {post} has already begun, you cannot stand for this post')", "alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str =", "running, and managing your run, in elections # import traceback from pyrankvote import", "longer wish to stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need", "input[-1] post = ' '.join(input[:-1]) if not post: await context.send('Must supply the post", "no longer wish to stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll", "are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name =", "with the ' 'secretary ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now", "register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name} has failed to stand for {post} because", "If you no longer wish to stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n'", "import Candidate from discord.ext import commands from cogs import helpers class Running(commands.Cog): #", "by the bot - DM Only. ' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async", "for a post - DM Only. 
Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL", "afraid you can\\'t change your name whilst a vote is ongoing, ' 'please", "context.send('It looks like you\\'re not registered yet, you must first register using '", "election call, you must get in touch with the ' 'secretary ASAP to", "'@' not in email: await context.send('Must supply the post you are running for", "in a private message to me.') return True @stand.error async def stand_error(self, context,", "stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare a", "{helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context, *input):", "<POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like that", "output_str = 'Error' if author in helpers.registered_members: if [i for i in helpers.standing[post]", "cogs import helpers class Running(commands.Cog): # Initialisation # def __init__(self, bot): self.bot =", "'.join(input[:-1]) if not post: await context.send('Must supply the post you are running for", "`{helpers.PREFIX}posts` to see the posts up for election') return post = matching_posts[0] async", "your name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t", "Commands # @commands.command(name='stand', help=f'Stand for a post - DM Only. Usage: {helpers.PREFIX}stand <POST>", "error) @standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error)", "your run, in elections # import traceback from pyrankvote import Candidate from discord.ext", "as used by the bot - DM Only. 
' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>')", "def changename(self, context, *name): name = ' '.join(name) if not name: await context.send(f'Must", "post = ' '.join(post) if not post: await context.send(f'Must supply the post you", "to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post)", "# Initialisation # def __init__(self, bot): self.bot = bot # Commands # @commands.command(name='stand',", "standdown(self, context, *post): post = ' '.join(post) if not post: await context.send(f'Must supply", "down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if not matching_posts: await", "the post you are running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand", "if helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post} has", "return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like that post isn\\'t", "members = helpers.get_members() output_str = 'Error' if author in helpers.registered_members: if [i for", "given in the election call.\\n' f'If you have any questions please contact the", "a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1] post", "get in touch with the ' 'secretary ASAP to sort out alternative arrangements.')", "for this election, ' f'use `{helpers.PREFIX}posts` to see the posts up for election`')", "{helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context, *name): name = ' '.join(name)", "election, ' f'use `{helpers.PREFIX}posts` to see the posts up for election') return post", "name as used by the bot - DM Only. ' f'Usage: {helpers.PREFIX}changename <NAME>',", "def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error async def", "the position of {post}. If you no longer wish to stand, you '", "the ' 'secretary ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing", "def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) def setup(bot): bot.add_cog(Running(bot))", "return if name.startswith('\\''): name = name.strip('\\'') author = context.author.id if author not in", "not input: await context.send('Must supply the post you are running for and a", "discord.ext import commands from cogs import helpers class Running(commands.Cog): # Initialisation # def", "<POST>', usage='<POST>') @commands.dm_only() async def standdown(self, context, *post): post = ' '.join(post) if", "author = context.author.id if author not in helpers.registered_members: await context.send('It looks like you\\'re", "return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]:", "# @commands.command(name='stand', help=f'Stand for a post - DM Only. 
Usage: {helpers.PREFIX}stand <POST> <EMAIL", "sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else:", "`{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name = name.strip('\\'') author = context.author.id if author", "they are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for", "author in helpers.registered_members: if [i for i in helpers.standing[post] if i == helpers.registered_members[author]]:", "changed their name to {name}') # Error handling # async def dm_error(self, context,", "can update your name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid", "error.__traceback__) await self.dm_error(context, error) @standdown.error async def standdown_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__)", "ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]],", "helpers.match_post(post) if not matching_posts: await context.send('Looks like that post isn\\'t available for this", "from pyrankvote import Candidate from discord.ext import commands from cogs import helpers class", "{post}') await context.send(f'You have stood down from running for {post}') @commands.command(name='changename', help='Change your", "private message to me.') return True @stand.error async def stand_error(self, context, error): traceback.print_exception(type(error),", "valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if not", "running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts", "has stood down from standing for {post}') await context.send(f'You have stood down from", "`{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like that", "helpers.standing[post] if i == helpers.registered_members[author]]: output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are", "send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare a 2 minute speech to", "stand for this post') return author = context.author.id members = helpers.get_members() output_str =", "author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are now standing for the position", "finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in helpers.standing: if", "posts up for election') return post = matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post:", "== helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post} has already begun, you cannot", "you can\\'t make it to the actual election call, you must get in", "post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post} has already begun, you", "error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is DM only, please try again", "'Now you\\'ll need to prepare 
a 2 minute speech to be given in", "' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your name') return async with", "traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @changename.error async def changename_error(self, context, error): traceback.print_exception(type(error),", "`{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare a 2 minute speech to be", "like that post isn\\'t available for this election, ' f'use `{helpers.PREFIX}posts` to see", "for a post - DM Only. Usage: {helpers.PREFIX}standdown <POST>', usage='<POST>') @commands.dm_only() async def", "have stood down from running for {post}') @commands.command(name='changename', help='Change your name as used", "context.send(f'I\\'m afraid voting for {post} has already begun, you cannot stand for this", "for post in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author)", "be given in the election call.\\n' f'If you have any questions please contact", "<POST> <EMAIL>`') return email = input[-1] post = ' '.join(input[:-1]) if not post:", "<reponame>Piturnah/Society-voting-bot<filename>cogs/running.py<gh_stars>0 # A Cog for running, and managing your run, in elections #", "@changename.error async def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) def", "@commands.dm_only() async def changename(self, context, *name): name = ' '.join(name) if not name:", "Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self, context,", "f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like", "# async def dm_error(self, context, error): if isinstance(error, commands.errors.PrivateMessageOnly): await context.send('This command is", "helpers.registered_members: await context.send('It looks like you\\'re not registered yet, you must first register", "down from standing for {post}') await context.send(f'You have stood down from running for", "matching_posts = helpers.match_post(post) if not matching_posts: await context.send('Looks like that post isn\\'t available", "someone else on the committee.\\n' 'If you can\\'t make it to the actual", "message to me.') return True @stand.error async def stand_error(self, context, error): traceback.print_exception(type(error), error,", "must get in touch with the ' 'secretary ASAP to sort out alternative", "for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return matching_posts =", "standing down from, usage: `{helpers.PREFIX}standdown <POST>`') return matching_posts = helpers.match_post(post) if not matching_posts:", "secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n' 'If you can\\'t make", "the actual election call, you must get in touch with the ' 'secretary", "position of {post}. 
If you no longer wish to stand, you ' f'can", "await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for a post - DM Only.", "must first register using ' f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your", "any questions please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the", "the name you are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`') return if", "again in a private message to me.') return True @stand.error async def stand_error(self,", "running for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if", "you can\\'t change your name whilst a vote is ongoing, ' 'please wait", "position of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]},", "= input[-1] post = ' '.join(input[:-1]) if not post: await context.send('Must supply the", "helpers.registered_members[author] not in helpers.standing[post]: await context.send('Looks like you weren\\'t standing for this post')", "not in email: await context.send('Must supply the post you are running for and", "context.send(f'Must supply the name you are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`')", "helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from standing for {post}') await context.send(f'You have", "bot now recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name", "this election, ' f'use `{helpers.PREFIX}posts` to see the posts up for election') return", "if author not in helpers.registered_members: await context.send('It looks like you\\'re not registered yet,", "to be given in the election call.\\n' f'If you have any questions please", "you can update your name') return async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m", "{post} has already begun, you cannot stand for this post') return author =", "a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return if '@' not in", "context.send(f'The bot now recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their", "= bot # Commands # @commands.command(name='stand', help=f'Stand for a post - DM Only.", "matching_posts[0] async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid", "recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}')", "name to {name}') # Error handling # async def dm_error(self, context, error): if", "email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email = input[-1] post = '", "prepare a 2 minute speech to be given in the election call.\\n' f'If", "name to be {name}') helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}') # Error", "True @stand.error async def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error)", "election call.\\n' f'If you have any questions please contact the secretary {helpers.SECRETARY_NAME}' 
f'({helpers.SECRETARY_EMAIL}),", "to change to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name = name.strip('\\'') author", "already begun, you cannot stand for this post') return author = context.author.id members", "like you\\'re not registered yet, ' f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`') helpers.log(f'{context.author.name}", "in helpers.standing: if author_id in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing()", "helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: await context.send('I\\'m afraid you can\\'t change your name whilst a", "'secretary ASAP to sort out alternative arrangements.') helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}')", "self.dm_error(context, error) @changename.error async def changename_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context,", "f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to prepare a 2 minute speech", "= helpers.match_post(post) if not matching_posts: await context.send('Looks like that post isn\\'t available for", "ongoing, ' 'please wait until the vote has finished') return author_id = helpers.registered_members[author]", "please contact the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n' 'If", "async def stand_error(self, context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error async", "with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for", "post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood down from", "now standing for {post}') helpers.email_secretary(members[helpers.registered_members[author]], post) else: output_str = ('Looks like you\\'re not", "import helpers class Running(commands.Cog): # Initialisation # def __init__(self, bot): self.bot = bot", "of: {post}') else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, '", "in helpers.standing[post]: await context.send('Looks like you weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]),", "else on the committee.\\n' 'If you can\\'t make it to the actual election", "return author = context.author.id members = helpers.get_members() output_str = 'Error' if author in", "used by the bot - DM Only. 
' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only()", "{post} because they are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from", "for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]} has stood", "is ongoing, ' 'please wait until the vote has finished') return author_id =", "actual election call, you must get in touch with the ' 'secretary ASAP", "the secretary {helpers.SECRETARY_NAME}' f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\\n' 'If you can\\'t", "else: helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author) output_str = (f'Congratulations {members[helpers.registered_members[author]]}, ' f'you are", "= context.author.id if author not in helpers.registered_members: await context.send('It looks like you\\'re not", "because they are not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running", "in email: await context.send('Must supply the post you are running for and a", "context.author.id if author not in helpers.registered_members: await context.send('It looks like you\\'re not registered", "the vote has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for post", "helpers.log(f'{context.author.name} has failed to stand for {post} because they are not registered') helpers.save_standing()", "their name to {name}') # Error handling # async def dm_error(self, context, error):", "- DM Only. Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async", "Only. Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>') @commands.dm_only() async def stand(self,", "for and a valid email address, ' f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`') return email =", "= ' '.join(post) if not post: await context.send(f'Must supply the post you are", "if not matching_posts: await context.send('Looks like that post isn\\'t available for this election,", "for this post') return author = context.author.id members = helpers.get_members() output_str = 'Error'", "DM Only. 
' f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>') @commands.dm_only() async def changename(self, context, *name):", "matching_posts: await context.send('Looks like that post isn\\'t available for this election, ' f'use", "you must get in touch with the ' 'secretary ASAP to sort out", "await context.send('Looks like you weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True)", "context, error): traceback.print_exception(type(error), error, error.__traceback__) await self.dm_error(context, error) @standdown.error async def standdown_error(self, context,", "in helpers.standing[post]: helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author) helpers.save_names() helpers.save_standing() await context.send(f'The bot now", "helpers.save_standing() await context.send(f'The bot now recognises your name to be {name}') helpers.log(f'{context.author.name}({author_id}) has", "<EMAIL>`') return if '@' not in email: await context.send('Must supply the post you", "helpers.registered_members: if [i for i in helpers.standing[post] if i == helpers.registered_members[author]]: output_str =", "change to, usage: `{helpers.PREFIX}changename <NAME>`') return if name.startswith('\\''): name = name.strip('\\'') author =", "' 'please wait until the vote has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id]", "run, in elections # import traceback from pyrankvote import Candidate from discord.ext import", "wish to stand, you ' f'can send `{helpers.PREFIX}standdown {post}`\\n\\n' 'Now you\\'ll need to", "Cog for running, and managing your run, in elections # import traceback from", "command is DM only, please try again in a private message to me.')", "helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting for {post} has already", "until the vote has finished') return author_id = helpers.registered_members[author] helpers.preferred_names[author_id] = name for", "help='Change your name as used by the bot - DM Only. ' f'Usage:", "change your name whilst a vote is ongoing, ' 'please wait until the", "= name.strip('\\'') author = context.author.id if author not in helpers.registered_members: await context.send('It looks", "= helpers.registered_members[author] helpers.preferred_names[author_id] = name for post in helpers.standing: if author_id in helpers.standing[post]:", "name = name.strip('\\'') author = context.author.id if author not in helpers.registered_members: await context.send('It", "a post - DM Only. 
Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>', usage='<POST> <EMAIL ADDRESS>')", "for i in helpers.standing[post] if i == helpers.registered_members[author]]: output_str = (f'It looks like", "'.join(post) if not post: await context.send(f'Must supply the post you are standing down", "see the posts up for election`') return post = matching_posts[0] author = context.author.id", "' '.join(name) if not name: await context.send(f'Must supply the name you are wanting", "a vote is ongoing, ' 'please wait until the vote has finished') return", "ADDRESS>') @commands.dm_only() async def stand(self, context, *input): if not input: await context.send('Must supply", "commands.errors.PrivateMessageOnly): await context.send('This command is DM only, please try again in a private", "not registered') helpers.save_standing() await context.send(output_str) @commands.command(name='standdown', help=f'Stand down from running for a post", "@commands.command(name='stand', help=f'Stand for a post - DM Only. Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>',", "async with helpers.current_live_post_lock.reader_lock: if helpers.current_live_post: if post == helpers.current_live_post[1]: await context.send(f'I\\'m afraid voting", "weren\\'t standing for this post') return helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True) del helpers.standing[post][helpers.registered_members[author]] helpers.save_standing() helpers.log(f'{helpers.registered_members[author]}" ]
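The cog above is loaded like any other discord.py extension. The following is a minimal sketch of my own (not part of the repository) showing how it would be wired into a bot; the synchronous setup(bot) above matches the pre-2.0 discord.py extension API, where load_extension is a regular call.

from discord.ext import commands

bot = commands.Bot(command_prefix='!')
bot.load_extension('cogs.running')  # calls setup(bot), which registers Running(bot)
# bot.run(TOKEN)  # supply your own bot token to actually start the bot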
[ "Flask,render_template app = Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome():", "hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return render_template(\"welcome.html\") if __name__ == '__main__':", "Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return render_template(\"welcome.html\") if", "app = Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return", "flask import Flask,render_template app = Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\")", "from flask import Flask,render_template app = Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\"", "return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return render_template(\"welcome.html\") if __name__ == '__main__': app.run(host='0.0.0.0',debug=True)", "def hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return render_template(\"welcome.html\") if __name__ ==", "@app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return render_template(\"welcome.html\") if __name__", "= Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\") def welcome(): return render_template(\"welcome.html\")", "import Flask,render_template app = Flask(__name__) @app.route(\"/\") def hello(): return \"Hello World!\" @app.route(\"/welcome\") def" ]
[ "return stack def isCyclicUtil(self, v, visited, recStack): visited[v] = True recStack[v] = True", "pred = 1 if logit > 0 else 0 pos_s1, pos_s2 = pos[0],", "def isCyclicUtil(self, v, visited, recStack): visited[v] = True recStack[v] = True for neighbour", "self.graph = defaultdict(list) self.V = vertices def addEdge(self, u, v): self.graph[u].append([v]) def topologicalSortUtil(self,", "= Graph(nvert) # read pred label for logit, pos in zip(logits, positions): if", "recStack[v] = True for neighbour in self.graph[v]: if not visited[neighbour[0]]: if self.isCyclicUtil( neighbour[0],", "0 else 0 else: pred = 1 if logit > 0 else 0", "** 0.5)+1 # create graph obj g = Graph(nvert) # read pred label", "g.addEdge(pos_s2, pos_s1) while g.isCyclic(): g.isCyclic() order = g.topologicalSort() gold_order = list(range(nvert)) return calculate_metrics_list(order,", "The code for this class is based on geeksforgeeks.com \"\"\" def __init__(self, vertices):", "visited, stack) stack.insert(0, v) def topologicalSort(self): visited = [False] * self.V stack =", "stack) stack.insert(0, v) def topologicalSort(self): visited = [False] * self.V stack = []", "v, visited, stack): visited[v] = True for i in self.graph[v]: if not visited[i[0]]:", "obj g = Graph(nvert) # read pred label for logit, pos in zip(logits,", "pred label for logit, pos in zip(logits, positions): if flipped: pred = 1", "stack = [] for i in range(self.V): if not visited[i]: self.topologicalSortUtil(i, visited, stack)", "return True elif recStack[neighbour[0]]: self.graph[v].remove(neighbour) return True recStack[v] = False return False def", "create graph obj g = Graph(nvert) # read pred label for logit, pos", "from collections import defaultdict from .metrics import calculate_metrics_list class Graph: \"\"\" The code", "class is based on geeksforgeeks.com \"\"\" def __init__(self, vertices): self.graph = defaultdict(list) self.V", "# read pred label for logit, pos in zip(logits, positions): if flipped: pred", "= 1 if logit < 0 else 0 else: pred = 1 if", "topologicalSortUtil(self, v, visited, stack): visited[v] = True for i in self.graph[v]: if not", "neighbour in self.graph[v]: if not visited[neighbour[0]]: if self.isCyclicUtil( neighbour[0], visited, recStack): return True", "from .metrics import calculate_metrics_list class Graph: \"\"\" The code for this class is", "recStack[v] = False return False def isCyclic(self): visited = [False] * self.V recStack", "int((2 * len(logits)) ** 0.5)+1 # create graph obj g = Graph(nvert) #", "for node in range(self.V): if not visited[node]: if self.isCyclicUtil(node, visited, recStack): return True", "pos_s2 = pos[0], pos[1] if pred == 0: g.addEdge(pos_s1, pos_s2) elif pred ==", "self.V = vertices def addEdge(self, u, v): self.graph[u].append([v]) def topologicalSortUtil(self, v, visited, stack):", "isCyclicUtil(self, v, visited, recStack): visited[v] = True recStack[v] = True for neighbour in", "def topologicalSortUtil(self, v, visited, stack): visited[v] = True for i in self.graph[v]: if", "for i in self.graph[v]: if not visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v) def", "= [False] * self.V for node in range(self.V): if not visited[node]: if self.isCyclicUtil(node,", "* self.V stack = [] for i in range(self.V): if not visited[i]: self.topologicalSortUtil(i,", "0 pos_s1, pos_s2 = pos[0], pos[1] if pred == 0: g.addEdge(pos_s1, pos_s2) elif", "visited = [False] * self.V stack = [] for i in range(self.V): if", "recStack = [False] * self.V for node in range(self.V): if 
not visited[node]: if", "= True for i in self.graph[v]: if not visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0,", "logits = n(n-1)/2 nvert = int((2 * len(logits)) ** 0.5)+1 # create graph", "label for logit, pos in zip(logits, positions): if flipped: pred = 1 if", "recStack): visited[v] = True recStack[v] = True for neighbour in self.graph[v]: if not", "True recStack[v] = False return False def isCyclic(self): visited = [False] * self.V", "calculate_metrics_list class Graph: \"\"\" The code for this class is based on geeksforgeeks.com", "visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v) def topologicalSort(self): visited = [False] * self.V", "elif recStack[neighbour[0]]: self.graph[v].remove(neighbour) return True recStack[v] = False return False def isCyclic(self): visited", "pred == 0: g.addEdge(pos_s1, pos_s2) elif pred == 1: g.addEdge(pos_s2, pos_s1) while g.isCyclic():", "self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v) def topologicalSort(self): visited = [False] * self.V stack", "if not visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v) def topologicalSort(self): visited = [False]", "for this class is based on geeksforgeeks.com \"\"\" def __init__(self, vertices): self.graph =", "v): self.graph[u].append([v]) def topologicalSortUtil(self, v, visited, stack): visited[v] = True for i in", "self.topologicalSortUtil(i, visited, stack) return stack def isCyclicUtil(self, v, visited, recStack): visited[v] = True", "if logit > 0 else 0 pos_s1, pos_s2 = pos[0], pos[1] if pred", "convert_to_graph(logits, positions, flipped=False): # get no vertices (len logits = n(n-1)/2 nvert =", "for i in range(self.V): if not visited[i]: self.topologicalSortUtil(i, visited, stack) return stack def", "self.isCyclicUtil( neighbour[0], visited, recStack): return True elif recStack[neighbour[0]]: self.graph[v].remove(neighbour) return True recStack[v] =", "addEdge(self, u, v): self.graph[u].append([v]) def topologicalSortUtil(self, v, visited, stack): visited[v] = True for", "import calculate_metrics_list class Graph: \"\"\" The code for this class is based on", "stack def isCyclicUtil(self, v, visited, recStack): visited[v] = True recStack[v] = True for", "= True for neighbour in self.graph[v]: if not visited[neighbour[0]]: if self.isCyclicUtil( neighbour[0], visited,", "True for i in self.graph[v]: if not visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v)", "class Graph: \"\"\" The code for this class is based on geeksforgeeks.com \"\"\"", "[False] * self.V for node in range(self.V): if not visited[node]: if self.isCyclicUtil(node, visited,", "visited[i]: self.topologicalSortUtil(i, visited, stack) return stack def isCyclicUtil(self, v, visited, recStack): visited[v] =", "self.graph[v]: if not visited[neighbour[0]]: if self.isCyclicUtil( neighbour[0], visited, recStack): return True elif recStack[neighbour[0]]:", "self.V for node in range(self.V): if not visited[node]: if self.isCyclicUtil(node, visited, recStack): return", "1 if logit > 0 else 0 pos_s1, pos_s2 = pos[0], pos[1] if", "True for neighbour in self.graph[v]: if not visited[neighbour[0]]: if self.isCyclicUtil( neighbour[0], visited, recStack):", "in range(self.V): if not visited[node]: if self.isCyclicUtil(node, visited, recStack): return True return False", "defaultdict(list) self.V = vertices def addEdge(self, u, v): self.graph[u].append([v]) def topologicalSortUtil(self, v, visited,", "not 
visited[neighbour[0]]: if self.isCyclicUtil( neighbour[0], visited, recStack): return True elif recStack[neighbour[0]]: self.graph[v].remove(neighbour) return", "in self.graph[v]: if not visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v) def topologicalSort(self): visited", "== 1: g.addEdge(pos_s2, pos_s1) while g.isCyclic(): g.isCyclic() order = g.topologicalSort() gold_order = list(range(nvert))", "* self.V recStack = [False] * self.V for node in range(self.V): if not", "pred == 1: g.addEdge(pos_s2, pos_s1) while g.isCyclic(): g.isCyclic() order = g.topologicalSort() gold_order =", "defaultdict from .metrics import calculate_metrics_list class Graph: \"\"\" The code for this class", "[False] * self.V recStack = [False] * self.V for node in range(self.V): if", "else: pred = 1 if logit > 0 else 0 pos_s1, pos_s2 =", "zip(logits, positions): if flipped: pred = 1 if logit < 0 else 0", "if pred == 0: g.addEdge(pos_s1, pos_s2) elif pred == 1: g.addEdge(pos_s2, pos_s1) while", "True recStack[v] = True for neighbour in self.graph[v]: if not visited[neighbour[0]]: if self.isCyclicUtil(", "pos_s2) elif pred == 1: g.addEdge(pos_s2, pos_s1) while g.isCyclic(): g.isCyclic() order = g.topologicalSort()", "v) def topologicalSort(self): visited = [False] * self.V stack = [] for i", "visited, recStack): visited[v] = True recStack[v] = True for neighbour in self.graph[v]: if", "collections import defaultdict from .metrics import calculate_metrics_list class Graph: \"\"\" The code for", "< 0 else 0 else: pred = 1 if logit > 0 else", "stack): visited[v] = True for i in self.graph[v]: if not visited[i[0]]: self.topologicalSortUtil(i[0], visited,", "return False def convert_to_graph(logits, positions, flipped=False): # get no vertices (len logits =", "import defaultdict from .metrics import calculate_metrics_list class Graph: \"\"\" The code for this", "self.V stack = [] for i in range(self.V): if not visited[i]: self.topologicalSortUtil(i, visited,", "i in self.graph[v]: if not visited[i[0]]: self.topologicalSortUtil(i[0], visited, stack) stack.insert(0, v) def topologicalSort(self):", "False return False def isCyclic(self): visited = [False] * self.V recStack = [False]", "on geeksforgeeks.com \"\"\" def __init__(self, vertices): self.graph = defaultdict(list) self.V = vertices def", "def addEdge(self, u, v): self.graph[u].append([v]) def topologicalSortUtil(self, v, visited, stack): visited[v] = True", "i in range(self.V): if not visited[i]: self.topologicalSortUtil(i, visited, stack) return stack def isCyclicUtil(self,", "topologicalSort(self): visited = [False] * self.V stack = [] for i in range(self.V):", "1 if logit < 0 else 0 else: pred = 1 if logit", "def topologicalSort(self): visited = [False] * self.V stack = [] for i in", "v, visited, recStack): visited[v] = True recStack[v] = True for neighbour in self.graph[v]:", "return False def isCyclic(self): visited = [False] * self.V recStack = [False] *", "visited, stack) return stack def isCyclicUtil(self, v, visited, recStack): visited[v] = True recStack[v]", "visited[node]: if self.isCyclicUtil(node, visited, recStack): return True return False def convert_to_graph(logits, positions, flipped=False):", "= [] for i in range(self.V): if not visited[i]: self.topologicalSortUtil(i, visited, stack) return", "True elif recStack[neighbour[0]]: self.graph[v].remove(neighbour) return True recStack[v] = False return False def isCyclic(self):", "g.addEdge(pos_s1, pos_s2) elif pred == 1: g.addEdge(pos_s2, pos_s1) while g.isCyclic(): 
# Pairwise-ordering evaluation: build a directed graph from pairwise logits,
# prune cycles, and score the resulting topological order.
from collections import defaultdict

from .metrics import calculate_metrics_list  # relative import assumed; only ".metrics" survives in the fragments


class Graph:
    """ The code for this class is based on geeksforgeeks.com """

    def __init__(self, vertices):
        self.graph = defaultdict(list)
        self.V = vertices

    def addEdge(self, u, v):
        self.graph[u].append([v])

    def topologicalSortUtil(self, v, visited, stack):
        visited[v] = True
        for i in self.graph[v]:
            if not visited[i[0]]:
                self.topologicalSortUtil(i[0], visited, stack)
        stack.insert(0, v)

    def topologicalSort(self):
        visited = [False] * self.V
        stack = []
        for i in range(self.V):
            if not visited[i]:
                self.topologicalSortUtil(i, visited, stack)
        return stack

    def isCyclicUtil(self, v, visited, recStack):
        visited[v] = True
        recStack[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour[0]]:
                if self.isCyclicUtil(neighbour[0], visited, recStack):
                    return True
            elif recStack[neighbour[0]]:
                # Back edge found: drop it so repeated isCyclic() calls converge.
                self.graph[v].remove(neighbour)
                return True
        recStack[v] = False
        return False

    def isCyclic(self):
        visited = [False] * self.V
        recStack = [False] * self.V
        for node in range(self.V):
            if not visited[node]:
                if self.isCyclicUtil(node, visited, recStack):
                    return True
        return False


def convert_to_graph(logits, positions, flipped=False):
    # get no. of vertices (len(logits) = n(n-1)/2)
    nvert = int((2 * len(logits)) ** 0.5) + 1
    # create graph obj
    g = Graph(nvert)
    # read pred label
    for logit, pos in zip(logits, positions):
        if flipped:
            pred = 1 if logit < 0 else 0
        else:
            pred = 1 if logit > 0 else 0
        pos_s1, pos_s2 = pos[0], pos[1]
        if pred == 0:
            g.addEdge(pos_s1, pos_s2)
        elif pred == 1:
            g.addEdge(pos_s2, pos_s1)
    # Each isCyclic() call removes one offending edge; loop until acyclic.
    while g.isCyclic():
        g.isCyclic()
    order = g.topologicalSort()
    gold_order = list(range(nvert))
    return calculate_metrics_list(order, gold_order)
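# Usage sketch (not from the original source): exercise the Graph class defined
# above on a 3-vertex graph with one back edge (2 -> 0). isCyclic() removes that
# edge as a side effect, after which a topological order exists.
g = Graph(3)
g.addEdge(0, 1)
g.addEdge(1, 2)
g.addEdge(2, 0)        # creates the cycle 0 -> 1 -> 2 -> 0
while g.isCyclic():    # each call prunes one offending edge
    pass
print(g.topologicalSort())   # [0, 1, 2]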
# <reponame>akaver/pbt-demo-mnist
import logging
import PIL
import random

import numpy as np
import torch
import torchvision.transforms.functional as TF

log = logging.getLogger(__name__)


# define augmentation functions
def auto_contrast(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    fill = img[0, 0, 0].item() if fill is None else fill
    if level > 0.1:
        img = TF.autocontrast(img)
    return img


def blur(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    kernel_size = int(level * 4)
    if kernel_size % 2 == 0:
        if random.random() > 0.5:
            kernel_size = kernel_size + 1
        else:
            kernel_size = kernel_size - 1
    if kernel_size > 0:
        img = TF.gaussian_blur(img, kernel_size=kernel_size)
    return img


def crop(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    height, width = img.shape[1], img.shape[2]
    crop_h = int(height * level)
    crop_w = int(width * level)
    # crop from center
    # img = TF.resized_crop(img, [crop_h, crop_w])
    return img


def cutout(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def equalize(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def flip_leftright(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def flip_updown(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def identity(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def posterize(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def rotate_left(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    fill = img[0, 0, 0].item() if fill is None else fill
    # max 30 degrees of rotation
    degrees = level * 30
    img = TF.rotate(img, degrees, fill=fill)
    return img


def rotate_right(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    fill = img[0, 0, 0].item() if fill is None else fill
    # max 30 degrees of rotation
    degrees = level * -30
    img = TF.rotate(img, degrees, fill=fill)
    return img


def shear_x(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def shear_y(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def smooth(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def solarize(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def translate_x(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
    pass


def translate_y(img):
    pass


ALL_TRANSFORMS = [
    auto_contrast,
    blur,
    crop,
    cutout,
    equalize,
    flip_leftright,
    flip_updown,
    identity,
    posterize,
    rotate_left,
    rotate_right,
    shear_x,
    shear_y,
    smooth,
    solarize,
    translate_x,
    translate_y,
]

# actual working augmentations. just add more here!
ALL_TRANSFORMS = [
    # auto_contrast,
    blur,
    rotate_left,
    rotate_right,
]

NAME_TO_TRANSFORM = {t.__name__: t for t in ALL_TRANSFORMS}
TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()
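# Usage sketch (not from the original source): look an augmentation up by name and
# apply it to a dummy 1x28x28 MNIST-sized tensor. "rotate_left" is one of the
# working transforms registered above; the level value here is arbitrary.
example = torch.rand(1, 28, 28)
augment = NAME_TO_TRANSFORM["rotate_left"]
rotated = augment(example, level=0.5)   # rotates counter-clockwise by 0.5 * 30 degrees
log.debug("available transforms: %s", sorted(TRANSFORM_NAMES))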
import sys

from sqlalchemy import create_engine

import pg_copy

if __name__ == "__main__":
    engine = create_engine(sys.argv[1])
    target_table = 'example_table'
    objs = [
        {
            'id': i,
            'description': f'record description {i}'
        }
        for i in range(100_000)
    ]
    pg_copy.insert_with_copy(engine, objs, target_table)
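# For context only: an illustration of how a COPY-based bulk insert like the one
# pg_copy.insert_with_copy presumably performs could be written with psycopg2's
# copy_expert, streaming the rows as CSV. This is a sketch, not the actual pg_copy
# implementation; the helper name copy_rows and the column list are assumptions.
import csv
import io


def copy_rows(engine, rows, table, columns=("id", "description")):
    buf = io.StringIO()
    writer = csv.writer(buf)
    for row in rows:
        writer.writerow([row[c] for c in columns])
    buf.seek(0)
    # Borrow the raw DBAPI (psycopg2) connection from the SQLAlchemy engine.
    conn = engine.raw_connection()
    try:
        with conn.cursor() as cur:
            cur.copy_expert(
                "COPY {} ({}) FROM STDIN WITH CSV".format(table, ", ".join(columns)),
                buf,
            )
        conn.commit()
    finally:
        conn.close()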
# <reponame>Zealll/HackerRank
def repeatedString(s, n):
    dictionary = {'a': 0}
    length = n // len(s)
    if 'a' not in s:
        return 0
    for i in s:
        if i == 'a':
            dictionary['a'] += 1
    remaining = n - len(s) * length
    total = int(dictionary['a'] * length)
    if remaining > 0:
        for i in range(remaining):
            if s[i] == 'a':
                total += 1
    return total


# def repeatedString(s, n):
#     dictionary = {}
#     length = n // len(s)
#     if 'a' not in s:
#         return 0
#     for i in s:
#         if i == 'a':
#             if 'a' not in dictionary:
#                 dictionary['a'] = 1
#             else:
#                 dictionary['a'] += 1
#     remaining = n - len(s) * length
#     total = int(dictionary['a'] * length)
#     if remaining > 0:
#         for i in range(remaining):
#             if s[i] == 'a':
#                 total += 1
#     return total
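# Quick check (not from the original source), using the classic HackerRank sample:
# 'aba' repeated out to length 10 is 'abaabaabaa', which contains 7 occurrences of 'a'.
print(repeatedString('aba', 10))   # 7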
# <filename>nodes/remove_directory.py
# Copyright 2021 Fabrica Software, LLC
import os
import shutil

import iograft
import iobasictypes


class RemoveDirectory(iograft.Node):
    """ Remove the given directory. """
    directory = iograft.InputDefinition("directory", iobasictypes.String())
    remove_contents = iograft.InputDefinition("remove_contents",
                                              iobasictypes.Bool(),
                                              default_value=False)
    must_exist = iograft.InputDefinition("must_exist",
                                         iobasictypes.Bool(),
                                         default_value=False)

    @classmethod
    def GetDefinition(cls):
        node = iograft.NodeDefinition("remove_directory")
        node.AddInput(cls.directory)
        node.AddInput(cls.remove_contents)
        node.AddInput(cls.must_exist)
        return node

    @staticmethod
    def Create():
        return RemoveDirectory()

    def Process(self, data):
        directory = iograft.GetInput(self.directory, data)
        remove_contents = iograft.GetInput(self.remove_contents, data)
        must_exist = iograft.GetInput(self.must_exist, data)

        if must_exist and not os.path.isdir(directory):
            try:
                FileNotFoundError
            except NameError:
                FileNotFoundError = IOError
            raise FileNotFoundError(
                "Directory {} does not exist.".format(directory))

        # The directory does not exist; nothing to do.
        if not os.path.isdir(directory):
            return

        # Based on if we are removing contents or not, use shutil or os.
        if remove_contents:
            shutil.rmtree(directory)
        else:
            os.rmdir(directory)


def LoadPlugin(plugin):
    node = RemoveDirectory.GetDefinition()
    plugin.RegisterNode(node, RemoveDirectory.Create)
\"\"\" directory = iograft.InputDefinition(\"directory\", iobasictypes.String()) remove_contents = iograft.InputDefinition(\"remove_contents\", iobasictypes.Bool(),", "node @staticmethod def Create(): return RemoveDirectory() def Process(self, data): directory = iograft.GetInput(self.directory, data)", "FileNotFoundError except NameError: FileNotFoundError = IOError raise FileNotFoundError( \"Directory {} does not exist.\".format(directory))", "iograft.InputDefinition(\"directory\", iobasictypes.String()) remove_contents = iograft.InputDefinition(\"remove_contents\", iobasictypes.Bool(), default_value=False) must_exist = iograft.InputDefinition(\"must_exist\", iobasictypes.Bool(), default_value=False) @classmethod", "iograft.GetInput(self.must_exist, data) if must_exist and not os.path.isdir(directory): try: FileNotFoundError except NameError: FileNotFoundError =", "default_value=False) must_exist = iograft.InputDefinition(\"must_exist\", iobasictypes.Bool(), default_value=False) @classmethod def GetDefinition(cls): node = iograft.NodeDefinition(\"remove_directory\") node.AddInput(cls.directory)", "default_value=False) @classmethod def GetDefinition(cls): node = iograft.NodeDefinition(\"remove_directory\") node.AddInput(cls.directory) node.AddInput(cls.remove_contents) node.AddInput(cls.must_exist) return node @staticmethod", "return # Based on if we are removing contents or not, use shutil" ]
[ "which returns a successful result on a 202. Parse the common CLB errors,", "settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB errors. \"\"\" expected =", "\" \"configured on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422)", "= service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent assert_parses_common_clb_errors( self, svc_intent, get_clb_node_feed(\"12\", \"13\"), \"12\")", "stub_pure_response( json.dumps({ 'message': \"Cloud load balancers is down\", 'code': 500}), 500), stub_pure_response( json.dumps({", "feed part of the result \"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\"))", "code, 'details': ''}), code) with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg,", "= get_clbs() body = {'loadBalancers': 'lbs!'} seq = [ (expected.intent, lambda i: stub_json_response(body)),", "returns the feed part of the result \"\"\" from otter.cloud_client.clb import cf self.patch(cf,", "which returns a successful result on 202. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\",", "'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed`", "removed)) def test_get_clbs(self): \"\"\"Returns all the load balancer details from the LBs endpoint.\"\"\"", "the nodes are invalid, the request is retried without the offending nodes. \"\"\"", "202. Parse the common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234',", "status of 'BROKEN' \" \"and is considered immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({", "six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the load balancer details from the", ".../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET',", "on 202. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent:", "functions, such as :obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What is my LB ID\"\"\"", "etc. are handled. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff,", "CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_DELETE' and is \" \"considered", "and is \" \"considered immutable.\", 422, CLBDeletedError), (\"The load balancer is deleted and", "[ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\"", "Assert that the effect produced performs the common CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`,", "self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self):", "self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for a LB.\"\"\"", "all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def test_change_clb_node_default_type(self): \"\"\" Produce a", "\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self):", "common CLB errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent,", "= [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req),", "no_such_node = stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))])", "log_intent, service_request_eqf from otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response ) from otter.util.http import", "service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT,", "'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher =", "] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed", "def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is bubbled up as an APIError.\"\"\"", "eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self, expected.intent,", "id #1234 not found for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg,", "eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg = \"Nodes must not", "a request for modifying a node on a load balancer with the default", "{'nodes': 'nodes!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _:", "seq = [ (self.expected_node_removal_req().intent, 
service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self):", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req = get_clbs() body = {'loadBalancers': 'lbs!'} seq =", "for a node removal request. \"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str,", "CLB_BATCH_DELETE_LIMIT nodes and raises ``CLBPartialNodesRemoved`` with remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids", "\"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\": settings} seq =", "otter.cloud_client.clb\"\"\" import json from effect import sync_perform from effect.testing import ( EQFDispatcher, const,", "cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase):", "the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req = get_clbs() body", "is deleted and considered immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\", 404, NoSuchCLBError),", "\"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected", "# all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)):", "with CLB servicetype and atom URL and returns the feed part of the", "\"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" }", "Produce a request for modifying a node on a load balancer, which returns", "considered \" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load balancers is", "'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A DELETE request is", "81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id),", "from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq =", "failure msg = (\"Duplicate nodes detected. One or more nodes already \" \"configured", "and considered \" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load balancers", "ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq = [ (expected.intent, lambda i:", "self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the", "successful result on 202. 
\"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected =", "= add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) #", "i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes`", "req = get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'}", "that can't be parsed are bubbled up as an APIError.\"\"\" error_bodies = [", "cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps", "intent, eff, lb_id): \"\"\" Assert that the effect produced performs the common CLB", "APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ]", "\"\"\" Produce a request for adding nodes to a load balancer, which returns", "test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is bubbled up as an APIError.\"\"\" eff", "request is retried without the offending nodes. \"\"\" node_ids = map(str, range(1, 5))", "successful result on a 202. Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.", "(expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\"", "\"configured on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher", "NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import ServiceType", "import SynchronousTestCase from otter.cloud_client import service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError,", "self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg = (\"Duplicate nodes detected.", "common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id),", "\"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id,", "self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the load balancer", "stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\" Produce a request for adding nodes to a", "perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it being", "'code': 422}), 422), stub_pure_response( json.dumps({ 'message': (\"The load balancer is deleted and considered", "] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): 
\"\"\" Parses regular CLB errors and raises", "CLB errors about it being in a deleted state, pending update, etc. are", "422, CLBDeletedError), (\"The load balancer is deleted and considered immutable.\", 422, CLBDeletedError), (\"Load", "update, etc. are handled. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent,", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests", "def test_add_clb_nodes(self): \"\"\" Produce a request for adding nodes to a load balancer,", "(self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that", "perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps =", "\"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and raises ``CLBPartialNodesRemoved`` with remaining nodes \"\"\"", "parsed are bubbled up as an APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages': ['bar']}},", "from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError,", "result \"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\")", "def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB errors. 
\"\"\" expected = service_request(", "\"random non-json\" ] for body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq", "an over limit message\", 'code': 413}), 413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random", "eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the load", "test_get_clbs(self): \"\"\"Returns all the load balancer details from the LBs endpoint.\"\"\" expected =", "immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_UPDATE' and is", "'message': (\"Load Balancer '{0}' has a status of 'BROKEN' \" \"and is considered", "all the nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "\"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype", "ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for", "400s that can't be parsed are bubbled up as an APIError.\"\"\" error_bodies =", "(\"Load Balancer '{0}' has a status of 'BROKEN' \" \"and is considered immutable.\"),", "EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\" Produce a", "endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req = get_clbs() body = {'loadBalancers':", "413) ] for resp in bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff)", "all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\"", "eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id),", "nodes=(1, 2)): \"\"\" :return: Expected effect for a node removal request. \"\"\" return", "with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def", "node_ids) response = stub_pure_response( {'validationErrors': {'messages': [ 'Node ids 1,3 are not a", ":class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected ``ServiceRequest`` intent", "which returns a successful result on 202. 
Parse the common CLB errors, and", "as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg =", "self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed are", "EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))", "NoSuchCLBNode failure msg = \"Node with id #1234 not found for loadbalancer #{0}\".format(", "returns all the nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request(", "service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes", "method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions, such as :obj:`change_clb_node`.", "class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with", "\"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"})))", "and returns setting inside {\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor')", "[ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When", "= [ (\"Load Balancer '{0}' has a status of 'BUILD' and is \"", "of 'PENDING_UPDATE' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has", "from otter.cloud_client import service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError,", "get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB errors. \"\"\" expected", "is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of", "the common CLB errors, and a :class:`CLBDuplicateNodesError`. 
\"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\":", "Retry is different because it's produced by repose over_limit = stub_pure_response( json.dumps({ \"overLimit\":", "422, CLBNotActiveError), (\"The loadbalancer is marked as deleted.\", 410, CLBDeletedError), ] for msg,", "] for msg, code, err in json_responses_and_errs: msg = msg.format(lb_id) resp = stub_pure_response(", "202) seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None)", "\"\"\"Random 400s that can't be parsed are bubbled up as an APIError.\"\"\" error_bodies", "limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))),", "service_request_eqf from otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response ) from otter.util.http import APIError", "seq = [ (expected.intent, lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda _:", "{'messages': [ 'Node ids 1,3 are not a part of your loadbalancer']}}, 400)", "not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the load balancer details from the LBs", "calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...} \"\"\" expected = service_request(", "\"\"\" Produce a request for modifying a node on a load balancer, which", "service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns", "stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {}))", "a :class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\",", "= remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result =", "from otter.constants import ServiceType from otter.test.cloud_client.test_init import log_intent, service_request_eqf from otter.test.utils import (", "in a deleted state, pending update, etc. are handled. \"\"\" eff = remove_clb_nodes(self.lb_id,", "{ \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" } }),", "get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import ServiceType from otter.test.cloud_client.test_init import log_intent, service_request_eqf from", "None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and raises ``CLBPartialNodesRemoved`` with", "\"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): \"\"\" Parses regular CLB errors and", "load balancers is down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this is not", "\"considered immutable.\", 422, CLBDeletedError), (\"The load balancer is deleted and considered immutable.\", 422,", "#1234 not found for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg, 'code':", "is sent, and the Effect returns None if 202 is returned. 
\"\"\" eff", "is deleted and considered \" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud", "errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id)", "assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect for", "a successful result on 202. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected", "] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB", "err in json_responses_and_errs: msg = msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg, 'code': code,", "\"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def", "perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\"", "= [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req),", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\", \"delay\": 10,", "\"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST',", "413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose error message\", 413) ] for", "remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq,", "immutable.\", 422, CLBDeletedError), (\"The load balancer is deleted and considered immutable.\", 422, CLBDeletedError),", "marked as deleted.\", 410, CLBDeletedError), ] for msg, code, err in json_responses_and_errs: msg", "as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common", "up as an APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors':", "Ignored errors bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}' has a", "service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is different because it's produced", "otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq = [", "request. 
\"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self):", "msg = \"Node with id #1234 not found for loadbalancer #{0}\".format( self.lb_id) no_such_node", "sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg = \"Nodes must", "test_add_clb_nodes(self): \"\"\" Produce a request for adding nodes to a load balancer, which", "50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None,", "202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg = \"Node with id", "and raises corresponding exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent", "common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def test_change_clb_node_default_type(self): \"\"\" Produce a request for", "= stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with", "\"\"\" Tests for CLB client functions, such as :obj:`change_clb_node`. \"\"\" @property def lb_id(self):", "CLBDeletedError), (\"Load balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError),", "json.dumps({ 'message': (\"Load Balancer '{0}' has a status of 'BROKEN' \" \"and is", "weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50,", "# CLBDuplicateNodesError failure msg = (\"Duplicate nodes detected. One or more nodes already", "data={'nodes': nodes}, success_pred=has_code(202)) # success seq = [ (expected.intent, lambda i: stub_json_response({}, 202,", "load balancer being accessed in the function being tested \"\"\" json_responses_and_errs = [", "= [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq,", "'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher,", "is down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this is not an over", "random HTTP response code is bubbled up as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id,", "CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants", "as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns", "request is sent, and the Effect returns None if 202 is returned. 
\"\"\"", "seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff) self.assertIs(result, None)", "Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" } }), 413) with testcase.assertRaises(CLBRateLimitError)", "\"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" } }), 413)", "range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({},", "lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg = \"Nodes must not exceed 25 per load", "\"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff) self.assertIs(result,", "immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({ 'message': (\"The load balancer is deleted and", "nodes. \"\"\" node_ids = map(str, range(1, 5)) eff = remove_clb_nodes(self.lb_id, node_ids) response =", ":class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\":", "CLB errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\":", "more nodes already \" \"configured on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg,", "json.dumps({'message': msg, 'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as", "# all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def test_change_clb_node_default_type(self): \"\"\" Produce", "effect.testing import ( EQFDispatcher, const, intent_func, noop, perform_sequence) import six from twisted.trial.unittest import", "code) with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit", ":func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype and atom URL", "success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self):", "otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert that the effect", "\"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect for a node removal", "eff: Effect returned from function being tested :param lb_id: ID of load balancer", "test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes')", "dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\"", "as an APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages':", "seq = [ (expected.intent, 
lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq,", "for msg, code, err in json_responses_and_errs: msg = msg.format(lb_id) resp = stub_pure_response( json.dumps({'message':", "Expected effect for a node removal request. \"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id),", "six from twisted.trial.unittest import SynchronousTestCase from otter.cloud_client import service_request from otter.cloud_client.clb import (", "def test_change_clb_node(self): \"\"\" Produce a request for modifying a node on a load", "on a 202. Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes", "per load balancer.\" limit = stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413) dispatcher =", "404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked as", "status of 'PENDING_UPDATE' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}'", ":obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected ``ServiceRequest`` intent :param eff: Effect returned", "eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result", "a 202. Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes =", "dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure", "request for modifying a node on a load balancer with the default type,", "CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs,", "as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors", "import json from effect import sync_perform from effect.testing import ( EQFDispatcher, const, intent_func,", "returned. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))),", "\"\"\" When CLB returns an error indicating that some of the nodes are", "= map(six.text_type, range(limit)) not_removed = map(six.text_type, range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id,", "Parses regular CLB errors and raises corresponding exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "'code': 404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher,", "CLBDeletedError), (\"The load balancer is deleted and considered immutable.\", 422, CLBDeletedError), (\"Load balancer", "50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher,", "\"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80,", "'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff),", "in the function being tested \"\"\" json_responses_and_errs = [ (\"Load Balancer '{0}' has", "'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq = [ (expected.intent, lambda i:", "def test_remove_clb_nodes_success(self): \"\"\" A DELETE request is sent, and the Effect returns None", "202, {})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) #", "sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures assert_parses_common_clb_errors(self,", "CLBNodeLimitError failure msg = \"Nodes must not exceed 25 per load balancer.\" limit", "node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq,", "perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB errors. 
\"\"\"", "GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB", "[\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq, eff)", "A DELETE request is sent, and the Effect returns None if 202 is", "= [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\"", "( EQFDispatcher, const, intent_func, noop, perform_sequence) import six from twisted.trial.unittest import SynchronousTestCase from", "a status of 'PENDING_UPDATE' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer", "otter.cloud_client import service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved,", "<filename>otter/test/cloud_client/test_clb.py \"\"\"Tests for otter.cloud_client.clb\"\"\" import json from effect import sync_perform from effect.testing import", "condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight':", "...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\",", "an APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}},", "eff = remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved)", "} body = {\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop)", "different because it's produced by repose over_limit = stub_pure_response( json.dumps({ \"overLimit\": { \"message\":", "modifying a node on a load balancer with the default type, which returns", "{\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq,", "ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def", "= CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2)) removed = map(six.text_type, range(limit)) not_removed", "up as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent,", "= service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202))", "'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "balancer details from the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 
'loadbalancers') req", "APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}}, \"random", "an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))),", "\"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...} \"\"\" expected", "range(limit + 2)) removed = map(six.text_type, range(limit)) not_removed = map(six.text_type, range(limit, limit +", "some of the nodes are invalid, the request is retried without the offending", "json.dumps({ 'message': \"this is not an over limit message\", 'code': 413}), 413), stub_pure_response(\"random", "loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404) dispatcher =", "expected ``ServiceRequest`` intent :param eff: Effect returned from function being tested :param lb_id:", "= \"Node with id #1234 not found for loadbalancer #{0}\".format( self.lb_id) no_such_node =", "ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all", "lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes", "def test_error_handling(self): \"\"\" Parses regular CLB errors and raises corresponding exceptions \"\"\" svc_intent", "= EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg", "(log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the", "common CLB errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80,", "413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff) self.assertEqual(", "noop, perform_sequence) import six from twisted.trial.unittest import SynchronousTestCase from otter.cloud_client import service_request from", "APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions,", "is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked as deleted.\", 410, CLBDeletedError),", "code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions, such", "it being in a deleted state, pending update, etc. are handled. 
\"\"\" eff", "json.dumps({ 'message': (\"The load balancer is deleted and considered \" \"immutable\"), 'code': 404}),", "ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success", "] result = perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors", "are bubbled up as an APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages': ['bar']}}, {'messages':", "parses the common CLB errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors(", "message\", 404), stub_pure_response(\"random repose error message\", 413) ] for resp in bad_resps: with", "service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError", "error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param", "balancers is down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this is not an", "body = {\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ]", "already \" \"configured on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}),", "cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client", "= {'nodes': 'nodes!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda", "lb_id(self): \"\"\"What is my LB ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\" Produce a", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher", "with id #1234 not found for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message':", "message\", 413) ] for resp in bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))],", "Balancer '{0}' has a status of 'BUILD' and is \" \"considered immutable.\", 422,", "def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error indicating that some of the", "json from effect import sync_perform from effect.testing import ( EQFDispatcher, const, intent_func, noop,", "] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and", "change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import ServiceType from otter.test.cloud_client.test_init import", "non-json\" ] for body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq =", "part of your loadbalancer']}}, 400) response2 = stub_pure_response({}, 202) seq = [ 
(self.expected_node_removal_req(node_ids).intent,", "body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common", "the default type, which returns a successful result on 202. \"\"\" eff =", "\"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" } }), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))],", "'BROKEN' \" \"and is considered immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({ 'message': (\"The", "being tested :param lb_id: ID of load balancer being accessed in the function", "Parse the common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\",", "a node on a load balancer with the default type, which returns a", "(expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def", "with the default type, which returns a successful result on 202. \"\"\" eff", "a load balancer, which returns a successful result on 202. Parse the common", "CLB client functions, such as :obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What is my", "\" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load balancers is down\",", "found.\", 404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked", "#{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404) dispatcher = EQFDispatcher([(", "= service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq = [ (expected.intent,", "cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures", "the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return:", "and atom URL and returns the feed part of the result \"\"\" from", "500}), 500), stub_pure_response( json.dumps({ 'message': \"this is not an over limit message\", 'code':", "with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method',", "class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions, such as :obj:`change_clb_node`. 
\"\"\" @property", "cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg = \"Nodes must not exceed 25", "immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_DELETE' and is", "range(limit)) not_removed = map(six.text_type, range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq", "{}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg", "``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A DELETE", "[ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self):", "returned from function being tested :param lb_id: ID of load balancer being accessed", "expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect for a node removal request. \"\"\"", "# CLBNodeLimitError failure msg = \"Nodes must not exceed 25 per load balancer.\"", "the request is retried without the offending nodes. \"\"\" node_ids = map(str, range(1,", "\"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\": settings} seq", "# NoSuchCLBNode failure msg = \"Node with id #1234 not found for loadbalancer", "202. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT',", "lb_id): \"\"\" Assert that the effect produced performs the common CLB error parsing:", "part of the result \"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff", "{\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\":", "are invalid, the request is retried without the offending nodes. 
\"\"\" node_ids =", "stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns", "an error indicating that some of the nodes are invalid, the request is", "= service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\"", "\"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes},", "error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ]", "status of 'unexpected status' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer", "with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure", "of your loadbalancer']}}, 400) response2 = stub_pure_response({}, 202) seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)),", "'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher,", "\" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_DELETE'", "[ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!')", "message\", 'code': 413}), 413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose error message\",", "eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202))", "1,3 are not a part of your loadbalancer']}}, 400) response2 = stub_pure_response({}, 202)", "[ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}}, \"random non-json\" ] for", "cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\")", "limit = stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))])", "add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import ServiceType from otter.test.cloud_client.test_init", "msg = msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg, 'code': code, 'details': ''}), code)", "'unexpected status' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has", "not a part of your loadbalancer']}}, 400) response2 = stub_pure_response({}, 202) seq =", "CLB servicetype and atom URL and returns the feed part of the result", "LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request( 
ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes':", "balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer", "retried without the offending nodes. \"\"\" node_ids = map(str, range(1, 5)) eff =", "response = stub_pure_response( {'validationErrors': {'messages': [ 'Node ids 1,3 are not a part", "self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\",", "'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor``", "eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures assert_parses_common_clb_errors(self, expected.intent,", "{'loadBalancers': 'lbs!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _:", "from otter.util.http import APIError from otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id):", "on 202. Parse the common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id,", "as deleted.\", 410, CLBDeletedError), ] for msg, code, err in json_responses_and_errs: msg =", "response2 = stub_pure_response({}, 202) seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ]", "= [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def", "service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests", "CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes,", "has a status of 'PENDING_DELETE' and is \" \"considered immutable.\", 422, CLBDeletedError), (\"The", "500), stub_pure_response( json.dumps({ 'message': \"this is not an over limit message\", 'code': 413}),", "state, pending update, etc. are handled. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors(", "testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL'))", "remove_clb_nodes) from otter.constants import ServiceType from otter.test.cloud_client.test_init import log_intent, service_request_eqf from otter.test.utils import", "'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes',", "load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher = EQFDispatcher([(", "\"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): \"\"\" Parses regular", "is marked as deleted.\", 410, CLBDeletedError), ] for msg, code, err in json_responses_and_errs:", "stub_pure_response( json.dumps({'message': msg, 'code': code, 'details': ''}), code) with testcase.assertRaises(err) as cm: perform_sequence([(intent,", "{\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request(", "the common CLB errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self,", "msg, 'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm:", "] for resp in bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(", "= EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg,", "\"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_UPDATE' and", "details from the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req =", "const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor`", "default type, which returns a successful result on 202. 
\"\"\" eff = change_clb_node(lb_id=self.lb_id,", "lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self):", "lb_id: ID of load balancer being accessed in the function being tested \"\"\"", "balancer being accessed in the function being tested \"\"\" json_responses_and_errs = [ (\"Load", "of 'BROKEN' \" \"and is considered immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({ 'message':", "produced by repose over_limit = stub_pure_response( json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\":", ":param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected ``ServiceRequest`` intent :param eff: Effect", "down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this is not an over limit", "lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def test_change_clb_node_default_type(self):", "nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A DELETE request is sent, and the Effect", "'{0}' has a status of 'unexpected status' and is \" \"considered immutable.\", 422,", "\"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon',", "resp = stub_pure_response( json.dumps({'message': msg, 'code': code, 'details': ''}), code) with testcase.assertRaises(err) as", "'{0}' has a status of 'BUILD' and is \" \"considered immutable.\", 422, CLBImmutableError),", "for CLB client functions, such as :obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What is", "resp in bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={},", "modifying a node on a load balancer, which returns a successful result on", "}), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\",", "remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP", "80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)", "success seq = [ (expected.intent, lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda", "{'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent,", "\"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\": settings} seq = [ (expected.intent,", "or more nodes already \" \"configured on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message':", "CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load", "\"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\":", "Produce a request for adding nodes to a load balancer, 
which returns a", "\"\"\" Assert that the effect produced performs the common CLB error parsing: :class:`CLBImmutableError`,", "a request for modifying a node on a load balancer, which returns a", "25 per load balancer.\" limit = stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413) dispatcher", "seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings)", "balancer with the default type, which returns a successful result on 202. \"\"\"", "(\"Load Balancer '{0}' has a status of 'BUILD' and is \" \"considered immutable.\",", "be parsed are bubbled up as an APIError.\"\"\" error_bodies = [ {'validationErrors': {'messages':", "request for modifying a node on a load balancer, which returns a successful", "(StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg = (\"Duplicate nodes detected. One or", "eff, lb_id): \"\"\" Assert that the effect produced performs the common CLB error", "'BUILD' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a", "such as :obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What is my LB ID\"\"\" return", "CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from", "has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert that the effect produced performs", "error_bodies = [ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}}, \"random non-json\"", "corresponding exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent assert_parses_common_clb_errors( self,", "def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id)", "cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [ stub_pure_response( json.dumps({ 'message':", "for otter.cloud_client.clb\"\"\" import json from effect import sync_perform from effect.testing import ( EQFDispatcher,", "tested \"\"\" json_responses_and_errs = [ (\"Load Balancer '{0}' has a status of 'BUILD'", "expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect for a", "self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error indicating", "def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and raises ``CLBPartialNodesRemoved`` with remaining", "change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition':", "422}), 422), stub_pure_response( json.dumps({ 'message': (\"The load balancer is deleted and considered \"", "returns a successful result on 202. 
\"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50)", "body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body,", "raises corresponding exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent assert_parses_common_clb_errors(", "CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor,", "\"\"\"Returns all the load balancer details from the LBs endpoint.\"\"\" expected = service_request(", "all the load balancer details from the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({},", "\"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\":", "considered immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer is not", "nodes to a load balancer, which returns a successful result on a 202.", "load balancer with the default type, which returns a successful result on 202.", "stub_json_response, stub_pure_response ) from otter.util.http import APIError from otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase,", "= remove_clb_nodes(self.lb_id, node_ids) response = stub_pure_response( {'validationErrors': {'messages': [ 'Node ids 1,3 are", "otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError,", "= [ {'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}}, \"random non-json\" ]", "URL and returns the feed part of the result \"\"\" from otter.cloud_client.clb import", "not found for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg, 'code': 404}),", "test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...} \"\"\"", "# OverLimit Retry is different because it's produced by repose over_limit = stub_pure_response(", "'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))])", "import log_intent, service_request_eqf from otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response ) from otter.util.http", "ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\":", "\"\"\" :func:`get_clb_health_monitor` parses the common CLB errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET',", "None if 202 is returned. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq =", "200))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor`", "= stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with", "eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error indicating that some of", "'{0}' has a status of 'PENDING_UPDATE' and is \" \"considered immutable.\", 422, CLBImmutableError),", "Calls `cf.read_entries` with CLB servicetype and atom URL and returns the feed part", "\"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\":", "ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\" Produce a request for modifying a node", "with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) #", "def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET',", "EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id),", "lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202,", "= stub_pure_response( json.dumps({'message': msg, 'code': code, 'details': ''}), code) with testcase.assertRaises(err) as cm:", "import ( EQFDispatcher, const, intent_func, noop, perform_sequence) import six from twisted.trial.unittest import SynchronousTestCase", "422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_DELETE' and is \"", "is different because it's produced by repose over_limit = stub_pure_response( json.dumps({ \"overLimit\": {", "nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2)) removed =", "deleted.\", 410, CLBDeletedError), ] for msg, code, err in json_responses_and_errs: msg = msg.format(lb_id)", "body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions, such as", "json.dumps({ 'message': \"Cloud load balancers is down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message':", "get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq =", "\" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'unexpected", "'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load balancers is down\", 'code': 500}),", "ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202)) def 
test_remove_clb_nodes_success(self): \"\"\" A DELETE request", "svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent assert_parses_common_clb_errors( self, svc_intent, get_clb_node_feed(\"12\", \"13\"),", "CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the load balancer details from", "'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET", "returns None if 202 is returned. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq", "is my LB ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\" Produce a request for", "{\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff", "performs the common CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase`", "\"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is bubbled up as an", "404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual(", "'loadbalancers') req = get_clbs() body = {'loadBalancers': 'lbs!'} seq = [ (expected.intent, lambda", "sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self,", "effect for a node removal request. 
\"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id':", "] for body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [", "\"\"\"Tests for otter.cloud_client.clb\"\"\" import json from effect import sync_perform from effect.testing import (", "NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def", "LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req = get_clbs() body =", "testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB", "remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq,", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq = [ (expected.intent, lambda", "get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls", "bubbled up as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [", "'PENDING_DELETE' and is \" \"considered immutable.\", 422, CLBDeletedError), (\"The load balancer is deleted", "loadbalancer']}}, 400) response2 = stub_pure_response({}, 202) seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent,", "in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))),", "immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'unexpected status' and", "NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import ServiceType from", "eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg = (\"Duplicate nodes detected. One", "a successful result on 202. Parse the common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\"", "common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected", "service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't", "success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202))", "returns a successful result on a 202. Parse the common CLB errors, and", "intent :param eff: Effect returned from function being tested :param lb_id: ID of", "eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect for a node", "the Effect returns None if 202 is returned. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\",", "bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1],", "StubResponse, stub_json_response, stub_pure_response ) from otter.util.http import APIError from otter.util.pure_http import has_code def", "'code': code, 'details': ''}), code) with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception,", "common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY')", "remove_clb_nodes(self.lb_id, node_ids) response = stub_pure_response( {'validationErrors': {'messages': [ 'Node ids 1,3 are not", "load balancer, which returns a successful result on 202. Parse the common CLB", "'message': \"Cloud load balancers is down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this", "\"\"\" json_responses_and_errs = [ (\"Load Balancer '{0}' has a status of 'BUILD' and", "function being tested :param lb_id: ID of load balancer being accessed in the", "ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the load balancer details", "404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff)", "400))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an", "eff, \"123456\") def test_change_clb_node_default_type(self): \"\"\" Produce a request for modifying a node on", "self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected =", "request for adding nodes to a load balancer, which returns a successful result", "for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body", "\"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq", "of 'unexpected status' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}'", "as :obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What is my LB ID\"\"\" return \"123456\"", "client functions, such as :obj:`change_clb_node`. 
\"\"\" @property def lb_id(self): \"\"\"What is my LB", "eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg = \"Node with id #1234 not", "cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is different because", "import sync_perform from effect.testing import ( EQFDispatcher, const, intent_func, noop, perform_sequence) import six", "eff = get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"),", "= EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\" Produce", "def lb_id(self): \"\"\"What is my LB ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\" Produce", "{'validationErrors': {'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}}, \"random non-json\" ] for body", "offending nodes. \"\"\" node_ids = map(str, range(1, 5)) eff = remove_clb_nodes(self.lb_id, node_ids) response", "\"123456\" def test_change_clb_node(self): \"\"\" Produce a request for modifying a node on a", "+ 2)) removed = map(six.text_type, range(limit)) not_removed = map(six.text_type, range(limit, limit + 2))", "'nodes!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)]", "[ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!')", "(log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure", "intent_func, noop, perform_sequence) import six from twisted.trial.unittest import SynchronousTestCase from otter.cloud_client import service_request", "'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('',", "lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}'", "] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed))", "CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions, such as :obj:`change_clb_node`. \"\"\" @property def", "\"\"\" :return: Expected effect for a node removal request. \"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "_: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for", "lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg =", "(\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked as deleted.\", 410,", "and :class:`NoSuchCLBNodeError`. 
\"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request(", "nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes')", "test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error indicating that some of the nodes", "self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB errors.", "as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code, body=resp[1], method='method', url='original/request/URL')) class", "and the Effect returns None if 202 is returned. \"\"\" eff = remove_clb_nodes(self.lb_id,", "\"123456\") def test_change_clb_node_default_type(self): \"\"\" Produce a request for modifying a node on a", "data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('',", "HTTP response code is bubbled up as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\",", ":func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...} \"\"\" expected =", "remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce:", "has a status of 'BROKEN' \" \"and is considered immutable.\"), 'code': 422}), 422),", "{\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): \"\"\" Parses regular CLB errors", "2)): \"\"\" :return: Expected effect for a node removal request. \"\"\" return service_request(", "expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25))", "nodes detected. 
One or more nodes already \" \"configured on load balancer.\") duplicate_nodes", "returns setting inside {\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings", "map(str, range(limit + 2)) removed = map(six.text_type, range(limit)) not_removed = map(six.text_type, range(limit, limit", "[ (expected.intent, lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq,", "error message\", 404), stub_pure_response(\"random repose error message\", 413) ] for resp in bad_resps:", "stub_pure_response( json.dumps({ 'message': (\"The load balancer is deleted and considered \" \"immutable\"), 'code':", "self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg = \"Nodes must not exceed", "} }), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit", "[\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response", "(self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes``", "'{0}' has a status of 'PENDING_DELETE' and is \" \"considered immutable.\", 422, CLBDeletedError),", "cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\")", "the common CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase:", "202 is returned. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent,", "for modifying a node on a load balancer, which returns a successful result", "3 } body = {\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body),", "nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\":", "2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with", "bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}' has a status of", "'code': 413}), 413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose error message\", 413)", "failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def test_change_clb_node_default_type(self): \"\"\" Produce a request for modifying", "CLB errors and raises corresponding exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\",", "found for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404)", "\"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is bubbled up", "node on a load balancer, which returns a successful result on 202. Parse", "'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff),", "node on a load balancer with the default type, which returns a successful", "status' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a", "deleted state, pending update, etc. are handled. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"])", "= map(six.text_type, range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq = [", "add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success", "if 202 is returned. 
\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [", "intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT,", "eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed are bubbled up as", "from function being tested :param lb_id: ID of load balancer being accessed in", "self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is bubbled", "immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\",", "node_limit=25)) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1,", "\"\"\" Produce a request for modifying a node on a load balancer with", "effect import sync_perform from effect.testing import ( EQFDispatcher, const, intent_func, noop, perform_sequence) import", "import service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError,", "SynchronousTestCase from otter.cloud_client import service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError,", "APIError from otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert that", "json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\"", "not an over limit message\", 'code': 413}), 413), stub_pure_response(\"random repose error message\", 404),", "[\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq, eff)", "repose over_limit = stub_pure_response( json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\":", "assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def", "= change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node':", "deleted and considered \" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load", "result on 202. 
\"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request(", "perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self): \"\"\"Returns all the", "not_removed = map(six.text_type, range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq =", "dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception,", "(log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all", "test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common CLB errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object", "expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) #", "Tests for CLB client functions, such as :obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What", "\"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT',", "can't be parsed are bubbled up as an APIError.\"\"\" error_bodies = [ {'validationErrors':", "a status of 'BUILD' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer", "cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures", "422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff)", "removed = map(six.text_type, range(limit)) not_removed = map(six.text_type, range(limit, limit + 2)) eff =", "errors bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}' has a status", "CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes)", "eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any", "balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent,", "from otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response ) from otter.util.http import APIError from", "\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class", "and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status", 
"dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception,", "load balancer, which returns a successful result on a 202. Parse the common", "400) response2 = stub_pure_response({}, 202) seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2))", "[\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff)", "balancer.\" limit = stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent,", "\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\", \"delay\":", "\"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype and atom URL and", "'bar'}, {'validationErrors': {'messages': []}}, \"random non-json\" ] for body in error_bodies: eff =", "None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for a", "(log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses", "\"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {},", "test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it being in a deleted state, pending", "errors about it being in a deleted state, pending update, etc. 
are handled.", "stub_pure_response({}, 202) seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff),", "return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A", "'message': (\"The load balancer is deleted and considered \" \"immutable\"), 'code': 404}), 404),", "success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A DELETE request is sent, and the Effect returns", "= [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"},", "response code is bubbled up as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"])", "self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed, removed)) def test_get_clbs(self):", "\"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT", "msg, 'code': 404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm:", "body = {'nodes': 'nodes!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body),", "413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id)))", "common CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test", "= stub_pure_response( json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\":", "413}), 413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose error message\", 413) ]", "202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\" Produce a request for adding", "stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses", "node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight':", "test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed are bubbled up as an APIError.\"\"\"", "load balancer is deleted and considered immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\",", "eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is bubbled up as", "\"\"\" Calls `cf.read_entries` with CLB servicetype and atom URL and returns the feed", "self.assertRaises(CLBDuplicateNodesError) as cm: 
sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg", "a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body =", "self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff,", "seq = [ (self.expected_node_removal_req(node_ids).intent, service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def", "the offending nodes. \"\"\" node_ids = map(str, range(1, 5)) eff = remove_clb_nodes(self.lb_id, node_ids)", "eff), [\"feed1\"]) def test_error_handling(self): \"\"\" Parses regular CLB errors and raises corresponding exceptions", "lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self,", "202)) # NoSuchCLBNode failure msg = \"Node with id #1234 not found for", "expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting", "\"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent assert_parses_common_clb_errors( self, svc_intent, get_clb_node_feed(\"12\",", "80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81,", "CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def", "params={'id': map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A DELETE request is sent, and", "CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_UPDATE' and is \" \"considered", "of 'PENDING_DELETE' and is \" \"considered immutable.\", 422, CLBDeletedError), (\"The load balancer is", "stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}' has a status of 'BROKEN' \" \"and", "= change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition':", "not exceed 25 per load balancer.\" limit = stub_pure_response( json.dumps({'message': msg, 'code': 413}),", "\"Node with id #1234 not found for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response(", "eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError,", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self):", "otter.test.cloud_client.test_init import log_intent, service_request_eqf from otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response ) from", "= map(str, range(limit + 2)) removed = map(six.text_type, 
range(limit)) not_removed = map(six.text_type, range(limit,", "the common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent,", "= stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with", "is retried without the offending nodes. \"\"\" node_ids = map(str, range(1, 5)) eff", "eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is different because it's produced by", "\"Nodes must not exceed 25 per load balancer.\" limit = stub_pure_response( json.dumps({'message': msg,", "object :param intent: expected ``ServiceRequest`` intent :param eff: Effect returned from function being", "CLBDuplicateNodesError failure msg = (\"Duplicate nodes detected. One or more nodes already \"", "inside {\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = {", "test_error_handling(self): \"\"\" Parses regular CLB errors and raises corresponding exceptions \"\"\" svc_intent =", "a load balancer, which returns a successful result on a 202. Parse the", "repose error message\", 404), stub_pure_response(\"random repose error message\", 413) ] for resp in", "\"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def", "eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\" Produce a request for adding nodes to", "weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type':", "perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed are bubbled", "a status of 'BROKEN' \" \"and is considered immutable.\"), 'code': 422}), 422), stub_pure_response(", "'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq = [ (expected.intent, lambda i: stub_json_response({},", "test_change_clb_node(self): \"\"\" Produce a request for modifying a node on a load balancer,", "One or more nodes already \" \"configured on load balancer.\") duplicate_nodes = stub_pure_response(", "msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg, 'code': code, 'details': ''}), code) with testcase.assertRaises(err)", "map(six.text_type, range(limit)) not_removed = map(six.text_type, range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids)", "{'validationErrors': {'messages': []}}, \"random non-json\" ] for body in error_bodies: eff = remove_clb_nodes(self.lb_id,", "and returns the feed part of the result \"\"\" from otter.cloud_client.clb import cf", "\"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\",", "def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it being in a deleted state,", "(self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB", "nodes=nodes) expected = service_request( 
ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq", "to a load balancer, which returns a successful result on a 202. Parse", "\"\"\" A DELETE request is sent, and the Effect returns None if 202", "(\"Load Balancer '{0}' has a status of 'unexpected status' and is \" \"considered", "[\"feed1\"]) def test_error_handling(self): \"\"\" Parses regular CLB errors and raises corresponding exceptions \"\"\"", "'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202))", ":param intent: expected ``ServiceRequest`` intent :param eff: Effect returned from function being tested", "i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}),", "CLB returns an error indicating that some of the nodes are invalid, the", "in bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual( cm.exception, APIError(headers={}, code=resp[0].code,", "and raises ``CLBPartialNodesRemoved`` with remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str,", "self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries`", "test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype and atom URL and returns the", "eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError,", "status of 'PENDING_DELETE' and is \" \"considered immutable.\", 422, CLBDeletedError), (\"The load balancer", "load balancer is deleted and considered \" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({", "otter.util.http import APIError from otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\"", "= {\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual(", "for adding nodes to a load balancer, which returns a successful result on", "\"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes':", "seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error indicating that some", "'{0}' has a status of 'BROKEN' \" \"and is considered immutable.\"), 'code': 422}),", "'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3", "assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and", "map(str, range(1, 5)) eff = remove_clb_nodes(self.lb_id, node_ids) response = stub_pure_response( {'validationErrors': {'messages': [", "= [ (expected.intent, lambda i: stub_json_response({}, 202, {})), 
(log_intent('request-add-clb-nodes', {}), lambda _: None)]", "\"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\": settings}", "lb_id=six.text_type(lb_id))) # OverLimit Retry is different because it's produced by repose over_limit =", "on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher =", "the load balancer details from the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET',", "intent: expected ``ServiceRequest`` intent :param eff: Effect returned from function being tested :param", "[{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\":", "stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError)", "balancer is deleted and considered \" \"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message':", "] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error", "lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self):", "self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def test_add_clb_nodes(self): \"\"\" Produce a request for adding nodes", "req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected = service_request(", "assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code is", "and considered immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer is", "errors and raises corresponding exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={},", "get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import ServiceType from otter.test.cloud_client.test_init import log_intent, service_request_eqf", "tested :param lb_id: ID of load balancer being accessed in the function being", "testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is different because it's produced by repose", "load balancer details from the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')", "the feed part of the result \"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\",", "json.dumps({'message': msg, 'code': 404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as", "# Ignored errors bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}' has", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10,", "= get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, 
\"request-get-clb-node-feed\"), const(([\"feed1\"],", "(self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\"", "import APIError from otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert", "success_pred=has_code(202)) # success seq = [ (expected.intent, lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes',", "stub_pure_response(None, 202)) # NoSuchCLBNode failure msg = \"Node with id #1234 not found", "self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff,", "settings = { \"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body", "\" \"and is considered immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({ 'message': (\"The load", "= service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq = [", "Balancer '{0}' has a status of 'PENDING_UPDATE' and is \" \"considered immutable.\", 422,", "CLBImmutableError), (\"Load Balancer '{0}' has a status of 'unexpected status' and is \"", "'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this is not an over limit message\",", "\"ENABLED\"}, {\"address\": \"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}]", "404), stub_pure_response(\"random repose error message\", 413) ] for resp in bad_resps: with testcase.assertRaises(APIError)", "CLBDeletedError), ] for msg, code, err in json_responses_and_errs: msg = msg.format(lb_id) resp =", "test_change_clb_node_default_type(self): \"\"\" Produce a request for modifying a node on a load balancer", "\"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\"", "nodes are invalid, the request is retried without the offending nodes. \"\"\" node_ids", "is returned. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({},", "'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([(", "self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns", "422), stub_pure_response( json.dumps({ 'message': (\"The load balancer is deleted and considered \" \"immutable\"),", "only CLB_BATCH_DELETE_LIMIT nodes and raises ``CLBPartialNodesRemoved`` with remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT", "the effect produced performs the common CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`,", "Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [ stub_pure_response( json.dumps({ 'message': (\"Load Balancer", "type, which returns a successful result on 202. 
\"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234',", "422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm: sync_perform(dispatcher, eff) self.assertEqual(", "'message': \"this is not an over limit message\", 'code': 413}), 413), stub_pure_response(\"random repose", "{'messages': []}}, \"random non-json\" ] for body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\",", "my LB ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\" Produce a request for modifying", "not found.\", 404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is", "{'messages': ['bar']}}, {'messages': 'bar'}, {'validationErrors': {'messages': []}}, \"random non-json\" ] for body in", "202)) def test_add_clb_nodes(self): \"\"\" Produce a request for adding nodes to a load", "(expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def", "is not an over limit message\", 'code': 413}), 413), stub_pure_response(\"random repose error message\",", "perform_sequence, seq, eff) def test_remove_clb_nodes_retry_on_some_invalid_nodes(self): \"\"\" When CLB returns an error indicating that", "are not a part of your loadbalancer']}}, 400) response2 = stub_pure_response({}, 202) seq", "def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert that the effect produced performs the", "msg = (\"Duplicate nodes detected. One or more nodes already \" \"configured on", "_: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg = (\"Duplicate", "over_limit = stub_pure_response( json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\",", "node_ids = map(str, range(limit + 2)) removed = map(six.text_type, range(limit)) not_removed = map(six.text_type,", ") from otter.util.http import APIError from otter.util.pure_http import has_code def assert_parses_common_clb_errors(testcase, intent, eff,", "over limit message\", 'code': 413}), 413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose", "error indicating that some of the nodes are invalid, the request is retried", "ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked as deleted.\", 410, CLBDeletedError), ] for", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase):", "\"details\": \"Error Details...\" } }), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff)", "get_clbs() body = {'loadBalancers': 'lbs!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clbs',", "stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError)", "cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): \"\"\" Parses", "202))), ] result = perform_sequence(seq, eff) 
self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB", "for body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent,", "nodes}, success_pred=has_code(202)) # success seq = [ (expected.intent, lambda i: stub_json_response({}, 202, {})),", "with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all", "import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\",", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) #", "map(str, nodes)}, success_pred=has_code(202)) def test_remove_clb_nodes_success(self): \"\"\" A DELETE request is sent, and the", "handled. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def", ":obj:`change_clb_node`. \"\"\" @property def lb_id(self): \"\"\"What is my LB ID\"\"\" return \"123456\" def", "eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it being in", "{})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError", "stub_pure_response( json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error", "url='original/request/URL')) class CLBClientTests(SynchronousTestCase): \"\"\" Tests for CLB client functions, such as :obj:`change_clb_node`. \"\"\"", "expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234'))", "limit message\", 'code': 413}), 413), stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose error", "json.dumps({'message': msg, 'code': code, 'details': ''}), code) with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))],", "result on 202. Parse the common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff =", "invalid, the request is retried without the offending nodes. 
\"\"\" node_ids = map(str,", "\"\"\" @property def lb_id(self): \"\"\"What is my LB ID\"\"\" return \"123456\" def test_change_clb_node(self):", "``CLBPartialNodesRemoved`` with remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit +", "\"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'unexpected status'", "def test_get_clbs(self): \"\"\"Returns all the load balancer details from the LBs endpoint.\"\"\" expected", "'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for a LB.\"\"\" req =", "test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected", "that some of the nodes are invalid, the request is retried without the", "EQFDispatcher, const, intent_func, noop, perform_sequence) import six from twisted.trial.unittest import SynchronousTestCase from otter.cloud_client", "lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB", "ID of load balancer being accessed in the function being tested \"\"\" json_responses_and_errs", "\"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes) expected =", "raises ``CLBPartialNodesRemoved`` with remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit", "map(six.text_type, range(limit, limit + 2)) eff = remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent,", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}},", "service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id), node_id=u'1234')) #", "Produce a request for modifying a node on a load balancer with the", "has a status of 'PENDING_UPDATE' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load", "= remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random", "cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS,", "{}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): \"\"\"", "Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\": \"1.1.1.1\",", "'Node ids 1,3 are not a part of your loadbalancer']}}, 400) response2 =", "considered immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({ 'message': (\"The load balancer is deleted", "a successful result on a 202. 
Parse the common CLB errors, and a", "= remove_clb_nodes(self.lb_id, node_ids) seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as", "None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected", "settings} seq = [ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)),", "is bubbled up as an APIError.\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq =", "on a load balancer with the default type, which returns a successful result", "accessed in the function being tested \"\"\" json_responses_and_errs = [ (\"Load Balancer '{0}'", "# success seq = [ (expected.intent, lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}),", ":class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS,", "= get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq", "= perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it", "service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common", "status of 'BUILD' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}'", "balancer, which returns a successful result on 202. 
Parse the common CLB errors,", "in json_responses_and_errs: msg = msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg, 'code': code, 'details':", "of load balancer being accessed in the function being tested \"\"\" json_responses_and_errs =", "def test_change_clb_node_default_type(self): \"\"\" Produce a request for modifying a node on a load", "= [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ] result = perform_sequence(seq, eff) self.assertIs(result, None) def", "perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is different because it's", "it's produced by repose over_limit = stub_pure_response( json.dumps({ \"overLimit\": { \"message\": \"OverLimit Retry...\",", "\"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2)) removed = map(six.text_type,", "ids 1,3 are not a part of your loadbalancer']}}, 400) response2 = stub_pure_response({},", "expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg = \"Node", "get_clbs, remove_clb_nodes) from otter.constants import ServiceType from otter.test.cloud_client.test_init import log_intent, service_request_eqf from otter.test.utils", ":class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected", "10, \"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\": settings} seq = [ (expected.intent, const(stub_json_response(body))),", "removes only CLB_BATCH_DELETE_LIMIT nodes and raises ``CLBPartialNodesRemoved`` with remaining nodes \"\"\" limit =", "as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the common", "= { \"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body =", "\"1.1.1.2\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\": \"1.1.1.5\", \"port\": 81, \"condition\": \"ENABLED\"}] eff =", "\"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" } }), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent,", "body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the", "indicating that some of the nodes are invalid, the request is retried without", "json.dumps({'message': msg, 'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as", "413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff)", "code, err in json_responses_and_errs: msg = msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg, 'code':", "\"\"\":func:`get_clb_nodes` returns all the nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected =", "self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id), node_limit=25)) # all the", "= remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) 
seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence,", "service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id),", "err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is different because it's produced by repose over_limit", "has a status of 'BUILD' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load", "None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it being in a deleted", "Details...\" } }), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception,", "(self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved(", "of the result \"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff =", "Common CLB errors about it being in a deleted state, pending update, etc.", "\"Cloud load balancers is down\", 'code': 500}), 500), stub_pure_response( json.dumps({ 'message': \"this is", "''}), code) with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) #", "a request for adding nodes to a load balancer, which returns a successful", "remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2)) removed", "import six from twisted.trial.unittest import SynchronousTestCase from otter.cloud_client import service_request from otter.cloud_client.clb import", "duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))])", "'code': 413}), 413) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(limit))]) with self.assertRaises(CLBNodeLimitError) as cm: sync_perform(dispatcher,", "must not exceed 25 per load balancer.\" limit = stub_pure_response( json.dumps({'message': msg, 'code':", "(\"Duplicate nodes detected. One or more nodes already \" \"configured on load balancer.\")", "Balancer '{0}' has a status of 'unexpected status' and is \" \"considered immutable.\",", "seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed are bubbled up", "\"\"\" Common CLB errors about it being in a deleted state, pending update,", "testcase: Test object :param intent: expected ``ServiceRequest`` intent :param eff: Effect returned from", "def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect for a node removal request.", "for modifying a node on a load balancer with the default type, which", "= (\"Duplicate nodes detected. 
One or more nodes already \" \"configured on load", "for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype and atom", "eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node':", "\"OverLimit Retry...\", \"code\": 413, \"retryAfter\": \"2015-06-13T22:30:10Z\", \"details\": \"Error Details...\" } }), 413) with", "ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq = [ (expected.intent, lambda", "@property def lb_id(self): \"\"\"What is my LB ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\"", "not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked as deleted.\", 410, CLBDeletedError), ]", "410, CLBDeletedError), ] for msg, code, err in json_responses_and_errs: msg = msg.format(lb_id) resp", "= EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError) as cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, NoSuchCLBNodeError(msg,", "5)) eff = remove_clb_nodes(self.lb_id, node_ids) response = stub_pure_response( {'validationErrors': {'messages': [ 'Node ids", "\"this is not an over limit message\", 'code': 413}), 413), stub_pure_response(\"random repose error", "NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The loadbalancer is marked as deleted.\",", "testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [ stub_pure_response( json.dumps({", "error message\", 413) ] for resp in bad_resps: with testcase.assertRaises(APIError) as cm: perform_sequence([(intent,", "= \"Nodes must not exceed 25 per load balancer.\" limit = stub_pure_response( json.dumps({'message':", "a status of 'unexpected status' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load", "(\"Load Balancer '{0}' has a status of 'PENDING_DELETE' and is \" \"considered immutable.\",", "parses the common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self,", "service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq = [ (expected.intent,", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'POST', 'loadbalancers/{0}/nodes'.format(self.lb_id), data={'nodes': nodes}, success_pred=has_code(202)) # success seq =", "CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\")", "for loadbalancer #{0}\".format( self.lb_id) no_such_node = stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404) dispatcher", "422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_UPDATE' and is \"", "successful result on 202. Parse the common CLB errors, and :class:`NoSuchCLBNodeError`. 
\"\"\" eff", "(self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only", "\"Error Details...\" } }), 413) with testcase.assertRaises(CLBRateLimitError) as cm: perform_sequence([(intent, service_request_eqf(over_limit))], eff) testcase.assertEqual(", "seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ]", "expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\"", "Tests for :func:`get_clb_node_feed` \"\"\" def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype and", "errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def", ":class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected ``ServiceRequest`` intent :param eff:", "atom URL and returns the feed part of the result \"\"\" from otter.cloud_client.clb", "return \"123456\" def test_change_clb_node(self): \"\"\" Produce a request for modifying a node on", "is considered immutable.\"), 'code': 422}), 422), stub_pure_response( json.dumps({ 'message': (\"The load balancer is", "CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed,", "[ (expected.intent, const(stub_json_response(body))), (log_intent('request-get-clb-healthmon', body), noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self):", "stub_pure_response( json.dumps({ 'message': \"this is not an over limit message\", 'code': 413}), 413),", "\" \"considered immutable.\", 422, CLBDeletedError), (\"The load balancer is deleted and considered immutable.\",", "assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert that the effect produced performs the common", "CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected", "\"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self):", "Effect returns None if 202 is returned. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"])", "(\"Load Balancer '{0}' has a status of 'PENDING_UPDATE' and is \" \"considered immutable.\",", "failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def expected_node_removal_req(self, nodes=(1, 2)): \"\"\" :return: Expected effect", "and a :class:`CLBDuplicateNodesError`. 
\"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"}, {\"address\":", "'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None,", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req = get_clbs() body = {'loadBalancers': 'lbs!'}", "((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"])", "a deleted state, pending update, etc. are handled. \"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\",", "(expected.intent, lambda i: stub_json_response({}, 202, {})), (log_intent('request-add-clb-nodes', {}), lambda _: None)] self.assertEqual(perform_sequence(seq, eff),", ":param lb_id: ID of load balancer being accessed in the function being tested", "{'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))])", "get_clb_node_feed(\"12\", \"13\") seq = [ ((\"re\", ServiceType.CLOUD_LOAD_BALANCERS, \"loadbalancers/12/nodes/13.atom\", {}, cf.Direction.NEXT, \"request-get-clb-node-feed\"), const(([\"feed1\"], {\"param\":", "setting inside {\"healthMonitor\": ...} \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings =", "req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the nodes for a LB.\"\"\" req", "exceptions \"\"\" svc_intent = service_request( ServiceType.CLOUD_LOAD_BALANCERS, \"GET\", \"loadbalancers/12/nodes/13.atom\", params={}, json_response=False).intent assert_parses_common_clb_errors( self, svc_intent,", "has a status of 'unexpected status' and is \" \"considered immutable.\", 422, CLBImmutableError),", "cm: sync_perform(dispatcher, eff) self.assertEqual( cm.exception, CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id))) # CLBNodeLimitError failure msg = \"Nodes", "self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and raises", "seq = [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff)", "service_request_eqf(response)), (self.expected_node_removal_req([\"2\", \"4\"]).intent, service_request_eqf(response2)) ] self.assertIs(perform_sequence(seq, eff), None) def test_remove_clb_nodes_partial_success(self): \"\"\" ``remove_clb_nodes`` removes", "from the LBs endpoint.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers') req = get_clbs()", "[ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 200))), ] self.assertRaises(APIError, perform_sequence, seq, eff) def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s", "nodes already \" \"configured on load balancer.\") duplicate_nodes = stub_pure_response( json.dumps({'message': msg, 'code':", "# success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) #", 
"self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about it being in a", "{'validationErrors': {'messages': [ 'Node ids 1,3 are not a part of your loadbalancer']}},", "def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls ``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting inside {\"healthMonitor\": ...}", "eff = remove_clb_nodes(self.lb_id, node_ids) response = stub_pure_response( {'validationErrors': {'messages': [ 'Node ids 1,3", "the nodes for a LB.\"\"\" req = get_clb_nodes(self.lb_id) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET',", "perform_sequence) import six from twisted.trial.unittest import SynchronousTestCase from otter.cloud_client import service_request from otter.cloud_client.clb", "sync_perform from effect.testing import ( EQFDispatcher, const, intent_func, noop, perform_sequence) import six from", "'GET', 'loadbalancers') req = get_clbs() body = {'loadBalancers': 'lbs!'} seq = [ (expected.intent,", "remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response(body, 400))), ] self.assertRaises(APIError, perform_sequence, seq,", "CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2)) removed = map(six.text_type, range(limit)) not_removed =", "noop) ] self.assertEqual( perform_sequence(seq, get_clb_health_monitor(self.lb_id)), settings) def test_get_clb_health_mon_error(self): \"\"\" :func:`get_clb_health_monitor` parses the common", "'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id), self.lb_id) class GetCLBNodeFeedTests(SynchronousTestCase): \"\"\" Tests for :func:`get_clb_node_feed` \"\"\"", "CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes, change_clb_node, get_clb_health_monitor, get_clb_node_feed, get_clb_nodes, get_clbs, remove_clb_nodes) from otter.constants import", "= msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg, 'code': code, 'details': ''}), code) with", "EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg =", "\"immutable\"), 'code': 404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load balancers is down\", 'code':", "\"\"\" eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"]) seq = [ (self.expected_node_removal_req().intent, service_request_eqf(stub_pure_response({}, 202))), ]", "None)] self.assertEqual(perform_sequence(seq, eff), (StubResponse(202, {}), {})) # CLBDuplicateNodesError failure msg = (\"Duplicate nodes", ":class:`CLBDescription`, :class:`NoSuchCLBError`, :class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected ``ServiceRequest``", "being accessed in the function being tested \"\"\" json_responses_and_errs = [ (\"Load Balancer", "ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}}, success_pred=has_code(202)) dispatcher =", "``ServiceRequest`` intent :param eff: Effect returned from function being tested :param lb_id: ID", "stub_pure_response(\"random repose error message\", 404), stub_pure_response(\"random repose error message\", 413) ] for resp", 
"service_request_eqf(over_limit))], eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [", "CLB errors. \"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') assert_parses_common_clb_errors( self, expected.intent, get_clb_health_monitor(self.lb_id),", "result = perform_sequence(seq, eff) self.assertIs(result, None) def test_remove_clb_nodes_handles_standard_clb_errors(self): \"\"\" Common CLB errors about", "'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher", "node_id=u'1234')) # all the common failures assert_parses_common_clb_errors(self, expected.intent, eff, \"123456\") def test_change_clb_node_default_type(self): \"\"\"", "about it being in a deleted state, pending update, etc. are handled. \"\"\"", "is \" \"considered immutable.\", 422, CLBDeletedError), (\"The load balancer is deleted and considered", "node_ids = map(str, range(1, 5)) eff = remove_clb_nodes(self.lb_id, node_ids) response = stub_pure_response( {'validationErrors':", "DELETE request is sent, and the Effect returns None if 202 is returned.", "def test_remove_clb_nodes_random_400(self): \"\"\"Random 400s that can't be parsed are bubbled up as an", "[ stub_pure_response( json.dumps({ 'message': (\"Load Balancer '{0}' has a status of 'BROKEN' \"", "change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50) expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING',", "import has_code def assert_parses_common_clb_errors(testcase, intent, eff, lb_id): \"\"\" Assert that the effect produced", "\"2\"]) assert_parses_common_clb_errors( self, self.expected_node_removal_req().intent, eff, \"123456\") def test_remove_clb_nodes_non_202(self): \"\"\"Any random HTTP response code", "the result \"\"\" from otter.cloud_client.clb import cf self.patch(cf, \"read_entries\", intent_func(\"re\")) eff = get_clb_node_feed(\"12\",", "422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'unexpected status' and is", "[ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception,", "Balancer '{0}' has a status of 'BROKEN' \" \"and is considered immutable.\"), 'code':", "`cf.read_entries` with CLB servicetype and atom URL and returns the feed part of", "twisted.trial.unittest import SynchronousTestCase from otter.cloud_client import service_request from otter.cloud_client.clb import ( CLBDeletedError, CLBDuplicateNodesError,", "'details': ''}), code) with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id)))", "returns a successful result on 202. 
Parse the common CLB errors, and :class:`NoSuchCLBNodeError`.", "returns an error indicating that some of the nodes are invalid, the request", "otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response ) from otter.util.http import APIError from otter.util.pure_http", "\"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\" expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors(", "on a load balancer, which returns a successful result on 202. Parse the", "{ \"type\": \"CONNECT\", \"delay\": 10, \"timeout\": 10, \"attemptsBeforeDeactivation\": 3 } body = {\"healthMonitor\":", "exceed 25 per load balancer.\" limit = stub_pure_response( json.dumps({'message': msg, 'code': 413}), 413)", "(\"Load balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer is not ACTIVE\", 422, CLBNotActiveError), (\"The", "404}), 404), stub_pure_response( json.dumps({ 'message': \"Cloud load balancers is down\", 'code': 500}), 500),", "testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry is", "seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clb-nodes', body), lambda _: None)] self.assertEqual(perform_sequence(seq,", "stub_pure_response(\"random repose error message\", 413) ] for resp in bad_resps: with testcase.assertRaises(APIError) as", "ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/nodes') assert_parses_common_clb_errors( self, expected.intent, get_clb_nodes(self.lb_id), \"123456\") def test_get_clb_health_mon(self): \"\"\" :func:`get_clb_health_monitor` calls", "import ( CLBDeletedError, CLBDuplicateNodesError, CLBImmutableError, CLBNodeLimitError, CLBNotActiveError, CLBPartialNodesRemoved, CLBRateLimitError, CLB_BATCH_DELETE_LIMIT, NoSuchCLBError, NoSuchCLBNodeError, add_clb_nodes,", "from effect.testing import ( EQFDispatcher, const, intent_func, noop, perform_sequence) import six from twisted.trial.unittest", "success dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode", "'PRIMARY'}}, success_pred=has_code(202)) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) def", "data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}}, success_pred=has_code(202)) # success dispatcher = EQFDispatcher([(", "from otter.test.cloud_client.test_init import log_intent, service_request_eqf from otter.test.utils import ( StubResponse, stub_json_response, stub_pure_response )", "balancer is deleted and considered immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\", 404,", "i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes`", "with testcase.assertRaises(err) as cm: perform_sequence([(intent, service_request_eqf(resp))], eff) testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id))) # OverLimit Retry", "being in a deleted state, pending update, etc. are handled. 
\"\"\" eff =", "with remaining nodes \"\"\" limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2))", "= {'loadBalancers': 'lbs!'} seq = [ (expected.intent, lambda i: stub_json_response(body)), (log_intent('request-list-clbs', body), lambda", "a status of 'PENDING_DELETE' and is \" \"considered immutable.\", 422, CLBDeletedError), (\"The load", "errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50, _type='SECONDARY') expected =", "202. Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\":", "'GET', 'loadbalancers/123456/nodes') body = {'nodes': 'nodes!'} seq = [ (expected.intent, lambda i: stub_json_response(body)),", "import ServiceType from otter.test.cloud_client.test_init import log_intent, service_request_eqf from otter.test.utils import ( StubResponse, stub_json_response,", "202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual( ce.exception, CLBPartialNodesRemoved( six.text_type(self.lb_id), not_removed,", "\" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a status of 'PENDING_UPDATE'", "[]}}, \"random non-json\" ] for body in error_bodies: eff = remove_clb_nodes(self.lb_id, [\"1\", \"2\"])", "the common CLB errors, and :class:`NoSuchCLBNodeError`. \"\"\" eff = change_clb_node(lb_id=self.lb_id, node_id='1234', condition=\"DRAINING\", weight=50,", "stub_pure_response( json.dumps({'message': msg, 'code': 404}), 404) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(no_such_node))]) with self.assertRaises(NoSuchCLBNodeError)", "loadbalancer is marked as deleted.\", 410, CLBDeletedError), ] for msg, code, err in", "errors, and a :class:`CLBDuplicateNodesError`. \"\"\" nodes = [{\"address\": \"1.1.1.1\", \"port\": 80, \"condition\": \"ENABLED\"},", "{}), {})) # CLBDuplicateNodesError failure msg = (\"Duplicate nodes detected. One or more", "a load balancer with the default type, which returns a successful result on", "def test_calls_read_entries(self): \"\"\" Calls `cf.read_entries` with CLB servicetype and atom URL and returns", "node removal request. 
\"\"\" return service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'DELETE', 'loadbalancers/{}/nodes'.format(self.lb_id), params={'id': map(str, nodes)}, success_pred=has_code(202))", ":class:`CLBRateLimitError`, :class:`APIError` :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object :param intent: expected ``ServiceRequest`` intent :param", "expected = service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'PUT', 'loadbalancers/{0}/nodes/1234'.format(self.lb_id), data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'SECONDARY'}},", "[ 'Node ids 1,3 are not a part of your loadbalancer']}}, 400) response2", "limit = CLB_BATCH_DELETE_LIMIT node_ids = map(str, range(limit + 2)) removed = map(six.text_type, range(limit))", "msg, 'code': 422}), 422) dispatcher = EQFDispatcher([( expected.intent, service_request_eqf(duplicate_nodes))]) with self.assertRaises(CLBDuplicateNodesError) as cm:", "service_request_eqf(stub_pure_response('', 202)))]) self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg = \"Node with", "LB ID\"\"\" return \"123456\" def test_change_clb_node(self): \"\"\" Produce a request for modifying a", "servicetype and atom URL and returns the feed part of the result \"\"\"", "self.assertEqual(sync_perform(dispatcher, eff), stub_pure_response(None, 202)) # NoSuchCLBNode failure msg = \"Node with id #1234", "const(([\"feed1\"], {\"param\": \"2\"}))) ] self.assertEqual(perform_sequence(seq, eff), [\"feed1\"]) def test_error_handling(self): \"\"\" Parses regular CLB", "that the effect produced performs the common CLB error parsing: :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`,", "import ( StubResponse, stub_json_response, stub_pure_response ) from otter.util.http import APIError from otter.util.pure_http import", "body), lambda _: None)] self.assertEqual(perform_sequence(seq, req), 'lbs!') def test_get_clb_nodes(self): \"\"\":func:`get_clb_nodes` returns all the", "msg, code, err in json_responses_and_errs: msg = msg.format(lb_id) resp = stub_pure_response( json.dumps({'message': msg,", "of the nodes are invalid, the request is retried without the offending nodes.", "function being tested \"\"\" json_responses_and_errs = [ (\"Load Balancer '{0}' has a status", "= service_request( ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers/123456/healthmonitor') settings = { \"type\": \"CONNECT\", \"delay\": 10, \"timeout\":", "= [ (self.expected_node_removal_req(removed).intent, service_request_eqf(stub_pure_response({}, 202))), ] with self.assertRaises(CLBPartialNodesRemoved) as ce: perform_sequence(seq, eff) self.assertEqual(", "_: None)] self.assertEqual(perform_sequence(seq, req), 'nodes!') def test_get_clb_nodes_error_handling(self): \"\"\":func:`get_clb_nodes` parses the common CLB errors.\"\"\"", "eff) testcase.assertEqual( cm.exception, CLBRateLimitError(\"OverLimit Retry...\", lb_id=six.text_type(lb_id))) # Ignored errors bad_resps = [ stub_pure_response(", "'PENDING_UPDATE' and is \" \"considered immutable.\", 422, CLBImmutableError), (\"Load Balancer '{0}' has a", "deleted and considered immutable.\", 422, CLBDeletedError), (\"Load balancer not found.\", 404, NoSuchCLBError), (\"LoadBalancer", "json_responses_and_errs = [ (\"Load Balancer '{0}' has a status of 'BUILD' and is", "a part of your loadbalancer']}}, 400) response2 = stub_pure_response({}, 202) seq = [", "detected. 
import json

from effect import sync_perform
from effect.testing import (
    EQFDispatcher, const, intent_func, noop, perform_sequence)

import six

from twisted.trial.unittest import SynchronousTestCase

from otter.cloud_client import cloudfeeds as cf
from otter.cloud_client import service_request
from otter.cloud_client.clb import (
    CLBDeletedError,
    CLBDuplicateNodesError,
    CLBImmutableError,
    CLBNodeLimitError,
    CLBNotActiveError,
    CLBPartialNodesRemoved,
    CLBRateLimitError,
    CLB_BATCH_DELETE_LIMIT,
    NoSuchCLBError,
    NoSuchCLBNodeError,
    add_clb_nodes,
    change_clb_node,
    get_clb_health_monitor,
    get_clb_node_feed,
    get_clb_nodes,
    get_clbs,
    remove_clb_nodes)
from otter.constants import ServiceType
from otter.test.cloud_client.test_init import log_intent, service_request_eqf
from otter.test.utils import (
    StubResponse, stub_json_response, stub_pure_response)
from otter.util.http import APIError
from otter.util.pure_http import has_code


def assert_parses_common_clb_errors(testcase, intent, eff, lb_id):
    """
    Assert that the effect produced performs the common CLB error parsing:
    :class:`CLBImmutableError`, :class:`CLBDescription`, :class:`NoSuchCLBError`,
    :class:`CLBRateLimitError`, :class:`APIError`.

    :param :obj:`twisted.trial.unittest.TestCase` testcase: Test object
    :param intent: expected ``ServiceRequest`` intent
    :param eff: Effect returned from the function being tested
    :param lb_id: ID of the load balancer accessed by the function being tested
    """
    json_responses_and_errs = [
        ("Load Balancer '{0}' has a status of 'BUILD' and is "
         "considered immutable.", 422, CLBImmutableError),
        ("Load Balancer '{0}' has a status of 'PENDING_UPDATE' and is "
         "considered immutable.", 422, CLBImmutableError),
        ("Load Balancer '{0}' has a status of 'PENDING_DELETE' and is "
         "considered immutable.", 422, CLBDeletedError),
        ("The load balancer is deleted and considered immutable.",
         422, CLBDeletedError),
        ("Load balancer not found.", 404, NoSuchCLBError),
        ("LoadBalancer is not ACTIVE", 422, CLBNotActiveError),
        ("The loadbalancer is marked as deleted.", 410, CLBDeletedError),
    ]

    for msg, code, err in json_responses_and_errs:
        msg = msg.format(lb_id)
        resp = stub_pure_response(
            json.dumps({'message': msg, 'code': code, 'details': ''}),
            code)
        with testcase.assertRaises(err) as cm:
            perform_sequence([(intent, service_request_eqf(resp))], eff)
        testcase.assertEqual(cm.exception, err(msg, lb_id=six.text_type(lb_id)))

    # OverLimit Retry is different because it's produced by repose
    over_limit = stub_pure_response(
        json.dumps({
            "overLimit": {
                "message": "OverLimit Retry...",
                "code": 413,
                "retryAfter": "2015-06-13T22:30:10Z",
                "details": "Error Details..."
            }
        }),
        413)
    with testcase.assertRaises(CLBRateLimitError) as cm:
        perform_sequence([(intent, service_request_eqf(over_limit))], eff)
    testcase.assertEqual(
        cm.exception,
        CLBRateLimitError("OverLimit Retry...", lb_id=six.text_type(lb_id)))

    # Ignored errors
    bad_resps = [
        stub_pure_response(
            json.dumps({
                'message': ("Load Balancer '{0}' has a status of 'BROKEN' "
                            "and is considered immutable."),
                'code': 422}),
            422),
        stub_pure_response(
            json.dumps({
                'message': ("The load balancer is deleted and considered "
                            "immutable"),
                'code': 404}),
            404),
        stub_pure_response(
            json.dumps({
                'message': "Cloud load balancers is down",
                'code': 500}),
            500),
        stub_pure_response("random repose error message", 413)
    ]
    for resp in bad_resps:
        with testcase.assertRaises(APIError) as cm:
            perform_sequence([(intent, service_request_eqf(resp))], eff)
        testcase.assertEqual(
            cm.exception,
            APIError(headers={}, code=resp[0].code, body=resp[1],
                     method='method', url='original/request/URL'))


class CLBClientTests(SynchronousTestCase):
    """Tests for the CLB client functions."""

    @property
    def lb_id(self):
        """What is my LB ID"""
        return "123456"

    def test_change_clb_node(self):
        """
        Produce a request for modifying a node on a load balancer, which
        returns a successful result on 202.

        Parse the common CLB errors, and :class:`NoSuchCLBNodeError`.
        """
        eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
                              condition="DRAINING", weight=50,
                              _type='SECONDARY')
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS,
            'PUT',
            'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
            data={'node': {'condition': 'DRAINING',
                           'weight': 50, 'type': 'SECONDARY'}},
            success_pred=has_code(202))

        # Successful response
        dispatcher = EQFDispatcher([(
            expected.intent,
            service_request_eqf(stub_pure_response('', 202)))])
        self.assertEqual(sync_perform(dispatcher, eff),
                         stub_pure_response(None, 202))

        # NoSuchCLBNode failure
        msg = "Node with id #1234 not found for loadbalancer #{0}".format(
            self.lb_id)
        no_such_node = stub_pure_response(
            json.dumps({'message': msg, 'code': 404}), 404)
        dispatcher = EQFDispatcher([(
            expected.intent, service_request_eqf(no_such_node))])
        with self.assertRaises(NoSuchCLBNodeError) as cm:
            sync_perform(dispatcher, eff)
        self.assertEqual(
            cm.exception,
            NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id),
                               node_id=u'1234'))

        # All the common failures
        assert_parses_common_clb_errors(self, expected.intent, eff, "123456")

    def test_change_clb_node_default_type(self):
        """
        Produce a request for modifying a node on a load balancer with the
        default type, which returns a successful result on a 202.
        """
        eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
                              condition="DRAINING", weight=50)
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS,
            'PUT',
            'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
            data={'node': {'condition': 'DRAINING',
                           'weight': 50, 'type': 'PRIMARY'}},
            success_pred=has_code(202))
        dispatcher = EQFDispatcher([(
            expected.intent,
            service_request_eqf(stub_pure_response('', 202)))])
        self.assertEqual(sync_perform(dispatcher, eff),
                         stub_pure_response(None, 202))

    def test_add_clb_nodes(self):
        """
        Produce a request for adding nodes to a load balancer, which returns
        a successful result on a 202.

        Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.
        """
        nodes = [{"address": "1.1.1.1", "port": 80, "condition": "ENABLED"}]
        eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS,
            'POST',
            'loadbalancers/{0}/nodes'.format(self.lb_id),
            data={'nodes': nodes},
            success_pred=has_code(202))

        # Successful response
        dispatcher = EQFDispatcher([(
            expected.intent,
            service_request_eqf(stub_pure_response({}, 202)))])
        self.assertEqual(sync_perform(dispatcher, eff),
                         (StubResponse(202, {}), {}))

        # CLBDuplicateNodesError failure
        msg = ("Duplicate nodes detected. One or more nodes already "
               "configured on load balancer.")
        duplicate_nodes = stub_pure_response(
            json.dumps({'message': msg, 'code': 422}), 422)
        dispatcher = EQFDispatcher([(
            expected.intent, service_request_eqf(duplicate_nodes))])
        with self.assertRaises(CLBDuplicateNodesError) as cm:
            sync_perform(dispatcher, eff)
        self.assertEqual(
            cm.exception,
            CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))

        # CLBNodeLimitError failure
        msg = "Nodes must not exceed 25 per load balancer."
        limit = stub_pure_response(
            json.dumps({'message': msg, 'code': 413}), 413)
        dispatcher = EQFDispatcher([(
            expected.intent, service_request_eqf(limit))])
        with self.assertRaises(CLBNodeLimitError) as cm:
            sync_perform(dispatcher, eff)
        self.assertEqual(
            cm.exception,
            CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id),
                              node_limit=25))

        # All the common failures
        assert_parses_common_clb_errors(self, expected.intent, eff, "123456")

    def expected_node_removal_req(self, nodes=(1, 2)):
        """
        :return: Expected effect for a node removal request.
        """
        return service_request(
            ServiceType.CLOUD_LOAD_BALANCERS,
            'DELETE',
            'loadbalancers/{}/nodes'.format(self.lb_id),
            params={'id': map(str, nodes)},
            success_pred=has_code(202))

    def test_remove_clb_nodes_success(self):
        """
        A DELETE request is sent, and the Effect returns None if 202 is
        returned.
        """
        eff = remove_clb_nodes(self.lb_id, ["1", "2"])
        seq = [
            (self.expected_node_removal_req().intent,
             service_request_eqf(stub_pure_response({}, 202))),
        ]
        self.assertIs(perform_sequence(seq, eff), None)

    def test_remove_clb_nodes_handles_standard_clb_errors(self):
        """
        Common CLB errors about the load balancer being deleted, pending
        update, etc. are handled.
        """
        eff = remove_clb_nodes(self.lb_id, ["1", "2"])
        assert_parses_common_clb_errors(
            self, self.expected_node_removal_req().intent, eff, "123456")

    def test_remove_clb_nodes_non_202(self):
        """Any random HTTP response code is bubbled up as an APIError."""
        eff = remove_clb_nodes(self.lb_id, ["1", "2"])
        seq = [
            (self.expected_node_removal_req().intent,
             service_request_eqf(stub_pure_response({}, 200))),
        ]
        self.assertRaises(APIError, perform_sequence, seq, eff)

    def test_remove_clb_nodes_random_400(self):
        """Random 400 errors are bubbled up as an APIError."""
        error_bodies = [
            {'validationErrors': {'messages': ['bar']}},
            {'messages': 'bar'},
            {'validationErrors': {'messages': []}},
            "random non-json"
        ]
        for body in error_bodies:
            eff = remove_clb_nodes(self.lb_id, ["1", "2"])
            seq = [
                (self.expected_node_removal_req().intent,
                 service_request_eqf(stub_pure_response(body, 400))),
            ]
            self.assertRaises(APIError, perform_sequence, seq, eff)

    def test_remove_clb_nodes_retry_on_some_invalid_nodes(self):
        """
        When CLB returns an error indicating that some of the nodes are
        invalid, the request is retried without the offending nodes.
        """
        node_ids = map(str, range(1, 5))
        eff = remove_clb_nodes(self.lb_id, node_ids)
        response = stub_pure_response(
            {'validationErrors': {'messages': [
                'Node ids 1,3 are not a part of your loadbalancer']}},
            400)
        response2 = stub_pure_response({}, 202)
        seq = [
            (self.expected_node_removal_req(node_ids).intent,
             service_request_eqf(response)),
            (self.expected_node_removal_req(["2", "4"]).intent,
             service_request_eqf(response2))
        ]
        self.assertIs(perform_sequence(seq, eff), None)

    def test_remove_clb_nodes_partial_success(self):
        """
        ``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and
        raises ``CLBPartialNodesRemoved`` with remaining nodes
        """
        limit = CLB_BATCH_DELETE_LIMIT
        node_ids = map(str, range(limit + 2))
        removed = map(six.text_type, range(limit))
        not_removed = map(six.text_type, range(limit, limit + 2))
        eff = remove_clb_nodes(self.lb_id, node_ids)
        seq = [
            (self.expected_node_removal_req(removed).intent,
             service_request_eqf(stub_pure_response({}, 202))),
        ]
        with self.assertRaises(CLBPartialNodesRemoved) as ce:
            perform_sequence(seq, eff)
        self.assertEqual(
            ce.exception,
            CLBPartialNodesRemoved(
                six.text_type(self.lb_id), not_removed, removed))

    def test_get_clbs(self):
        """Returns the 'loadBalancers' part of the LB list endpoint's body."""
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')
        req = get_clbs()
        body = {'loadBalancers': 'lbs!'}
        seq = [
            (expected.intent, lambda i: stub_json_response(body)),
            (log_intent('request-list-clbs', body), lambda _: None)]
        self.assertEqual(perform_sequence(seq, req), 'lbs!')

    def test_get_clb_nodes(self):
        """:func:`get_clb_nodes` returns all the nodes of the load balancer."""
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
            'loadbalancers/123456/nodes')
        req = get_clb_nodes(self.lb_id)
        body = {'nodes': 'nodes!'}
        seq = [
            (expected.intent, lambda i: stub_json_response(body)),
            (log_intent('request-list-clb-nodes', body), lambda _: None)]
        self.assertEqual(perform_sequence(seq, req), 'nodes!')

    def test_get_clb_nodes_error_handling(self):
        """:func:`get_clb_nodes` parses the common CLB errors."""
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
            'loadbalancers/123456/nodes')
        assert_parses_common_clb_errors(
            self, expected.intent, get_clb_nodes(self.lb_id), "123456")

    def test_get_clb_health_mon(self):
        """
        :func:`get_clb_health_monitor` calls
        ``GET .../loadbalancers/lb_id/healthmonitor`` and returns the setting
        inside the 'healthMonitor' key of the response body.
        """
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
            'loadbalancers/123456/healthmonitor')
        settings = {
            "type": "CONNECT",
            "delay": 10,
            "timeout": 10,
            "attemptsBeforeDeactivation": 3
        }
        body = {"healthMonitor": settings}
        seq = [
            (expected.intent, const(stub_json_response(body))),
            (log_intent('request-get-clb-healthmon', body), noop)
        ]
        self.assertEqual(
            perform_sequence(seq, get_clb_health_monitor(self.lb_id)),
            settings)

    def test_get_clb_health_mon_error(self):
        """:func:`get_clb_health_monitor` parses the common CLB errors."""
        expected = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
            'loadbalancers/123456/healthmonitor')
        assert_parses_common_clb_errors(
            self, expected.intent, get_clb_health_monitor(self.lb_id),
            "123456")


class GetCLBNodeFeedTests(SynchronousTestCase):
    """Tests for :func:`get_clb_node_feed`."""

    def test_calls_read_entries(self):
        """
        Calls `cf.read_entries` with CLB servicetype and atom URL and returns
        the feed part of the result
        """
        self.patch(cf, 'read_entries', intent_func("re"))
        eff = get_clb_node_feed('12', '13')
        seq = [
            (("re", ServiceType.CLOUD_LOAD_BALANCERS,
              "loadbalancers/12/nodes/13.atom", {}, cf.Direction.NEXT,
              "request-get-clb-node-feed"),
             const((["feed1"], {"param": "2"})))
        ]
        self.assertEqual(perform_sequence(seq, eff), ["feed1"])

    def test_error_handling(self):
        """
        Parses regular CLB errors and raises corresponding exceptions
        """
        svc_intent = service_request(
            ServiceType.CLOUD_LOAD_BALANCERS, "GET",
            "loadbalancers/12/nodes/13.atom").intent
        assert_parses_common_clb_errors(
            self, svc_intent, get_clb_node_feed('12', '13'), '12')
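For reference, the stub-and-perform pattern these tests repeat can be read in isolation. The following is a hedged sketch, not part of the original suite: it reuses only names already imported above (service_request, EQFDispatcher, service_request_eqf, stub_pure_response, sync_perform, change_clb_node) and mirrors the shape of test_change_clb_node_default_type; the underscore-prefixed variables are illustrative only.

# Sketch: pair the expected ServiceRequest intent with a canned 202 response,
# then perform the effect against that single-entry dispatcher.
_expected = service_request(
    ServiceType.CLOUD_LOAD_BALANCERS, 'PUT',
    'loadbalancers/123456/nodes/1234',
    data={'node': {'condition': 'DRAINING', 'weight': 50, 'type': 'PRIMARY'}},
    success_pred=has_code(202))
_dispatcher = EQFDispatcher([
    (_expected.intent, service_request_eqf(stub_pure_response('', 202)))])
# Resolves to the stubbed (response, body) pair asserted in the tests above.
_result = sync_perform(
    _dispatcher,
    change_clb_node(lb_id='123456', node_id='1234',
                    condition='DRAINING', weight=50))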
import os
import re
import sys
import argparse
import json

import numpy as np
from glob import glob
import cv2

from utils.plot_utils import RandomColor


def parse_args():
    parser = argparse.ArgumentParser(
        description='Monocular 3D Tracking Visualizer',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('set', choices=['gta', 'kitti'])
    parser.add_argument('split', choices=['train', 'val', 'test'],
                        help='Which data split to use in testing')
    parser.add_argument('--session', default='623',
                        help='Name of the session, to separate exp')
    parser.add_argument('--epoch', default='100',
                        help='How many epochs you used to separate exp')
    parser.add_argument('--flag', default='kf3doccdeep_age15_aff0.1_hit0_80m_pd',
                        help='Flags for running evaluation code')
    parser.add_argument('--save_vid', action='store_true', default=False,
                        help='Flags for saving video')
    parser.add_argument('--save_txt', action='store_true', default=False,
                        help='Flags for saving txt')
    parser.add_argument('--dry_run', action='store_true', default=False,
                        help='Show command without running')
    parser.add_argument('--overwrite', action='store_true', default=False,
                        help='Overwrite the output files')
    args = parser.parse_args()
    return args


print(' '.join(sys.argv))
args = parse_args()

if args.set == 'kitti':
    IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(
        **{'SPLIT': args.split, 'SEQ': '{:04d}'})
    re_pattern = re.compile('[0-9]{4}')
else:
    IMAGE_PATH = 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(
        **{'SPLIT': args.split, 'SEQ': '{}'})
    re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])')

SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(
    **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set,
       'SPLIT': args.split})
out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format(
    **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set,
       'SETTING': args.flag})

FONT = cv2.FONT_HERSHEY_SIMPLEX
FOURCC = cv2.VideoWriter_fourcc(*'mp4v')
fps = 15

np.random.seed(777)
rm_color = RandomColor(30)
tid2color = {}


def mkdir(path):
    if not os.path.isdir(path):
        print("Making directory {}".format(path))
        os.makedirs(path)  # Use with care


def gen_result(out_path, out_name, save_vid=False, save_txt=True,
               dry_run=False, overwrite=False):
    print("Reading meta data...")
    info = json.load(open('{}{}.json'.format(out_path, out_name), 'r'))
    if not dry_run:
        mkdir('{}{}/data/'.format(out_path, out_name))

    for seqid in range(len(info)):
        file_seq = re_pattern.search(info[seqid]['filename']).group(0)
        print('Reading {} from {}{}...'.format(file_seq, out_path, out_name))
        if dry_run:
            continue

        seqout = []
        vid_name = '{}{}/data/{}.mp4'.format(out_path, out_name, file_seq)
        txt_name = '{}{}/data/{}.txt'.format(out_path, out_name, file_seq)
        if not overwrite:
            if not os.path.isfile(txt_name) and save_txt:
                pass
            elif not os.path.isfile(vid_name) and save_vid:
                pass
            else:
                print("SKIP running. Generated file {} Found".format(txt_name))
                continue

        if save_vid:
            images = sorted(glob(IMAGE_PATH.format(file_seq)))
            img = cv2.imread(images[0])
            vidsize = (img.shape[1], img.shape[0])  # height, width
            out = cv2.VideoWriter(vid_name, FOURCC, fps, vidsize)

        demoinfo = info[seqid]['frames']
        for idx, frame in enumerate(demoinfo):
            if save_vid:
                img = cv2.imread(images[idx])
                img = cv2.putText(img, str(idx), (20, 30),
                                  cv2.FONT_HERSHEY_COMPLEX, 1,
                                  (180, 180, 180), 2)
            for trk in frame['hypotheses']:
                x1, y1, x2, y2, conf = trk['det_box']
                xc, yc = trk['xc'], trk['yc']
                if save_vid:
                    if trk['id'] not in tid2color:
                        tid2color[trk['id']] = rm_color.get_random_color(scale=255)
                    img = cv2.rectangle(img, (int(xc-1), int(yc-1)),
                                        (int(xc+1), int(yc+1)),
                                        tid2color[trk['id']], 2)
                    img = cv2.rectangle(img, (int(x1), int(y1)),
                                        (int(x2), int(y2)),
                                        tid2color[trk['id']], 4)
                    img = cv2.putText(img, str(int(trk['id'])),
                                      (int(x1), int(y1)),
                                      cv2.FONT_HERSHEY_COMPLEX, 1,
                                      tid2color[trk['id']], 2)
                    img = cv2.putText(img, str(int(trk['depth'])),
                                      (int(x2)-14, int(y2)),
                                      cv2.FONT_HERSHEY_COMPLEX, 0.8,
                                      tid2color[trk['id']], 2)
                if save_txt:
                    '''
                    submit_txt = ' '.join([
                        str(idx),
                        str(int(trk['id'])),
                        'Car',
                        '-1 -1',
                        trk['alpha'],
                        str(x1), str(y1), str(x2), str(y2),
                        trk['dim'],
                        trk['loc'],
                        trk['rot'],
                        str(conf)])
                    '''
                    submit_txt = ' '.join([
                        str(idx),
                        str(int(trk['id'])),
                        'Car',
                        '-1 -1 -10',
                        str(x1), str(y1), str(x2), str(y2),
                        '-1 -1 -1',
                        '-1000 -1000 -1000 -10',
                        str(conf)])
                    #'''
                    submit_txt += '\n'
                    seqout.append(submit_txt)
            if save_vid:
                out.write(img)

        if save_txt:
            print("{} saved.".format(txt_name))
            with open(txt_name, 'w') as f:
                f.writelines(seqout)
        if save_vid:
            print("{} saved.".format(vid_name))
            out.release()


if __name__ == '__main__':
    # Not using out_name, too slow
    output_list = [os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH)
                   if item.endswith('_pd.json')]
    my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep']
    for dir_name in output_list:
        print(dir_name)
        save_vid = args.save_vid
        if save_vid:
            is_in = False
            for ml in my_list:
                is_in = is_in or (ml in dir_name)
            save_vid = is_in
        gen_result(SAVE_PATH, dir_name,
                   save_vid=save_vid,
                   save_txt=args.save_txt,
                   dry_run=args.dry_run,
                   overwrite=args.overwrite)
{}\".format(path)) os.makedirs(path) #", "{} def mkdir(path): if not os.path.isdir(path): print(\"Making directory {}\".format(path)) os.makedirs(path) # Use with", "str(y1), str(x2), str(y2), trk['dim'], trk['loc'], trk['rot'], str(conf)]) ''' submit_txt = ' '.join([ str(idx),", "dry_run: mkdir('{}{}/data/'.format(out_path, out_name)) for seqid in range(len(info)): file_seq = re_pattern.search(info[seqid]['filename']).group(0) print('Reading {} from", "vidsize) demoinfo = info[seqid]['frames'] for idx, frame in enumerate(demoinfo): if save_vid: img =", "description='Monocular 3D Tracking Visualizer', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('set', choices=['gta', 'kitti']) parser.add_argument('split', choices=['train', 'val', 'test'], help='Which", "saved.\".format(txt_name)) with open(txt_name, 'w') as f: f.writelines(seqout) if save_vid: print(\"{} saved.\".format(vid_name)) out.release() if", "not in tid2color: tid2color[trk['id']] = rm_color.get_random_color(scale=255) img = cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)),", "IMAGE_PATH = 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT': args.split, 'SEQ': '{}'}) re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])') SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format( **{'SESS':", "re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])') SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format( **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT':", "continue if save_vid: images = sorted(glob(IMAGE_PATH.format(file_seq))) img = cv2.imread(images[0]) vidsize = (img.shape[1], img.shape[0])", "args.flag}) FONT = cv2.FONT_HERSHEY_SIMPLEX FOURCC = cv2.VideoWriter_fourcc(*'mp4v') fps = 15 np.random.seed(777) rm_color =", "video') parser.add_argument('--save_txt', action='store_true', default=False, help='Flags for saving txt') parser.add_argument('--dry_run', action='store_true', default=False, help='Show command", "as f: f.writelines(seqout) if save_vid: print(\"{} saved.\".format(vid_name)) out.release() if __name__ == '__main__': #", "for trk in frame['hypotheses']: x1, y1, x2, y2, conf = trk['det_box'] xc, yc", "continue seqout = [] vid_name = '{}{}/data/{}.mp4'.format(out_path, out_name, file_seq) txt_name = '{}{}/data/{}.txt'.format(out_path, out_name,", "parse_args() if args.set == 'kitti': IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'}) re_pattern =", "in my_list: is_in = is_in or (ml in dir_name) save_vid = is_in gen_result(SAVE_PATH,", "output files') args = parser.parse_args() return args print(' '.join(sys.argv)) args = parse_args() if", "if trk['id'] not in tid2color: tid2color[trk['id']] = rm_color.get_random_color(scale=255) img = cv2.rectangle(img, (int(xc-1), int(yc-1)),", "from {}{}...'.format(file_seq, out_path, out_name)) if dry_run: continue seqout = [] vid_name = '{}{}/data/{}.mp4'.format(out_path,", "in output_list: print(dir_name) save_vid = args.save_vid if save_vid: is_in = False for ml", "' '.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1 -10', str(x1), str(y1), str(x2), str(y2), '-1", "'test'], help='Which data split to use in testing') parser.add_argument('--session', default='623', help='Name of the", "split to use in testing') parser.add_argument('--session', default='623', help='Name of the session, to separate", "to use in testing') parser.add_argument('--session', default='623', help='Name of the session, to separate exp')", 
"img = cv2.imread(images[0]) vidsize = (img.shape[1], img.shape[0]) # height, width out = cv2.VideoWriter(vid_name,", "in range(len(info)): file_seq = re_pattern.search(info[seqid]['filename']).group(0) print('Reading {} from {}{}...'.format(file_seq, out_path, out_name)) if dry_run:", "= args.save_vid if save_vid: is_in = False for ml in my_list: is_in =", "str(x2), str(y2), '-1 -1 -1', '-1000 -1000 -1000 -10', str(conf)]) #''' submit_txt +=", "== 'kitti': IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'}) re_pattern = re.compile('[0-9]{4}') else: IMAGE_PATH", "the session, to separate exp') parser.add_argument('--epoch', default='100', help='How many epochs you used to", "overwrite: if not os.path.isfile(txt_name) and save_txt: pass elif not os.path.isfile(vid_name) and save_vid: pass", "parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite the output files') args = parser.parse_args() return args print('", "= ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep'] for dir_name in output_list: print(dir_name) save_vid =", "(int(x2), int(y2)), tid2color[trk['id']], 4) img = cv2.putText(img, str(int(trk['id'])), (int(x1), int(y1)), cv2.FONT_HERSHEY_COMPLEX, 1, tid2color[trk['id']],", "if save_txt: ''' submit_txt = ' '.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1', trk['alpha'],", "'-1 -1', trk['alpha'], str(x1), str(y1), str(x2), str(y2), trk['dim'], trk['loc'], trk['rot'], str(conf)]) ''' submit_txt", "txt') parser.add_argument('--dry_run', action='store_true', default=False, help='Show command without running') parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite the", "args.set, 'SPLIT': args.split}) out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format( **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SETTING':", "for running evaluation code') parser.add_argument('--save_vid', action='store_true', default=False, help='Flags for saving video') parser.add_argument('--save_txt', action='store_true',", "RandomColor def parse_args(): parser = argparse.ArgumentParser( description='Monocular 3D Tracking Visualizer', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('set', choices=['gta',", "args.split}) out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format( **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SETTING': args.flag}) FONT", "cv2.FONT_HERSHEY_COMPLEX, 1, (180, 180, 180), 2) for trk in frame['hypotheses']: x1, y1, x2,", "= info[seqid]['frames'] for idx, frame in enumerate(demoinfo): if save_vid: img = cv2.imread(images[idx]) img", "print(\"{} saved.\".format(txt_name)) with open(txt_name, 'w') as f: f.writelines(seqout) if save_vid: print(\"{} saved.\".format(vid_name)) out.release()", "'{}{}/data/{}.mp4'.format(out_path, out_name, file_seq) txt_name = '{}{}/data/{}.txt'.format(out_path, out_name, file_seq) if not overwrite: if not", "(img.shape[1], img.shape[0]) # height, width out = cv2.VideoWriter(vid_name, FOURCC, fps, vidsize) demoinfo =", "= argparse.ArgumentParser( description='Monocular 3D Tracking Visualizer', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('set', choices=['gta', 'kitti']) parser.add_argument('split', choices=['train', 'val',", "print('Reading {} from {}{}...'.format(file_seq, out_path, out_name)) if dry_run: continue seqout = [] vid_name", "save_vid: images = sorted(glob(IMAGE_PATH.format(file_seq))) img = cv2.imread(images[0]) vidsize = 
(img.shape[1], img.shape[0]) # height,", "str(x2), str(y2), trk['dim'], trk['loc'], trk['rot'], str(conf)]) ''' submit_txt = ' '.join([ str(idx), str(int(trk['id'])),", "''' submit_txt = ' '.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1 -10', str(x1), str(y1),", "json.load(open('{}{}.json'.format(out_path, out_name), 'r')) if not dry_run: mkdir('{}{}/data/'.format(out_path, out_name)) for seqid in range(len(info)): file_seq", "parser = argparse.ArgumentParser( description='Monocular 3D Tracking Visualizer', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('set', choices=['gta', 'kitti']) parser.add_argument('split', choices=['train',", "epochs you used to separate exp') parser.add_argument('--flag', default='kf3doccdeep_age15_aff0.1_hit0_80m_pd', help='Flags for running evaluation code')", "overwrite=False): print(\"Reading meta data...\") info = json.load(open('{}{}.json'.format(out_path, out_name), 'r')) if not dry_run: mkdir('{}{}/data/'.format(out_path,", "ml in my_list: is_in = is_in or (ml in dir_name) save_vid = is_in", "parser.add_argument('--dry_run', action='store_true', default=False, help='Show command without running') parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite the output", "cv2.FONT_HERSHEY_COMPLEX, 0.8, tid2color[trk['id']], 2) if save_txt: ''' submit_txt = ' '.join([ str(idx), str(int(trk['id'])),", "'__main__': # Not using out_name, too slow output_list = [os.path.splitext(item)[0] for item in", "cv2.imread(images[0]) vidsize = (img.shape[1], img.shape[0]) # height, width out = cv2.VideoWriter(vid_name, FOURCC, fps,", "= 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT': args.split, 'SEQ': '{}'}) re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])') SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format( **{'SESS': args.session,", "width out = cv2.VideoWriter(vid_name, FOURCC, fps, vidsize) demoinfo = info[seqid]['frames'] for idx, frame", "'lstmdeep', 'lstmoccdeep'] for dir_name in output_list: print(dir_name) save_vid = args.save_vid if save_vid: is_in", "from utils.plot_utils import RandomColor def parse_args(): parser = argparse.ArgumentParser( description='Monocular 3D Tracking Visualizer',", "{}\".format(path)) os.makedirs(path) # Use with care def gen_result(out_path, out_name, save_vid=False, save_txt=True, dry_run=False, overwrite=False):", "int(y1)), cv2.FONT_HERSHEY_COMPLEX, 1, tid2color[trk['id']], 2) img = cv2.putText(img, str(int(trk['depth'])), (int(x2)-14, int(y2)), cv2.FONT_HERSHEY_COMPLEX, 0.8,", "(int(x2)-14, int(y2)), cv2.FONT_HERSHEY_COMPLEX, 0.8, tid2color[trk['id']], 2) if save_txt: ''' submit_txt = ' '.join([", "dir_name in output_list: print(dir_name) save_vid = args.save_vid if save_vid: is_in = False for", "if save_vid: images = sorted(glob(IMAGE_PATH.format(file_seq))) img = cv2.imread(images[0]) vidsize = (img.shape[1], img.shape[0]) #", "action='store_true', default=False, help='Show command without running') parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite the output files')", "using out_name, too slow output_list = [os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')]", "in os.listdir(SAVE_PATH) if item.endswith('_pd.json')] my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep'] for dir_name", "cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)), tid2color[trk['id']], 2) img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2),", 
"help='How many epochs you used to separate exp') parser.add_argument('--flag', default='kf3doccdeep_age15_aff0.1_hit0_80m_pd', help='Flags for running", "mkdir('{}{}/data/'.format(out_path, out_name)) for seqid in range(len(info)): file_seq = re_pattern.search(info[seqid]['filename']).group(0) print('Reading {} from {}{}...'.format(file_seq,", "cv2 from utils.plot_utils import RandomColor def parse_args(): parser = argparse.ArgumentParser( description='Monocular 3D Tracking", "print(dir_name) save_vid = args.save_vid if save_vid: is_in = False for ml in my_list:", "help='Name of the session, to separate exp') parser.add_argument('--epoch', default='100', help='How many epochs you", "'lstmoccdeep'] for dir_name in output_list: print(dir_name) save_vid = args.save_vid if save_vid: is_in =", "help='Flags for saving txt') parser.add_argument('--dry_run', action='store_true', default=False, help='Show command without running') parser.add_argument('--overwrite', action='store_true',", "= parse_args() if args.set == 'kitti': IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'}) re_pattern", "= '{}{}/data/{}.txt'.format(out_path, out_name, file_seq) if not overwrite: if not os.path.isfile(txt_name) and save_txt: pass", "= cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)), tid2color[trk['id']], 2) img = cv2.rectangle(img, (int(x1), int(y1)),", "= [os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')] my_list = ['none', 'kf2ddeep', 'kf3doccdeep',", "-1000 -10', str(conf)]) #''' submit_txt += '\\n' seqout.append(submit_txt) if save_vid: out.write(img) if save_txt:", "'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'}) re_pattern = re.compile('[0-9]{4}') else: IMAGE_PATH = 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT': args.split, 'SEQ':", "for idx, frame in enumerate(demoinfo): if save_vid: img = cv2.imread(images[idx]) img = cv2.putText(img,", "dry_run: continue seqout = [] vid_name = '{}{}/data/{}.mp4'.format(out_path, out_name, file_seq) txt_name = '{}{}/data/{}.txt'.format(out_path,", "file_seq = re_pattern.search(info[seqid]['filename']).group(0) print('Reading {} from {}{}...'.format(file_seq, out_path, out_name)) if dry_run: continue seqout", "trk['alpha'], str(x1), str(y1), str(x2), str(y2), trk['dim'], trk['loc'], trk['rot'], str(conf)]) ''' submit_txt = '", "f.writelines(seqout) if save_vid: print(\"{} saved.\".format(vid_name)) out.release() if __name__ == '__main__': # Not using", "= json.load(open('{}{}.json'.format(out_path, out_name), 'r')) if not dry_run: mkdir('{}{}/data/'.format(out_path, out_name)) for seqid in range(len(info)):", "-1 -10', str(x1), str(y1), str(x2), str(y2), '-1 -1 -1', '-1000 -1000 -1000 -10',", "data...\") info = json.load(open('{}{}.json'.format(out_path, out_name), 'r')) if not dry_run: mkdir('{}{}/data/'.format(out_path, out_name)) for seqid", "= trk['xc'], trk['yc'] if save_vid: if trk['id'] not in tid2color: tid2color[trk['id']] = rm_color.get_random_color(scale=255)", "trk['rot'], str(conf)]) ''' submit_txt = ' '.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1 -10',", "__name__ == '__main__': # Not using out_name, too slow output_list = [os.path.splitext(item)[0] for", "as np from glob import glob import cv2 from utils.plot_utils import RandomColor def", "int(yc-1)), (int(xc+1), int(yc+1)), tid2color[trk['id']], 2) img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), 
int(y2)), tid2color[trk['id']],", "from glob import glob import cv2 from utils.plot_utils import RandomColor def parse_args(): parser", "import re import sys import argparse import json import numpy as np from", "(180, 180, 180), 2) for trk in frame['hypotheses']: x1, y1, x2, y2, conf", "glob import cv2 from utils.plot_utils import RandomColor def parse_args(): parser = argparse.ArgumentParser( description='Monocular", "enumerate(demoinfo): if save_vid: img = cv2.imread(images[idx]) img = cv2.putText(img, str(idx), (20, 30), cv2.FONT_HERSHEY_COMPLEX,", "os.path.isfile(txt_name) and save_txt: pass elif not os.path.isfile(vid_name) and save_vid: pass else: print(\"SKIP running.", "np.random.seed(777) rm_color = RandomColor(30) tid2color = {} def mkdir(path): if not os.path.isdir(path): print(\"Making", "argparse import json import numpy as np from glob import glob import cv2", "trk in frame['hypotheses']: x1, y1, x2, y2, conf = trk['det_box'] xc, yc =", "'.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1 -10', str(x1), str(y1), str(x2), str(y2), '-1 -1", "tid2color[trk['id']] = rm_color.get_random_color(scale=255) img = cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)), tid2color[trk['id']], 2) img", "2) img = cv2.putText(img, str(int(trk['depth'])), (int(x2)-14, int(y2)), cv2.FONT_HERSHEY_COMPLEX, 0.8, tid2color[trk['id']], 2) if save_txt:", "for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')] my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep']", "default='kf3doccdeep_age15_aff0.1_hit0_80m_pd', help='Flags for running evaluation code') parser.add_argument('--save_vid', action='store_true', default=False, help='Flags for saving video')", "in enumerate(demoinfo): if save_vid: img = cv2.imread(images[idx]) img = cv2.putText(img, str(idx), (20, 30),", "int(yc+1)), tid2color[trk['id']], 2) img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), tid2color[trk['id']], 4) img", "import json import numpy as np from glob import glob import cv2 from", "formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('set', choices=['gta', 'kitti']) parser.add_argument('split', choices=['train', 'val', 'test'], help='Which data split to use", "help='Which data split to use in testing') parser.add_argument('--session', default='623', help='Name of the session,", "tid2color[trk['id']], 2) img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), tid2color[trk['id']], 4) img =", "open(txt_name, 'w') as f: f.writelines(seqout) if save_vid: print(\"{} saved.\".format(vid_name)) out.release() if __name__ ==", "file {} Found\".format(txt_name)) continue if save_vid: images = sorted(glob(IMAGE_PATH.format(file_seq))) img = cv2.imread(images[0]) vidsize", "import sys import argparse import json import numpy as np from glob import", "2) for trk in frame['hypotheses']: x1, y1, x2, y2, conf = trk['det_box'] xc,", "img = cv2.putText(img, str(int(trk['id'])), (int(x1), int(y1)), cv2.FONT_HERSHEY_COMPLEX, 1, tid2color[trk['id']], 2) img = cv2.putText(img,", "elif not os.path.isfile(vid_name) and save_vid: pass else: print(\"SKIP running. 
Generated file {} Found\".format(txt_name))", "args = parser.parse_args() return args print(' '.join(sys.argv)) args = parse_args() if args.set ==", "output_list: print(dir_name) save_vid = args.save_vid if save_vid: is_in = False for ml in", "'-1000 -1000 -1000 -10', str(conf)]) #''' submit_txt += '\\n' seqout.append(submit_txt) if save_vid: out.write(img)", "-1 -1', '-1000 -1000 -1000 -10', str(conf)]) #''' submit_txt += '\\n' seqout.append(submit_txt) if", "'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format( **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT': args.split}) out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format( **{'SESS':", "submit_txt = ' '.join([ str(idx), str(int(trk['id'])), 'Car', '-1 -1 -10', str(x1), str(y1), str(x2),", "os import re import sys import argparse import json import numpy as np", "import glob import cv2 from utils.plot_utils import RandomColor def parse_args(): parser = argparse.ArgumentParser(", "[os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')] my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep',", "= rm_color.get_random_color(scale=255) img = cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)), tid2color[trk['id']], 2) img =", "saving txt') parser.add_argument('--dry_run', action='store_true', default=False, help='Show command without running') parser.add_argument('--overwrite', action='store_true', default=False, help='Overwrite", "choices=['gta', 'kitti']) parser.add_argument('split', choices=['train', 'val', 'test'], help='Which data split to use in testing')", "print(\"{} saved.\".format(vid_name)) out.release() if __name__ == '__main__': # Not using out_name, too slow", "parse_args(): parser = argparse.ArgumentParser( description='Monocular 3D Tracking Visualizer', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('set', choices=['gta', 'kitti']) parser.add_argument('split',", "data split to use in testing') parser.add_argument('--session', default='623', help='Name of the session, to" ]
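Each line that gen_result writes in its save_txt branch follows a KITTI-style tracking submission layout: frame index, track id, object type, placeholder truncation/occlusion/alpha fields, the 2D box, placeholder 3D fields, and the detection confidence. The sketch below is not part of the original script; it only illustrates how such a line could be read back, and the field names are descriptive labels chosen here, not an official schema.

# Minimal sketch: parse one line produced by gen_result's save_txt branch.
# Field layout mirrors the ' '.join([...]) call above; names are illustrative.
def parse_submit_line(line):
    fields = line.split()
    return {
        'frame': int(fields[0]),
        'track_id': int(float(fields[1])),
        'type': fields[2],                           # always 'Car' in the code above
        'truncated': fields[3],                      # placeholder '-1'
        'occluded': fields[4],                       # placeholder '-1'
        'alpha': fields[5],                          # placeholder '-10'
        'bbox': [float(v) for v in fields[6:10]],    # x1, y1, x2, y2
        'dimensions': fields[10:13],                 # placeholders '-1 -1 -1'
        'location': fields[13:16],                   # placeholders '-1000 -1000 -1000'
        'rotation_y': fields[16],                    # placeholder '-10'
        'score': float(fields[17]),
    }

# Example line in the format emitted above (values are made up for illustration).
example = "0 3 Car -1 -1 -10 100.0 120.5 180.0 200.0 -1 -1 -1 -1000 -1000 -1000 -10 0.92"
print(parse_submit_line(example))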
[ "X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude = np.number) # Code", "here # Code ends here cols= list(cat_df) fig, axes = plt.subplots(nrows = 2,", "= X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude", "= LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test", "files import pandas as pd from sklearn.model_selection import train_test_split as tts # Code", "#Importing header files from io import StringIO from sklearn.tree import export_graphviz from sklearn", "# Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test", "starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) #", "here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100", "below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path)) plt.axis('off') plt.show() # Code", "= list(num_df) fig, axes = plt.subplots(nrows =9, ncols= 1) for i in range(1,9):", "- do not delete/modify the code below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path)", "this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path)) plt.axis('off') plt.show() # Code ends", "# Code starts here # Code ends here cols= list(cat_df) fig, axes =", "y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts here # Code ends here cols=", "Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate']", "from IPython.display import Image import pydotplus # Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_,", "header files from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf':", "= X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df =", "header files import seaborn as sns # Code starts here # Code ends", "header files import numpy as np from sklearn.preprocessing import LabelEncoder # Code starts", "header files import pandas as pd from sklearn.model_selection import train_test_split as tts #", "not delete/modify the code below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path))", "sns # Code starts here # Code ends cols = list(num_df) fig, axes", "IPython.display import Image import pydotplus # Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None,", "LabelEncoder # Code starts here for i in list(cat_df): X_train[i].fillna('NA') le = 
LabelEncoder()", "# Code starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)", "acc= accuracy_score(y_test, y_preds) # -------------- #Importing header files from sklearn.model_selection import GridSearchCV #Parameter", "accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test,", "ends here cols= list(cat_df) fig, axes = plt.subplots(nrows = 2, ncols= 2) for", "y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing header files from", "import Image import pydotplus # Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns,", "line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path)) plt.axis('off') plt.show() # Code ends here", "sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header files from sklearn.tree import DecisionTreeClassifier from", "here # -------------- #Importing header files import seaborn as sns # Code starts", "range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts here # Code ends", "train_test_split as tts # Code starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') #", "model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing header files", "import train_test_split as tts # Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan']", "le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i])", "from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds =", "Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data)", "# Code starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here", "starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here # --------------", "range(10,50,10)} # Code starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid,", "starts here # Code ends cols = list(num_df) fig, axes = plt.subplots(nrows =9,", "sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code starts here for i", "fig, axes = plt.subplots(nrows = 2, ncols= 2) for i in range (0,2):", "(0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header files", "ncols= 2) for i in range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train,", "range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header", "import LabelEncoder # Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate']", "starts 
here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) #", "acc_2 # -------------- #Importing header files from io import StringIO from sklearn.tree import", "-------------- #Importing header files import matplotlib.pyplot as plt # Code starts here import", "Code starts here import pandas as pd from sklearn.model_selection import train_test_split as tts", "as tts # Code starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code", "Code ends here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 # --------------", "list(num_df) fig, axes = plt.subplots(nrows =9, ncols= 1) for i in range(1,9): sns.boxplot(x=y_train,", "sklearn.model_selection import train_test_split as tts # Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1)", "from sklearn.preprocessing import LabelEncoder # Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] =", "matplotlib.pyplot as plt # Code starts here import pandas as pd from sklearn.model_selection", "here from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds", "cols= list(cat_df) fig, axes = plt.subplots(nrows = 2, ncols= 2) for i in", "# -------------- #Importing header files import numpy as np from sklearn.preprocessing import LabelEncoder", "grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here model_2 =", "# -------------- #Importing header files import seaborn as sns # Code starts here", "sklearn.tree import export_graphviz from sklearn import tree from sklearn import metrics from IPython.display", "ends cols = list(num_df) fig, axes = plt.subplots(nrows =9, ncols= 1) for i", "2, ncols= 2) for i in range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]],", "here cols= list(cat_df) fig, axes = plt.subplots(nrows = 2, ncols= 2) for i", "sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code", "seaborn as sns # Code starts here # Code ends cols = list(num_df)", "= y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics import accuracy_score model", "import seaborn as sns # Code starts here # Code ends cols =", "= p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header files from", "header files from io import StringIO from sklearn.tree import export_graphviz from sklearn import", "p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2 = p_tree.predict(X_test)", "= le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics", "ncols= 1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code", "1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts", "LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = 
LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test =", "y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics import accuracy_score model =", "import train_test_split as tts # Code starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar')", "y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here # --------------", "y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here # -------------- #Importing header files import", "list(cat_df) fig, axes = plt.subplots(nrows = 2, ncols= 2) for i in range", "here import pandas as pd from sklearn.model_selection import train_test_split as tts # Code", "X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from", "ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header files", "code below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path)) plt.axis('off') plt.show() #", "here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) #", "= model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing header files from sklearn.model_selection import", "ends here from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train)", "import export_graphviz from sklearn import tree from sklearn import metrics from IPython.display import", "= True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not delete/modify the code", "parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here model_2 = DecisionTreeClassifier(random_state", "#Importing header files import pandas as pd from sklearn.model_selection import train_test_split as tts", "ax=axes[i]) # -------------- # Code starts here # Code ends here cols= list(cat_df)", "plt.subplots(nrows =9, ncols= 1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # --------------", "plt.figure() fully_paid.plot(kind='bar') # Code ends here # -------------- #Importing header files import numpy", "as tts # Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test,", "= 0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing", "# Code ends here # -------------- #Importing header files import matplotlib.pyplot as plt", "# Code ends here # -------------- #Importing header files import numpy as np", "here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header", "= tts(X,y,random_state=0,test_size=0.3) # Code ends here # -------------- #Importing header files import matplotlib.pyplot", "X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here # -------------- #Importing", "np.number) # Code ends here # -------------- #Importing header files import seaborn as", "pd 
from sklearn.model_selection import train_test_split as tts # Code starts here fully_paid =", "as pd from sklearn.model_selection import train_test_split as tts # Code starts here fully_paid", "X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df", "in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header files from sklearn.tree import", "list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i]", "Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test =", "starts here for i in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i])", "ax=axes[i,j]) # -------------- #Importing header files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import", "do not delete/modify the code below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15))", "in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts here # Code", "import pydotplus # Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled =", "data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here #", "import StringIO from sklearn.tree import export_graphviz from sklearn import tree from sklearn import", "-------------- # Code starts here # Code ends here cols= list(cat_df) fig, axes", "p_tree.fit(X_train,y_train) # Code ends here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2", "pydotplus # Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True,", "fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here # -------------- #Importing header", "ends here # -------------- #Importing header files import seaborn as sns # Code", "y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here # -------------- #Importing header files import", "X_test[i].fillna('NA') le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) #", "Code starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here #", "plt.subplots(nrows = 2, ncols= 2) for i in range (0,2): for j in", "-------------- #Importing header files import pandas as pd from sklearn.model_selection import train_test_split as", "=0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2 =", "tts # Code starts here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends", "X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude =", "class_names=['loan_paid_back_yes','loan_paid_back_no']) 
graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not delete/modify the code below this", "#Importing header files import numpy as np from sklearn.preprocessing import LabelEncoder # Code", "X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df =", "out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not", "i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts here #", "le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code", "fully_paid.plot(kind='bar') # Code ends here # -------------- #Importing header files import numpy as", "param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test,", "True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not delete/modify the code below", "y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing header files from sklearn.model_selection", "= plt.subplots(nrows =9, ncols= 1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) #", "# Code ends here from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state = 0)", "np.number) cat_df = X_train.select_dtypes(exclude = np.number) # Code ends here # -------------- #Importing", "2) for i in range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j])", "# Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float)", "= {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here model_2 = DecisionTreeClassifier(random_state =0)", "# Code ends here cols= list(cat_df) fig, axes = plt.subplots(nrows = 2, ncols=", "show graph - do not delete/modify the code below this line img_path =", "= le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True)", "le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True)", "y_preds) # -------------- #Importing header files from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid", "StringIO from sklearn.tree import export_graphviz from sklearn import tree from sklearn import metrics", "y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state", "Code ends cols = list(num_df) fig, axes = plt.subplots(nrows =9, ncols= 1) for", "here model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code", 
"X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i] =", "import pandas as pd from sklearn.model_selection import train_test_split as tts # Code starts", "header files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code starts", "import GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts", "X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df", "metrics from IPython.display import Image import pydotplus # Code starts here dot_data =", "np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree =", "Code ends here from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train,", "= y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here # -------------- #Importing header files", "from sklearn.model_selection import train_test_split as tts # Code starts here fully_paid = y_train.value_counts()", "le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics import", "= X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude = np.number) #", "= X_train.select_dtypes(exclude = np.number) # Code ends here # -------------- #Importing header files", "cat_df = X_train.select_dtypes(exclude = np.number) # Code ends here # -------------- #Importing header", "Code ends here cols= list(cat_df) fig, axes = plt.subplots(nrows = 2, ncols= 2)", "files import numpy as np from sklearn.preprocessing import LabelEncoder # Code starts here", "starts here # Code ends here cols= list(cat_df) fig, axes = plt.subplots(nrows =", "as pd from sklearn.model_selection import train_test_split as tts # Code starts here data=", "sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts here # Code ends here", "# -------------- #Importing header files import matplotlib.pyplot as plt # Code starts here", "files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code starts here", "here for i in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA')", "= DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) #", "'min_samples_leaf': range(10,50,10)} # Code starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2,", "sklearn.model_selection import train_test_split as tts # Code starts here fully_paid = y_train.value_counts() plt.figure()", "X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include", "import metrics from IPython.display import Image import pydotplus # Code starts here dot_data", "X= 
data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here", "X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude = np.number)", "= np.number) cat_df = X_train.select_dtypes(exclude = np.number) # Code ends here # --------------", "= DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here", "X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here # -------------- #Importing header", "io import StringIO from sklearn.tree import export_graphviz from sklearn import tree from sklearn", "# -------------- #Importing header files from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid =", "sklearn.preprocessing import LabelEncoder # Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100", "model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends", "j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header files from sklearn.tree", "range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header files from sklearn.tree import DecisionTreeClassifier", "data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code", "-------------- #Importing header files from io import StringIO from sklearn.tree import export_graphviz from", "np from sklearn.preprocessing import LabelEncoder # Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate']", "Code ends here # -------------- #Importing header files import numpy as np from", "here # -------------- #Importing header files import numpy as np from sklearn.preprocessing import", "starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] =", "cols = list(num_df) fig, axes = plt.subplots(nrows =9, ncols= 1) for i in", "=9, ncols= 1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- #", "p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header files from io", "Code starts here # Code ends cols = list(num_df) fig, axes = plt.subplots(nrows", "sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds = model.predict(X_test)", "accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header files from io import StringIO from", "from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} #", "ends here # -------------- #Importing header files import matplotlib.pyplot as plt # Code", "{'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree", "pd.read_csv(path) X= 
data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends", "from sklearn import tree from sklearn import metrics from IPython.display import Image import", "files from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}", "0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing header", "DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2", "plt # Code starts here import pandas as pd from sklearn.model_selection import train_test_split", "= X_train['int.rate']/100 X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include = np.number)", "graph - do not delete/modify the code below this line img_path = user_data_dir+'/file.png'", "Code ends here # -------------- #Importing header files import seaborn as sns #", "delete/modify the code below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path)) plt.axis('off')", "GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2 = p_tree.predict(X_test) acc_2 =", "X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude = np.number) # Code ends here #", "#Importing header files import matplotlib.pyplot as plt # Code starts here import pandas", "files import seaborn as sns # Code starts here # Code ends cols", "axes = plt.subplots(nrows =9, ncols= 1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])", "# Code starts here import pandas as pd from sklearn.model_selection import train_test_split as", "# Code ends cols = list(num_df) fig, axes = plt.subplots(nrows =9, ncols= 1)", "ends here # -------------- #Importing header files import numpy as np from sklearn.preprocessing", "sklearn import metrics from IPython.display import Image import pydotplus # Code starts here", "# -------------- # Code starts here # Code ends here cols= list(cat_df) fig,", "#Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here model_2", "feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not delete/modify", "-------------- #Importing header files import seaborn as sns # Code starts here #", "GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)} # Code starts here", "# -------------- #Importing header files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder", "hue=y_train, ax=axes[i,j]) # -------------- #Importing header files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing", "DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code starts here for i in list(cat_df):", "files from io import StringIO from sklearn.tree import export_graphviz from sklearn import tree", "as sns # Code starts here # Code ends cols = list(num_df) fig,", "X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] = 
X_test['int.rate'].str.replace('%','').astype(float) X_test['int.rate'] = X_test['int.rate']/100 num_df = X_train.select_dtypes(include =", "model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds)", "import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code starts here for i in", "starts here import pandas as pd from sklearn.model_selection import train_test_split as tts #", "train_test_split as tts # Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train,", "import accuracy_score model = DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc=", "X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0)", "from sklearn.model_selection import train_test_split as tts # Code starts here data= pd.read_csv(path) X=", "dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph", "numpy as np from sklearn.preprocessing import LabelEncoder # Code starts here X_train['int.rate'] =", "here # Code ends cols = list(num_df) fig, axes = plt.subplots(nrows =9, ncols=", "acc_2 = accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header files from io import", "accuracy_score(y_test, y_preds) # -------------- #Importing header files from sklearn.model_selection import GridSearchCV #Parameter grid", "-------------- #Importing header files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder #", "the code below this line img_path = user_data_dir+'/file.png' graph_big.write_png(img_path) plt.figure(figsize=(20,15)) plt.imshow(plt.imread(img_path)) plt.axis('off') plt.show()", "as np from sklearn.preprocessing import LabelEncoder # Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float)", "# -------------- #Importing header files from io import StringIO from sklearn.tree import export_graphviz", "from io import StringIO from sklearn.tree import export_graphviz from sklearn import tree from", "#Importing header files import seaborn as sns # Code starts here # Code", "axes = plt.subplots(nrows = 2, ncols= 2) for i in range (0,2): for", "ypreds2) acc_2 # -------------- #Importing header files from io import StringIO from sklearn.tree", "# Code ends here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 #", "from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code starts here for", "in range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing", "tree from sklearn import metrics from IPython.display import Image import pydotplus # Code", "pd from sklearn.model_selection import train_test_split as tts # Code starts here data= pd.read_csv(path)", "graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not delete/modify the code below this line", "filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do not delete/modify the", "files import matplotlib.pyplot as plt # Code starts here 
import pandas as pd", "= 2, ncols= 2) for i in range (0,2): for j in range(0,2):", "import numpy as np from sklearn.preprocessing import LabelEncoder # Code starts here X_train['int.rate']", "Code starts here # Code ends here cols= list(cat_df) fig, axes = plt.subplots(nrows", "ends here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing", "tts(X,y,random_state=0,test_size=0.3) # Code ends here # -------------- #Importing header files import matplotlib.pyplot as", "Code starts here model_2 = DecisionTreeClassifier(random_state =0) p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train)", "Code ends here # -------------- #Importing header files import matplotlib.pyplot as plt #", "# show graph - do not delete/modify the code below this line img_path", "cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2 = p_tree.predict(X_test) acc_2 = accuracy_score(y_test, ypreds2)", "DecisionTreeClassifier(random_state = 0) model.fit(X_train, y_train) y_preds = model.predict(X_test) acc= accuracy_score(y_test, y_preds) # --------------", "= accuracy_score(y_test, ypreds2) acc_2 # -------------- #Importing header files from io import StringIO", "#Importing header files from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import LabelEncoder # Code", "header files import matplotlib.pyplot as plt # Code starts here import pandas as", "#y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics import accuracy_score", "= export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph -", "starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3)", "pandas as pd from sklearn.model_selection import train_test_split as tts # Code starts here", "= LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends", "y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here from sklearn.metrics import accuracy_score model = DecisionTreeClassifier(random_state =", "import LabelEncoder # Code starts here for i in list(cat_df): X_train[i].fillna('NA') le =", "y_train, y_test = tts(X,y,random_state=0,test_size=0.3) # Code ends here # -------------- #Importing header files", "-------------- #Importing header files import numpy as np from sklearn.preprocessing import LabelEncoder #", "as plt # Code starts here import pandas as pd from sklearn.model_selection import", "import matplotlib.pyplot as plt # Code starts here import pandas as pd from", "tts # Code starts here data= pd.read_csv(path) X= data.drop(['customer.id','paid.back.loan'],1) y=data['paid.back.loan'] X_train, X_test, y_train,", "#Importing header files from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid = {'max_depth': np.arange(3,10),", "here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show", 
"for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i]) # -------------- # Code starts here", "= X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude = np.number) # Code ends here", "sklearn.preprocessing import LabelEncoder # Code starts here for i in list(cat_df): X_train[i].fillna('NA') le", "= plt.subplots(nrows = 2, ncols= 2) for i in range (0,2): for j", "# Code starts here # Code ends cols = list(num_df) fig, axes =", "from sklearn.preprocessing import LabelEncoder # Code starts here for i in list(cat_df): X_train[i].fillna('NA')", "here fully_paid = y_train.value_counts() plt.figure() fully_paid.plot(kind='bar') # Code ends here # -------------- #Importing", "Image import pydotplus # Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled", "sklearn import tree from sklearn import metrics from IPython.display import Image import pydotplus", "fig, axes = plt.subplots(nrows =9, ncols= 1) for i in range(1,9): sns.boxplot(x=y_train, y=num_df[cols[i]],", "= GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5) p_tree.fit(X_train,y_train) # Code ends here ypreds2 = p_tree.predict(X_test) acc_2", "Code starts here for i in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] =", "for i in range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) #", "= np.number) # Code ends here # -------------- #Importing header files import seaborn", "# Code starts here for i in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i]", "# Code ends here # -------------- #Importing header files import seaborn as sns", "LabelEncoder() X_test[i] = le.fit_transform(X_test[i]) #y_test = y_test.str.replace('No',0) y_train.replace({'No':0,'Yes':1},inplace=True) y_test.replace({'No':0,'Yes':1},inplace=True) # Code ends here", "# Code starts here dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no'])", "num_df = X_train.select_dtypes(include = np.number) cat_df = X_train.select_dtypes(exclude = np.number) # Code ends", "from sklearn import metrics from IPython.display import Image import pydotplus # Code starts", "i in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le =", "from sklearn.tree import export_graphviz from sklearn import tree from sklearn import metrics from", "# -------------- #Importing header files import pandas as pd from sklearn.model_selection import train_test_split", "i in range (0,2): for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # --------------", "in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le = LabelEncoder()", "export_graphviz from sklearn import tree from sklearn import metrics from IPython.display import Image", "X_train.select_dtypes(exclude = np.number) # Code ends here # -------------- #Importing header files import", "import tree from sklearn import metrics from IPython.display import Image import pydotplus #", "export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no']) graph_big=pydotplus.graph_from_dot_data(dot_data) # show graph - do", "here # -------------- 
#Importing header files import matplotlib.pyplot as plt # Code starts", "for i in list(cat_df): X_train[i].fillna('NA') le = LabelEncoder() X_train[i] = le.fit_transform(X_train[i]) X_test[i].fillna('NA') le", "-------------- #Importing header files from sklearn.model_selection import GridSearchCV #Parameter grid parameter_grid = {'max_depth':", "LabelEncoder # Code starts here X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float) X_train['int.rate'] = X_train['int.rate']/100 X_test['int.rate'] =", "for j in range(0,2): sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j]) # -------------- #Importing header files from", "model.predict(X_test) acc= accuracy_score(y_test, y_preds) # -------------- #Importing header files from sklearn.model_selection import GridSearchCV" ]
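For completeness, the fitted GridSearchCV object above exposes the tuned configuration directly; a minimal sketch, using only p_tree and acc_2 from the script plus scikit-learn's documented GridSearchCV attributes (printed values are illustrative):

# Minimal sketch: inspect the result of the grid search above.
# Only p_tree and acc_2 from the script are used; nothing else is assumed.
print("best parameters :", p_tree.best_params_)   # e.g. a max_depth / min_samples_leaf pair
print("best CV accuracy:", p_tree.best_score_)    # mean cross-validated score of that pair
print("test accuracy   :", acc_2)                 # held-out accuracy computed in the script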
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "our version out of PKG-INFO. If we're installed, # this'll let us find", "Copyright (c) 2012-2014 OpenStack Foundation. # # Licensed under the Apache License, Version", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "__all__ = ['version_info', 'version'] try: # First, try to get our version out", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running", "License. # You may obtain a copy of the License at # #", "Foundation. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "let us find our version without pulling in pbr. After all, if #", "installed on a system, we're not in a Git-managed source tree, so #", "limitations under the License. import pkg_resources __all__ = ['version_info', 'version'] try: # First,", "really buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO?", "we're installed on a system, we're not in a Git-managed source tree, so", "law or agreed to in writing, software # distributed under the License is", "pkg_resources __all__ = ['version_info', 'version'] try: # First, try to get our version", "the License for the specific language governing permissions and # limitations under the", "of PKG-INFO. If we're installed, # this'll let us find our version without", "except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from a checkout, then. Let", "compliance with the License. # You may obtain a copy of the License", "version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``.", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "import pkg_resources __all__ = ['version_info', 'version'] try: # First, try to get our", "from a checkout, then. Let pbr do # its thing to figure out", "this file except in compliance with the License. # You may obtain a", "probably running from a checkout, then. Let pbr do # its thing to", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "a checkout, then. Let pbr do # its thing to figure out a", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "We're probably running from a checkout, then. Let pbr do # its thing", "pbr. After all, if # we're installed on a system, we're not in", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "if # we're installed on a system, we're not in a Git-managed source", "out of PKG-INFO. If we're installed, # this'll let us find our version", "pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from a checkout, then. Let pbr", "OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "ANY KIND, either express or implied. # See the License for the specific", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "# Copyright (c) 2012-2014 OpenStack Foundation. # # Licensed under the Apache License,", "use this file except in compliance with the License. # You may obtain", "pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``. version_info = tuple(map(int,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "Git-managed source tree, so # pbr doesn't really buy us anything. __version__ =", "not use this file except in compliance with the License. # You may", "a system, we're not in a Git-managed source tree, so # pbr doesn't", "buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "its thing to figure out a version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string()", "__version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3]))", "revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #: Version string ``'major.minor.revision'``. version = '.'.join(map(str, version_info))", "After all, if # we're installed on a system, we're not in a", "try: # First, try to get our version out of PKG-INFO. If we're", "See the License for the specific language governing permissions and # limitations under", "version out of PKG-INFO. If we're installed, # this'll let us find our", "do # its thing to figure out a version number. import pbr.version __version__", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``. version_info", "information ``(major, minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #: Version string ``'major.minor.revision'``. version", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from a checkout,", "minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #: Version string ``'major.minor.revision'``. version = '.'.join(map(str,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "our version without pulling in pbr. 
After all, if # we're installed on", "= pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from a", "No PKG-INFO? We're probably running from a checkout, then. Let pbr do #", "OF ANY KIND, either express or implied. # See the License for the", "language governing permissions and # limitations under the License. import pkg_resources __all__ =", "First, try to get our version out of PKG-INFO. If we're installed, #", "(c) 2012-2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0", "and # limitations under the License. import pkg_resources __all__ = ['version_info', 'version'] try:", "2.0 (the \"License\"); # you may not use this file except in compliance", "# First, try to get our version out of PKG-INFO. If we're installed,", "# you may not use this file except in compliance with the License.", "# limitations under the License. import pkg_resources __all__ = ['version_info', 'version'] try: #", "running from a checkout, then. Let pbr do # its thing to figure", "['version_info', 'version'] try: # First, try to get our version out of PKG-INFO.", "pbr do # its thing to figure out a version number. import pbr.version", "out a version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major,", "License. import pkg_resources __all__ = ['version_info', 'version'] try: # First, try to get", "agreed to in writing, software # distributed under the License is distributed on", "version without pulling in pbr. After all, if # we're installed on a", "try to get our version out of PKG-INFO. If we're installed, # this'll", "without pulling in pbr. After all, if # we're installed on a system,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #: Version", "source tree, so # pbr doesn't really buy us anything. __version__ = pkg_resources.get_provider(", "(the \"License\"); # you may not use this file except in compliance with", "in pbr. After all, if # we're installed on a system, we're not", "on a system, we're not in a Git-managed source tree, so # pbr", "pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from a checkout, then.", "# # Unless required by applicable law or agreed to in writing, software", "tree, so # pbr doesn't really buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version", "under the License. import pkg_resources __all__ = ['version_info', 'version'] try: # First, try", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. # You may obtain a copy of", "by applicable law or agreed to in writing, software # distributed under the", "a Git-managed source tree, so # pbr doesn't really buy us anything. __version__", "Version information ``(major, minor, revision)``. 
version_info = tuple(map(int, __version__.split('.')[:3])) #: Version string ``'major.minor.revision'``.", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "2012-2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the", "the specific language governing permissions and # limitations under the License. import pkg_resources", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "the License. import pkg_resources __all__ = ['version_info', 'version'] try: # First, try to", "installed, # this'll let us find our version without pulling in pbr. After", "pbr doesn't really buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: #", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``. version_info =", "file except in compliance with the License. # You may obtain a copy", "then. Let pbr do # its thing to figure out a version number.", "checkout, then. Let pbr do # its thing to figure out a version", "= pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #:", "# No PKG-INFO? We're probably running from a checkout, then. Let pbr do", "pulling in pbr. After all, if # we're installed on a system, we're", "PKG-INFO. If we're installed, # this'll let us find our version without pulling", "doesn't really buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No", "this'll let us find our version without pulling in pbr. After all, if", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "a version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information ``(major, minor,", "the License. # You may obtain a copy of the License at #", "to in writing, software # distributed under the License is distributed on an", "# pbr doesn't really buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound:", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "to get our version out of PKG-INFO. If we're installed, # this'll let", "= ['version_info', 'version'] try: # First, try to get our version out of", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "implied. 
# See the License for the specific language governing permissions and #", "to figure out a version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "'version'] try: # First, try to get our version out of PKG-INFO. If", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "required by applicable law or agreed to in writing, software # distributed under", "PKG-INFO? We're probably running from a checkout, then. Let pbr do # its", "applicable law or agreed to in writing, software # distributed under the License", "__version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from", "specific language governing permissions and # limitations under the License. import pkg_resources __all__", "not in a Git-managed source tree, so # pbr doesn't really buy us", "Let pbr do # its thing to figure out a version number. import", "in a Git-managed source tree, so # pbr doesn't really buy us anything.", "``(major, minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #: Version string ``'major.minor.revision'``. version =", "or agreed to in writing, software # distributed under the License is distributed", "us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably", "or implied. # See the License for the specific language governing permissions and", "get our version out of PKG-INFO. If we're installed, # this'll let us", "find our version without pulling in pbr. After all, if # we're installed", "thing to figure out a version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #:", "# we're installed on a system, we're not in a Git-managed source tree,", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "figure out a version number. import pbr.version __version__ = pbr.version.VersionInfo('swift3').release_string() #: Version information", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "we're not in a Git-managed source tree, so # pbr doesn't really buy", "so # pbr doesn't really buy us anything. __version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift3')).version except", "system, we're not in a Git-managed source tree, so # pbr doesn't really", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "us find our version without pulling in pbr. After all, if # we're", "# its thing to figure out a version number. import pbr.version __version__ =", "permissions and # limitations under the License. import pkg_resources __all__ = ['version_info', 'version']", "with the License. # You may obtain a copy of the License at", "governing permissions and # limitations under the License. import pkg_resources __all__ = ['version_info',", "# this'll let us find our version without pulling in pbr. 
After all,", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "#: Version information ``(major, minor, revision)``. version_info = tuple(map(int, __version__.split('.')[:3])) #: Version string", "in writing, software # distributed under the License is distributed on an \"AS", "If we're installed, # this'll let us find our version without pulling in", "all, if # we're installed on a system, we're not in a Git-managed", "we're installed, # this'll let us find our version without pulling in pbr.", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "secret class FaultyStream: \"\"\"This stream raises an exception after some text has been", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we are removing", "self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True,", "some lines, so output is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()),", "exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output(", "stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) /", "stdout and stderr similarity (how many lines both have in common) stdout =", "self.raised and len(self.stream.getvalue()) > 1024: self.raised = True raise RuntimeError() self.stream.write(b) def close(self):", "self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr = stderr_no_prefix if", "len(stderr_set)), 0.90) else: # we might have retrieved data only for one of", "if capture_stdout and capture_stderr: # both streams should be equal self.assertEqual(stdout, stderr) #", "re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we", "test we are removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr:", "10) self.assertTrue(stderr_s.raised) # assert stdout and stderr similarity (how many lines both have", "capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is not None: stdout_t.join(1.0)", "if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr =", "remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr,", "stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit", "service.shutdown() stdout = stdout.getvalue() stderr = stderr.getvalue() # remove timestamps from each line", "test we are removing something (hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr:", "this file except in compliance with the License. 
# You may obtain a", "= re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test we are removing something (hopefully timestamps)", "stdout = stdout.getvalue() stderr = stderr.getvalue() # remove timestamps from each line in", "stdout = io.StringIO() stderr = io.StringIO() stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr) service", "FaultyStream(stderr) service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client = BasicTaskClient('test", "0, key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts)", "None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally:", "# we might have retrieved data only for one of stdout and stderr", "stderr.getvalue() # remove timestamps from each line in outputs if prefix_output_with_timestamp: stdout_no_ts =", "ANY KIND, either express or implied. # See the License for the specific", "to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10)", "0, key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)", "============================================================================== import io import re import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from", "prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True, terminated) if succeeds is not None:", "f\"bash -c '{stderr} >&2 & {stdout}'\" def test_run_command(self): key = secret.make_secret_key() service =", "'', stderr, flags=re.MULTILINE) # test we are removing something (hopefully timestamps) if capture_stdout:", "= client.command_result() self.assertEqual(True, terminated) if succeeds is not None: self.assertEqual(succeeds, exit == 0)", "and stderr similarity (how many lines both have in common) stdout = re.sub('\\[0\\]<stdout>:',", "likely to loose some lines, so output is hard to evaluate if succeeds:", "key, verbose=2, attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp)", "self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout and stderr similarity (how many", "client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd),", "capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self):", "client.command_result() self.assertEqual(True, terminated) if succeeds is not None: self.assertEqual(succeeds, exit == 0) if", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "text has been written.\"\"\" def __init__(self, stream): self.stream = stream self.raised = False", "stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set", "import re import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret", "2021 Uber Technologies, Inc. All Rights Reserved. # # Licensed under the Apache", "BasicTaskClient from horovod.runner.common.util import secret class FaultyStream: \"\"\"This stream raises an exception after", "len(self.stream.getvalue()) > 1024: self.raised = True raise RuntimeError() self.stream.write(b) def close(self): pass class", "both streams should be equal self.assertEqual(stdout, stderr) # streams should have meaningful number", "def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True, terminated) if", "'for i in {1..10000}; do echo \"a very very useful log line #$i\";", "of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024) self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()), 10) self.assertTrue(stdout_s.raised or stderr_s.raised)", "stderr, flags=re.MULTILINE) # test we are removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts,", "OF ANY KIND, either express or implied. 
# See the License for the", "prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive())", ") def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output(", "stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else: # we might have retrieved data", "prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr", "stderr = stderr.getvalue() # remove timestamps from each line in outputs if prefix_output_with_timestamp:", "0.90) else: # we might have retrieved data only for one of stdout", "test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout = io.StringIO()", "= stdout_no_ts stderr = stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout,", "= secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client =", "verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t =", "re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we are removing something (hopefully prefixes) if", "least one of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024) self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()), 10) self.assertTrue(stdout_s.raised", "= io.StringIO() stderr = io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test service', 0,", "# test we are removing something (hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if", "BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret class FaultyStream: \"\"\"This stream raises an exception", "def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO()", "we are removing something (hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix,", "{1..10000}; do echo \"a very very useful log line #$i\"; done' cmd_single_line =", "10) if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True)", "class FaultyStream: \"\"\"This stream raises an exception after some text has been written.\"\"\"", "= re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test", "capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True, terminated) if succeeds is not", "pass class TaskServiceTest(unittest.TestCase): cmd = 'for i in {1..10000}; do echo \"a very", 
"capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr = stderr_no_ts # remove prefix stdout_no_prefix", "stdout = io.StringIO() stderr = io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test service',", "client.command_result()) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not", "# limitations under the License. # ============================================================================== import io import re import unittest", "after some text has been written.\"\"\" def __init__(self, stream): self.stream = stream self.raised", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "= stdout.getvalue() stderr = stderr.getvalue() # we are likely to loose some lines,", "self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp):", "prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE)", "self.assertEqual((True, 0), client.command_result()) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "= stream self.raised = False def write(self, b): if not self.raised and len(self.stream.getvalue())", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "loose some lines, so output is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024)", "capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO()", "cmd_with(stdout, stderr): return f\"bash -c '{stderr} >&2 & {stdout}'\" def test_run_command(self): key =", "& {stdout}'\" def test_run_command(self): key = secret.make_secret_key() service = BasicTaskService('test service', 0, key,", "test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd),", "prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO() stderr =", "if not self.raised and len(self.stream.getvalue()) > 1024: self.raised = True raise RuntimeError() self.stream.write(b)", "0) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not", "None: self.assertEqual(succeeds, exit == 0) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive())", "False def write(self, b): if not self.raised and len(self.stream.getvalue()) > 1024: self.raised =", "been written.\"\"\" def __init__(self, stream): self.stream = stream self.raised = False def write(self,", "not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive())", "'', stdout, flags=re.MULTILINE) 
stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set", "stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True,", "capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO() stderr", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "client.command_result()) finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "stdout = stdout_no_prefix stderr = stderr_no_prefix if capture_stdout and capture_stderr: # both streams", "one of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024) self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()), 10) self.assertTrue(stdout_s.raised or", "def __init__(self, stream): self.stream = stream self.raised = False def write(self, b): if", "we are likely to loose some lines, so output is hard to evaluate", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "exit = client.command_result() self.assertEqual(True, terminated) if succeeds is not None: self.assertEqual(succeeds, exit ==", "'', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we are", "stream raises an exception after some text has been written.\"\"\" def __init__(self, stream):", "self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key()", "required by applicable law or agreed to in writing, software # distributed under", "stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated,", "and len(self.stream.getvalue()) > 1024: self.raised = True raise RuntimeError() self.stream.write(b) def close(self): pass", "stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr = stderr_no_prefix if capture_stdout", "applicable law or agreed to in writing, software # distributed under the License", "self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout and stderr similarity (how many lines both", "capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def", "stderr.getvalue() # we are likely to loose some lines, so output is hard", "key, verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() 
self.assertEqual(0, exit) self.assertEqual((True, 0),", "def close(self): pass class TaskServiceTest(unittest.TestCase): cmd = 'for i in {1..10000}; do echo", "or agreed to in writing, software # distributed under the License is distributed", "test_run_command(self): key = secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try:", "client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout, stderr)", "governing permissions and # limitations under the License. # ============================================================================== import io import", "service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {},", "succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert", "limitations under the License. # ============================================================================== import io import re import unittest from", "many lines both have in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr", "# ============================================================================== import io import re import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "prefix_output_with_timestamp): stdout = io.StringIO() stderr = io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test", "/ min(len(stdout_set), len(stderr_set)), 0.90) else: # we might have retrieved data only for", "self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout =", "we might have retrieved data only for one of stdout and stderr #", "Technologies, Inc. All Rights Reserved. # # Licensed under the Apache License, Version", "self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False,", "Inc. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0", "-c '{stderr} >&2 & {stdout}'\" def test_run_command(self): key = secret.make_secret_key() service = BasicTaskService('test", "self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "= stderr.getvalue() # remove timestamps from each line in outputs if prefix_output_with_timestamp: stdout_no_ts", "test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line),", "removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout", "succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout = io.StringIO() stderr =", "1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout and", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t,", "(hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix", "stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result())", "stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout =", "License. 
# You may obtain a copy of the License at # #", "stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0)", "try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit", "else: # we might have retrieved data only for one of stdout and", ") def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output(", "terminated, exit = client.command_result() self.assertEqual(True, terminated) if succeeds is not None: self.assertEqual(succeeds, exit", "= stderr.getvalue() # we are likely to loose some lines, so output is", "self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout and stderr", "stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue() stderr", "compliance with the License. # You may obtain a copy of the License", "exit == 0) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t", "some data for at least one of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024) self.assertGreater(len(stdout.splitlines())", "flags=re.MULTILINE) # test we are removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout)", "stdout = stdout_no_ts stderr = stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '',", "self.assertTrue(stderr_s.raised) # assert stdout and stderr similarity (how many lines both have in", "attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True,", "very useful log line #$i\"; done' cmd_single_line = f'{cmd} | wc' @staticmethod def", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret class FaultyStream:", "self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True", "we are removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts,", "self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True,", "echo \"a very very useful log line #$i\"; done' cmd_single_line = f'{cmd} |", "timestamps from 
each line in outputs if prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+', '', stdout,", "None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue() stderr = stderr.getvalue() #", "stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr = stderr_no_ts # remove", "def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line,", "if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) #", "capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def", "if capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr: self.assertTrue(len(stderr) > 1024)", "stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set =", "if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue()", "has been written.\"\"\" def __init__(self, stream): self.stream = stream self.raised = False def", "capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True )", "capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False )", "flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we are removing something", "verbose=2, attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False)", "expect some data for at least one of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024)", "in outputs if prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+',", "characters if capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr: self.assertTrue(len(stderr) >", "not use this file except in compliance with the License. 
# You may", "return f\"bash -c '{stderr} >&2 & {stdout}'\" def test_run_command(self): key = secret.make_secret_key() service", "stderr): return f\"bash -c '{stderr} >&2 & {stdout}'\" def test_run_command(self): key = secret.make_secret_key()", "= stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else: # we might have retrieved", "removing something (hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout", "stdout and stderr # so we expect some data for at least one", "= set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90)", "service.addresses(), key, verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True,", "License, Version 2.0 (the \"License\"); # you may not use this file except", "verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t, stderr_t =", "stream): self.stream = stream self.raised = False def write(self, b): if not self.raised", "key = secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client", "prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self):", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "{}) exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown() def test_stream_command_output(self):", "# remove timestamps from each line in outputs if prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+',", "do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO() stdout_s", "(hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts", "re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test we are removing something (hopefully timestamps) if", "# Copyright 2021 Uber Technologies, Inc. All Rights Reserved. # # Licensed under", "unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret class FaultyStream: \"\"\"This", "meaningful number of lines and characters if capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) >", "# you may not use this file except in compliance with the License.", "stdout.getvalue() stderr = stderr.getvalue() # we are likely to loose some lines, so", "line in outputs if prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts =", "and # limitations under the License. 
# ============================================================================== import io import re import", "= client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if", "try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout,", "agreed to in writing, software # distributed under the License is distributed on", "try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s,", "is not None: self.assertEqual(succeeds, exit == 0) if stdout_t is not None: stdout_t.join(1.0)", "if prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr,", "= f'{cmd} | wc' @staticmethod def cmd_with(stdout, stderr): return f\"bash -c '{stderr} >&2", "1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def", "True raise RuntimeError() self.stream.write(b) def close(self): pass class TaskServiceTest(unittest.TestCase): cmd = 'for i", "evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised)", "common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE)", "io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try:", "both have in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:',", "(the \"License\"); # you may not use this file except in compliance with", "raise RuntimeError() self.stream.write(b) def close(self): pass class TaskServiceTest(unittest.TestCase): cmd = 'for i in", "very very useful log line #$i\"; done' cmd_single_line = f'{cmd} | wc' @staticmethod", "re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set)", "1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout and stderr similarity (how many lines", "def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd,", "# Unless required by applicable law or agreed to in writing, software #", "do echo \"a very very useful log line #$i\"; done' cmd_single_line = f'{cmd}", "= client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( 
self.cmd_with(self.cmd,", "self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1,", "= re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test", "by applicable law or agreed to in writing, software # distributed under the", "of lines and characters if capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10) if", "Copyright 2021 Uber Technologies, Inc. All Rights Reserved. # # Licensed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "language governing permissions and # limitations under the License. # ============================================================================== import io", "prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self,", "> 1024: self.raised = True raise RuntimeError() self.stream.write(b) def close(self): pass class TaskServiceTest(unittest.TestCase):", "stdout = stdout.getvalue() stderr = stderr.getvalue() # we are likely to loose some", "io.StringIO() stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test service', 0, key,", "b): if not self.raised and len(self.stream.getvalue()) > 1024: self.raised = True raise RuntimeError()", "== 0) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is", "self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue() stderr = stderr.getvalue() # we are", "intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else: # we might have", "= re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set =", "10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds):", "client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is not", "\"\"\"This stream raises an exception after some text has been written.\"\"\" def __init__(self,", "= stdout_no_prefix stderr = stderr_no_prefix if capture_stdout and capture_stderr: # both streams should", "self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown() def", "write(self, b): if not self.raised and len(self.stream.getvalue()) > 1024: self.raised = True raise", "1024: self.raised = True raise RuntimeError() self.stream.write(b) def close(self): pass class 
TaskServiceTest(unittest.TestCase): cmd", "file except in compliance with the License. # You may obtain a copy", "capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def", "client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True, terminated)", "flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set),", "stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test we are removing", "flags=re.MULTILINE) # test we are removing something (hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout)", "f'{cmd} | wc' @staticmethod def cmd_with(stdout, stderr): return f\"bash -c '{stderr} >&2 &", "0), client.command_result()) finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True )", "client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if", "self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) >", "re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines())", "we expect some data for at least one of them self.assertGreaterEqual(len(stdout) + len(stderr),", "= True raise RuntimeError() self.stream.write(b) def close(self): pass class TaskServiceTest(unittest.TestCase): cmd = 'for", "License for the specific language governing permissions and # limitations under the License.", "capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr = stderr_no_prefix if capture_stdout and capture_stderr:", "have retrieved data only for one of stdout and stderr # so we", "self.assertEqual(succeeds, exit == 0) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if", "to in writing, software # distributed under the License is distributed on an", "self.assertEqual(stdout, stderr) # streams should have meaningful number of lines and characters if", "capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines())", "implied. 
# See the License for the specific language governing permissions and #", "import BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret class FaultyStream: \"\"\"This stream raises an", "stdout_t, stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0),", "\"License\"); # you may not use this file except in compliance with the", "Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "from horovod.runner.common.util import secret class FaultyStream: \"\"\"This stream raises an exception after some", "# test we are removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if", "= stderr_no_prefix if capture_stdout and capture_stderr: # both streams should be equal self.assertEqual(stdout,", "equal self.assertEqual(stdout, stderr) # streams should have meaningful number of lines and characters", "similarity (how many lines both have in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout,", "data for at least one of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024) self.assertGreater(len(stdout.splitlines()) +", "is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue() stderr =", "self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr = stderr_no_ts #", "= BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(),", "self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True,", "are removing something (hopefully timestamps) if capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr)", "i in {1..10000}; do echo \"a very very useful log line #$i\"; done'", "stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:',", "or implied. # See the License for the specific language governing permissions and", "# so we expect some data for at least one of them self.assertGreaterEqual(len(stdout)", "wc' @staticmethod def cmd_with(stdout, stderr): return f\"bash -c '{stderr} >&2 & {stdout}'\" def", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "RuntimeError() self.stream.write(b) def close(self): pass class TaskServiceTest(unittest.TestCase): cmd = 'for i in {1..10000};", "OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout =", "stderr) # streams should have meaningful number of lines and characters if capture_stdout:", "self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True", "stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we are removing something (hopefully", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "re import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret class", "finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self):", "secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO() stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr)", "stderr_s = FaultyStream(stderr) service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client", "import io import re import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "raises an exception after some text has been written.\"\"\" def __init__(self, stream): self.stream", "stderr = stderr_no_prefix if capture_stdout and capture_stderr: # both streams should be equal", "BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() self.assertEqual(0,", "self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False", "stderr = io.StringIO() stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test service',", "= re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) # test we are removing something (hopefully prefixes)", "specific language governing permissions and # limitations under the License. 
# ============================================================================== import", "capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True, terminated) if succeeds is", "in {1..10000}; do echo \"a very very useful log line #$i\"; done' cmd_single_line", "if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def", "service', 0, key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2,", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "stdout.getvalue() stderr = stderr.getvalue() # remove timestamps from each line in outputs if", "you may not use this file except in compliance with the License. #", "under the License. # ============================================================================== import io import re import unittest from horovod.runner.common.service.task_service", "stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) #", "if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr = stderr_no_prefix if capture_stdout and", "log line #$i\"; done' cmd_single_line = f'{cmd} | wc' @staticmethod def cmd_with(stdout, stderr):", "so output is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised)", "lines both have in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr =", "self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self,", "self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False,", "stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is", "client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit =", "self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout", "= io.StringIO() stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test service', 0,", "self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): 
self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False,", "set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else:", "key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t,", "'{stderr} >&2 & {stdout}'\" def test_run_command(self): key = secret.make_secret_key() service = BasicTaskService('test service',", "'', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test we are", "data only for one of stdout and stderr # so we expect some", "= 'for i in {1..10000}; do echo \"a very very useful log line", "client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t", "use this file except in compliance with the License. # You may obtain", "= FaultyStream(stderr) service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client =", "finally: service.shutdown() stdout = stdout.getvalue() stderr = stderr.getvalue() # we are likely to", "to loose some lines, so output is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout),", "10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024) self.assertGreater(len(stderr.splitlines()), 10) self.assertTrue(stderr_s.raised) # assert stdout and stderr similarity", ") def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO() stderr = io.StringIO()", "__init__(self, stream): self.stream = stream self.raised = False def write(self, b): if not", "prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE)", "nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t, stderr_t", "done' cmd_single_line = f'{cmd} | wc' @staticmethod def cmd_with(stdout, stderr): return f\"bash -c", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "written.\"\"\" def __init__(self, stream): self.stream = stream self.raised = False def write(self, b):", "are likely to loose some lines, so output is hard to evaluate if", "License. 
# ============================================================================== import io import re import unittest from horovod.runner.common.service.task_service import BasicTaskService,", "capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr = stderr_no_prefix", "terminated) if succeeds is not None: self.assertEqual(succeeds, exit == 0) if stdout_t is", "test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr,", "only for one of stdout and stderr # so we expect some data", "min(len(stdout_set), len(stderr_set)), 0.90) else: # we might have retrieved data only for one", "2.0 (the \"License\"); # you may not use this file except in compliance", "if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr =", "should be equal self.assertEqual(stdout, stderr) # streams should have meaningful number of lines", "for the specific language governing permissions and # limitations under the License. #", "self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True", "BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd),", "stream self.raised = False def write(self, b): if not self.raised and len(self.stream.getvalue()) >", "= secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO() stdout_s = FaultyStream(stdout) stderr_s =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key,", "stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test service', 0, key, nics=None,", "client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown()", "have in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '',", "FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try:", "permissions and # limitations under the License. 
# ============================================================================== import io import re", "#$i\"; done' cmd_single_line = f'{cmd} | wc' @staticmethod def cmd_with(stdout, stderr): return f\"bash", "# we are likely to loose some lines, so output is hard to", "horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util import secret class FaultyStream: \"\"\"This stream raises", "# # Unless required by applicable law or agreed to in writing, software", "= BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command,", "capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False,", "express or implied. # See the License for the specific language governing permissions", "succeeds is not None: self.assertEqual(succeeds, exit == 0) if stdout_t is not None:", "1024) self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def", "verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result())", "= BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code()", "either express or implied. # See the License for the specific language governing", "not self.raised and len(self.stream.getvalue()) > 1024: self.raised = True raise RuntimeError() self.stream.write(b) def", "stdout_no_ts stderr = stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE)", "stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else: #", "= FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test service', 0, key, nics=None, verbose=2)", "= BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd,", "exception after some text has been written.\"\"\" def __init__(self, stream): self.stream = stream", "> 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts,", "retrieved data only for one of stdout and stderr # so we expect", "> 10) if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3,", "if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None:", "stderr = io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None,", 
"self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr: self.assertTrue(len(stderr) > 1024) self.assertTrue(len(stderr.splitlines()) > 10) def test_stream_command_output_reconnect(self):", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "some text has been written.\"\"\" def __init__(self, stream): self.stream = stream self.raised =", "secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client = BasicTaskClient('test", "outputs if prefix_output_with_timestamp: stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '',", "number of lines and characters if capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10)", "# streams should have meaningful number of lines and characters if capture_stdout: self.assertTrue(len(stdout)", "succeeds): key = secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO() stdout_s = FaultyStream(stdout)", "stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown()", "lines, so output is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10)", "is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr), 1024)", "{}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2) terminated, exit = client.command_result() self.assertEqual(True, terminated) if succeeds", "in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr,", "def test_stream_command_output_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key", "test_stream_command_output_stderr(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd), capture_stdout=False, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line),", "stderr_no_prefix if capture_stdout and capture_stderr: # both streams should be equal self.assertEqual(stdout, stderr)", "lines and characters if capture_stdout: self.assertTrue(len(stdout) > 1024) self.assertTrue(len(stdout.splitlines()) > 10) if capture_stderr:", "assert stdout and stderr similarity (how many lines both have in common) stdout", "the License. 
# You may obtain a copy of the License at #", "command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO() stderr = io.StringIO() key = secret.make_secret_key()", "succeeds=True) def test_stream_command_output_no_reconnect(self): self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "stderr similarity (how many lines both have in common) stdout = re.sub('\\[0\\]<stdout>:', '',", "nics=None, verbose=2) try: client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t", "stdout_set = set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)),", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "not None: stderr_t.join(1.0) self.assertEqual(False, stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue() stderr = stderr.getvalue()", "attempts=1) client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {}) exit = client.wait_for_command_exit_code() self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally:", "for one of stdout and stderr # so we expect some data for", "an exception after some text has been written.\"\"\" def __init__(self, stream): self.stream =", "= io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None, verbose=2)", "io.StringIO() stderr = io.StringIO() key = secret.make_secret_key() service = BasicTaskService('test service', 0, key,", "stderr) stdout = stdout_no_ts stderr = stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:',", "do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO() stderr = io.StringIO() key =", "be equal self.assertEqual(stdout, stderr) # streams should have meaningful number of lines and", "self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None) def do_test_stream_command_output_reconnect(self, attempts, succeeds): key = secret.make_secret_key() stdout = io.StringIO() stderr", "self.assertEqual(True, terminated) if succeeds is not None: self.assertEqual(succeeds, exit == 0) if stdout_t", "= stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix =", "is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is not None: stderr_t.join(1.0) self.assertEqual(False,", "self.assertEqual(0, exit) self.assertEqual((True, 0), client.command_result()) finally: service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True,", "for at least one of them self.assertGreaterEqual(len(stdout) + len(stderr), 1024) self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()),", "streams should have meaningful number of lines and characters if capture_stdout: self.assertTrue(len(stdout) >", "stderr # so we expect some data for at least one of them", "stderr = stderr_no_ts # remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix", "are removing something (hopefully 
prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix, stdout) if capture_stderr: self.assertNotEqual(stderr_no_prefix, stderr)", "io import re import unittest from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient from horovod.runner.common.util import", "service.shutdown() def test_stream_command_output(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output(", "test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd),", "re.sub('^[^[]+', '', stdout, flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test we", "the License. # ============================================================================== import io import re import unittest from horovod.runner.common.service.task_service import", "with the License. # You may obtain a copy of the License at", "= re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set = set(stderr.splitlines()) intersect =", "service = BasicTaskService('test service', 0, key, nics=None, verbose=2) try: client = BasicTaskClient('test service',", "0), client.command_result()) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False, stdout_t.is_alive()) if stderr_t is", "(how many lines both have in common) stdout = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE)", "one of stdout and stderr # so we expect some data for at", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False, prefix_output_with_timestamp=True )", "service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command, {}, capture_stdout=capture_stdout, capture_stderr=capture_stderr,", ") def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command,", "self.assertNotEqual(stderr_no_prefix, stderr) stdout = stdout_no_prefix stderr = stderr_no_prefix if capture_stdout and capture_stderr: #", "attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False) client.wait_for_command_termination(delay=0.2)", "and stderr # so we expect some data for at least one of", "class TaskServiceTest(unittest.TestCase): cmd = 'for i in {1..10000}; do echo \"a very very", "'', stderr, flags=re.MULTILINE) # test we are removing something (hopefully prefixes) if capture_stdout:", "capture_stderr, prefix_output_with_timestamp): stdout = io.StringIO() stderr = io.StringIO() key = secret.make_secret_key() service =", "of stdout and 
stderr # so we expect some data for at least", "if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr = stderr_no_ts # remove prefix", ">&2 & {stdout}'\" def test_run_command(self): key = secret.make_secret_key() service = BasicTaskService('test service', 0,", "law or agreed to in writing, software # distributed under the License is", "self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_stdout(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd_single_line), capture_stdout=True, capture_stderr=False,", "the License for the specific language governing permissions and # limitations under the", "BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1) stdout_t, stderr_t = client.stream_command_output(stdout, stderr) client.run_command(command, {},", "self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else: # we might have retrieved data only", "{stdout}'\" def test_run_command(self): key = secret.make_secret_key() service = BasicTaskService('test service', 0, key, nics=None,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# remove prefix stdout_no_prefix = re.sub('\\[0\\]<stdout>:', '', stdout, flags=re.MULTILINE) stderr_no_prefix = re.sub('\\[0\\]<stderr>:', '',", "cmd_single_line = f'{cmd} | wc' @staticmethod def cmd_with(stdout, stderr): return f\"bash -c '{stderr}", "stderr, flags=re.MULTILINE) # test we are removing something (hopefully prefixes) if capture_stdout: self.assertNotEqual(stdout_no_prefix,", "not None: self.assertEqual(succeeds, exit == 0) if stdout_t is not None: stdout_t.join(1.0) self.assertEqual(False,", "\"a very very useful log line #$i\"; done' cmd_single_line = f'{cmd} | wc'", "prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self):", "key = secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO() stdout_s = FaultyStream(stdout) stderr_s", "flags=re.MULTILINE) stderr = re.sub('\\[0\\]<stderr>:', '', stderr, flags=re.MULTILINE) stdout_set = set(stdout.splitlines()) stderr_set = set(stderr.splitlines())", "self.raised = False def write(self, b): if not self.raised and len(self.stream.getvalue()) > 1024:", "TaskServiceTest(unittest.TestCase): cmd = 'for i in {1..10000}; do echo \"a very very useful", "Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "in compliance with the License. # You may obtain a copy of the", "so we expect some data for at least one of them self.assertGreaterEqual(len(stdout) +", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "cmd = 'for i in {1..10000}; do echo \"a very very useful log", "key, verbose=2, attempts=attempts) stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s) client.run_command(self.cmd_with(self.cmd, self.cmd), {}, capture_stdout=True, capture_stderr=True,", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr = stderr_no_ts # remove prefix stdout_no_prefix =", "output is hard to evaluate if succeeds: self.assertGreaterEqual(len(stdout), 1024) self.assertGreater(len(stdout.splitlines()), 10) self.assertTrue(stdout_s.raised) self.assertGreaterEqual(len(stderr),", "capture_stdout: self.assertNotEqual(stdout_no_ts, stdout) if capture_stderr: self.assertNotEqual(stderr_no_ts, stderr) stdout = stdout_no_ts stderr = stderr_no_ts", "= set(stderr.splitlines()) intersect = stdout_set.intersection(stderr_set) self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90) else: # we", "def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout,", "stderr_t.is_alive()) finally: service.shutdown() stdout = stdout.getvalue() stderr = stderr.getvalue() # remove timestamps from", "attempts, succeeds): key = secret.make_secret_key() stdout = io.StringIO() stderr = io.StringIO() stdout_s =", "stdout_no_prefix stderr = stderr_no_prefix if capture_stdout and capture_stderr: # both streams should be", "horovod.runner.common.util import secret class FaultyStream: \"\"\"This stream raises an exception after some text", "See the License for the specific language governing permissions and # limitations under", "self.cmd_with(self.cmd, self.cmd), capture_stdout=True, capture_stderr=True, prefix_output_with_timestamp=False ) def do_test_stream_command_output(self, command, capture_stdout, capture_stderr, prefix_output_with_timestamp): stdout", "flags=re.MULTILINE) stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE) # test we are removing something", "io.StringIO() stderr = io.StringIO() stdout_s = FaultyStream(stdout) stderr_s = FaultyStream(stderr) service = BasicTaskService('test", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "capture_stderr=True, prefix_output_with_timestamp=True ) def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def", "useful log line #$i\"; done' cmd_single_line = f'{cmd} | wc' @staticmethod def cmd_with(stdout,", "and capture_stderr: # both streams should be equal self.assertEqual(stdout, stderr) # streams should", "{}, capture_stdout=capture_stdout, capture_stderr=capture_stderr, prefix_output_with_timestamp=prefix_output_with_timestamp) client.wait_for_command_termination(delay=0.2) self.assertEqual((True, 0), client.command_result()) if stdout_t is not None:", "def test_stream_command_output_neither(self): self.do_test_stream_command_output( self.cmd_with(self.cmd_single_line, self.cmd_single_line), capture_stdout=False, capture_stderr=False, prefix_output_with_timestamp=True ) def test_stream_command_output_un_prefixed(self): self.do_test_stream_command_output( self.cmd_with(self.cmd,", "set(stderr.splitlines()) 
# Copyright Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import io
import re
import unittest

from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient
from horovod.runner.common.util import secret


class FaultyStream:
    """This stream raises an exception after some text has been written."""
    def __init__(self, stream):
        self.stream = stream
        self.raised = False

    def write(self, b):
        if not self.raised and len(self.stream.getvalue()) > 1024:
            self.raised = True
            raise RuntimeError()
        self.stream.write(b)

    def close(self):
        pass


class TaskServiceTest(unittest.TestCase):
    cmd = 'for i in {1..10000}; do echo "a line #$i"; done'
    cmd_single_line = f'{cmd} | wc'

    @staticmethod
    def cmd_with(stdout, stderr):
        return f"bash -c '{stderr} >&2 & {stdout}'"

    def test_run_command(self):
        key = secret.make_secret_key()
        service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
        try:
            client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)
            client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {})
            exit = client.wait_for_command_exit_code()
            self.assertEqual(0, exit)
            self.assertEqual((True, 0), client.command_result())
        finally:
            service.shutdown()

    def test_stream_command_output(self):
        self.do_test_stream_command_output(
            self.cmd_with(self.cmd, self.cmd),
            capture_stdout=True, capture_stderr=True,
            prefix_output_with_timestamp=True
        )

    def test_stream_command_output_stdout(self):
        self.do_test_stream_command_output(
            self.cmd_with(self.cmd, self.cmd_single_line),
            capture_stdout=True, capture_stderr=False,
            prefix_output_with_timestamp=True
        )

    def test_stream_command_output_stderr(self):
        self.do_test_stream_command_output(
            self.cmd_with(self.cmd_single_line, self.cmd),
            capture_stdout=False, capture_stderr=True,
            prefix_output_with_timestamp=True
        )

    def test_stream_command_output_neither(self):
        self.do_test_stream_command_output(
            self.cmd_with(self.cmd_single_line, self.cmd_single_line),
            capture_stdout=False, capture_stderr=False,
            prefix_output_with_timestamp=True
        )

    def test_stream_command_output_un_prefixed(self):
        self.do_test_stream_command_output(
            self.cmd_with(self.cmd, self.cmd),
            capture_stdout=True, capture_stderr=True,
            prefix_output_with_timestamp=False
        )

    def do_test_stream_command_output(self,
                                      command,
                                      capture_stdout, capture_stderr,
                                      prefix_output_with_timestamp):
        stdout = io.StringIO()
        stderr = io.StringIO()

        key = secret.make_secret_key()
        service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
        try:
            client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)
            stdout_t, stderr_t = client.stream_command_output(stdout, stderr)
            client.run_command(command, {},
                               capture_stdout=capture_stdout, capture_stderr=capture_stderr,
                               prefix_output_with_timestamp=prefix_output_with_timestamp)
            client.wait_for_command_termination(delay=0.2)

            stdout_t.join(1.0)
            self.assertEqual(False, stdout_t.is_alive())
            stderr_t.join(1.0)
            self.assertEqual(False, stderr_t.is_alive())
        finally:
            service.shutdown()

        stdout = stdout.getvalue()
        stderr = stderr.getvalue()

        # remove timestamps from each line in outputs
        if prefix_output_with_timestamp:
            stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE)
            stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE)
            # test we are removing something (hopefully timestamps)
            if capture_stdout:
                self.assertNotEqual(stdout_no_ts, stdout)
            if capture_stderr:
                self.assertNotEqual(stderr_no_ts, stderr)
            stdout = stdout_no_ts
            stderr = stderr_no_ts

        # remove the rank prefix from each line in outputs
        stdout_no_prefix = re.sub('\[0\]<stdout>:', '', stdout, flags=re.MULTILINE)
        stderr_no_prefix = re.sub('\[0\]<stderr>:', '', stderr, flags=re.MULTILINE)
        # test we are removing something (hopefully prefixes)
        if capture_stdout:
            self.assertNotEqual(stdout_no_prefix, stdout)
        if capture_stderr:
            self.assertNotEqual(stderr_no_prefix, stderr)
        stdout = stdout_no_prefix
        stderr = stderr_no_prefix

        if capture_stdout and capture_stderr:
            # both streams should be equal
            self.assertEqual(stdout, stderr)

        # streams should have meaningful number of lines and characters
        if capture_stdout:
            self.assertTrue(len(stdout) > 1024)
            self.assertTrue(len(stdout.splitlines()) > 10)
        if capture_stderr:
            self.assertTrue(len(stderr) > 1024)
            self.assertTrue(len(stderr.splitlines()) > 10)

    def test_stream_command_output_reconnect(self):
        self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True)

    def test_stream_command_output_no_reconnect(self):
        self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None)

    def do_test_stream_command_output_reconnect(self, attempts, succeeds):
        key = secret.make_secret_key()
        stdout = io.StringIO()
        stderr = io.StringIO()
        stdout_s = FaultyStream(stdout)
        stderr_s = FaultyStream(stderr)

        service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
        try:
            client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts)
            stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s)
            client.run_command(self.cmd_with(self.cmd, self.cmd), {},
                               capture_stdout=True, capture_stderr=True,
                               prefix_output_with_timestamp=False)
            client.wait_for_command_termination(delay=0.2)
            terminated, exit = client.command_result()
            self.assertEqual(True, terminated)
            if succeeds is not None:
                self.assertEqual(succeeds, exit == 0)

            if stdout_t is not None:
                stdout_t.join(1.0)
                self.assertEqual(False, stdout_t.is_alive())
            if stderr_t is not None:
                stderr_t.join(1.0)
                self.assertEqual(False, stderr_t.is_alive())
        finally:
            service.shutdown()

        stdout = stdout.getvalue()
        stderr = stderr.getvalue()

        # we are likely to lose some lines, so output is hard to evaluate
        if succeeds:
            self.assertGreaterEqual(len(stdout), 1024)
            self.assertGreater(len(stdout.splitlines()), 10)
            self.assertTrue(stdout_s.raised)

            self.assertGreaterEqual(len(stderr), 1024)
            self.assertGreater(len(stderr.splitlines()), 10)
            self.assertTrue(stderr_s.raised)

            # assert stdout and stderr similarity (how many lines both have in common)
            stdout = re.sub('\[0\]<stdout>:', '', stdout, flags=re.MULTILINE)
            stderr = re.sub('\[0\]<stderr>:', '', stderr, flags=re.MULTILINE)
            stdout_set = set(stdout.splitlines())
            stderr_set = set(stderr.splitlines())
            intersect = stdout_set.intersection(stderr_set)
            self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90)
        else:
            # we might have retrieved data only for one of stdout and stderr,
            # so we only assert on at least one of them
            self.assertGreaterEqual(len(stdout) + len(stderr), 1024)
            self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()), 10)
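
# Illustrative sketch, not part of the original test module: it only shows what
# cmd_with() composes for the streaming tests above. Both attributes used here
# (cmd and cmd_single_line) come from TaskServiceTest; nothing else is assumed.
# cmd_with(stdout, stderr) runs the stderr command redirected to stderr in the
# background and the stdout command in the foreground of a single bash call.
if __name__ == '__main__':
    print(TaskServiceTest.cmd_with(TaskServiceTest.cmd, TaskServiceTest.cmd_single_line))
    # bash -c 'for i in {1..10000}; do echo "a line #$i"; done | wc >&2 & for i in {1..10000}; do echo "a line #$i"; done'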

from collections import namedtuple

from anchore_engine.services.policy_engine.engine.policy.params import InputValidator
from anchore_engine.services.policy_engine.engine.policy.gate import Gate, GateMeta, BaseTrigger


class AttributeListValidator(InputValidator):
    def __init__(self, attrs):
        self.attrs = attrs

    def validation_criteria(self):
        return 'In: {}'.format(','.join(self.attrs))

    def __call__(self, *args, **kwargs):
        if args and args[0]:
            parts = map(lambda x: x.strip(), args[0].split(','))
            return not bool(filter(lambda x: x not in self.attrs, parts))
        else:
            return False


CheckOperation = namedtuple('CheckOperation', ['requires_rvalue', 'eval_function'])


class CheckOperations(InputValidator):
    """
    A very generic condition validator. Child classes can override the
    __conditions__ list for different values.
    """

    # Map of tuples from an operator name to a tuple of (bool, function) where
    # arg 0 is whether an rvalue is required and arg 1 is a function taking
    # 2 args to return the evaluation

    def __init__(self, ops):
        """
        :param ops: a dict of string keys mapped to CheckOperation tuples
        """
        self.ops = ops

    def get_op(self, name):
        return self.ops[name]

    def validation_criteria(self):
        return 'In: {}'.format(','.join(self.ops.keys()))

    def __call__(self, *args, **kwargs):
        if args and args[0]:
            return args[0].strip() in self.ops.keys()
        return False


from anchore_engine.services.policy_engine.engine.policy.gate import BaseTrigger, Gate

#
# class MetadataConditionGate(Gate):
#     """
#     A generic conditional check gate on specific data items in the image metadata.
#     """
#     __gate_name__ = 'attribute_condition'
#
#     class ExistsTrigger(BaseTrigger):
#         __trigger_name__ = 'exists'
#         __params__ = {'key': str}
#
#     class LikeTrigger(BaseTrigger):
#         __trigger_name__ = 'like_match'
#         __params__ = {
#             'key': str,
#             'pattern': str,
#         }
#
#     class EqualsTrigger(BaseTrigger):
#         __trigger_name__ = 'equals'
#         __params__ = {
#             'key': str,
#             'value': str
#         }
#
#     class NotExists(BaseTrigger):
#         __trigger_name__ = 'not_exists'
#         __params__ = {'key': str}
#
#     @staticmethod
#     def resolve_key(key, image_obj):
#         """
#         Resolves a text key to a specific attribute of an image and returns it.
#         Examples:
#         $image.dockerfile.from -> image.dockerfile_contents['from']
#
#         :param key:
#         :param image_obj:
#         :return:
#         """
#         # Resolves a key to a specific image element and retrieves it from the image object
#         key_components = key.split('.')
#         if key_components[0] != '$image':
#             raise ValueError('Invalid key format: {}. Must be $image.p1.p2.p3...pN')
#         else:
#             key_components.pop()
#
#         obj = image_obj
#         for k in key_components:
#             obj = model.get_lookup(k, obj)
#
#
# TODO: zhill - Just jotted down these notes for future work
#
# Powerful, but need to ensure consistency, may need to add statement Ids to the language to facilitate
# direct references here
# class BooleanOperatorGate(Gate):
#     __gate_name__ = 'combiner'
#
#     class AndTrigger(BaseTrigger):
#         __trigger_name__ = 'and'
#         __params__ = {
#             'gate_1': str,
#             'trigger_1': str,
#             'result_1': str,
#             'gate_2': str,
#             'trigger_2': str,
#             'result_2': str
#         }
#
#     class OrTrigger(BaseTrigger):
#         __trigger_name__ = 'or'
#         __params__ = {
#             'gate_1': str,
#             'trigger_1': str,
#             'result_1': str,
#             'gate_2': str,
#             'trigger_2': str,
#             'result_2': str
#         }
#
#     class XorTrigger(BaseTrigger):
#         __trigger_name__ = 'xor'
#         __params__ = {
#             'gate_1': str,
#             'trigger_1': str,
#             'result_1': str,
#             'gate_2': str,
#             'trigger_2': str,
#             'result_2': str
#         }
#
#     class NotTrigger(BaseTrigger):
#         __trigger_name__ = 'not'
#         __params__ = {
#             'gate_1': str,
#             'trigger_1': str,
#             'result_1': str
#         }
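
# A minimal usage sketch, assuming the CheckOperation/CheckOperations definitions
# above; this block is not part of the original module. The operator names and
# evaluation lambdas below are hypothetical, chosen only to show the expected
# shape of the `ops` dict and what the validator accepts.
_example_ops = {
    '=': CheckOperation(requires_rvalue=True, eval_function=lambda lval, rval: lval == rval),
    'exists': CheckOperation(requires_rvalue=False, eval_function=lambda lval, rval: lval is not None),
}

_example_validator = CheckOperations(_example_ops)
assert _example_validator('=')             # known operator names validate
assert not _example_validator('unknown')   # unknown names are rejected
assert _example_validator.get_op('=').eval_function('1.2.3', '1.2.3')
# validation_criteria() advertises the accepted operators: "In: =,exists"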

<filename>wyrdin/core/backend/generic.py
#!/usr/bin/python3
#-*- coding: utf-8 -*-
# This code is PEP8-compliant. See http://www.python.org/dev/peps/pep-0008/.
"""
Wyrd In: Time tracker and task manager
CC-Share Alike 2012 © The Wyrd In team
https://github.com/WyrdIn

"""


class DBObject(object):
    _next_id = 0

    def __init__(self, id=None):
        """Creates a new database-enabled object.

        Keyword arguments:
            - id: an ID (a number) of the object, if a specific one is
                  required; if an ID is supplied, it has to be a non-negative
                  integer larger than any of the IDs for this type of object
                  assigned so far

        """
        cls = type(self)  # the actual (most specific) class of self
        if id is not None and id >= cls._next_id:
            self._id = id
        else:
            self._id = cls._next_id
        cls._next_id = self._id + 1

    @property
    def id(self):
        return self._id

    def short_repr(self):
        """Returns a short string which identifies the object and its type
        within the set of objects created in this WyrdIn application.

        """
        raise NotImplementedError(('{cls} does not implement the '
                                   "required method `short_repr'.").format(
            cls=type(self).__name__))
\"\"\" Wyrd", "far \"\"\" cls = type(self) # the actual (most specific) class of self" ]
[ "from output.models.saxon_data.id.id011_xsd.id011 import ( Doc, Para, ) __all__ = [ \"Doc\", \"Para\", ]", "<filename>output/models/saxon_data/id/id011_xsd/__init__.py from output.models.saxon_data.id.id011_xsd.id011 import ( Doc, Para, ) __all__ = [ \"Doc\", \"Para\"," ]
[ "node = lista._head while not node == None: n = str(node._element) + n", "self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements())", "self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10)", "ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll", "= LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self):", "= '' node = lista._head while not node == None: n = str(node._element)", "= LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer()", "LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements())", "= LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll =", "= LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3))", "get_number_from_list(lista): n = '' node = lista._head while not node == None: n", "n = '' node = lista._head while not node == None: n =", "self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top())", "ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10)", "test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6))", "+ n node = node._next return int(n) def soma_lista(l1, l2): n1 = get_number_from_list(l1)", 
"def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements())", "self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4))", "ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self):", "ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def", "n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1 + n2 l3 =", "n2 = get_number_from_list(l2) n3 = n1 + n2 l3 = populate_liste(n3) return l3", "ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList()", "ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12)", "self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10)", "ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5)", "'' node = lista._head while not node == None: n = str(node._element) +", "test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def", "def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def", "ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList()", 
"ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8)", "self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2))", "while not node == None: n = str(node._element) + n node = node._next", "import LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty())", "self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements())", "l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self):", "ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop())", "test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements())", "ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5)", "def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements())", "<reponame>jrandson/data-structures<gh_stars>0 import unittest from linked_list import LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll =", "ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7)", "node._next return int(n) def soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3", "ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513)", "self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() 
self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self):", "test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll =", "lista._head while not node == None: n = str(node._element) + n node =", "self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList()", "== None: n = str(node._element) + n node = node._next return int(n) def", "self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295)", "= lista._head while not node == None: n = str(node._element) + n node", "self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements())", "= populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4))", "ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8)", "ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3)", "ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll =", "def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll", "test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll =", "self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def 
test_remove_node(self): ll = LinkedList() ll.add_node(10)", "ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll", "LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for i", "self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for i in str(n):", "ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3)", "self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def", "LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5))", "def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll", "def populate_liste(n): ll = LinkedList() for i in str(n): ll.add_node(i) return ll def", "= get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1 + n2 l3 = populate_liste(n3)", "LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def", "def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll", "ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8)", "populate_liste(n): ll = LinkedList() for i in str(n): ll.add_node(i) return ll def get_number_from_list(lista):", "from linked_list import LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements())", "def test_remove_repeated_with_buffer(self): ll = LinkedList() 
ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10)", "self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList()", "self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList()", "test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def", "ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList()", "ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll =", "= node._next return int(n) def soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2)", "self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2))", "= populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll", "self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for i in str(n): ll.add_node(i)", "ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6)", "LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll =", "def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) 
self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self):", "ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements())", "ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements())", "self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3))", "n node = node._next return int(n) def soma_lista(l1, l2): n1 = get_number_from_list(l1) n2", "self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements())", "test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12)", "ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1))", "test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll", "self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for i in", "ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self):", "LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1", "n1 + n2 l3 = populate_liste(n3) return l3 if __name__ == '__main__': unittest.main()", "LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top())", "ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) 
ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def", "populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll =", "= LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll", "self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def", "ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList()", "self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10)", "ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 =", "ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def", "def soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1 +", "self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self):", "n = str(node._element) + n node = node._next return int(n) def soma_lista(l1, l2):", "= LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList()", "test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self):", "test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) 
ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12)", "unittest from linked_list import LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty())", "ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12)", "ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10)", "LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList()", "soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1 + n2", "not node == None: n = str(node._element) + n node = node._next return", "import unittest from linked_list import LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList()", "ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll", "LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements())", "ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll", "self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8)", "LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements())", "str(node._element) + n node = node._next return int(n) def soma_lista(l1, l2): n1 =", "self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8)", "get_number_from_list(l2) n3 = n1 + n2 l3 = populate_liste(n3) return l3 if __name__", "ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop()) self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8)", 
"self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3))", "self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n):", "ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self):", "ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8)", "ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self):", "ll def get_number_from_list(lista): n = '' node = lista._head while not node ==", "ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5)", "self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1))", "self.assertEqual('',ll.show_elements()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8)", "i in str(n): ll.add_node(i) return ll def get_number_from_list(lista): n = '' node =", "= LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer()", "ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements())", "ll = LinkedList() for i in str(n): ll.add_node(i) return ll def get_number_from_list(lista): n", "self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def test_add_sort2(self): ll = LinkedList() 
self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2))", "LinkedList() for i in str(n): ll.add_node(i) return ll def get_number_from_list(lista): n = ''", "= LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def test_pop(self): ll = LinkedList() self.assertEqual(None,ll.pop())", "ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2", "in str(n): ll.add_node(i) return ll def get_number_from_list(lista): n = '' node = lista._head", "= str(node._element) + n node = node._next return int(n) def soma_lista(l1, l2): n1", "ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3)", "None: n = str(node._element) + n node = node._next return int(n) def soma_lista(l1,", "ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8)", "ll.add_node(i) return ll def get_number_from_list(lista): n = '' node = lista._head while not", "ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.top()) def", "self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12)", "self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for i in str(n): ll.add_node(i) return ll", "ll.add_node(8) ll.add_new_tail(5) self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5)", "self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1)) self.assertEqual('5->4->3->2->1->',ll.add_sort(2)) def", "n3 = n1 + n2 l3 = populate_liste(n3) return l3 if __name__ ==", "self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for i in str(n): ll.add_node(i) return", "return ll def get_number_from_list(lista): n = '' node = lista._head while not node", "int(n) def soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1", "LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) 
self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1)", "ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll =", "ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll =", "l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5))", "= LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll = LinkedList() for", "LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_without_buffer() self.assertEqual('3->12->8->10->',ll.show_elements())", "= LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12) self.assertEqual('8->5->',ll.show_elements()) ll.remove_node(5) self.assertEqual('8->',ll.show_elements())", "str(n): ll.add_node(i) return ll def get_number_from_list(lista): n = '' node = lista._head while", "= get_number_from_list(l2) n3 = n1 + n2 l3 = populate_liste(n3) return l3 if", "self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements())", "def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10)", "= LinkedList() for i in str(n): ll.add_node(i) return ll def get_number_from_list(lista): n =", "self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements())", "self.assertEqual(8,ll.pop()) self.assertEqual('10->5->',ll.show_elements()) def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10)", "ll.add_node(10) ll.add_node(12) ll.add_node(3) 
ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12)", "ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 = populate_liste(513) self.assertEqual('3->1->5->',l1.show_elements())", "node == None: n = str(node._element) + n node = node._next return int(n)", "for i in str(n): ll.add_node(i) return ll def get_number_from_list(lista): n = '' node", "linked_list import LinkedList class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10)", "ll.remove_node(5) self.assertEqual('8->',ll.show_elements()) def test_remove_repeated_with_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_with_buffer()", "ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1)) self.assertEqual([10],ll.find_last_n(6)) def test_soma(self): l1 =", "= n1 + n2 l3 = populate_liste(n3) return l3 if __name__ == '__main__':", "test_add_sort2(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort2(3)) self.assertEqual('3->5->',ll.add_sort2(5)) self.assertEqual('3->4->5->',ll.add_sort2(4)) self.assertEqual('1->3->4->5->',ll.add_sort2(1)) self.assertEqual('1->2->3->4->5->',ll.add_sort2(2)) def populate_liste(n): ll =", "self.assertEqual('3->1->5->',l1.show_elements()) self.assertEqual(513,get_number_from_list(l1)) l2 = populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList()", "ll.add_new_tail(5) self.assertEqual('8->10->5->',ll.show_elements()) ll.add_node(6) self.assertEqual('6->8->10->5->',ll.show_elements()) def test_top(self): ll = LinkedList() self.assertEqual(None,ll.top()) ll.add_node(10) ll.add_node(8) ll.add_new_tail(5)", "def get_number_from_list(lista): n = '' node = lista._head while not node == None:", "class LinkListTest(unittest.TestCase): def test_add_node(self): ll = LinkedList() self.assertEqual(True,ll.is_empty()) self.assertEqual('',ll.show_elements()) ll.add_node(10) self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8)", "l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1 + n2 l3", "ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer()", "populate_liste(295) self.assertEqual('5->9->2->',l2.show_elements()) self.assertEqual(295,get_number_from_list(l2)) self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements()) def test_add_sort(self): ll = LinkedList() self.assertEqual('3->',ll.add_sort(3)) self.assertEqual('5->3->',ll.add_sort(5)) self.assertEqual('5->4->3->',ll.add_sort(4)) self.assertEqual('5->4->3->1->',ll.add_sort(1))", "return int(n) def 
soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 = get_number_from_list(l2) n3 =", "def test_remove_node(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_new_tail(5) self.assertEqual('12->8->10->5->',ll.show_elements()) ll.remove_node(10) self.assertEqual('12->8->5->',ll.show_elements()) ll.remove_node(12)", "get_number_from_list(l1) n2 = get_number_from_list(l2) n3 = n1 + n2 l3 = populate_liste(n3) return", "self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(8) self.assertEqual('8->12->8->10->',ll.show_elements()) ll.remove_repeated_without_buffer() self.assertEqual('12->8->10->',ll.show_elements())", "def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_node(12) ll.add_node(3) ll.add_node(5) ll.add_node(7) self.assertEqual([3,12,8,10],ll.find_last_n(3)) self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1))", "node = node._next return int(n) def soma_lista(l1, l2): n1 = get_number_from_list(l1) n2 =", "ll.remove_repeated_with_buffer() self.assertEqual('12->8->10->',ll.show_elements()) ll.add_node(10) ll.add_node(12) ll.add_node(3) ll.remove_repeated_with_buffer() self.assertEqual('3->12->8->10->',ll.show_elements()) def test_remove_repeated_wihtout_buffer(self): ll = LinkedList() ll.add_node(10)", "self.assertEqual(False,ll.is_empty()) self.assertEqual('10->',ll.show_elements()) ll.add_node(8) ll.add_node(1) self.assertEqual('1->8->10->',ll.show_elements()) def test_add_new_tail(self): ll = LinkedList() ll.add_node(10) ll.add_node(8) ll.add_new_tail(5)" ]
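The `linked_list` module itself is not part of this fragment. The following is a minimal sketch, assuming only the behaviour the tests above specify (add_node prepends at the head, show_elements renders 'a->b->', top/pop act on the head); it covers a subset of the tested API and is illustrative, not the original implementation.

# Minimal sketch of a LinkedList satisfying a subset of the tests above
# (is_empty, add_node, add_new_tail, show_elements, top, pop).
# Assumption-based illustration, not the original linked_list.py.
class _Node:
    def __init__(self, element, next_node=None):
        self._element = element
        self._next = next_node


class LinkedList:
    def __init__(self):
        self._head = None

    def is_empty(self):
        return self._head is None

    def add_node(self, element):
        # Prepend: add_node(10); add_node(8) -> '8->10->'
        self._head = _Node(element, self._head)

    def add_new_tail(self, element):
        # Append: '8->10->' + add_new_tail(5) -> '8->10->5->'
        new_node = _Node(element)
        if self._head is None:
            self._head = new_node
            return
        node = self._head
        while node._next is not None:
            node = node._next
        node._next = new_node

    def show_elements(self):
        parts = []
        node = self._head
        while node is not None:
            parts.append(str(node._element) + '->')
            node = node._next
        return ''.join(parts)

    def top(self):
        # Returns the head element without removing it (None when empty).
        return None if self._head is None else self._head._element

    def pop(self):
        # Removes and returns the head element (None when empty).
        if self._head is None:
            return None
        element = self._head._element
        self._head = self._head._next
        return element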
[ "0 # 身份证起始点高度值 end_region_index = 0 # 身份证结束点高度值 for i in range(img_closed_original.shape[0]): #", "= preprocess_cut_one_img(img_path, img_name, save_path, problem_path) error_count += error_temp if error_temp == 0: cv2.imwrite(os.path.join(save_path,", "img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0] * img.shape[1]): # 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的)", ":param img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param save_path: 滤波结果保存路径,测试时使用 :return: 灰度化、滤波后图片 \"\"\" #", "program end'.format(name=img_path_name)) return 1, None else: # 纠正成功 print('Correctly cut img {name}, exception", "and (area >= 0.05 * img.shape[0] * img.shape[1]): # 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的) rect = cv2.minAreaRect(contours[i])", "preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'): # 处理一张图片 \"\"\" 裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径 :param img_name:", "img_closed = cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀 # 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 return img_closed def find_bbox(img,", "error_names.append(img_name) print('total error number is: ', error_count) print('error images mame :') for error_img_name", "30】的区域内 img_closed_original[temp_line_position][:] = 0 # 强制变为0 return img_closed_original def cut_part_img(img, cut_percent): \"\"\" #", "res_bbox = find_bbox(img_t, img_binary) if len(res_bbox) != 2: # 纠正失败 print('Failed to cut", "and width_sum[i] > 330: start_region_flag = 1 start_region_index = i # 判定第一个白点个数大于330的是身份证区域的起始点 if", "裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径 :param img_name: 图片名称 :param save_path: 结果保存路径 测试用 :param problem_path:", "0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'),", "y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上 \"\"\" left = [] right =", "sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) #", "图片所在路径 :param img_name: 图片名称 :param save_path: 结果保存路径 测试用 :param problem_path: 出错图片中间结果保存 测试用 :return:", "number is: ', error_count) print('error images mame :') for error_img_name in error_names: print(error_img_name)", "= cv2.contourArea(contours[i]) # 计算面积 if (area <= 0.4 * img.shape[0] * img.shape[1]) and", "cut_part_img(res_bbox[0], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] +", "return if not os.path.exists(save_path): # 保存路径不存在,则创建路径 os.makedirs(save_path) if not os.path.exists(problem_path): # 保存路径不存在,则创建路径 os.makedirs(problem_path)", "if not os.path.exists(problem_path): # 保存路径不存在,则创建路径 os.makedirs(problem_path) img_names = os.listdir(img_path) error_count = 0 error_names", "22:25 # @Author : <NAME> # @Reference : None # @File : cut_twist_join.py", "return img_closed_original def cut_part_img(img, cut_percent): \"\"\" # 从宽度和高度两个方向,裁剪身份证边缘 :param img: 身份证区域 :param cut_percent:", "== '__main__': origin_img_path = './problem_imgs/' cutted_save_path = './res_imgs/' cut_problem_path = './temp_imgs/' #process_img(img_path=origin_img_path, save_path=cutted_save_path,", "= cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))", "rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))]]) # 
rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定 m =", "= np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))]])", "gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用 \"\"\" 求取梯度,二值化 :param img_blurred: 滤波后的图片 :param image_name: 图片名,测试用", "is: ', error_count) print('error images mame :') for error_img_name in error_names: print(error_img_name) return", "@File : cut_twist_join.py # @IDE : PyCharm Community Edition \"\"\" 将身份证正反面从原始图片中切分出来。 需要的参数有: 1.图片所在路径。", "右下, 左上, 右上 \"\"\" left = [] right = [] for i in", "end_region_index = i # 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index +", "kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) # cv2.imwrite(os.path.join(save_path, img_name", "# 将灰度图二值化,后面两个参数调试用 \"\"\" 求取梯度,二值化 :param img_blurred: 滤波后的图片 :param image_name: 图片名,测试用 :param save_path: 保存路径,测试用", "= i # 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]", "cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) #", "2 w_end = width - width_num // 2 - 1 return img[h_start:h_end, w_start:w_end]", "<NAME> # @Reference : None # @File : cut_twist_join.py # @IDE : PyCharm", "= cv2.subtract(gradX, gradY) img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh =", "[int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))]]) # rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定 m", "结果保存路径 :param problem_path: 问题图片保存路径 :return: None \"\"\" if not os.path.exists(img_path): # 判断图片路径是否存在 print('img", "point_judge([int(rect[0][0]), int(rect[0][1])], box) src = np.float32([left_down, right_down, left_up, right_up]) # 这里注意必须对应 dst =", ":return: 灰度化、滤波后图片 \"\"\" # img = cv2.imread(image_path + image_name) # 读取图片 img_gray =", "program '.format(name=img_path_name)) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'),", "0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return 0, res_bbox def process_img(img_path, save_path,", "- height_num // 2 - 1 width_num = int(width * cut_percent) # 需要裁剪的宽度值", "-1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行锐化", "{name}, exception program end'.format(name=img_path_name)) return 0, res_bbox else: # 裁剪过程正常 # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0]", "None, iterations=9) # 腐蚀膨胀 # 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 return img_closed def find_bbox(img, img_closed): # 寻找身份证正反面区域", "numpy as np def point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x, y] :param", "(area <= 0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0]", "problem_path: 出错图片中间结果保存 测试用 :return: 身份证正反面图片 \"\"\" img_path_name = os.path.join(img_path, img_name) if not os.path.exists(img_path_name):", "return 0, res_bbox def process_img(img_path, save_path, problem_path): \"\"\" 切分一个目录下的所有图片 
:param img_path: 图片所在路径 :param", "img_closed_original.shape[0]替代 if start_region_flag == 0 and width_sum[i] > 330: start_region_flag = 1 start_region_index", "right.append(bbox[i]) else: left.append(bbox[i]) if right[0][1] > right[1][1]: # 如果y点坐标大,则是右上 right_down = right[1] right_up", "def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用 \"\"\" 求取梯度,二值化 :param img_blurred: 滤波后的图片 :param image_name:", "+ '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) else: error_names.append(img_name) print('total", "- 1 width_num = int(width * cut_percent) # 需要裁剪的宽度值 w_start = 0 +", "img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果", "调试用,保存中间处理结果 img_binary = find_cut_line(img_binary) # 强制分割正反面 res_bbox = find_bbox(img_t, img_binary) if len(res_bbox) !=", "for i in range(0, len(contours)): area = cv2.contourArea(contours[i]) # 计算面积 if (area <=", "img_name.split('.')[0] + '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name),", "# 如果y点坐标大,则是右上 right_down = right[1] right_up = right[0] else: right_down = right[0] right_up", "countours_res = [] for i in range(0, len(contours)): area = cv2.contourArea(contours[i]) # 计算面积", "None \"\"\" if not os.path.exists(img_path): # 判断图片路径是否存在 print('img path {name} is not exits,", "== 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1],", "img_closed): # 寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域", "[-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作 img_blurred = cv2.filter2D(img_blurred, -1,", "program end'.format(name=img_path_name)) return 0, res_bbox else: # 裁剪过程正常 # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'),", "= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看", "需要裁剪的宽度值 w_start = 0 + width_num // 2 w_end = width - width_num", "np def point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x, y] :param bbox: 矩形顶点坐标[[x1,", "锐化, 这里的卷积核可以更改 return img_blurred def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用 \"\"\" 求取梯度,二值化 :param", "(5, 5)) img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel) img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel) img_closed", "cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred", "转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1,", "start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] = 0 for i in range(1, 11): # 参数可变,分割10个点", ":param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下,", "参数可变,分割10个点 temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i] if abs(temp_line_position - min_line_position) < 30: #", 
"img_path_name = os.path.join(img_path, img_name) if not os.path.exists(img_path_name): # 判断图片是否存在 print('img {name} is not", "cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好 kernel", "img {name}, exception program end'.format(name=img_path_name)) return 0, res_bbox else: # 裁剪过程正常 # cv2.imwrite(os.path.join(save_path,", "left = [] right = [] for i in range(4): if bbox[i][0] >", "图片名称 :param save_path: 结果保存路径 测试用 :param problem_path: 出错图片中间结果保存 测试用 :return: 身份证正反面图片 \"\"\" img_path_name", "y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上 \"\"\" left = []", "else: right_down = right[0] right_up = right[1] if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上", "0], [-1, 5, -1], [0, -1, 0]], np.float32)) # cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'),", "-1], [0, -1, 0]], np.float32)) # cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化,", "dy=1) img_gradient = cv2.subtract(gradX, gradY) img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用", "abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] = 0 # 强制变为0", "这里opencv如果版本不对(4.0或以上)会报错,只需把(contours, _)改成 (_, contours, _) contours = sorted(contours, key=cv2.contourArea, reverse=True) # 按照面积大小排序 countours_res", "# 左右等比例切分 h_end = height - height_num // 2 - 1 width_num =", "= right[0] right_up = right[1] if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上 left_down =", "if width_sum[i] > 330: end_region_index = i # 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值", "len(res_bbox) != 2: # 异常处理 print('Error happened when cut img {name}, try exception", "# @Author : <NAME> # @Reference : None # @File : cut_twist_join.py #", "end'.format(name=img_path_name)) return 1, None else: # 纠正成功 print('Correctly cut img {name}, exception program", "right_up = right[1] if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上 left_down = left[1] left_up", "img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) #", "y] :param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是", "error_names = [] for img_name in img_names: error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path,", "img: 原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域 \"\"\" (contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST,", "= 0 start_region_index = 0 # 身份证起始点高度值 end_region_index = 0 # 身份证结束点高度值 for", "try exception cut program '.format(name=img_path_name)) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path,", "np.float32([left_down, right_down, left_up, right_up]) # 这里注意必须对应 dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0],", "# 灰度化并滤波 img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0,", "left_down = left[1] left_up = left[0] else: left_down = left[0] left_up = left[1]", "cv2.MORPH_OPEN, kernel) img_closed = cv2.erode(img_closed, None, iterations=9) img_closed = cv2.dilate(img_closed, None, iterations=9) #", "= cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel) img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel) img_closed = cv2.erode(img_closed, None,", "= cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel) img_closed = 
cv2.erode(img_closed, None, iterations=9) img_closed = cv2.dilate(img_closed, None,", "rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box = cv2.boxPoints(rect) left_down, right_down, left_up, right_up =", "img_path: 图片所在路径 :param save_path: 结果保存路径 :param problem_path: 问题图片保存路径 :return: None \"\"\" if not", "原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域 \"\"\" (contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)", "0 # 强制变为0 return img_closed_original def cut_part_img(img, cut_percent): \"\"\" # 从宽度和高度两个方向,裁剪身份证边缘 :param img:", "- 1 return img[h_start:h_end, w_start:w_end] # 返回裁剪后的图片 def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'): #", "temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i] if abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30,", "img_gray) # 保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1],", "height - height_num // 2 - 1 width_num = int(width * cut_percent) #", "(_, contours, _) contours = sorted(contours, key=cv2.contourArea, reverse=True) # 按照面积大小排序 countours_res = []", "= gray_and_fliter(img, img_name) # 灰度化并滤波 img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1,", "img_blurred def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用 \"\"\" 求取梯度,二值化 :param img_blurred: 滤波后的图片 :param", "point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x, y] :param bbox: 矩形顶点坐标[[x1, y1], [x2,", "19-11-19 22:25 # @Author : <NAME> # @Reference : None # @File :", "min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] = 0 # 强制变为0 return img_closed_original", "cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return", "# 强制变为0 return img_closed_original def cut_part_img(img, cut_percent): \"\"\" # 从宽度和高度两个方向,裁剪身份证边缘 :param img: 身份证区域", ":return: 身份证正反面区域 \"\"\" (contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # 求出框的个数 # 这里opencv如果版本不对(4.0或以上)会报错,只需把(contours,", "for i in range(img_closed_original.shape[0]): # 1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代 if start_region_flag == 0 and", "< 30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] = 0 # 强制变为0 return img_closed_original def", "# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return 0, res_bbox def process_img(img_path, save_path, problem_path):", "[0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))]]) # rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定 m = cv2.getPerspectiveTransform(src, dst)", "error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path, problem_path) error_count += error_temp if error_temp ==", "矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上,", "# 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] =", "img.shape[1]): # 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的) rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box = 
cv2.boxPoints(rect) left_down, right_down,", "# sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)", "return img_closed def find_bbox(img, img_closed): # 寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param", "转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param save_path: 滤波结果保存路径,测试时使用 :return:", "cv2 import numpy as np def point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x,", "# 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return: 处理后的二值化图片 \"\"\" img_closed = img_closed_original.copy()", "测试用 :param problem_path: 出错图片中间结果保存 测试用 :return: 身份证正反面图片 \"\"\" img_path_name = os.path.join(img_path, img_name) if", "cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3,", "img_closed_original.copy() img_closed = img_closed // 250 #print(img_closed.shape) width_sum = img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数 start_region_flag", "img_closed def find_bbox(img, img_closed): # 寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed:", "0]], np.float32)) # 对图像进行锐化 img_binary = gradient_and_binary(img_blurred) # 二值化 res_bbox = find_bbox(img_t, img_binary)", "rect[1][1]))]]) # rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定 m = cv2.getPerspectiveTransform(src, dst) # 得到投影变换矩阵 result = cv2.warpPerspective(img, m,", "# 只要是x坐标比中心点坐标大,一定是右边 right.append(bbox[i]) else: left.append(bbox[i]) if right[0][1] > right[1][1]: # 如果y点坐标大,则是右上 right_down =", "输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param save_path: 滤波结果保存路径,测试时使用 :return: 灰度化、滤波后图片 \"\"\" # img =", "!= 2: # 纠正失败 print('Failed to cut img {name}, exception program end'.format(name=img_path_name)) return", "for error_img_name in error_names: print(error_img_name) return if __name__ == '__main__': origin_img_path = './problem_imgs/'", "w_start = 0 + width_num // 2 w_end = width - width_num //", "# 这里注意必须对应 dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0],", "* img.shape[0] * img.shape[1]): # 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的) rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box =", "problem_path: 问题图片保存路径 :return: None \"\"\" if not os.path.exists(img_path): # 判断图片路径是否存在 print('img path {name}", ": 19-11-19 22:25 # @Author : <NAME> # @Reference : None # @File", "program break.'.format(name=img_path)) return if not os.path.exists(save_path): # 保存路径不存在,则创建路径 os.makedirs(save_path) if not os.path.exists(problem_path): #", "= 0 + width_num // 2 w_end = width - width_num // 2", "身份证正反面图片 \"\"\" img_path_name = os.path.join(img_path, img_name) if not os.path.exists(img_path_name): # 判断图片是否存在 print('img {name}", "img_closed = img_closed_original.copy() img_closed = img_closed // 250 #print(img_closed.shape) width_sum = img_closed.sum(axis=1) #", "# 如果y点坐标大,则是左上 left_down = left[1] left_up = left[0] else: left_down = left[0] left_up", "\"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return: 处理后的二值化图片 \"\"\" img_closed = img_closed_original.copy() img_closed =", "kernel) img_closed = cv2.erode(img_closed, None, iterations=9) img_closed = cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀", "出错图片中间结果保存 测试用 :return: 身份证正反面图片 \"\"\" img_path_name = os.path.join(img_path, img_name) if not 
os.path.exists(img_path_name): #", "img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果 img_binary = find_cut_line(img_binary) # 强制分割正反面 res_bbox", "os import cv2 import numpy as np def point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列 :param", "# 转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param save_path: 滤波结果保存路径,测试时使用", "img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'),", "\"\"\" height, width, _ = img.shape height_num = int(height * cut_percent) # 需要裁剪的高度值", "img_closed_original[min_line_position][:] = 0 for i in range(1, 11): # 参数可变,分割10个点 temp_line_position = start_region_index", "'_0.jpg'), cut_part_img(res_bbox[0], 0.0)) cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) else: error_names.append(img_name) print('total error", "需要裁剪的高度值 h_start = 0 + height_num // 2 # 左右等比例切分 h_end = height", "= cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))", "= right[0] else: right_down = right[0] right_up = right[1] if left[0][1] > left[1][1]:", "dx=1, dy=0) gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1) img_gradient = cv2.subtract(gradX, gradY) img_gradient", "if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上 left_down = left[1] left_up = left[0] else:", "# 纠正成功 print('Correctly cut img {name}, exception program end'.format(name=img_path_name)) return 0, res_bbox else:", "import cv2 import numpy as np def point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列 :param center:", "判断图片路径是否存在 print('img path {name} is not exits, program break.'.format(name=img_path)) return if not os.path.exists(save_path):", "cut_percent): \"\"\" # 从宽度和高度两个方向,裁剪身份证边缘 :param img: 身份证区域 :param cut_percent: 裁剪的比例 :return: 裁剪后的身份证区域 \"\"\"", "2 # 左右等比例切分 h_end = height - height_num // 2 - 1 width_num", "np.float32)) # cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改 return img_blurred def", "这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name +", "切分一个目录下的所有图片 :param img_path: 图片所在路径 :param save_path: 结果保存路径 :param problem_path: 问题图片保存路径 :return: None \"\"\"", "kernel) img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel) img_closed = cv2.erode(img_closed, None, iterations=9) img_closed =", "gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0) gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1) img_gradient", "返回裁剪后的图片 def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'): # 处理一张图片 \"\"\" 裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径", "exits, program break.'.format(name=img_path)) return if not os.path.exists(save_path): # 保存路径不存在,则创建路径 os.makedirs(save_path) if not os.path.exists(problem_path):", "img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改 return img_blurred def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):", "= int(height * cut_percent) # 需要裁剪的高度值 h_start = 0 + height_num // 2", "kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行锐化 img_binary", "\"\"\" import os import cv2 import numpy as np def point_judge(center, bbox): \"\"\"", "w_end = width - width_num // 2 - 1 return img[h_start:h_end, w_start:w_end] #", "如果y点坐标大,则是左上 
left_down = left[1] left_up = left[0] else: left_down = left[0] left_up =", "dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0],", "img_binary = gradient_and_binary(img_blurred) # 二值化 res_bbox = find_bbox(img_t, img_binary) # 切分正反面 if len(res_bbox)", "exception program end'.format(name=img_path_name)) return 1, None else: # 纠正成功 print('Correctly cut img {name},", "argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] = 0 for i in range(1,", ":return: 处理后的二值化图片 \"\"\" img_closed = img_closed_original.copy() img_closed = img_closed // 250 #print(img_closed.shape) width_sum", "start_region_flag = 0 start_region_index = 0 # 身份证起始点高度值 end_region_index = 0 # 身份证结束点高度值", "-*- # @Time : 19-11-19 22:25 # @Author : <NAME> # @Reference :", "身份证区域 :param cut_percent: 裁剪的比例 :return: 裁剪后的身份证区域 \"\"\" height, width, _ = img.shape height_num", "330: end_region_index = i # 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index", "// 2 w_end = width - width_num // 2 - 1 return img[h_start:h_end,", ": PyCharm Community Edition \"\"\" 将身份证正反面从原始图片中切分出来。 需要的参数有: 1.图片所在路径。 输出结果为: 切分后的身份证正反面图片。 \"\"\" import os", "else: # 裁剪过程正常 # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0]", "center[0]: # 只要是x坐标比中心点坐标大,一定是右边 right.append(bbox[i]) else: left.append(bbox[i]) if right[0][1] > right[1][1]: # 如果y点坐标大,则是右上 right_down", "= left[1] return left_down, right_down, left_up, right_up def gray_and_fliter(img, image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用", "\"\"\" gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0) gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)", "gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1) img_gradient = cv2.subtract(gradX, gradY) img_gradient = cv2.convertScaleAbs(img_gradient)", "if (area <= 0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 *", "cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box = cv2.boxPoints(rect) left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])],", "np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] = 0 for i in range(1, 11): # 参数可变,分割10个点 temp_line_position =", "_ = img.shape height_num = int(height * cut_percent) # 需要裁剪的高度值 h_start = 0", "cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return 0, res_bbox def process_img(img_path, save_path, problem_path): \"\"\"", "return countours_res # 返回身份证区域 def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片", "二值化 阈值未调整好 kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel) img_closed", "img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数 start_region_flag = 0 start_region_index = 0 # 身份证起始点高度值 end_region_index =", "cv2.CHAIN_APPROX_SIMPLE) # 求出框的个数 # 这里opencv如果版本不对(4.0或以上)会报错,只需把(contours, _)改成 (_, contours, _) contours = sorted(contours, key=cv2.contourArea,", "right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box) src = np.float32([left_down, 
right_down, left_up, right_up]) # 这里注意必须对应", "\"\"\" 用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x, y] :param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3,", "right_up def gray_and_fliter(img, image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param", "11): # 参数可变,分割10个点 temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i] if abs(temp_line_position - min_line_position) <", "# 保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0,", "# 转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred = cv2.filter2D(img_gray,", "1 return img[h_start:h_end, w_start:w_end] # 返回裁剪后的图片 def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'): # 处理一张图片", "find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return: 处理后的二值化图片 \"\"\" img_closed =", "find_bbox(img, img_closed): # 寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed: 二值化后的图片 :return:", "return 0, res_bbox else: # 裁剪过程正常 # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))", "= cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box = cv2.boxPoints(rect) left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]),", "1 width_num = int(width * cut_percent) # 需要裁剪的宽度值 w_start = 0 + width_num", "error_count += error_temp if error_temp == 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))", "'_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img) #", "def process_img(img_path, save_path, problem_path): \"\"\" 切分一个目录下的所有图片 :param img_path: 图片所在路径 :param save_path: 结果保存路径 :param", "error_count = 0 error_names = [] for img_name in img_names: error_temp, res_bbox =", "img_closed = cv2.erode(img_closed, None, iterations=9) img_closed = cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀 #", "[0, -1, 0]], np.float32)) # 对图像进行锐化 img_binary = gradient_and_binary(img_blurred) # 二值化 res_bbox =", "# 异常处理 print('Error happened when cut img {name}, try exception cut program '.format(name=img_path_name))", "process_img(img_path, save_path, problem_path): \"\"\" 切分一个目录下的所有图片 :param img_path: 图片所在路径 :param save_path: 结果保存路径 :param problem_path:", "+ '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) else: error_names.append(img_name) print('total error number is: ', error_count) print('error", "bbox[i][0] > center[0]: # 只要是x坐标比中心点坐标大,一定是右边 right.append(bbox[i]) else: left.append(bbox[i]) if right[0][1] > right[1][1]: #", "left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box) src = np.float32([left_down, right_down, left_up, right_up]) #", "# cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0,", "-1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作 img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1,", "= 0 error_names = [] for img_name in img_names: error_temp, res_bbox = preprocess_cut_one_img(img_path,", "== 0 and width_sum[i] > 330: start_region_flag = 1 start_region_index = i #", "find_bbox(img_t, img_binary) # 切分正反面 if len(res_bbox) != 2: # 异常处理 print('Error happened when", "纠正失败 print('Failed 
to cut img {name}, exception program end'.format(name=img_path_name)) return 1, None else:", "# @Time : 19-11-19 22:25 # @Author : <NAME> # @Reference : None", "得到投影变换矩阵 result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))), flags=cv2.INTER_CUBIC) # 投影变换 countours_res.append(result)", "0], [-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作 img_blurred = cv2.filter2D(img_blurred,", "= img_closed_original.copy() img_closed = img_closed // 250 #print(img_closed.shape) width_sum = img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数", "cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果 img_binary = find_cut_line(img_binary) # 强制分割正反面 res_bbox = find_bbox(img_t,", "rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定 m = cv2.getPerspectiveTransform(src, dst) # 得到投影变换矩阵 result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])),", "if len(res_bbox) != 2: # 纠正失败 print('Failed to cut img {name}, exception program", "0.0)) else: error_names.append(img_name) print('total error number is: ', error_count) print('error images mame :')", "image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0],", "print('error images mame :') for error_img_name in error_names: print(error_img_name) return if __name__ ==", "(int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))), flags=cv2.INTER_CUBIC) # 投影变换 countours_res.append(result) return countours_res # 返回身份证区域 def", "= cv2.boxPoints(rect) left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box) src = np.float32([left_down,", "# 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] = 0 # 强制变为0 return img_closed_original def cut_part_img(img, cut_percent):", "left_up, right_up def gray_and_fliter(img, image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片", "[0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作 img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1, 0],", "// 2 - 1 width_num = int(width * cut_percent) # 需要裁剪的宽度值 w_start =", "# 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 return img_closed def find_bbox(img, img_closed): # 寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img:", "# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1],", "save_path: 保存路径,测试用 :return: 二值化后的图片 \"\"\" gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0) gradY =", "os.makedirs(problem_path) img_names = os.listdir(img_path) error_count = 0 error_names = [] for img_name in", "+= error_temp if error_temp == 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) cv2.imwrite(os.path.join(save_path,", "# 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name", "0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0] * img.shape[1]):", "'.format(name=img_path_name)) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary)", "img_closed = img_closed // 250 #print(img_closed.shape) width_sum = img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数 start_region_flag =", "返回身份证区域 def 
find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return: 处理后的二值化图片 \"\"\"", "+ np.argsort(width_sum[start_region_index:end_region_index])[i] if abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] =", "find_cut_line(img_binary) # 强制分割正反面 res_bbox = find_bbox(img_t, img_binary) if len(res_bbox) != 2: # 纠正失败", "// 2 # 左右等比例切分 h_end = height - height_num // 2 - 1", "\"\"\" # img = cv2.imread(image_path + image_name) # 读取图片 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "'_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改 return img_blurred def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用", "= os.listdir(img_path) error_count = 0 error_names = [] for img_name in img_names: error_temp,", "gray_and_fliter(img, image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用", "-1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作 img_blurred =", "= np.float32([left_down, right_down, left_up, right_up]) # 这里注意必须对应 dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])),", "@IDE : PyCharm Community Edition \"\"\" 将身份证正反面从原始图片中切分出来。 需要的参数有: 1.图片所在路径。 输出结果为: 切分后的身份证正反面图片。 \"\"\" import", "+ '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1,", "res_bbox = find_bbox(img_t, img_binary) # 切分正反面 if len(res_bbox) != 2: # 异常处理 print('Error", "cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))), flags=cv2.INTER_CUBIC) # 投影变换 countours_res.append(result) return countours_res #", "按照面积大小排序 countours_res = [] for i in range(0, len(contours)): area = cv2.contourArea(contours[i]) #", "os.path.exists(img_path): # 判断图片路径是否存在 print('img path {name} is not exits, program break.'.format(name=img_path)) return if", "= left[0] left_up = left[1] return left_down, right_down, left_up, right_up def gray_and_fliter(img, image_name='1.jpg',", "0]], np.float32)) # cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改 return img_blurred", "用于将矩形框的边界按顺序排列 :param center: 矩形中心的坐标[x, y] :param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3],", "= [] for img_name in img_names: error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path, problem_path)", "range(4): if bbox[i][0] > center[0]: # 只要是x坐标比中心点坐标大,一定是右边 right.append(bbox[i]) else: left.append(bbox[i]) if right[0][1] >", "1, [] # 图片不存在,直接返回,报错加一 img = cv2.imread(img_path_name) # 读取图片 img_blurred = gray_and_fliter(img, img_name)", "else: error_names.append(img_name) print('total error number is: ', error_count) print('error images mame :') for", "'_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果 img_binary = find_cut_line(img_binary) # 强制分割正反面", "# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) #", "res_bbox def process_img(img_path, save_path, problem_path): \"\"\" 切分一个目录下的所有图片 :param img_path: 图片所在路径 :param save_path: 结果保存路径", "保存,方便查看 img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1,", "5, -1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作 img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0,", "'_gray.jpg'), img_gray) # 保存,方便查看 
img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1, 5,", "img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好 kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) img_closed", "\"\"\" 裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径 :param img_name: 图片名称 :param save_path: 结果保存路径 测试用 :param", "= cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)", "5, -1], [0, -1, 0]], np.float32)) # 对图像进行锐化 img_binary = gradient_and_binary(img_blurred) # 二值化", "# 1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代 if start_region_flag == 0 and width_sum[i] > 330: start_region_flag", "2 - 1 width_num = int(width * cut_percent) # 需要裁剪的宽度值 w_start = 0", "img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]],", "cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀 # 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 return img_closed def find_bbox(img, img_closed): #", "np.float32)) # 对图像进行滤波,是锐化操作 img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1],", "寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域 \"\"\" (contours,", "iterations=9) img_closed = cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀 # 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 return img_closed def", "gradY) img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255,", "to cut img {name}, exception program end'.format(name=img_path_name)) return 1, None else: # 纠正成功", "i in range(img_closed_original.shape[0]): # 1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代 if start_region_flag == 0 and width_sum[i]", "img_binary) if len(res_bbox) != 2: # 纠正失败 print('Failed to cut img {name}, exception", "输出结果为: 切分后的身份证正反面图片。 \"\"\" import os import cv2 import numpy as np def point_judge(center,", "{name}, exception program end'.format(name=img_path_name)) return 1, None else: # 纠正成功 print('Correctly cut img", "left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box) src = np.float32([left_down, right_down, left_up,", "img_blurred: 滤波后的图片 :param image_name: 图片名,测试用 :param save_path: 保存路径,测试用 :return: 二值化后的图片 \"\"\" gradX =", "height_num = int(height * cut_percent) # 需要裁剪的高度值 h_start = 0 + height_num //", "img_name, save_path='./save_imgs/', problem_path='./problem_save/'): # 处理一张图片 \"\"\" 裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径 :param img_name: 图片名称", "0.0)) cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) else: error_names.append(img_name) print('total error number is:", "# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果 img_binary", "+ '_blurred.jpg'), img_blurred) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary) # cv2.imwrite(os.path.join(problem_path, img_name), img)", "iterations=9) # 腐蚀膨胀 # 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小 return img_closed def find_bbox(img, img_closed): # 寻找身份证正反面区域 \"\"\"", "cut img {name}, exception program end'.format(name=img_path_name)) return 1, None else: # 纠正成功 print('Correctly", "'_1.jpg'), cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return 0, res_bbox 
def", "cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return 0, res_bbox def process_img(img_path,", "img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) else: error_names.append(img_name) print('total error number is: ', error_count)", "= cv2.imread(image_path + image_name) # 读取图片 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换为灰度图片 #", "sorted(contours, key=cv2.contourArea, reverse=True) # 按照面积大小排序 countours_res = [] for i in range(0, len(contours)):", "img_blurred = cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]],", "flags=cv2.INTER_CUBIC) # 投影变换 countours_res.append(result) return countours_res # 返回身份证区域 def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\"", "处理一张图片 \"\"\" 裁剪出一张图片中的身份证正反面区域 :param img_path: 图片所在路径 :param img_name: 图片名称 :param save_path: 结果保存路径 测试用", "img_path: 图片所在路径 :param img_name: 图片名称 :param save_path: 结果保存路径 测试用 :param problem_path: 出错图片中间结果保存 测试用", "# cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果 img_binary = find_cut_line(img_binary) # 强制分割正反面 res_bbox =", "# cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改 return img_blurred def gradient_and_binary(img_blurred,", "[x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上 \"\"\"", "img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param save_path: 滤波结果保存路径,测试时使用 :return: 灰度化、滤波后图片 \"\"\" # img", "= start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] = 0 for i in range(1, 11): #", "= cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,", "矩形中心的坐标[x, y] :param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]] :return:", "0.05 * img.shape[0] * img.shape[1]): # 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的) rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box", "width_sum = img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数 start_region_flag = 0 start_region_index = 0 # 身份证起始点高度值", "img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0)) # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img) return 0,", "h_end = height - height_num // 2 - 1 width_num = int(width *", "cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel) img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)", "box) src = np.float32([left_down, right_down, left_up, right_up]) # 这里注意必须对应 dst = np.float32([[0, 0],", "对图像进行锐化 img_binary = gradient_and_binary(img_blurred) # 二值化 res_bbox = find_bbox(img_t, img_binary) # 切分正反面 if", "异常处理 print('Error happened when cut img {name}, try exception cut program '.format(name=img_path_name)) #", "for i in range(1, 11): # 参数可变,分割10个点 temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i] if", "cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3) # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好", "[x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上 \"\"\" left = [] right", "# 投影变换 countours_res.append(result) return countours_res # 返回身份证区域 def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分", ":return: 身份证正反面图片 \"\"\" img_path_name = 
os.path.join(img_path, img_name) if not os.path.exists(img_path_name): # 判断图片是否存在 print('img", "{name}, try exception cut program '.format(name=img_path_name)) # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred) #", "error_temp == 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'),", "左下, 右下, 左上, 右上 \"\"\" left = [] right = [] for i", "image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param image_name: 输入图片名称,测试时使用 :param", "ddepth=cv2.CV_32F, dx=0, dy=1) img_gradient = cv2.subtract(gradX, gradY) img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代", "result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))), flags=cv2.INTER_CUBIC) # 投影变换 countours_res.append(result) return", "cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # 求出框的个数 # 这里opencv如果版本不对(4.0或以上)会报错,只需把(contours, _)改成 (_, contours, _) contours =", "preprocess_cut_one_img(img_path, img_name, save_path, problem_path) error_count += error_temp if error_temp == 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0]", "@Time : 19-11-19 22:25 # @Author : <NAME> # @Reference : None #", "right[0][1] > right[1][1]: # 如果y点坐标大,则是右上 right_down = right[1] right_up = right[0] else: right_down", "save_path, problem_path) error_count += error_temp if error_temp == 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'),", "30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] = 0 # 强制变为0 return img_closed_original def cut_part_img(img,", "np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))], [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))]]) #", "img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel) img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel) img_closed = cv2.erode(img_closed,", "= find_bbox(img_t, img_binary) if len(res_bbox) != 2: # 纠正失败 print('Failed to cut img", "if abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:] = 0 #", "投影变换 countours_res.append(result) return countours_res # 返回身份证区域 def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param", "= 0 + height_num // 2 # 左右等比例切分 h_end = height - height_num", "读取图片 img_blurred = gray_and_fliter(img, img_name) # 灰度化并滤波 img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1,", "纠正成功 print('Correctly cut img {name}, exception program end'.format(name=img_path_name)) return 0, res_bbox else: #", "{name} is not exits, program break.'.format(name=img_path)) return if not os.path.exists(save_path): # 保存路径不存在,则创建路径 os.makedirs(save_path)", "判断图片是否存在 print('img {name} is not exits'.format(name=img_path_name)) return 1, [] # 图片不存在,直接返回,报错加一 img =", "y1], [x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上", "img_name.split('.')[0] + '_original.jpg'), img) return 0, res_bbox def process_img(img_path, save_path, problem_path): \"\"\" 切分一个目录下的所有图片", "只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] = 0", "utf-8 -*- # @Time : 
19-11-19 22:25 # @Author : <NAME> # @Reference", "cv2.COLOR_BGR2GRAY) # 转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看 img_blurred =", "1.图片所在路径。 输出结果为: 切分后的身份证正反面图片。 \"\"\" import os import cv2 import numpy as np def", "countours_res # 返回身份证区域 def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return:", "img[h_start:h_end, w_start:w_end] # 返回裁剪后的图片 def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'): # 处理一张图片 \"\"\" 裁剪出一张图片中的身份证正反面区域", "= right[1] if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上 left_down = left[1] left_up =", "img.shape[0] * img.shape[1]): # 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的) rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数 box = cv2.boxPoints(rect)", "0 + width_num // 2 w_end = width - width_num // 2 -", "'__main__': origin_img_path = './problem_imgs/' cutted_save_path = './res_imgs/' cut_problem_path = './temp_imgs/' #process_img(img_path=origin_img_path, save_path=cutted_save_path, problem_path=cut_problem_path)", "save_path: 结果保存路径 :param problem_path: 问题图片保存路径 :return: None \"\"\" if not os.path.exists(img_path): # 判断图片路径是否存在", ":param problem_path: 出错图片中间结果保存 测试用 :return: 身份证正反面图片 \"\"\" img_path_name = os.path.join(img_path, img_name) if not", ":') for error_img_name in error_names: print(error_img_name) return if __name__ == '__main__': origin_img_path =", "i # 只要白点个数大于330,便认为是身份证区域,更新结束点 # 身份证区域中白点最少的高度值,认为这是正反面的交点 # argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值 min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:]", "is not exits'.format(name=img_path_name)) return 1, [] # 图片不存在,直接返回,报错加一 img = cv2.imread(img_path_name) # 读取图片", "cv2.filter2D(img_gray, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) #", "5)) img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel) img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel) img_closed =", "box = cv2.boxPoints(rect) left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box) src =", "# -*- coding: utf-8 -*- # @Time : 19-11-19 22:25 # @Author :", "Community Edition \"\"\" 将身份证正反面从原始图片中切分出来。 需要的参数有: 1.图片所在路径。 输出结果为: 切分后的身份证正反面图片。 \"\"\" import os import cv2", "start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i] if abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内 img_closed_original[temp_line_position][:]", "# 寻找身份证正反面区域 \"\"\" 根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域 \"\"\"", "def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线 \"\"\" 根据规则,强行将粘连的区域切分 :param img_closed_original: 二值化图片 :return: 处理后的二值化图片 \"\"\" img_closed", "330: start_region_flag = 1 start_region_index = i # 判定第一个白点个数大于330的是身份证区域的起始点 if width_sum[i] > 330:", "+ np.argsort(width_sum[start_region_index:end_region_index])[0] img_closed_original[min_line_position][:] = 0 for i in range(1, 11): # 参数可变,分割10个点 temp_line_position", "# 二值化 res_bbox = find_bbox(img_t, img_binary) # 切分正反面 if len(res_bbox) != 2: #", "return img_blurred def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用 \"\"\" 求取梯度,二值化 :param img_blurred: 滤波后的图片", "dy=0) gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1) img_gradient = cv2.subtract(gradX, gradY) 
img_gradient =", "# 读取图片 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换为灰度图片 # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'),", ":return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上 \"\"\" left = [] right = []", "rect[1][1]))), flags=cv2.INTER_CUBIC) # 投影变换 countours_res.append(result) return countours_res # 返回身份证区域 def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线", "img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代 # 这里改进成自适应阈值,貌似没用 img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C,", "# cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好 kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,", "1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代 if start_region_flag == 0 and width_sum[i] > 330: start_region_flag =", "将身份证正反面从原始图片中切分出来。 需要的参数有: 1.图片所在路径。 输出结果为: 切分后的身份证正反面图片。 \"\"\" import os import cv2 import numpy as", "range(1, 11): # 参数可变,分割10个点 temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i] if abs(temp_line_position - min_line_position)", "right[1][1]: # 如果y点坐标大,则是右上 right_down = right[1] right_up = right[0] else: right_down = right[0]", "根据二值化结果判定并裁剪出身份证正反面区域 :param img: 原始RGB图片 :param img_closed: 二值化后的图片 :return: 身份证正反面区域 \"\"\" (contours, _) =", "# 沿宽度方向求和,统计宽度方向白点个数 start_region_flag = 0 start_region_index = 0 # 身份证起始点高度值 end_region_index = 0", "结果保存路径 测试用 :param problem_path: 出错图片中间结果保存 测试用 :return: 身份证正反面图片 \"\"\" img_path_name = os.path.join(img_path, img_name)", "not os.path.exists(problem_path): # 保存路径不存在,则创建路径 os.makedirs(problem_path) img_names = os.listdir(img_path) error_count = 0 error_names =", "强制变为0 return img_closed_original def cut_part_img(img, cut_percent): \"\"\" # 从宽度和高度两个方向,裁剪身份证边缘 :param img: 身份证区域 :param", "path {name} is not exits, program break.'.format(name=img_path)) return if not os.path.exists(save_path): # 保存路径不存在,则创建路径", "img_binary) # 切分正反面 if len(res_bbox) != 2: # 异常处理 print('Error happened when cut", "os.path.exists(img_path_name): # 判断图片是否存在 print('img {name} is not exits'.format(name=img_path_name)) return 1, [] # 图片不存在,直接返回,报错加一", "bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]] :return: 矩形顶点坐标,依次是 左下, 右下,", "# @Reference : None # @File : cut_twist_join.py # @IDE : PyCharm Community", "= cv2.getPerspectiveTransform(src, dst) # 得到投影变换矩阵 result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))),", "def gray_and_fliter(img, image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用 \"\"\" 将图片灰度化,并滤波 :param img: 输入RGB图片 :param image_name:", ":return: 二值化后的图片 \"\"\" gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0) gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F,", "import os import cv2 import numpy as np def point_judge(center, bbox): \"\"\" 用于将矩形框的边界按顺序排列", "image_name: 图片名,测试用 :param save_path: 保存路径,测试用 :return: 二值化后的图片 \"\"\" gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1,", "右上 \"\"\" left = [] right = [] for i in range(4): if", "right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box) src = np.float32([left_down, right_down, left_up, right_up])", "= cv2.imread(img_path_name) # 读取图片 img_blurred = gray_and_fliter(img, img_name) # 灰度化并滤波 img_t = cv2.filter2D(img,", "error_temp if error_temp == 0: cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0)) cv2.imwrite(os.path.join(save_path, img_name.split('.')[0]", "# 
# -*- coding: utf-8 -*-
# @Time : 19-11-19 22:25
# @Author : <NAME>
# @Reference : None
# @File : cut_twist_join.py
# @IDE : PyCharm Community Edition
"""
Cut the front and back faces of an ID card out of an original photo.
Required input:
1. The directory containing the photos.
Output:
The cropped front/back ID-card images.
"""
import os
import cv2
import numpy as np


def point_judge(center, bbox):
    """
    Order the corners of a rectangle.
    :param center: coordinates of the rectangle centre [x, y]
    :param bbox: corner coordinates [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
    :return: corner coordinates in the order lower-left, lower-right, upper-left, upper-right
    """
    left = []
    right = []
    for i in range(4):
        if bbox[i][0] > center[0]:  # any point whose x coordinate exceeds the centre's is on the right
            right.append(bbox[i])
        else:
            left.append(bbox[i])
    if right[0][1] > right[1][1]:  # the point with the larger y coordinate is treated as the upper-right one
        right_down = right[1]
        right_up = right[0]
    else:
        right_down = right[0]
        right_up = right[1]
    if left[0][1] > left[1][1]:  # the point with the larger y coordinate is treated as the upper-left one
        left_down = left[1]
        left_up = left[0]
    else:
        left_down = left[0]
        left_up = left[1]
    return left_down, right_down, left_up, right_up


def gray_and_fliter(img, image_name='1.jpg', save_path='./'):  # convert to greyscale and filter; the last two parameters are for debugging
    """
    Convert the image to greyscale and filter it.
    :param img: input RGB image
    :param image_name: image file name, used while testing
    :param save_path: path for saving the filtered result, used while testing
    :return: greyscaled, filtered image
    """
    # img = cv2.imread(image_path + image_name)  # read the image
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to greyscale
    # cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray)  # save for inspection
    img_blurred = cv2.filter2D(img_gray, -1,
                               kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))  # filtering, i.e. a sharpening pass
    img_blurred = cv2.filter2D(img_blurred, -1,
                               kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
    # cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred)  # sharpening; the kernel can be changed
    return img_blurred


def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'):  # binarise the greyscale image; the last two parameters are for debugging
    """
    Compute the gradient and binarise it.
    :param img_blurred: filtered image
    :param image_name: image file name, used while testing
    :param save_path: save path, used while testing
    :return: binarised image
    """
    gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
    img_gradient = cv2.subtract(gradX, gradY)
    img_gradient = cv2.convertScaleAbs(img_gradient)  # Sobel gradient; a Canny operator could be used instead
    # switched to adaptive thresholding here, which does not seem to help much
    img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
    # cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh)  # binarisation; the threshold is not well tuned
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
    img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
    img_closed = cv2.erode(img_closed, None, iterations=9)
    img_closed = cv2.dilate(img_closed, None, iterations=9)  # erosion and dilation
    # after shrinking the kernel and increasing the erode/dilate iterations, the failure rate drops sharply
    return img_closed


def find_bbox(img, img_closed):  # locate the front and back ID-card regions
    """
    Find and crop the front/back ID-card regions from the binarised result.
    :param img: original RGB image
    :param img_closed: binarised image
    :return: ID-card regions
    """
    (contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # find the contours
    # on a mismatched OpenCV version (4.0 or above) this line raises an error; just change (contours, _) to (_, contours, _)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)  # sort by area
    countours_res = []
    for i in range(0, len(contours)):
        area = cv2.contourArea(contours[i])  # contour area
        if (area <= 0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0] * img.shape[1]):
            # heuristic: an ID-card face is assumed to cover between 0.05 and 0.4 of the whole image (bounds chosen arbitrarily)
            rect = cv2.minAreaRect(contours[i])  # minimum-area rectangle: centre, (width, height), rotation angle
            box = cv2.boxPoints(rect)
            left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box)
            src = np.float32([left_down, right_down, left_up, right_up])  # note: the source points must correspond to the destination points
            dst = np.float32([[0, 0],
                              [int(max(rect[1][0], rect[1][1])), 0],
                              [0, int(min(rect[1][0], rect[1][1]))],
                              [int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))]])
            # the width/height order inside rect is unclear, but an ID card is always wider than it is tall, hence the max/min checks
            m = cv2.getPerspectiveTransform(src, dst)  # perspective transform matrix
            result = cv2.warpPerspective(img, m,
                                         (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))),
                                         flags=cv2.INTER_CUBIC)  # perspective transform
            countours_res.append(result)
    return countours_res  # return the ID-card regions


def find_cut_line(img_closed_original):  # when the two faces stick together, use the row with the fewest white pixels as the dividing line
    """
    Forcefully split regions that are stuck together, using a simple rule.
    :param img_closed_original: binarised image
    :return: processed binarised image
    """
    img_closed = img_closed_original.copy()
    img_closed = img_closed // 250
    # print(img_closed.shape)
    width_sum = img_closed.sum(axis=1)  # sum along the width: white-pixel count per row
    start_region_flag = 0
    start_region_index = 0  # row where the ID-card region starts
    end_region_index = 0  # row where the ID-card region ends
    for i in range(img_closed_original.shape[0]):  # 1000 is the original image height; img_closed_original.shape[0] works just as well
        if start_region_flag == 0 and width_sum[i] > 330:
            start_region_flag = 1
            start_region_index = i  # the first row with more than 330 white pixels is the start of the ID-card region
        if width_sum[i] > 330:
            end_region_index = i  # any row above 330 white pixels is treated as card region, so keep updating the end row
    # the row with the fewest white pixels inside the region is taken as the junction of the two faces
    # argsort only sees the [start, end) slice of width_sum, so the start row must be added back to its result
    min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
    img_closed_original[min_line_position][:] = 0
    for i in range(1, 11):  # tunable: also blank the ten next-smallest rows
        temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
        if abs(temp_line_position - min_line_position) < 30:  # restrict to rows within [-30, 30] of the minimum row
            img_closed_original[temp_line_position][:] = 0  # force to zero
    return img_closed_original


def cut_part_img(img, cut_percent):
    """
    Trim the ID-card borders along both width and height.
    :param img: ID-card region
    :param cut_percent: fraction to trim
    :return: trimmed ID-card region
    """
    height, width, _ = img.shape
    height_num = int(height * cut_percent)  # number of rows to remove
    h_start = 0 + height_num // 2  # trim equally on both sides
    h_end = height - height_num // 2 - 1
    width_num = int(width * cut_percent)  # number of columns to remove
    w_start = 0 + width_num // 2
    w_end = width - width_num // 2 - 1
    return img[h_start:h_end, w_start:w_end]  # return the trimmed image


def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'):  # process a single image
    """
    Crop the front/back ID-card regions out of one image.
    :param img_path: directory containing the image
    :param img_name: image file name
    :param save_path: result save path, used while testing
    :param problem_path: path for intermediate results of failed images, used while testing
    :return: front/back ID-card images
    """
    img_path_name = os.path.join(img_path, img_name)
    if not os.path.exists(img_path_name):  # check that the image exists
        print('img {name} does not exist'.format(name=img_path_name))
        return 1, []  # the image is missing: return immediately and count one error
    img = cv2.imread(img_path_name)  # read the image
    img_blurred = gray_and_fliter(img, img_name)  # greyscale and filter
    img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))  # sharpen the image
    img_binary = gradient_and_binary(img_blurred)  # binarise
    res_bbox = find_bbox(img_t, img_binary)  # split the two faces
    if len(res_bbox) != 2:  # exception handling
        print('Error happened when cutting img {name}, trying exception cut program'.format(name=img_path_name))
        # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred)
        # cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary)
        # cv2.imwrite(os.path.join(problem_path, img_name), img)  # debugging: save intermediate results
        img_binary = find_cut_line(img_binary)  # force the front/back split
        res_bbox = find_bbox(img_t, img_binary)
        if len(res_bbox) != 2:  # correction failed
            print('Failed to cut img {name}, exception program end'.format(name=img_path_name))
            return 1, None
        else:  # correction succeeded
            print('Correctly cut img {name}, exception program end'.format(name=img_path_name))
            return 0, res_bbox
    else:  # normal cropping path
        # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
        # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
        # cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img)
        return 0, res_bbox


def process_img(img_path, save_path, problem_path):
    """
    Split every image in a directory.
    :param img_path: directory containing the images
    :param save_path: directory for the results
    :param problem_path: directory for problem images
    :return: None
    """
    if not os.path.exists(img_path):  # check that the image directory exists
        print('img path {name} does not exist, program break.'.format(name=img_path))
        return
    if not os.path.exists(save_path):  # create the save directory if it does not exist
        os.makedirs(save_path)
    if not os.path.exists(problem_path):  # create the problem directory if it does not exist
        os.makedirs(problem_path)
    img_names = os.listdir(img_path)
    error_count = 0
    error_names = []
    for img_name in img_names:
        error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path, problem_path)
        error_count += error_temp
        if error_temp == 0:
            cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
            cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
        else:
            error_names.append(img_name)
    print('total error number is: ', error_count)
    print('error image names:')
    for error_img_name in error_names:
        print(error_img_name)
    return


if __name__ == '__main__':
    origin_img_path = './problem_imgs/'
    cutted_save_path = './res_imgs/'
    cut_problem_path = './temp_imgs/'
    # process_img(img_path=origin_img_path, save_path=cutted_save_path, problem_path=cut_problem_path)
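A minimal single-image usage sketch for the splitter above, assuming the module is importable as cut_twist_join (the name given in its file header); the input file name and the output file names are placeholders rather than paths from the original project.

import cv2
from cut_twist_join import preprocess_cut_one_img, cut_part_img

# Process one photo and save the two warped ID-card regions it contains.
err, regions = preprocess_cut_one_img('./problem_imgs/', 'sample.jpg')
if err == 0:
    front, back = regions                                 # the two regions returned by find_bbox
    cv2.imwrite('front.jpg', cut_part_img(front, 0.02))   # trim roughly 2% of each axis at the borders
    cv2.imwrite('back.jpg', cut_part_img(back, 0.02))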
[ "client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE", "import bleak async def main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected =", "asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb',", "response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000') print(response) if __name__ == \"__main__\": asyncio.run(main())", "bleak async def main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await", "import concurrent import asyncio import bleak async def main(): loop = asyncio.new_event_loop() client", "concurrent import asyncio import bleak async def main(): loop = asyncio.new_event_loop() client =", "bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000')", "import time import concurrent import asyncio import bleak async def main(): loop =", "loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response =", "time import concurrent import asyncio import bleak async def main(): loop = asyncio.new_event_loop()", "def main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected)", "await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000') print(response) if __name__", "client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000') print(response) if __name__ ==", "is_connected = await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000') print(response)", "async def main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect()", "= await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000') print(response) if", "import asyncio import bleak async def main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2')", "print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000') print(response) if __name__ == \"__main__\":", "asyncio import bleak async def main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected", "= asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response = await", "= bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X", "main(): loop = asyncio.new_event_loop() client = bleak.BleakClient('D8:A9:8B:7E:1E:D2') is_connected = await client.connect() print(is_connected) response" ]
[ "0] c = c.loc[c > 0] u = ind_m.div(c, axis=0) avg_u = u[u", "pandas as pd def get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i,", "(t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m def get_avg_uniq(ind_m, c=None):", "get_avg_uniq(ind_m, c=None): if c is None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c >", "ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i]", "ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c = c.loc[c > 0] u =", "c = ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c = c.loc[c > 0]", "i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m def get_avg_uniq(ind_m,", "c.loc[c > 0] u = ind_m.div(c, axis=0) avg_u = u[u > 0].mean() avg_u", "c is None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c =", "import pandas as pd def get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for", "c=None): if c is None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0]", "= ind_m.div(c, axis=0) avg_u = u[u > 0].mean() avg_u = avg_u.fillna(0) return avg_u", "u = ind_m.div(c, axis=0) avg_u = u[u > 0].mean() avg_u = avg_u.fillna(0) return", "t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m def get_avg_uniq(ind_m, c=None): if", "c = c.loc[c > 0] u = ind_m.div(c, axis=0) avg_u = u[u >", "ind_m.loc[t0_:t1_, i] = 1 return ind_m def get_avg_uniq(ind_m, c=None): if c is None:", "columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m", "> 0] u = ind_m.div(c, axis=0) avg_u = u[u > 0].mean() avg_u =", "def get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in", "enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m def get_avg_uniq(ind_m, c=None): if c is", "= ind_m.loc[c > 0] c = c.loc[c > 0] u = ind_m.div(c, axis=0)", "None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c = c.loc[c >", "t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_,", "ind_m def get_avg_uniq(ind_m, c=None): if c is None: c = ind_m.sum(axis=1) ind_m =", "= pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] =", "ind_m = ind_m.loc[c > 0] c = c.loc[c > 0] u = ind_m.div(c,", "def get_avg_uniq(ind_m, c=None): if c is None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c", "1 return ind_m def get_avg_uniq(ind_m, c=None): if c is None: c = ind_m.sum(axis=1)", "= c.loc[c > 0] u = ind_m.div(c, axis=0) avg_u = u[u > 0].mean()", "ind_m.loc[c > 0] c = c.loc[c > 0] u = ind_m.div(c, axis=0) avg_u", "0] u = ind_m.div(c, axis=0) avg_u = u[u > 0].mean() avg_u = avg_u.fillna(0)", "> 0] c = c.loc[c > 0] u = ind_m.div(c, axis=0) avg_u =", "= ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c = c.loc[c > 0] u", "is None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c = c.loc[c", "<gh_stars>100-1000 import pandas as pd def get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0]))", "pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1", "get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()):", "as pd def 
get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_,", "in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m def get_avg_uniq(ind_m, c=None): if c", "pd def get_ind_matrix(bar_idx, t1): ind_m = pd.DataFrame(0, index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_)", "if c is None: c = ind_m.sum(axis=1) ind_m = ind_m.loc[c > 0] c", "i] = 1 return ind_m def get_avg_uniq(ind_m, c=None): if c is None: c", "= 1 return ind_m def get_avg_uniq(ind_m, c=None): if c is None: c =", "index=bar_idx, columns=range(t1.shape[0])) for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return", "return ind_m def get_avg_uniq(ind_m, c=None): if c is None: c = ind_m.sum(axis=1) ind_m", "for i, (t0_, t1_) in enumerate(t1.iteritems()): ind_m.loc[t0_:t1_, i] = 1 return ind_m def" ]
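A small, hedged usage sketch with toy data; the bar index and the two event spans below are made up purely for illustration.

import pandas as pd

bar_idx = pd.RangeIndex(5)      # five bars, labelled 0..4
t1 = pd.Series({0: 2, 1: 3})    # event starting at bar 0 ends at bar 2, etc.

ind_m = get_ind_matrix(bar_idx, t1)
print(ind_m)
print(get_avg_uniq(ind_m))      # average uniqueness per event column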
[ "python # -*- coding: utf-8 -*- from __future__ import with_statement import sys if", "= '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages =", "sys.exit('Python 2.5 or greater is required.') try: from setuptools import setup except ImportError:", "'srap', version = srap.__version__, description = 'Simple reflect annotation protocol lib.', long_description =", "-*- from __future__ import with_statement import sys if sys.version_info < (2, 5): sys.exit('Python", "try: from setuptools import setup except ImportError: from distutils.core import setup import srap", "<reponame>aozhiwei/srap #!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import with_statement import", "distutils.core import setup import srap with open('LICENSE') as fp: license = fp.read() setup(name", "# -*- coding: utf-8 -*- from __future__ import with_statement import sys if sys.version_info", "packages = ['srap'], license = license, platforms = ['any'], classifiers = [] )", "fp: license = fp.read() setup(name = 'srap', version = srap.__version__, description = 'Simple", "sys.version_info < (2, 5): sys.exit('Python 2.5 or greater is required.') try: from setuptools", "as fp: license = fp.read() setup(name = 'srap', version = srap.__version__, description =", "version = srap.__version__, description = 'Simple reflect annotation protocol lib.', long_description = '',", "'<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'],", "= '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license = license, platforms =", "= 'Simple reflect annotation protocol lib.', long_description = '', author = '<NAME>', author_email", "url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license = license, platforms = ['any'], classifiers", "annotation protocol lib.', long_description = '', author = '<NAME>', author_email = '<EMAIL>', maintainer", "srap.__version__, description = 'Simple reflect annotation protocol lib.', long_description = '', author =", "from __future__ import with_statement import sys if sys.version_info < (2, 5): sys.exit('Python 2.5", "= 'https://github.com/aozhiwei/srap', packages = ['srap'], license = license, platforms = ['any'], classifiers =", "import setup except ImportError: from distutils.core import setup import srap with open('LICENSE') as", "long_description = '', author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email", "= '', author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email =", "from distutils.core import setup import srap with open('LICENSE') as fp: license = fp.read()", "license = fp.read() setup(name = 'srap', version = srap.__version__, description = 'Simple reflect", "__future__ import with_statement import sys if sys.version_info < (2, 5): sys.exit('Python 2.5 or", "'Simple reflect annotation protocol lib.', long_description = '', author = '<NAME>', author_email =", "= fp.read() setup(name = 'srap', version = srap.__version__, description = 'Simple reflect annotation", "#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import with_statement import sys", "< (2, 5): sys.exit('Python 2.5 or greater is required.') try: from setuptools import", "coding: utf-8 -*- from __future__ import with_statement import sys if sys.version_info < (2,", "utf-8 -*- from __future__ import with_statement import sys if sys.version_info < (2, 5):", "fp.read() setup(name = 
'srap', version = srap.__version__, description = 'Simple reflect annotation protocol", "required.') try: from setuptools import setup except ImportError: from distutils.core import setup import", "author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url", "protocol lib.', long_description = '', author = '<NAME>', author_email = '<EMAIL>', maintainer =", "author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages", "-*- coding: utf-8 -*- from __future__ import with_statement import sys if sys.version_info <", "= srap.__version__, description = 'Simple reflect annotation protocol lib.', long_description = '', author", "setuptools import setup except ImportError: from distutils.core import setup import srap with open('LICENSE')", "lib.', long_description = '', author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>',", "greater is required.') try: from setuptools import setup except ImportError: from distutils.core import", "import sys if sys.version_info < (2, 5): sys.exit('Python 2.5 or greater is required.')", "sys if sys.version_info < (2, 5): sys.exit('Python 2.5 or greater is required.') try:", "description = 'Simple reflect annotation protocol lib.', long_description = '', author = '<NAME>',", "is required.') try: from setuptools import setup except ImportError: from distutils.core import setup", "'<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license = license,", "from setuptools import setup except ImportError: from distutils.core import setup import srap with", "srap with open('LICENSE') as fp: license = fp.read() setup(name = 'srap', version =", "= 'srap', version = srap.__version__, description = 'Simple reflect annotation protocol lib.', long_description", "setup except ImportError: from distutils.core import setup import srap with open('LICENSE') as fp:", "or greater is required.') try: from setuptools import setup except ImportError: from distutils.core", "if sys.version_info < (2, 5): sys.exit('Python 2.5 or greater is required.') try: from", "(2, 5): sys.exit('Python 2.5 or greater is required.') try: from setuptools import setup", "5): sys.exit('Python 2.5 or greater is required.') try: from setuptools import setup except", "'https://github.com/aozhiwei/srap', packages = ['srap'], license = license, platforms = ['any'], classifiers = []", "reflect annotation protocol lib.', long_description = '', author = '<NAME>', author_email = '<EMAIL>',", "with_statement import sys if sys.version_info < (2, 5): sys.exit('Python 2.5 or greater is", "'<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license = license, platforms = ['any'],", "import with_statement import sys if sys.version_info < (2, 5): sys.exit('Python 2.5 or greater", "import srap with open('LICENSE') as fp: license = fp.read() setup(name = 'srap', version", "= '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license =", "'<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap',", "setup(name = 'srap', version = srap.__version__, description = 'Simple reflect annotation protocol lib.',", "ImportError: from distutils.core import setup import srap with open('LICENSE') as fp: license =", "= '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url =", 
"maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license", "maintainer_email = '<EMAIL>', url = 'https://github.com/aozhiwei/srap', packages = ['srap'], license = license, platforms", "with open('LICENSE') as fp: license = fp.read() setup(name = 'srap', version = srap.__version__,", "'', author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>',", "import setup import srap with open('LICENSE') as fp: license = fp.read() setup(name =", "except ImportError: from distutils.core import setup import srap with open('LICENSE') as fp: license", "2.5 or greater is required.') try: from setuptools import setup except ImportError: from", "setup import srap with open('LICENSE') as fp: license = fp.read() setup(name = 'srap',", "open('LICENSE') as fp: license = fp.read() setup(name = 'srap', version = srap.__version__, description" ]
[ "hit this timeout - it's there to prevent a failing test from hanging", "using Enthought open source! \"\"\" Example of testing a simple future using the", "\"\"\" import unittest from pyface.toolkit import toolkit_object from traits_futures.api import submit_call, TraitsExecutor #:", "is currently only available for Qt, not #: for wxPython. To run this", "submit_call, TraitsExecutor #: Maximum timeout for blocking calls, in seconds. A successful test", "toolkit_object from traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout for blocking calls, in", "TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): # Request the", "blocking the rest of the test suite. SAFETY_TIMEOUT = 5.0 #: Note that", "Enthought open source! \"\"\" Example of testing a simple future using the GuiTestAssistant.", "wxPython. To run this unit test, you'll need PyQt or PySide 2 installed.", "the conditions described in the aforementioned license. The license # is also available", "prevent a failing test from hanging #: forever and blocking the rest of", "# the conditions described in the aforementioned license. The license # is also", "rest of the test suite. SAFETY_TIMEOUT = 5.0 #: Note that the GuiTestAssistant", "be redistributed only under # the conditions described in the aforementioned license. The", "that the GuiTestAssistant is currently only available for Qt, not #: for wxPython.", "only available for Qt, not #: for wxPython. To run this unit test,", "<filename>docs/source/guide/examples/test_future.py # (C) Copyright 2018-2021 Enthought, Inc., Austin, TX # All rights reserved.", "license included in LICENSE.txt and may be redistributed only under # the conditions", "GuiTestAssistant is currently only available for Qt, not #: for wxPython. To run", "All rights reserved. # # This software is provided without warranty under the", "submit_call(self.traits_executor, pow, 3, 5) # Wait for the future to complete. self.assertEventuallyTrueInGui( lambda:", "Qt, not #: for wxPython. To run this unit test, you'll need PyQt", "# # This software is provided without warranty under the terms of the", "reserved. # # This software is provided without warranty under the terms of", "blocking calls, in seconds. A successful test should #: never hit this timeout", "toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): #", "the BSD # license included in LICENSE.txt and may be redistributed only under", "never hit this timeout - it's there to prevent a failing test from", "only under # the conditions described in the aforementioned license. The license #", "aforementioned license. 
The license # is also available online at http://www.enthought.com/licenses/BSD.txt # #", "self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5) # Wait for", "import toolkit_object from traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout for blocking calls,", "unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): # Request the executor", "tearDown(self): # Request the executor to stop, and wait for that stop to", "import unittest from pyface.toolkit import toolkit_object from traits_futures.api import submit_call, TraitsExecutor #: Maximum", "installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor()", "without warranty under the terms of the BSD # license included in LICENSE.txt", "successful test should #: never hit this timeout - it's there to prevent", "this unit test, you'll need PyQt or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\")", "is provided without warranty under the terms of the BSD # license included", "for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow,", "online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! \"\"\" Example", "and wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future =", "the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import toolkit_object from traits_futures.api import submit_call,", "from hanging #: forever and blocking the rest of the test suite. SAFETY_TIMEOUT", "hanging #: forever and blocking the rest of the test suite. SAFETY_TIMEOUT =", "\"\"\" Example of testing a simple future using the GuiTestAssistant. \"\"\" import unittest", "TX # All rights reserved. # # This software is provided without warranty", "suite. SAFETY_TIMEOUT = 5.0 #: Note that the GuiTestAssistant is currently only available", "calls, in seconds. A successful test should #: never hit this timeout -", "source! \"\"\" Example of testing a simple future using the GuiTestAssistant. \"\"\" import", "it's there to prevent a failing test from hanging #: forever and blocking", "Note that the GuiTestAssistant is currently only available for Qt, not #: for", "of the test suite. SAFETY_TIMEOUT = 5.0 #: Note that the GuiTestAssistant is", "terms of the BSD # license included in LICENSE.txt and may be redistributed", "PyQt or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self):", "= toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self):", "stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5)", "the future to complete. 
self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result, 243) if __name__", "to prevent a failing test from hanging #: forever and blocking the rest", "# license included in LICENSE.txt and may be redistributed only under # the", "#: for wxPython. To run this unit test, you'll need PyQt or PySide", "To run this unit test, you'll need PyQt or PySide 2 installed. GuiTestAssistant", "simple future using the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import toolkit_object from", "for Qt, not #: for wxPython. To run this unit test, you'll need", "stop, and wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future", "license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using", "Thanks for using Enthought open source! \"\"\" Example of testing a simple future", "timeout for blocking calls, in seconds. A successful test should #: never hit", "5) # Wait for the future to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT )", "the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt #", "test from hanging #: forever and blocking the rest of the test suite.", "unit test, you'll need PyQt or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class", "traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout for blocking calls, in seconds. A", "5.0 #: Note that the GuiTestAssistant is currently only available for Qt, not", "# Wait for the future to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result,", "BSD # license included in LICENSE.txt and may be redistributed only under #", "GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): # Request the executor to stop, and", "Request the executor to stop, and wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT)", "may be redistributed only under # the conditions described in the aforementioned license.", "The license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for", "def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): # Request the executor to", "also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source!", "for wxPython. To run this unit test, you'll need PyQt or PySide 2", "GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import toolkit_object from traits_futures.api import submit_call, TraitsExecutor", "from traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout for blocking calls, in seconds.", "there to prevent a failing test from hanging #: forever and blocking the", "= submit_call(self.traits_executor, pow, 3, 5) # Wait for the future to complete. self.assertEventuallyTrueInGui(", "GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def", "2 installed. 
GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor =", "# This software is provided without warranty under the terms of the BSD", "# # Thanks for using Enthought open source! \"\"\" Example of testing a", "provided without warranty under the terms of the BSD # license included in", "using the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import toolkit_object from traits_futures.api import", "testing a simple future using the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import", "conditions described in the aforementioned license. The license # is also available online", "from pyface.toolkit import toolkit_object from traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout for", "wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor,", "for blocking calls, in seconds. A successful test should #: never hit this", "= TraitsExecutor() def tearDown(self): # Request the executor to stop, and wait for", "Example of testing a simple future using the GuiTestAssistant. \"\"\" import unittest from", "currently only available for Qt, not #: for wxPython. To run this unit", "Wait for the future to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result, 243)", "to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result, 243) if __name__ == \"__main__\":", "This software is provided without warranty under the terms of the BSD #", "at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! \"\"\" Example of", "#: Maximum timeout for blocking calls, in seconds. A successful test should #:", "in seconds. A successful test should #: never hit this timeout - it's", "available for Qt, not #: for wxPython. To run this unit test, you'll", "pyface.toolkit import toolkit_object from traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout for blocking", "import submit_call, TraitsExecutor #: Maximum timeout for blocking calls, in seconds. A successful", "future to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result, 243) if __name__ ==", "#: forever and blocking the rest of the test suite. SAFETY_TIMEOUT = 5.0", "def tearDown(self): # Request the executor to stop, and wait for that stop", "is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open", "the rest of the test suite. SAFETY_TIMEOUT = 5.0 #: Note that the", "TraitsExecutor #: Maximum timeout for blocking calls, in seconds. A successful test should", "you'll need PyQt or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase):", "and may be redistributed only under # the conditions described in the aforementioned", "future = submit_call(self.traits_executor, pow, 3, 5) # Wait for the future to complete.", "this timeout - it's there to prevent a failing test from hanging #:", "under # the conditions described in the aforementioned license. The license # is", "for using Enthought open source! 
\"\"\" Example of testing a simple future using", "described in the aforementioned license. The license # is also available online at", "# All rights reserved. # # This software is provided without warranty under", "software is provided without warranty under the terms of the BSD # license", "Copyright 2018-2021 Enthought, Inc., Austin, TX # All rights reserved. # # This", "not #: for wxPython. To run this unit test, you'll need PyQt or", "Enthought, Inc., Austin, TX # All rights reserved. # # This software is", "PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor", "warranty under the terms of the BSD # license included in LICENSE.txt and", "in LICENSE.txt and may be redistributed only under # the conditions described in", "# Request the executor to stop, and wait for that stop to complete.", "- it's there to prevent a failing test from hanging #: forever and", "or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self)", "a simple future using the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import toolkit_object", "#: Note that the GuiTestAssistant is currently only available for Qt, not #:", "the executor to stop, and wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self)", "setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): # Request the executor to stop,", "available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! \"\"\"", "in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt", "Inc., Austin, TX # All rights reserved. # # This software is provided", "# Thanks for using Enthought open source! \"\"\" Example of testing a simple", "self.traits_executor = TraitsExecutor() def tearDown(self): # Request the executor to stop, and wait", "# (C) Copyright 2018-2021 Enthought, Inc., Austin, TX # All rights reserved. #", "unittest from pyface.toolkit import toolkit_object from traits_futures.api import submit_call, TraitsExecutor #: Maximum timeout", "complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5) # Wait", "# is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought", "open source! \"\"\" Example of testing a simple future using the GuiTestAssistant. \"\"\"", "def test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5) # Wait for the future", "#: never hit this timeout - it's there to prevent a failing test", "complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result, 243) if __name__ == \"__main__\": unittest.main()", "the GuiTestAssistant is currently only available for Qt, not #: for wxPython. To", "test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5) # Wait for the future to", "to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5) #", "seconds. 
A successful test should #: never hit this timeout - it's there", "and blocking the rest of the test suite. SAFETY_TIMEOUT = 5.0 #: Note", "under the terms of the BSD # license included in LICENSE.txt and may", "license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # # Thanks", "forever and blocking the rest of the test suite. SAFETY_TIMEOUT = 5.0 #:", "http://www.enthought.com/licenses/BSD.txt # # Thanks for using Enthought open source! \"\"\" Example of testing", "that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow, 3,", "(C) Copyright 2018-2021 Enthought, Inc., Austin, TX # All rights reserved. # #", "Austin, TX # All rights reserved. # # This software is provided without", "failing test from hanging #: forever and blocking the rest of the test", "test, you'll need PyQt or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant,", "of testing a simple future using the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit", "executor to stop, and wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def", "to stop, and wait for that stop to complete. self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT) GuiTestAssistant.tearDown(self) def test_my_future(self):", "future using the GuiTestAssistant. \"\"\" import unittest from pyface.toolkit import toolkit_object from traits_futures.api", "2018-2021 Enthought, Inc., Austin, TX # All rights reserved. # # This software", "for the future to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT ) self.assertEqual(future.result, 243) if", "test suite. SAFETY_TIMEOUT = 5.0 #: Note that the GuiTestAssistant is currently only", "run this unit test, you'll need PyQt or PySide 2 installed. GuiTestAssistant =", "a failing test from hanging #: forever and blocking the rest of the", "included in LICENSE.txt and may be redistributed only under # the conditions described", "SAFETY_TIMEOUT = 5.0 #: Note that the GuiTestAssistant is currently only available for", "should #: never hit this timeout - it's there to prevent a failing", "TraitsExecutor() def tearDown(self): # Request the executor to stop, and wait for that", "GuiTestAssistant.tearDown(self) def test_my_future(self): future = submit_call(self.traits_executor, pow, 3, 5) # Wait for the", "class TestMyFuture(GuiTestAssistant, unittest.TestCase): def setUp(self): GuiTestAssistant.setUp(self) self.traits_executor = TraitsExecutor() def tearDown(self): # Request", "3, 5) # Wait for the future to complete. self.assertEventuallyTrueInGui( lambda: future.done, timeout=SAFETY_TIMEOUT", "the test suite. SAFETY_TIMEOUT = 5.0 #: Note that the GuiTestAssistant is currently", "pow, 3, 5) # Wait for the future to complete. self.assertEventuallyTrueInGui( lambda: future.done,", "need PyQt or PySide 2 installed. GuiTestAssistant = toolkit_object(\"util.gui_test_assistant:GuiTestAssistant\") class TestMyFuture(GuiTestAssistant, unittest.TestCase): def", "the terms of the BSD # license included in LICENSE.txt and may be", "of the BSD # license included in LICENSE.txt and may be redistributed only", "rights reserved. 
# # This software is provided without warranty under the terms", "LICENSE.txt and may be redistributed only under # the conditions described in the", "timeout - it's there to prevent a failing test from hanging #: forever", "A successful test should #: never hit this timeout - it's there to", "test should #: never hit this timeout - it's there to prevent a", "redistributed only under # the conditions described in the aforementioned license. The license", "= 5.0 #: Note that the GuiTestAssistant is currently only available for Qt,", "Maximum timeout for blocking calls, in seconds. A successful test should #: never" ]
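The same setUp/tearDown scaffolding supports further tests. Below is a hedged sketch of one more test method, intended to be added inside TestMyFuture; it reuses only the submit_call / future.done / future.result API shown above, and the callable and expected value are arbitrary.

    def test_sum_future(self):
        future = submit_call(self.traits_executor, sum, range(10))
        self.assertEventuallyTrueInGui(
            lambda: future.done, timeout=SAFETY_TIMEOUT
        )
        self.assertEqual(future.result, 45)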
[ "import re CCNUM = re.compile(r'(?!.*(\\d)(?:\\D?\\1){3})[456]\\d{3}(-?)(?:\\d{4}\\2){2}\\d{4}') for _ in range(int(input())): print('Valid' if CCNUM.fullmatch(input().strip()) else", "python3 import re CCNUM = re.compile(r'(?!.*(\\d)(?:\\D?\\1){3})[456]\\d{3}(-?)(?:\\d{4}\\2){2}\\d{4}') for _ in range(int(input())): print('Valid' if CCNUM.fullmatch(input().strip())", "#!/usr/bin/env python3 import re CCNUM = re.compile(r'(?!.*(\\d)(?:\\D?\\1){3})[456]\\d{3}(-?)(?:\\d{4}\\2){2}\\d{4}') for _ in range(int(input())): print('Valid' if", "re CCNUM = re.compile(r'(?!.*(\\d)(?:\\D?\\1){3})[456]\\d{3}(-?)(?:\\d{4}\\2){2}\\d{4}') for _ in range(int(input())): print('Valid' if CCNUM.fullmatch(input().strip()) else 'Invalid')" ]
[ "Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response)", "Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name == \"Indian\" assert business.categories[0].alias == \"indpak\"", "json.load(biz) business = Business(response) assert business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as", "= Business(response) assert business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response", "import Business from yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response", "response = json.load(biz) business = Business(response) assert business.id == response['id'] def test_business_category_is_tuple(): with", "def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert", "response = json.load(biz) business = Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name ==", "<filename>tests/obj/business_test.py # -*- coding: UTF-8 -*- import io import json from tests.testing import", "from tests.testing import resource_filename from yelp.obj.business import Business from yelp.obj.business import Category def", "UTF-8 -*- import io import json from tests.testing import resource_filename from yelp.obj.business import", "yelp.obj.business import Business from yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz:", "yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business", "assert business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz)", "test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert business.id", "import json from tests.testing import resource_filename from yelp.obj.business import Business from yelp.obj.business import", "import resource_filename from yelp.obj.business import Business from yelp.obj.business import Category def test_init_business(): with", "resource_filename from yelp.obj.business import Business from yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json'))", "Business from yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response =", "business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business", "test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert type(business.categories[0])", "response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response)", "-*- 
coding: UTF-8 -*- import io import json from tests.testing import resource_filename from", "= json.load(biz) business = Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name == \"Indian\"", "def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert", "json from tests.testing import resource_filename from yelp.obj.business import Business from yelp.obj.business import Category", "biz: response = json.load(biz) business = Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name", "json.load(biz) business = Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name == \"Indian\" assert", "coding: UTF-8 -*- import io import json from tests.testing import resource_filename from yelp.obj.business", "import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business =", "from yelp.obj.business import Business from yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as", "= Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name == \"Indian\" assert business.categories[0].alias ==", "as biz: response = json.load(biz) business = Business(response) assert business.id == response['id'] def", "with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert type(business.categories[0]) is", "io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert type(business.categories[0]) is Category", "# -*- coding: UTF-8 -*- import io import json from tests.testing import resource_filename", "io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert business.id == response['id']", "Business(response) assert business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response =", "business = Business(response) assert type(business.categories[0]) is Category assert business.categories[0].name == \"Indian\" assert business.categories[0].alias", "from yelp.obj.business import Category def test_init_business(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz)", "import io import json from tests.testing import resource_filename from yelp.obj.business import Business from", "with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business = Business(response) assert business.id ==", "as biz: response = json.load(biz) business = Business(response) assert type(business.categories[0]) is Category assert", "biz: response = json.load(biz) business = Business(response) assert business.id == response['id'] def test_business_category_is_tuple():", "business = Business(response) assert business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz:", "== response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json')) as biz: response = json.load(biz) business =", "io import json from tests.testing 
import resource_filename from yelp.obj.business import Business from yelp.obj.business", "-*- import io import json from tests.testing import resource_filename from yelp.obj.business import Business", "= json.load(biz) business = Business(response) assert business.id == response['id'] def test_business_category_is_tuple(): with io.open(resource_filename('json/business_response.json'))", "tests.testing import resource_filename from yelp.obj.business import Business from yelp.obj.business import Category def test_init_business():" ]
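A hedged interactive sketch exercising the same attributes the two tests assert on (Business.id and Business.categories entries with .name and .alias); it assumes the same JSON fixture and test helpers that the repository ships, which are not shown here.

import io
import json

from tests.testing import resource_filename
from yelp.obj.business import Business

with io.open(resource_filename('json/business_response.json')) as biz:
    business = Business(json.load(biz))

print(business.id)
print([(c.alias, c.name) for c in business.categories])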
[ "1 if self.IndexSelection[0] == 0: cat_index = min(max(0, self.IndexSelection[1]), len(Categories) - 1) if", "1)] self.grilles = [[[None for _ in range(self.nomb_vignettes[1])] for _ in range(self.nomb_vignettes[0])] for", "val): pass def affiche(self, surface): for colIndex, col in enumerate(self.grilles[self.IndexType]): for liIndex, elem", "self.grilles[cat_index][0][CatIndex2] = self.grilles[CatIndex2][1][0] def index_pour_pos(self, pos): \"\"\" index col, index ligne\"\"\" return [(pos[i]", "1) if self.IndexType != cat_index: self.IndexType = cat_index self.IndexSelection[1] = self.IndexType else: self.IndexSelection[1]", "!= cat_index: self.IndexType = cat_index self.IndexSelection[1] = self.IndexType else: self.IndexSelection[1] %= len(ListElements[Categories[self.IndexType]]) for", "if item.split('.')[-1] in ['png', 'gif', 'jpeg', 'jpg']] def GetElements(): Elems = Elements.copy() for", "= [(self.dim_ecran[i] - self.nomb_vignettes[i] * self.dim_vignette[i]) // 2 for i in (0, 1)]", "[[[None for _ in range(self.nomb_vignettes[1])] for _ in range(self.nomb_vignettes[0])] for _ in Categories]", "self.affiche_ligne(surface, etiquette) def mettre_a_jour(self, e): if e.type == pygame.MOUSEMOTION: pos = pygame.mouse.get_pos() self.IndexSelection", "image = None if image: # Retaille image_obj = pygame.transform.scale(media.charge_image(image), self.dim_vignette) else: image_obj", "== pygame.K_DOWN: self.IndexSelection[1] += 1 elif e.key == pygame.K_LEFT: self.IndexSelection[0] -= 1 elif", "que les photos existent (pas de coquille dans le nom) for AttrName, champ", "' + val[2] # Marque la selection d'une ombre Rect = pygame.Rect(self.pos_pour_index(self.IndexSelection), self.dim_vignette)", "images=[selection]) else: sel = selection(pos=None) sel.efface() return sel def EditFields(item): return sorted([AttrName for", "= self.index_pour_pos(pos) elif e.type == pygame.KEYDOWN: if e.key == pygame.K_UP: self.IndexSelection[1] -= 1", "e.key == pygame.K_DOWN: self.IndexSelection[1] += 1 elif e.key == pygame.K_LEFT: self.IndexSelection[0] -= 1", "= self.grilles[CatIndex2][1][0] def index_pour_pos(self, pos): \"\"\" index col, index ligne\"\"\" return [(pos[i] -", "for AttrName, champ in self.champs: if AttrName == 'photos_': for val in champ.valeur:", "liIndex)) surface.blit(image, pos) etiquette = Categories[self.IndexType] if self.IndexSelection[0] != 0: val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]]", "index ligne\"\"\" return [index[i] * self.dim_vignette[i] + self.coin_HG[i] for i in (0, 1)]", "0, 100), Rect, 1) self.affiche_ligne(surface, etiquette) def mettre_a_jour(self, e): if e.type == pygame.MOUSEMOTION:", "- self.coin_HG[i]) // self.dim_vignette[i] for i in (0, 1)] def pos_pour_index(self, index): \"\"\"", "= self.IndexType else: self.IndexSelection[1] %= len(ListElements[Categories[self.IndexType]]) for i in 0, 1: self.IndexSelection[i] %=", "len(ListElements[Categories[self.IndexType]]) for i in 0, 1: self.IndexSelection[i] %= self.nomb_vignettes[i] def Selecte(): selection =", "return [item for item in os.listdir(vrac_dir) if item.split('.')[-1] in ['png', 'gif', 'jpeg', 'jpg']]", "val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]] if val: etiquette += ' : ' + val[2] #", "if isinstance(selection, str): sel = elems.Dessinable(pos=None, images=[selection]) else: sel = selection(pos=None) sel.efface() return", "[(pos[i] - self.coin_HG[i]) // self.dim_vignette[i] for i in (0, 1)] def pos_pour_index(self, index):", 
"elif e.key == pygame.K_DOWN: self.IndexSelection[1] += 1 elif e.key == pygame.K_LEFT: self.IndexSelection[0] -=", "val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]] if val is not None: return val[0] @valeur.setter def valeur(self,", "def pos_pour_index(self, index): \"\"\" index col, index ligne\"\"\" return [index[i] * self.dim_vignette[i] +", "elem_name for cat_index, _categorie in enumerate(Categories): for CatIndex2, _categorie in enumerate(Categories): self.grilles[cat_index][0][CatIndex2] =", "not AttrName.startswith('_')]) class EditeurElem(menu.EditeurElem): def alafin(self): # Verifie que les photos existent (pas", "i in (0, 1)] @property def valeur(self): val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]] if val is", "range(self.nomb_vignettes[1])] for _ in range(self.nomb_vignettes[0])] for _ in Categories] self.emplit_grille() def emplit_grille(self): for", "pos) etiquette = Categories[self.IndexType] if self.IndexSelection[0] != 0: val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]] if val:", "self.nomb_vignettes = [(self.dim_ecran[i] - self.marge_HG[i]) // self.dim_vignette[i] for i in (0, 1)] coin_HG", "emplit_grille(self): for cat_index, categorie in enumerate(Categories): for i, elem in enumerate(ListElements[categorie]): li =", "self.dim_vignette) else: image_obj = None self.grilles[cat_index][col][li] = elem, image_obj, elem_name for cat_index, _categorie", "if self.IndexSelection[0] == 0: cat_index = min(max(0, self.IndexSelection[1]), len(Categories) - 1) if self.IndexType", "None RepertoiresVrac = ['decors Vrac', 'Cap Vrac'] def DecorsVrac(repertoire): vrac_dir = os.path.join(media.MEDIA_REP, repertoire)", "GetElements(): Elems = Elements.copy() for Repertoire in RepertoiresVrac: Elems[Repertoire] = DecorsVrac(Repertoire) return Elems", "Elements, Categories from . import elems from . 
import media import os import", "os.listdir(vrac_dir) if item.split('.')[-1] in ['png', 'gif', 'jpeg', 'jpg']] def GetElements(): Elems = Elements.copy()", "categorie, elem) else: elem_name = elem.__name__ if hasattr(elem, 'nomImages'): image = elem.nomImages[0] else:", "self.IndexSelection[0] != 0: val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]] if val: etiquette += ' : '", "for AttrName in dir(item) if AttrName.endswith('_') and not AttrName.startswith('_')]) class EditeurElem(menu.EditeurElem): def alafin(self):", "Vrac'] def DecorsVrac(repertoire): vrac_dir = os.path.join(media.MEDIA_REP, repertoire) return [item for item in os.listdir(vrac_dir)", "self.dim_vignette[i] for i in (0, 1)] def pos_pour_index(self, index): \"\"\" index col, index", "%= self.nomb_vignettes[i] def Selecte(): selection = Palette().boucle() pygame.event.clear() if selection: if isinstance(selection, str):", "self.IndexType = cat_index self.IndexSelection[1] = self.IndexType else: self.IndexSelection[1] %= len(ListElements[Categories[self.IndexType]]) for i in", "in champ.valeur: try: media.charge_image('photos/' + val) except: print('manque la photo', val) menu.EditeurElem.alafin(self) def", "if val is not None: return val[0] @valeur.setter def valeur(self, val): pass def", "in (0, 1)] self.coin_HG = [max(self.marge_HG[i], coin_HG[i]) for i in (0, 1)] self.grilles", "i in (0, 1)] self.coin_HG = [max(self.marge_HG[i], coin_HG[i]) for i in (0, 1)]", "0, 1: self.IndexSelection[i] %= self.nomb_vignettes[i] def Selecte(): selection = Palette().boucle() pygame.event.clear() if selection:", "= 0, 16 self.nomb_vignettes = [(self.dim_ecran[i] - self.marge_HG[i]) // self.dim_vignette[i] for i in", "for liIndex, elem in enumerate(col): if elem: elem, image, _nom = elem if", "else: elem_name = elem.__name__ if hasattr(elem, 'nomImages'): image = elem.nomImages[0] else: image =", "les photos existent (pas de coquille dans le nom) for AttrName, champ in", "is not None: return val[0] @valeur.setter def valeur(self, val): pass def affiche(self, surface):", "// self.dim_vignette[i] for i in (0, 1)] coin_HG = [(self.dim_ecran[i] - self.nomb_vignettes[i] *", "= elem if image: pos = self.pos_pour_index((colIndex, liIndex)) surface.blit(image, pos) etiquette = Categories[self.IndexType]", "existent (pas de coquille dans le nom) for AttrName, champ in self.champs: if", "for colIndex, col in enumerate(self.grilles[self.IndexType]): for liIndex, elem in enumerate(col): if elem: elem,", "else: sel = selection(pos=None) sel.efface() return sel def EditFields(item): return sorted([AttrName for AttrName", "for i in (0, 1)] def pos_pour_index(self, index): \"\"\" index col, index ligne\"\"\"", "= [(self.dim_ecran[i] - self.marge_HG[i]) // self.dim_vignette[i] for i in (0, 1)] coin_HG =", "str): sel = elems.Dessinable(pos=None, images=[selection]) else: sel = selection(pos=None) sel.efface() return sel def", "Elems[Repertoire] = DecorsVrac(Repertoire) return Elems ListElements = GetElements() class Palette(menu.ElemInterface): def __init__(self, **kwargs):", "import os import pygame from . 
import menu _Selection = None RepertoiresVrac =", "val is not None: return val[0] @valeur.setter def valeur(self, val): pass def affiche(self,", "== pygame.MOUSEMOTION: pos = pygame.mouse.get_pos() self.IndexSelection = self.index_pour_pos(pos) elif e.type == pygame.KEYDOWN: if", "elif e.type == pygame.KEYDOWN: if e.key == pygame.K_UP: self.IndexSelection[1] -= 1 elif e.key", "+ val[2] # Marque la selection d'une ombre Rect = pygame.Rect(self.pos_pour_index(self.IndexSelection), self.dim_vignette) pygame.draw.rect(surface,", "os.path.join(media.MEDIA_REP, repertoire) return [item for item in os.listdir(vrac_dir) if item.split('.')[-1] in ['png', 'gif',", "import media import os import pygame from . import menu _Selection = None", "if image: pos = self.pos_pour_index((colIndex, liIndex)) surface.blit(image, pos) etiquette = Categories[self.IndexType] if self.IndexSelection[0]", "surface): for colIndex, col in enumerate(self.grilles[self.IndexType]): for liIndex, elem in enumerate(col): if elem:", "index col, index ligne\"\"\" return [(pos[i] - self.coin_HG[i]) // self.dim_vignette[i] for i in", "def emplit_grille(self): for cat_index, categorie in enumerate(Categories): for i, elem in enumerate(ListElements[categorie]): li", "for i in (0, 1)] @property def valeur(self): val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]] if val", "for i in 0, 1: self.IndexSelection[i] %= self.nomb_vignettes[i] def Selecte(): selection = Palette().boucle()", "Categories from . import elems from . import media import os import pygame", "pos = self.pos_pour_index((colIndex, liIndex)) surface.blit(image, pos) etiquette = Categories[self.IndexType] if self.IndexSelection[0] != 0:", "- self.nomb_vignettes[i] * self.dim_vignette[i]) // 2 for i in (0, 1)] self.coin_HG =", "import elems from . import media import os import pygame from . 
import", "32, 32 self.dim_ecran = pygame.display.get_surface().get_size() self.IndexType = 0 self.marge_HG = 0, 16 self.nomb_vignettes", "Palette(menu.ElemInterface): def __init__(self, **kwargs): menu.ElemInterface.__init__(self, pos=(0, 0), alpha_fond=150, **kwargs) self.IndexSelection = [0, 0]", "pygame.mouse.get_pos() self.IndexSelection = self.index_pour_pos(pos) elif e.type == pygame.KEYDOWN: if e.key == pygame.K_UP: self.IndexSelection[1]", "= [0, 0] self.dim_vignette = 32, 32 self.dim_ecran = pygame.display.get_surface().get_size() self.IndexType = 0", "enumerate(self.grilles[self.IndexType]): for liIndex, elem in enumerate(col): if elem: elem, image, _nom = elem", "# Marque la selection d'une ombre Rect = pygame.Rect(self.pos_pour_index(self.IndexSelection), self.dim_vignette) pygame.draw.rect(surface, pygame.Color(0, 255,", "def index_pour_pos(self, pos): \"\"\" index col, index ligne\"\"\" return [(pos[i] - self.coin_HG[i]) //", "image_obj = None self.grilles[cat_index][col][li] = elem, image_obj, elem_name for cat_index, _categorie in enumerate(Categories):", "Categories] self.emplit_grille() def emplit_grille(self): for cat_index, categorie in enumerate(Categories): for i, elem in", "\"\"\" index col, index ligne\"\"\" return [index[i] * self.dim_vignette[i] + self.coin_HG[i] for i", "self.dim_vignette) pygame.draw.rect(surface, pygame.Color(0, 255, 0, 100), Rect, 1) self.affiche_ligne(surface, etiquette) def mettre_a_jour(self, e):", "self.emplit_grille() def emplit_grille(self): for cat_index, categorie in enumerate(Categories): for i, elem in enumerate(ListElements[categorie]):", "for item in os.listdir(vrac_dir) if item.split('.')[-1] in ['png', 'gif', 'jpeg', 'jpg']] def GetElements():", "import Elements, Categories from . import elems from . import media import os", "i in (0, 1)] coin_HG = [(self.dim_ecran[i] - self.nomb_vignettes[i] * self.dim_vignette[i]) // 2", "32 self.dim_ecran = pygame.display.get_surface().get_size() self.IndexType = 0 self.marge_HG = 0, 16 self.nomb_vignettes =", "(0, 1)] self.grilles = [[[None for _ in range(self.nomb_vignettes[1])] for _ in range(self.nomb_vignettes[0])]", "if selection: if isinstance(selection, str): sel = elems.Dessinable(pos=None, images=[selection]) else: sel = selection(pos=None)", "isinstance(elem, str): elem_name = elem image = os.path.join(media.MEDIA_REP, categorie, elem) else: elem_name =", "Repertoire in RepertoiresVrac: Elems[Repertoire] = DecorsVrac(Repertoire) return Elems ListElements = GetElements() class Palette(menu.ElemInterface):", "= elem.nomImages[0] else: image = None if image: # Retaille image_obj = pygame.transform.scale(media.charge_image(image),", "cat_index, _categorie in enumerate(Categories): for CatIndex2, _categorie in enumerate(Categories): self.grilles[cat_index][0][CatIndex2] = self.grilles[CatIndex2][1][0] def", "+= ' : ' + val[2] # Marque la selection d'une ombre Rect", "pygame.K_DOWN: self.IndexSelection[1] += 1 elif e.key == pygame.K_LEFT: self.IndexSelection[0] -= 1 elif e.key", "None self.grilles[cat_index][col][li] = elem, image_obj, elem_name for cat_index, _categorie in enumerate(Categories): for CatIndex2,", "from . 
# Guillaume227/supercoco
from .elems import Elements, Categories
from . import elems
from . import media
import os
import pygame
from . import menu

_Selection = None
RepertoiresVrac = ['decors Vrac', 'Cap Vrac']


def DecorsVrac(repertoire):
    vrac_dir = os.path.join(media.MEDIA_REP, repertoire)
    return [item for item in os.listdir(vrac_dir)
            if item.split('.')[-1] in ['png', 'gif', 'jpeg', 'jpg']]


def GetElements():
    Elems = Elements.copy()
    for Repertoire in RepertoiresVrac:
        Elems[Repertoire] = DecorsVrac(Repertoire)
    return Elems


ListElements = GetElements()


class Palette(menu.ElemInterface):

    def __init__(self, **kwargs):
        menu.ElemInterface.__init__(self, pos=(0, 0), alpha_fond=150, **kwargs)
        self.IndexSelection = [0, 0]
        self.dim_vignette = 32, 32
        self.dim_ecran = pygame.display.get_surface().get_size()
        self.IndexType = 0
        self.marge_HG = 0, 16
        self.nomb_vignettes = [(self.dim_ecran[i] - self.marge_HG[i]) // self.dim_vignette[i] for i in (0, 1)]
        coin_HG = [(self.dim_ecran[i] - self.nomb_vignettes[i] * self.dim_vignette[i]) // 2 for i in (0, 1)]
        self.coin_HG = [max(self.marge_HG[i], coin_HG[i]) for i in (0, 1)]
        self.grilles = [[[None for _ in range(self.nomb_vignettes[1])]
                         for _ in range(self.nomb_vignettes[0])]
                        for _ in Categories]
        self.emplit_grille()

    def emplit_grille(self):
        for cat_index, categorie in enumerate(Categories):
            for i, elem in enumerate(ListElements[categorie]):
                li = i % self.nomb_vignettes[1]
                col = 1 + i // self.nomb_vignettes[1]
                if isinstance(elem, str):
                    elem_name = elem
                    image = os.path.join(media.MEDIA_REP, categorie, elem)
                else:
                    elem_name = elem.__name__
                    if hasattr(elem, 'nomImages'):
                        image = elem.nomImages[0]
                    else:
                        image = None
                if image:
                    # Resize to thumbnail size
                    image_obj = pygame.transform.scale(media.charge_image(image), self.dim_vignette)
                else:
                    image_obj = None
                self.grilles[cat_index][col][li] = elem, image_obj, elem_name
        for cat_index, _categorie in enumerate(Categories):
            for CatIndex2, _categorie in enumerate(Categories):
                self.grilles[cat_index][0][CatIndex2] = self.grilles[CatIndex2][1][0]

    def index_pour_pos(self, pos):
        """column index, row index"""
        return [(pos[i] - self.coin_HG[i]) // self.dim_vignette[i] for i in (0, 1)]

    def pos_pour_index(self, index):
        """column index, row index"""
        return [index[i] * self.dim_vignette[i] + self.coin_HG[i] for i in (0, 1)]

    @property
    def valeur(self):
        val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]]
        if val is not None:
            return val[0]

    @valeur.setter
    def valeur(self, val):
        pass

    def affiche(self, surface):
        for colIndex, col in enumerate(self.grilles[self.IndexType]):
            for liIndex, elem in enumerate(col):
                if elem:
                    elem, image, _nom = elem
                    if image:
                        pos = self.pos_pour_index((colIndex, liIndex))
                        surface.blit(image, pos)
        etiquette = Categories[self.IndexType]
        if self.IndexSelection[0] != 0:
            val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]]
            if val:
                etiquette += ' : ' + val[2]
        # Highlight the current selection with an outline
        Rect = pygame.Rect(self.pos_pour_index(self.IndexSelection), self.dim_vignette)
        pygame.draw.rect(surface, pygame.Color(0, 255, 0, 100), Rect, 1)
        self.affiche_ligne(surface, etiquette)

    def mettre_a_jour(self, e):
        if e.type == pygame.MOUSEMOTION:
            pos = pygame.mouse.get_pos()
            self.IndexSelection = self.index_pour_pos(pos)
        elif e.type == pygame.KEYDOWN:
            if e.key == pygame.K_UP:
                self.IndexSelection[1] -= 1
            elif e.key == pygame.K_DOWN:
                self.IndexSelection[1] += 1
            elif e.key == pygame.K_LEFT:
                self.IndexSelection[0] -= 1
            elif e.key == pygame.K_RIGHT:
                self.IndexSelection[0] += 1
            if self.IndexSelection[0] == 0:
                cat_index = min(max(0, self.IndexSelection[1]), len(Categories) - 1)
                if self.IndexType != cat_index:
                    self.IndexType = cat_index
                    self.IndexSelection[1] = self.IndexType
            else:
                self.IndexSelection[1] %= len(ListElements[Categories[self.IndexType]])
        for i in 0, 1:
            self.IndexSelection[i] %= self.nomb_vignettes[i]


def Selecte():
    selection = Palette().boucle()
    pygame.event.clear()
    if selection:
        if isinstance(selection, str):
            sel = elems.Dessinable(pos=None, images=[selection])
        else:
            sel = selection(pos=None)
        sel.efface()
        return sel


def EditFields(item):
    return sorted([AttrName for AttrName in dir(item)
                   if AttrName.endswith('_') and not AttrName.startswith('_')])


class EditeurElem(menu.EditeurElem):

    def alafin(self):
        # Check that the photos exist (no typo in the file name)
        for AttrName, champ in self.champs:
            if AttrName == 'photos_':
                for val in champ.valeur:
                    try:
                        media.charge_image('photos/' + val)
                    except:
                        print('missing photo', val)
        menu.EditeurElem.alafin(self)


def Editor(*items):
    if EditFields(items[0]):
        choix_champs = dict(nomJoueur_=['coco', 'mario'],
                            surprise_=[None] + ListElements['Surprises'])
        editeur = EditeurElem(items, fonte_h=10, choixPourChamps=choix_champs, filtre_=True)
        editeur.boucle()
        pygame.event.clear()
        return editeur.modifie
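Selecte() and Editor() above are the module's entry points. A minimal usage sketch follows; the calling code and variable names below are hypothetical illustrations, not part of the original module.

# Hypothetical caller: pick an element from the palette, then edit its fields.
nouvel_elem = Selecte()             # runs the Palette loop, returns a drawable element or None
if nouvel_elem is not None:
    a_change = Editor(nouvel_elem)  # edits attributes ending in '_' (photos_, surprise_, ...)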
[ "paired = False addr = None data = None setScreenColor(0x000000) axp.setLDO2Volt(2.8) hat_joyc0 =", "0xFFFFFF, rotate=0) titlebar = M5Title(title=\"text\", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b) def main(): hat_joyc0.SetLedColor(0x3232ff) wifiCfg.wlan_ap.active(True) wifiCfg.wlan_sta.active(True)", "x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b) def main(): hat_joyc0.SetLedColor(0x3232ff) wifiCfg.wlan_ap.active(True) wifiCfg.wlan_sta.active(True) espnow.init() espnow.recv_cb(receive_msg) timerSch.run('UpdatePosition', 10, 0x00)", "104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0) titlebar = M5Title(title=\"text\", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b) def main():", "joy_pos = None paired = False addr = None data = None setScreenColor(0x000000)", "pass def receive_msg(_): global addr, data, paired addr, _, data = espnow.recv_data(encoder='str') label4.setText(str(data))", "titlebar.setTitle(str(\"%.1fv %.0fma\"%(float(axp.getBatVoltage()), float(axp.getBatCurrent())))) pass def receive_msg(_): global addr, data, paired addr, _, data", "timerSch.run('UpdatePosition', 10, 0x00) timerSch.run('UpdateBattery', 1000, 0x00) @timerSch.event('UpdatePosition') def tUpdatePosition(): global joy_pos joy_pos =", "is this a mac address? espnow.add_peer(str(data), id=1) espnow.send(id=1, data=str('connected')) paired = True label4.setText(str('paired'))", "\"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 = M5TextBox(22, 62, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label2", "M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0) titlebar = M5Title(title=\"text\", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b) def", "label1 = M5TextBox(22, 62, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label2 = M5TextBox(22, 76, \"Text\",", "titlebar = M5Title(title=\"text\", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b) def main(): hat_joyc0.SetLedColor(0x3232ff) wifiCfg.wlan_ap.active(True) wifiCfg.wlan_sta.active(True) espnow.init() espnow.recv_cb(receive_msg)", "lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 = M5TextBox(22, 62, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label2 =", "* from m5ui import * import espnow import wifiCfg import hat joy_pos =", "== False: #TODO: check if is this a mac address? espnow.add_peer(str(data), id=1) espnow.send(id=1,", "receiver. espnow.send(id=1, data=bytes(joy_pos)) pass @timerSch.event('UpdateBattery') def tUpdateBattery(): titlebar.setTitle(str(\"%.1fv %.0fma\"%(float(axp.getBatVoltage()), float(axp.getBatCurrent())))) pass def receive_msg(_):", "import * from m5ui import * import espnow import wifiCfg import hat joy_pos", "if paired == False: #TODO: check if is this a mac address? espnow.add_peer(str(data),", "m5ui import * import espnow import wifiCfg import hat joy_pos = None paired", "True: #TODO: Add msg type code, and check at receiver. espnow.send(id=1, data=bytes(joy_pos)) pass", "paired == False: #TODO: check if is this a mac address? 
espnow.add_peer(str(data), id=1)", "48, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 = M5TextBox(22, 62, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0)", "\"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label2 = M5TextBox(22, 76, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label3", "timerSch.run('UpdateBattery', 1000, 0x00) @timerSch.event('UpdatePosition') def tUpdatePosition(): global joy_pos joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1),", "hat joy_pos = None paired = False addr = None data = None", "None data = None setScreenColor(0x000000) axp.setLDO2Volt(2.8) hat_joyc0 = hat.get(hat.JOYC) label0 = M5TextBox(22, 48,", "90, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label4 = M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0)", "m5stack import * from m5ui import * import espnow import wifiCfg import hat", "rotate=0) label3 = M5TextBox(22, 90, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label4 = M5TextBox(22, 104,", "data, paired addr, _, data = espnow.recv_data(encoder='str') label4.setText(str(data)) if paired == False: #TODO:", "rotate=0) titlebar = M5Title(title=\"text\", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b) def main(): hat_joyc0.SetLedColor(0x3232ff) wifiCfg.wlan_ap.active(True) wifiCfg.wlan_sta.active(True) espnow.init()", "= hat.get(hat.JOYC) label0 = M5TextBox(22, 48, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 = M5TextBox(22,", "def tUpdatePosition(): global joy_pos joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)] label0.setText(str(joy_pos[0])) label1.setText(str(joy_pos[1])) label2.setText(str(joy_pos[2]))", "type code, and check at receiver. espnow.send(id=1, data=bytes(joy_pos)) pass @timerSch.event('UpdateBattery') def tUpdateBattery(): titlebar.setTitle(str(\"%.1fv", "setScreenColor(0x000000) axp.setLDO2Volt(2.8) hat_joyc0 = hat.get(hat.JOYC) label0 = M5TextBox(22, 48, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0)", "rotate=0) label1 = M5TextBox(22, 62, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label2 = M5TextBox(22, 76,", "label2 = M5TextBox(22, 76, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label3 = M5TextBox(22, 90, \"Text\",", "label3.setText(str(joy_pos[3])) if paired == True: #TODO: Add msg type code, and check at", "= None setScreenColor(0x000000) axp.setLDO2Volt(2.8) hat_joyc0 = hat.get(hat.JOYC) label0 = M5TextBox(22, 48, \"Text\", lcd.FONT_Default,", "global addr, data, paired addr, _, data = espnow.recv_data(encoder='str') label4.setText(str(data)) if paired ==", "label0.setText(str(joy_pos[0])) label1.setText(str(joy_pos[1])) label2.setText(str(joy_pos[2])) label3.setText(str(joy_pos[3])) if paired == True: #TODO: Add msg type code,", "def tUpdateBattery(): titlebar.setTitle(str(\"%.1fv %.0fma\"%(float(axp.getBatVoltage()), float(axp.getBatCurrent())))) pass def receive_msg(_): global addr, data, paired addr,", "lcd.FONT_Default, 0xFFFFFF, rotate=0) label3 = M5TextBox(22, 90, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label4 =", "label3 = M5TextBox(22, 90, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label4 = M5TextBox(22, 104, \"Unpaired\",", "_, data = espnow.recv_data(encoder='str') label4.setText(str(data)) if paired == False: #TODO: check if is", "= M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0) titlebar = M5Title(title=\"text\", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b)", "[hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)] label0.setText(str(joy_pos[0])) 
label1.setText(str(joy_pos[1])) label2.setText(str(joy_pos[2])) label3.setText(str(joy_pos[3])) if paired == True: #TODO:", "M5TextBox(22, 76, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label3 = M5TextBox(22, 90, \"Text\", lcd.FONT_Default, 0xFFFFFF,", "hat_joyc0 = hat.get(hat.JOYC) label0 = M5TextBox(22, 48, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 =", "rotate=0) label4 = M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0) titlebar = M5Title(title=\"text\", x=3,", "label0 = M5TextBox(22, 48, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 = M5TextBox(22, 62, \"Text\",", "at receiver. espnow.send(id=1, data=bytes(joy_pos)) pass @timerSch.event('UpdateBattery') def tUpdateBattery(): titlebar.setTitle(str(\"%.1fv %.0fma\"%(float(axp.getBatVoltage()), float(axp.getBatCurrent())))) pass def", "paired == True: #TODO: Add msg type code, and check at receiver. espnow.send(id=1,", "76, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label3 = M5TextBox(22, 90, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0)", "\"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label4 = M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0) titlebar", "= M5TextBox(22, 90, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label4 = M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default,", "espnow.send(id=1, data=bytes(joy_pos)) pass @timerSch.event('UpdateBattery') def tUpdateBattery(): titlebar.setTitle(str(\"%.1fv %.0fma\"%(float(axp.getBatVoltage()), float(axp.getBatCurrent())))) pass def receive_msg(_): global", "wifiCfg import hat joy_pos = None paired = False addr = None data", "def main(): hat_joyc0.SetLedColor(0x3232ff) wifiCfg.wlan_ap.active(True) wifiCfg.wlan_sta.active(True) espnow.init() espnow.recv_cb(receive_msg) timerSch.run('UpdatePosition', 10, 0x00) timerSch.run('UpdateBattery', 1000, 0x00)", "0xFFFFFF, rotate=0) label4 = M5TextBox(22, 104, \"Unpaired\", lcd.FONT_Default, 0xFFFFFF, rotate=0) titlebar = M5Title(title=\"text\",", "mac address? espnow.add_peer(str(data), id=1) espnow.send(id=1, data=str('connected')) paired = True label4.setText(str('paired')) pass else: pass", "joy_pos joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)] label0.setText(str(joy_pos[0])) label1.setText(str(joy_pos[1])) label2.setText(str(joy_pos[2])) label3.setText(str(joy_pos[3])) if paired", "== True: #TODO: Add msg type code, and check at receiver. espnow.send(id=1, data=bytes(joy_pos))", "M5TextBox(22, 48, \"Text\", lcd.FONT_Default, 0xFFFFFF, rotate=0) label1 = M5TextBox(22, 62, \"Text\", lcd.FONT_Default, 0xFFFFFF,", "1000, 0x00) @timerSch.event('UpdatePosition') def tUpdatePosition(): global joy_pos joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)]", "addr = None data = None setScreenColor(0x000000) axp.setLDO2Volt(2.8) hat_joyc0 = hat.get(hat.JOYC) label0 =", "global joy_pos joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)] label0.setText(str(joy_pos[0])) label1.setText(str(joy_pos[1])) label2.setText(str(joy_pos[2])) label3.setText(str(joy_pos[3])) if", "False addr = None data = None setScreenColor(0x000000) axp.setLDO2Volt(2.8) hat_joyc0 = hat.get(hat.JOYC) label0", "wifiCfg.wlan_ap.active(True) wifiCfg.wlan_sta.active(True) espnow.init() espnow.recv_cb(receive_msg) timerSch.run('UpdatePosition', 10, 0x00) timerSch.run('UpdateBattery', 1000, 0x00) @timerSch.event('UpdatePosition') def tUpdatePosition():", "if is this a mac address? 
from m5stack import *
from m5ui import *
import espnow
import wifiCfg
import hat

joy_pos = None
paired = False
addr = None
data = None

setScreenColor(0x000000)
axp.setLDO2Volt(2.8)
hat_joyc0 = hat.get(hat.JOYC)

label0 = M5TextBox(22, 48, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label1 = M5TextBox(22, 62, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label2 = M5TextBox(22, 76, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label3 = M5TextBox(22, 90, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label4 = M5TextBox(22, 104, "Unpaired", lcd.FONT_Default, 0xFFFFFF, rotate=0)
titlebar = M5Title(title="text", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b)


def main():
    hat_joyc0.SetLedColor(0x3232ff)
    wifiCfg.wlan_ap.active(True)
    wifiCfg.wlan_sta.active(True)
    espnow.init()
    espnow.recv_cb(receive_msg)
    timerSch.run('UpdatePosition', 10, 0x00)
    timerSch.run('UpdateBattery', 1000, 0x00)


@timerSch.event('UpdatePosition')
def tUpdatePosition():
    global joy_pos
    joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)]
    label0.setText(str(joy_pos[0]))
    label1.setText(str(joy_pos[1]))
    label2.setText(str(joy_pos[2]))
    label3.setText(str(joy_pos[3]))
    if paired == True:
        # TODO: Add msg type code, and check at receiver.
        espnow.send(id=1, data=bytes(joy_pos))
    pass


@timerSch.event('UpdateBattery')
def tUpdateBattery():
    titlebar.setTitle(str("%.1fv %.0fma" % (float(axp.getBatVoltage()), float(axp.getBatCurrent()))))
    pass


def receive_msg(_):
    global addr, data, paired
    addr, _, data = espnow.recv_data(encoder='str')
    label4.setText(str(data))
    if paired == False:
        # TODO: check if this is a mac address?
        espnow.add_peer(str(data), id=1)
        espnow.send(id=1, data=str('connected'))
        paired = True
        label4.setText(str('paired'))
        pass
    else:
        pass


main()
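The sender above packs the four joystick axes into a single 4-byte ESP-NOW payload via bytes(joy_pos). As a hedged illustration of the matching step on the receiving side, the helper below (hypothetical, not part of the original sketch) recovers the axis list from such a frame; it only assumes each axis fits in one unsigned byte, which is exactly what bytes() requires of the values in joy_pos.

# Hypothetical helper (not in the original sketch): recover the four axis
# values from the 4-byte frame produced by bytes(joy_pos) on the sender.
def unpack_joystick(payload):
    # Order matches tUpdatePosition(): [left_x, left_y, right_x, right_y],
    # each one unsigned byte in the 0-255 range.
    if len(payload) != 4:
        raise ValueError("expected a 4-byte joystick frame")
    return list(payload)

# Example: the frame b'\x80\x7f\x00\xff' decodes to [128, 127, 0, 255].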
[ "import unittest from mock import Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class", "LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis", "self.name = 'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5)", "attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato", "= LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self):", "LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value = 5 self.name = 'mock'", "def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis", "mock import Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self):", "self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute 'potato'\", context.exception.args) def _callback(self):", "lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute", "with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args) def", "= LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\",", "context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object", "MockObject(object): def __init__(self): self.value = 5 self.name = 'mock' def setUp(self): pass def", "LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args)", "self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self):", "class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value = 5 self.name = 'mock' def", "def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with", "= 'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def", "def __init__(self): self.value = 5 self.name = 'mock' def 
setUp(self): pass def test_get_attr_returns_attribute_value(self):", "has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as", "TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value = 5 self.name = 'mock' def setUp(self):", "no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context:", "lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def", "def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name,", "context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis =", "5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback)", "test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no", "as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis", "as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute 'potato'\", context.exception.args) def _callback(self): return", "5 self.name = 'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value,", "from mock import Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def", "self.value = 5 self.name = 'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis =", "import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value = 5 self.name =", "self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with", "test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no", "unittest from mock import Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object):", "pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback)", "'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = 
LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject'", "self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis =", "__init__(self): self.value = 5 self.name = 'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis", "<filename>atlas/foundations_contrib/src/test/helpers/test_lazy_redis.py import unittest from mock import Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase):", "= LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context:", "def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has", "object has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError)", "test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock')", "= LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute 'potato'\",", "= 5 self.name = 'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback)", "class MockObject(object): def __init__(self): self.value = 5 self.name = 'mock' def setUp(self): pass", "LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis", "import Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value", "lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as", "'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object", "with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute 'potato'\", context.exception.args) def", "lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute", "'mock' def setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self):", "test_get_attr_returns_attribute_name(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with 
self.assertRaises(AttributeError)", "from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value = 5", "LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute 'potato'\", context.exception.args)", "Mock from foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value =", "foundations_contrib.helpers.lazy_redis import LazyRedis class TestLazyRedis(unittest.TestCase): class MockObject(object): def __init__(self): self.value = 5 self.name", "lazy_redis.redis self.assertIn(\"'MockObject' object has no attribute 'redis'\", context.exception.args) def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback)", "context: lazy_redis.potato self.assertIn(\"'MockObject' object has no attribute 'potato'\", context.exception.args) def _callback(self): return self.MockObject()", "setUp(self): pass def test_get_attr_returns_attribute_value(self): lazy_redis = LazyRedis(self._callback) self.assertEqual(lazy_redis.value, 5) def test_get_attr_returns_attribute_name(self): lazy_redis =", "def test_get_attr_raises_attribute_error_different_attribute(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.potato self.assertIn(\"'MockObject' object has", "self.assertEqual(lazy_redis.name, 'mock') def test_get_attr_raises_attribute_error(self): lazy_redis = LazyRedis(self._callback) with self.assertRaises(AttributeError) as context: lazy_redis.redis self.assertIn(\"'MockObject'" ]
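The tests above pin down the helper's observable behaviour: the callback builds the real object, attribute reads are forwarded to it, and a missing attribute surfaces as an AttributeError that names the wrapped object's class. A minimal sketch with that behaviour is shown below as an assumption for illustration; it is not the actual foundations_contrib implementation.

# Sketch of a lazy proxy matching the behaviour the tests expect
# (assumption; not the real foundations_contrib LazyRedis).
class LazyProxySketch(object):

    def __init__(self, callback):
        # the callback builds the real object the first time it is needed
        self._callback = callback
        self._instance = None

    def __getattr__(self, name):
        # only invoked when normal attribute lookup fails, i.e. for delegated names
        if self._instance is None:
            self._instance = self._callback()
        # an AttributeError raised here names the wrapped object's class,
        # matching the "'MockObject' object has no attribute ..." assertions
        return getattr(self._instance, name)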
[ "self.__check_buttons = config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons", "设置标签(label)默认宽度 self.__label_width = 25 # 设置下拉框(comboxs)默认宽度 self.__comboxs_width = 20 # 设置单选按钮(checkBut)默认宽度 self.__checkBut_width =", "+= 1 def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name = params[1]", "= DISABLED try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] =", "def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press", "# 闪烁单选框对象字典 self.thread_buttons = dict() # 下拉框对象字典 self.comboxs = dict() # 输入框对象字典 self.entries", "x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column,", "{self.column}\") self.column += 1 index += 1 self.row += 1 if len(self.__comboxs) !=", "+= 1 index += 1 self.row += 1 if len(self.__entries) != 0: Separator(self,", "创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False,", "column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column", "e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value = self.entries[function_name].get() params = self.__entries[function_name] actions =", "+= 1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name, show_name = CLEAR_STACK # 创建Button对象 self.buttons[text_name]", "sticky=W) self.column += 1 # 创建下拉框 self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5) #", "self.buttons[text_name][\"state\"] = NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_SIGNAL:", "self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\") def __special_button_event(self, button_type: tuple):", "self.buttons[function_name] = Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column =", "{self.__comboxs}\") # 输入框按钮配置 self.__entries = config[entries] if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\")", "self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip()", "self.check_button_bool_vars[function_name] = BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda", "select_name = actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值", "logger.debug(f\"entries = 
{self.__entries}\") # 按钮框配置 self.__buttons = config[buttons] if config[buttons] else dict() logger.debug(f\"buttons", "= self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search) show_message = \"成功\" if result else", "button {function_name} in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row =", "messagebox.showerror(title=show_message, message=message) except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] =", "# 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons() def create_common_widget(self): \"\"\" 创建", "import List, Dict, Any, Union, Optional from automotive.logger.logger import logger from automotive.core.can.can_service import", "self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column += 1 # ********** 创建清除接收到的CAN信号按钮 **********", "filter_nodes: List[str], common_panel: bool = False, max_line_count: int = None): super().__init__(master) self.can_service =", "CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes #", "名字上以【】区别 \"\"\" # 创建事件单选框 if self.row != 0: self.row += 1 index =", "name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions: List): for action in", "show_name = DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) #", "Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __entry_event(self,", "value in self.__buttons.items(): function_name = key text_name = value[TEXT] if index == 0:", "values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values = list(values.keys()) # 当前选中的是第几个 combox_index =", "column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"]", "self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W)", "self.column += 1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name, show_name = CLEAR_STACK # 创建Button对象", "self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name): self.__entry_event(x, y)) self.column += 1 index +=", "self.column += 1 # ********** 创建一个信号丢失的输入框 entry ********** text_name, show_name = MESSAGE_LOST #", "1 self.column = 0 else: self.column += 1 # 获取下拉框的名称 values = list(value[VALUES].keys())", "msg_id, signals = action logger.info(f\"{hex(msg_id)} = {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as", "if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() #", "values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 
self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column = {self.column}, index", "param[ACTIONS] if self.thread_button_bool_vars[text_name].get(): if function_name not in self.thread_task: task = self.thread_pool.submit(self.__thread_method, text_name, actions)", "bus lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name = param[TEXT]", "= Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] =", "# 0x164 values = [value] for msg_id in values: msg_id = msg_id.strip() #", "\"成功\" if result else \"失败\" exact_message = \"精确\" if exact_search else \"不精确\" message", "action in actions: if len(action) == 2: msg_id, signals = action logger.info(f\"{hex(msg_id)} =", "1 def __combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1] combox_param", "logger.debug(f\"receive_buttons = {self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count = max_line_count # 36 # 双行能够容纳的数量 self.__max_double_line_count", "# 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column,", "else \"失败\" exact_message = \"精确\" if exact_search else \"不精确\" message = f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\", "# 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column,", "= {message_id}\") try: self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value", "NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_SIGNAL: # 获取signal", "{text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\",", "orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 self.__create_message_signal_check() def __create_message_check(self):", "sticky=W) self.buttons[text_name][\"state\"] = NORMAL self.column += 1 # ********** 创建关闭设备按钮 ********** text_name, show_name", "column = {self.column}, index = {index}\") # 获取输入框的名称 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column,", "- 21:24 # -------------------------------------------------------- import copy from time import sleep from tkinter import", "text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for action in new_actions: if len(action)", "msg_id.strip() # 处理16进制 if \"x\" in msg_id or \"X\" in msg_id: # 把16进制转换成10进制", "column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column += 1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name,", "+= 1 index += 1 self.row += 1 if len(self.__comboxs) != 0: Separator(self,", "logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value = self.entries[function_name].get() params = 
self.__entries[function_name] actions = params[ACTIONS]", "self.tabs = [] for key, value in tab_configs.items(): logger.info(f\"handle tab {key}\") if key", "+ W, columnspan=self.__max_line_count) self.row += 1 # ********** 创建信号检查部分 ********** self.__create_message_check() # **********", "= SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=20)", "column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 # ********** 创建信号检查部分 **********", "search_count_text != \"\": search_count = int(search_count_text) else: search_count = None # 获取是否精确查找 index", "self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0]) self.tk.protocol('WM_DELETE_WINDOW', self.exit_root) self.tk.mainloop() def exit_root(self):", "signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search) show_message = \"成功\" if result else \"失败\" exact_message", "输入框按钮配置 self.__entries = config[entries] if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") # 按钮框配置", "NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\" if self.row != 0: self.row +=", "bool = False, max_line_count: int = None): super().__init__(master) self.can_service = can_service self.thread_pool =", "= values[ON] off_actions = values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\")", "= {self.column}, index = {index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W)", "!= 0: self.row += 1 # 创建单选框 index = 0 for key, value", "= param[TEXT] logger.debug(f\"press {text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as", "config = service.read_from_file(excel_file) tab_configs.update(config) self.tab_control = Notebook(self.tk) # tab选项框对象字典 self.tabs = [] for", "column = {self.column}, index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件", "params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for action in new_actions: if len(action) == 2:", "= int(self.__max_line_count / 2) # 设置标签(label)默认宽度 self.__label_width = 25 # 设置下拉框(comboxs)默认宽度 self.__comboxs_width =", "+= 1 # ********** 创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE # 创建Button对象 self.buttons[text_name]", "button_type == CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type ==", "for action in new_actions: if len(action) == 2: msg_id, signals = action for", "+= 1 # 创建下拉框 self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0)", "下拉框对象字典 self.comboxs = dict() # 输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task =", "button_type == OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] = DISABLED self.buttons[close_text_name][\"state\"] = NORMAL elif button_type ==", "Dict, Any, Union, Optional from automotive.logger.logger import logger from automotive.core.can.can_service import CANService from", "= # 开始的行列 
self.row = 0 self.column = 0 # 布局显示 self.pack() #", "self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name = param[TEXT] actions = param[ACTIONS] if", "float(entry_value) self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if", "column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\") def __special_button_event(self, button_type: tuple): text_name,", "CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0]", "Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL", "# 总线丢失按钮 = # 开始的行列 self.row = 0 self.column = 0 # 布局显示", "in tab_configs.items(): logger.info(f\"handle tab {key}\") if key == COMMON: common_panel = True else:", "!= 0: self.row += 1 index = 0 for key, value in self.__thread_buttons.items():", "len(action) == 2: msg_id, signals = action for name, value in signals.items(): if", "from tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk,", "BaudRateEnum.DATA, channel_index: int = 1, filter_nodes: Optional[List[str]] = None, can_fd: bool = False,", "tab_configs = dict() tab_configs[COMMON] = {check_buttons: {}, thread_buttons: {}, comboxs: {}, entries: {},", "dict() # 闪烁事件Task self.thread_task = dict() # 总线丢失按钮 = # 开始的行列 self.row =", "class Gui(object): def __init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] =", "function_name[1] combox_param = self.__comboxs[function_name] # 字典中定义的值列表 values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values", "精确查找的等选择 :return: \"\"\" self.column = 0 text_name, show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column,", "== 0: self.row += 1 self.column = 0 else: self.column += 1 logger.debug(f\"row", "1 # 创建单选框 index = 0 for key, value in self.__check_buttons.items(): function_name =", "== 2: msg_id, signals = action for name, value in signals.items(): if value", "CANService, config: Dict[str, Any], filter_nodes: List[str], common_panel: bool = False, max_line_count: int =", "text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 #", "self.row += 1 if len(self.__receive_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E +", "@Created: 2021/12/15 - 21:24 # -------------------------------------------------------- import copy from time import sleep from", "in msg_id: # 把16进制转换成10进制 message_id = int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\", 16)", "+= 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name =", "self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column,", "True else: common_panel = False tab = TabFrame(self.tk, can_service=self.can_service, 
filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count)", "sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name]", "+= 1 self.row += 1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5,", "automotive.logger.logger import logger from automotive.core.can.can_service import CANService from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum from", "= Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUE", "for key, value in self.__receive_buttons.items(): function_name = key text_name = value[TEXT] if index", "= TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") #", "= Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] =", "e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) == 1: logger.debug(f\"sleep {action} seconds\") sleep_time =", "1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name = values[TEXT] on_actions = values[ON]", "lizhe, All rights reserved # -------------------------------------------------------- # @Name: gui.py.py # @Author: lizhe #", "= {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__buttons)", "0: self.column = 0 elif index % self.__max_line_count == 0: self.row += 1", "self.column += 1 self.entries[text_name] = Entry(self, width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column,", "message=message) except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL", "# 设置输入框(entrie)默认宽度 self.__entrie_width = 10 # 输入框支持的事件列表 self.support_event_keys = \"<Return>\", # 单选框值 self.check_button_bool_vars", "= dict() # 闪烁单选框值 self.thread_button_bool_vars = dict() # 按钮框对象字典 self.buttons = dict() #", "elif index % self.__max_line_count == 0: self.row += 1 self.column = 0 else:", "# self.open_image = PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3, 3) # 创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮", "{self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1", "thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST,", "\"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row != 0: self.row += 1 index", "= \"<Return>\", # 单选框值 self.check_button_bool_vars = dict() # 闪烁单选框值 self.thread_button_bool_vars = dict() #", "filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\")", "创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", 
variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\"", "finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class Gui(object): def __init__(self, excel_file: str, dbc: str,", "\"\"\" self.column = 0 text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column", "self.__receive_buttons.items(): function_name = key text_name = value[TEXT] if index == 0: self.column =", "# 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__check_button_event(x), width=self.__checkBut_width,", "function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def", "\"x\" in msg_id or \"X\" in msg_id: # 把16进制转换成10进制 message_id = int(msg_id, 16)", "def __thread_method(self, name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions: List): for", "-------------------------------------------------------- # @Name: gui.py.py # @Author: lizhe # @Created: 2021/12/15 - 21:24 #", "0 text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name]", "in self.thread_task: task = self.thread_pool.submit(self.__thread_method, text_name, actions) self.thread_task[function_name] = task else: if function_name", "获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\")", "message_lost = MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if function_name == message_lost: value =", "variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row", "# 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column,", "sticky=W) # 创建输入框 self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column},", "text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] =", "result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0: self.entries[signal_values_text_name][\"state\"] = NORMAL # 将之前的值先清空", "columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row,", "= actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\"", "# 布局checkbutton 
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个信号丢失的输入框 entry **********", "logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\"", "index % self.__max_double_line_count == 0: self.row += 1 self.column = 0 else: self.column", "= [] for key, value in tab_configs.items(): logger.info(f\"handle tab {key}\") if key ==", "SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) self.entries[text_name].grid(row=self.row,", "= NORMAL def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return: \"\"\" self.column = 0", "task = self.thread_pool.submit(self.__thread_method, text_name, actions) self.thread_task[function_name] = task else: if function_name in self.thread_task:", "1 # 获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row = {self.row}, column = {self.column}, index", "index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row +=", "# 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column,", "for key, value in tab_configs.items(): logger.info(f\"handle tab {key}\") if key == COMMON: common_panel", "create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\" if self.row != 0: self.row += 1 index", "\"\"\" 创建信号检查部分 帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0 text_name, show_name", "width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column +", "# 按钮框配置 self.__buttons = config[buttons] if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\") #", "if value != \"\": # 0x152,0x153, 0x154 value.replace(\",\", \",\") if \",\" in value:", "DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row,", "self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测", "1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name, show_name = CLEAR_STACK # 创建Button对象 self.buttons[text_name] =", "Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column = {self.column},", "text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column = {self.column}, index =", "= BaudRateEnum.DATA, channel_index: int = 1, filter_nodes: Optional[List[str]] = None, can_fd: bool =", "= self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if", "SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, 
show_name = button_type if button_type", "# 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = CHECK_MESSAGE #", "dict() logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs = config[comboxs] if config[comboxs] else dict()", "********** 创建一个总线丢失的按钮 button ********** text_name, show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self,", "if len(result) > 0: self.entries[signal_values_text_name][\"state\"] = NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中", "# 布局显示 self.pack() # todo 64*64 3 3比较合适 # self.open_image = PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3,", "self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"] = NORMAL", "= param[TEXT] actions = param[ACTIONS] if self.thread_button_bool_vars[text_name].get(): if function_name not in self.thread_task: task", "********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5,", "x, y=(\"\", text_name): self.__entry_event(x, y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class", "HORIZONTAL, E , PhotoImage, LEFT from tkinter.ttk import Combobox, Notebook, Separator from typing", "config[thread_buttons] if config[thread_buttons] else dict() logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs = config[comboxs]", "self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self,", "Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self,", "pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name): values =", "DISABLED try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL", "columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUES Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column", "+= 1 self.row += 1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5,", "tab选项框对象字典 self.tabs = [] for key, value in tab_configs.items(): logger.info(f\"handle tab {key}\") if", "# 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width,", "combox_param = self.__comboxs[function_name] # 字典中定义的值列表 values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values =", "= Entry(self, width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column +=", "stack = 
self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search) show_message =", "0: self.row += 1 index = 0 for key, value in self.__comboxs.items(): function_name", ":param filter_nodes:发送默认信号筛选器(默认值) :param can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk", "else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else", "message=f\"{signal_name} is not received\") self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" #", "= value.split(\",\") else: # 0x164 values = [value] for msg_id in values: msg_id", "import CanBoxDeviceEnum, BaudRateEnum from .reader import ConfigReader from .reader import check_buttons, thread_buttons, comboxs,", "in self.__thread_buttons.items(): function_name = key text_name = value[TEXT] if index == 0: self.column", "messagebox, \\ HORIZONTAL, E , PhotoImage, LEFT from tkinter.ttk import Combobox, Notebook, Separator", "20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度 self.__entrie_width = 10 # 输入框支持的事件列表", "self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if", "sticky=W, columnspan=5) self.column += 5 text_name, show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] =", "2021/12/15 - 21:24 # -------------------------------------------------------- import copy from time import sleep from tkinter", "key, value in tab_configs.items(): logger.info(f\"handle tab {key}\") if key == COMMON: common_panel =", "width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) #", "# 绑定事件 for event_key in self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name): self.__entry_event(x, y))", "self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search) show_message = \"成功\" if result else \"失败\"", "while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions: List): for action in actions: if len(action)", "= 20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度 self.__entrie_width = 10 #", "= self.__check_buttons[function_name] text_name = values[TEXT] on_actions = values[ON] off_actions = values[OFF] if self.check_button_bool_vars[function_name].get():", "{self.row}, column = {self.column}\") self.column += 1 index += 1 self.row += 1", "= {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index +=", "CANService self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes", "********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "Label(self, text=show_name).grid(row=self.row, 
column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column,", "设置下拉框(comboxs)默认宽度 self.__comboxs_width = 20 # 设置单选按钮(checkBut)默认宽度 self.__checkBut_width = 25 # 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width =", "1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count)", "columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name]", "self.column = 0 elif index % self.__max_double_line_count == 0: self.row += 1 self.column", "automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum from .reader import ConfigReader from .reader import check_buttons, thread_buttons,", "= self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0: self.entries[signal_values_text_name][\"state\"] = NORMAL", "W, columnspan=self.__max_line_count) self.row += 1 def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event)", "filter_nodes: Optional[List[str]] = None, can_fd: bool = False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers:", "class TabFrame(Frame): def __init__(self, master, can_service: CANService, config: Dict[str, Any], filter_nodes: List[str], common_panel:", "self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row,", "self.thread_button_bool_vars = dict() # 按钮框对象字典 self.buttons = dict() # 单选框对象字典 self.check_buttons = dict()", "EXACT_SEARCH[0] text_name, show_name = button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL", "for name, value in signals.items(): if value is None: logger.debug(f\"change {name} value to", "as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) == 1: logger.debug(f\"sleep {action} seconds\") sleep_time", "count, expect_value = check_msgs try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name,", "self.support_event_keys = \"<Return>\", # 单选框值 self.check_button_bool_vars = dict() # 闪烁单选框值 self.thread_button_bool_vars = dict()", "self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5)", "= DISABLED self.column += 1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name, show_name = CLEAR_STACK", "0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name} in buttons\") self.buttons[function_name]", "orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self, function_name):", "+= 1 # ********** 创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row +=", "= {self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count = max_line_count # 36 # 双行能够容纳的数量 self.__max_double_line_count =", "创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row != 0: self.row += 1 index =", "elif button_type == 
CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value", "self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__receive_buttons) != 0:", "else: self.column += 1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button =", "-*- # -------------------------------------------------------- # Copyright (C), 2016-2021, lizhe, All rights reserved # --------------------------------------------------------", "logger.debug(f\"message_id = {message_id}\") try: self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else:", "can_box_device: Union[CanBoxDeviceEnum, str, None] = None, baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum,", "action for name, value in signals.items(): if value is None: logger.debug(f\"change {name} value", "elif button_type == CHECK_SIGNAL: # 获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result", "= config[comboxs] if config[comboxs] else dict() logger.debug(f\"comboxs = {self.__comboxs}\") # 输入框按钮配置 self.__entries =", "{message_id}\") try: self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value =", "= can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons]", "self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框", "baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮 service", "command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row,", "self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value = self.entries[function_name].get() params", "sticky=W) index += 1 self.row += 1 if len(self.__receive_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row,", "\"\"\" self.column = 0 text_name, show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column", "Dict[str, Any], filter_nodes: List[str], common_panel: bool = False, max_line_count: int = None): super().__init__(master)", "= Entry(self, width=8) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = EXACT_SEARCH", "1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row =", "logger.info(\"can bus lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name =", "if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") 
self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件,", "__entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if function_name ==", "= 25 # 设置下拉框(comboxs)默认宽度 self.__comboxs_width = 20 # 设置单选按钮(checkBut)默认宽度 self.__checkBut_width = 25 #", "% self.__max_double_line_count == 0: self.row += 1 self.column = 0 else: self.column +=", "# 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度 self.__entrie_width", "str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None, baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate:", "== CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type == CLEAR_STACK:", "Entry(self, width=8) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name,", "signal_value_text != \"\": signal_value = int(signal_value_text) # 获取次数 search_count_text = self.entries[search_count_text_name].get() if search_count_text", "index = self.comboxs[exact_search_text_name].current() # 选中第一个则表示是True exact_search = (index == 0) stack = self.can_service.get_stack()", "exact=exact_search) show_message = \"成功\" if result else \"失败\" exact_message = \"精确\" if exact_search", "__check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name = values[TEXT] on_actions = values[ON] off_actions =", "value in self.__comboxs.items(): function_name = key text_name = value[TEXT] if index == 0:", "len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row +=", "self.thread_task = dict() # 总线丢失按钮 = # 开始的行列 self.row = 0 self.column =", "except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self, button_type:", "= {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__receive_buttons)", "= 24 # 设置输入框(entrie)默认宽度 self.__entrie_width = 10 # 输入框支持的事件列表 self.support_event_keys = \"<Return>\", #", "stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0: self.entries[signal_values_text_name][\"state\"] =", "def __special_button_event(self, button_type: tuple): text_name, show_name = button_type self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type)", "检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0:", "self.buttons[text_name][\"state\"] = NORMAL self.column += 1 # ********** 创建关闭设备按钮 ********** text_name, show_name =", "SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name =", "x, y=(\"\", function_name): self.__entry_event(x, y)) self.column += 1 index += 1 self.row +=", "# 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称", "name signal_name = self.entries[check_signal_name_text_name].get().strip() # 
检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack,", "# 获取次数 search_count_text = self.entries[search_count_text_name].get() if search_count_text != \"\": search_count = int(search_count_text) else:", "columnspan=self.__max_line_count) self.row += 1 def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name", "excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None, baud_rate: Union[BaudRateEnum, int]", "创建一个信号丢失的输入框 entry ********** text_name, show_name = MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "单选框值 self.check_button_bool_vars = dict() # 闪烁单选框值 self.thread_button_bool_vars = dict() # 按钮框对象字典 self.buttons =", "key, value in self.__buttons.items(): function_name = key text_name = value[TEXT] if index ==", "offvalue=False, command=lambda x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row = {self.row},", "= list(value[VALUES].keys()) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 创建Label框", "signals) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) == 1: logger.debug(f\"sleep", "== 0) stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search)", "= msg_id.strip() # 处理16进制 if \"x\" in msg_id or \"X\" in msg_id: #", "logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row, column=self.column", "f\"【{e}】\") elif len(action) == 1: logger.debug(f\"sleep {action} seconds\") sleep_time = float(action[0]) sleep(sleep_time) else:", "# 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name", "command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # **********", "= NORMAL elif button_type == OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] = DISABLED self.buttons[close_text_name][\"state\"] = NORMAL", "self.__comboxs_width = 20 # 设置单选按钮(checkBut)默认宽度 self.__checkBut_width = 25 # 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20", "# 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column,", "self.buttons[close_text_name][\"state\"] = DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type", "{key}\") if key == COMMON: common_panel = True else: common_panel = False tab", "List): for action in actions: if len(action) == 2: msg_id, signals = action", "if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions)", "value in self.__thread_buttons.items(): function_name = key text_name = value[TEXT] if index == 0:", "self.column += 1 
self.entries[text_name] = Entry(self, width=8) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column,", "exact_search else \"不精确\" message = f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else:", "= False, max_line_count: int = None): super().__init__(master) self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool", "self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个发送默认消息的按钮 button ********** text_name, show_name", "# 把16进制转换成10进制 message_id = int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id =", "self.comboxs[exact_search_text_name].current() # 选中第一个则表示是True exact_search = (index == 0) stack = self.can_service.get_stack() result =", "show_name = MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name]", "def __init__(self, master, can_service: CANService, config: Dict[str, Any], filter_nodes: List[str], common_panel: bool =", "else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif", "+ W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"] = DISABLED", "if key == COMMON: common_panel = True else: common_panel = False tab =", "== CHECK_SIGNAL: # 获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack =", "None): super().__init__(master) self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置", "show_name = button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif button_type", "{index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] =", "1: logger.debug(f\"sleep {action} seconds\") sleep_time = float(action[0]) sleep(sleep_time) else: raise RuntimeError(f\"value[{action}] incorrect\") def", "text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W)", "= (index == 0) stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value,", "创建下拉框 if self.row != 0: self.row += 1 # 创建单选框 index = 0", "出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0 text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column,", "********** text_name, show_name = DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE:", "f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) 
self.buttons[text_name][\"state\"] = NORMAL", "{signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action)", "if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") # 按钮框配置 self.__buttons = config[buttons] if", "self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self, button_type: tuple): open_text_name = OPEN_DEVICE[0] close_text_name = CLOSE_DEVICE[0]", "index = 0 for key, value in self.__buttons.items(): function_name = key text_name =", "self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons", "# @Author: lizhe # @Created: 2021/12/15 - 21:24 # -------------------------------------------------------- import copy from", "in msg_id or \"X\" in msg_id: # 把16进制转换成10进制 message_id = int(msg_id, 16) else:", "if config[comboxs] else dict() logger.debug(f\"comboxs = {self.__comboxs}\") # 输入框按钮配置 self.__entries = config[entries] if", "= 1, filter_nodes: Optional[List[str]] = None, can_fd: bool = False, excel_type: ExcelEnum =", "= Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name]", "Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int = 1,", "= EXACT_SEARCH[0] text_name, show_name = button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] =", "\\ HORIZONTAL, E , PhotoImage, LEFT from tkinter.ttk import Combobox, Notebook, Separator from", "W, columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param =", "show_name = button_type self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\",", "in values: msg_id = msg_id.strip() # 处理16进制 if \"x\" in msg_id or \"X\"", "show_message = \"成功\" if result else \"失败\" exact_message = \"精确\" if expect_value else", "设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度 self.__entrie_width = 10 # 输入框支持的事件列表 self.support_event_keys =", "= filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons =", "show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self,", "seconds\") sleep_time = float(action[0]) sleep(sleep_time) else: raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键,", "channel_index: int = 1, filter_nodes: Optional[List[str]] = None, can_fd: bool = False, excel_type:", "NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif button_type == OPEN_DEVICE:", "SIGNAL_NAME[0] check_signal_name_text_name = CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0]", "int(signal_value_text) # 获取次数 search_count_text = 
self.entries[search_count_text_name].get() if search_count_text != \"\": search_count = int(search_count_text)", "signal_name, signal_value, count, expect_value = check_msgs try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack,", "result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\")", "= CLEAR_STACK # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button", "{self.column}, index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row", "1 self.entries[text_name] = Entry(self, width=8) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column", "EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import ExcelEnum class TabFrame(Frame): def __init__(self, master,", "function_name = key text_name = value[TEXT] if index == 0: self.column = 0", "if len(self.__thread_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", "import Combobox, Notebook, Separator from typing import List, Dict, Any, Union, Optional from", "0 self.column = 0 # 布局显示 self.pack() # todo 64*64 3 3比较合适 #", "Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL", "anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column}, index =", "orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __entry_event(self, event,", "if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", "self.__filter_nodes = filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict() tab_configs[COMMON] =", "等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column += 2 text_name, show_name =", "text_name): self.__entry_event(x, y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "__thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"] = DISABLED param = self.__buttons[function_name] text_name = param[TEXT] logger.debug(f\"press", "self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column", "search_count_text = self.entries[search_count_text_name].get() if search_count_text != \"\": search_count = int(search_count_text) else: search_count =", "(C), 2016-2021, lizhe, All rights reserved # -------------------------------------------------------- # @Name: gui.py.py # @Author:", "button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_MESSAGE: # 获取signal", "else dict() 
logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs = config[comboxs] if config[comboxs] else", "ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" #", "VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL,", "# 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column += 2 text_name, show_name", "check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE,", "\"\"\" if self.row != 0: self.row += 1 index = 0 for key,", "or \"X\" in msg_id: # 把16进制转换成10进制 message_id = int(msg_id, 16) else: message_id =", "import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK,", "= {index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name]", "布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\") def __special_button_event(self, button_type:", "show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) #", "search_count = int(search_count_text) else: search_count = None # 获取是否精确查找 index = self.comboxs[exact_search_text_name].current() #", "width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建输入框 self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row},", "float(action[0]) sleep(sleep_time) else: raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\"", "data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int = 1, filter_nodes: Optional[List[str]] = None,", "button logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W)", "= combox_param[TEXT] actual_values = list(values.keys()) # 当前选中的是第几个 combox_index = self.comboxs[function_name].current() select_name = actual_values[combox_index]", "NORMAL class Gui(object): def __init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None]", "# 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED else: messagebox.showerror(title=\"失败\", message=f\"{signal_name} is not received\")", "signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text != \"\":", "能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1] combox_param = self.__comboxs[function_name] # 字典中定义的值列表 values =", "e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] 
= NORMAL class Gui(object): def __init__(self,", "Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int = 1, filter_nodes: Optional[List[str]] = None, can_fd:", "= self.__comboxs[function_name] # 字典中定义的值列表 values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values = list(values.keys())", "== COMMON: common_panel = True else: common_panel = False tab = TabFrame(self.tk, can_service=self.can_service,", "1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name],", "None] = None, baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA,", "self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column,", "else \"失败\" exact_message = \"精确\" if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\", "+= 1 self.entries[text_name] = Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x,", "config[entries] if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") # 按钮框配置 self.__buttons = config[buttons]", "= self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value", "创建输入框 self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column}, index =", "self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\") def", "self.__max_line_count == 0: self.row += 1 self.column = 0 else: self.column += 1", "W, columnspan=self.__max_line_count) self.row += 1 # ********** 创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分", "BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x),", "buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT,", "self.column += 2 text_name, show_name = SIGNAL_VALUES Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column +=", "not in self.thread_task: task = self.thread_pool.submit(self.__thread_method, text_name, actions) self.thread_task[function_name] = task else: if", "= {self.__comboxs}\") # 输入框按钮配置 self.__entries = config[entries] if config[entries] else dict() logger.debug(f\"entries =", "Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column = {self.column}, index =", "False tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1,", "DISABLED param = self.__buttons[function_name] text_name = 
param[TEXT] logger.debug(f\"press {text_name} button\") actions = param[ACTIONS]", "self.column += 5 text_name, show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name,", "# 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self,", "= 500, max_line_count: int = 8): \"\"\" :param excel_file: Excel文件路径 (必填项) :param dbc:", "actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions: List): for action in actions:", "excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化", "== 0: self.row += 1 self.column = 0 else: self.column += 1 #", "# 创建下拉框 if self.row != 0: self.row += 1 index = 0 for", "self.comboxs[function_name].current() select_name = actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\"", "0: self.entries[signal_values_text_name][\"state\"] = NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"]", "logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\"", "# 闪烁单选框值 self.thread_button_bool_vars = dict() # 按钮框对象字典 self.buttons = dict() # 单选框对象字典 self.check_buttons", "CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_MESSAGE: # 获取signal name signal_name", "= Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column +=", "= 0 for key, value in self.__check_buttons.items(): function_name = key text_name = value[TEXT]", "# 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL self.column += 1 # **********", "column = {self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row", "Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED", "= self.entries[function_name].get() params = self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions", "self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) == 1:", "OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO,", "= self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for", "== 0: self.column = 0 elif index % self.__max_line_count == 0: self.row +=", "Gui(object): def 
__init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None,", "= Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5) self.column", "3) # 创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() #", "logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions: List): for action in actions: if", "设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name =", "name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text !=", "width=40, state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5) self.column += 5", "DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\ HORIZONTAL, E , PhotoImage,", "!= 0: self.row += 1 index = 0 for key, value in self.__comboxs.items():", "OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row !=", "column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 self.__create_message_signal_check() def __create_message_check(self): \"\"\"", "{self.__buttons}\") # 接收按钮框配置 self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\")", "创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)", "= combox_param[VALUES] text_name = combox_param[TEXT] actual_values = list(values.keys()) # 当前选中的是第几个 combox_index = self.comboxs[function_name].current()", "received\") self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框 if self.row", "column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return: \"\"\"", "0) stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search) show_message", "self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__thread_buttons) != 0:", "in self.__comboxs.items(): function_name = key text_name = value[TEXT] if index == 0: self.column", "else: message_id = int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id = {message_id}\") try: self.can_service.stop_transmit(message_id) except RuntimeError as", "filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0]) self.tk.protocol('WM_DELETE_WINDOW',", "{self.row}, column = {self.column}, index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row, column=self.column + 1,", "self.check_buttons[function_name].grid(row=self.row, 
column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__check_buttons) != 0:", "dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else dict()", "1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W,", "布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL self.column += 1 # ********** 创建关闭设备按钮", "# 选中第一个则表示是True exact_search = (index == 0) stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack,", "{self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1", "combox_index = self.comboxs[function_name].current() select_name = actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def", "import ConfigReader from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants", "Separator from typing import List, Dict, Any, Union, Optional from automotive.logger.logger import logger", "off_actions = values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def", "# ********** 创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self,", "= self.thread_pool.submit(self.__thread_method, text_name, actions) self.thread_task[function_name] = task else: if function_name in self.thread_task: self.thread_task.pop(function_name)", "= NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif button_type ==", "self.__combox_event(x, y)) logger.debug(f\"row = {self.row}, column = {self.column}\") self.column += 1 index +=", "button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\")", "= config[receive_buttons] if config[receive_buttons] else dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count =", "= dict() tab_configs[COMMON] = {check_buttons: {}, thread_buttons: {}, comboxs: {}, entries: {}, buttons:", "# 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict() tab_configs[COMMON] = {check_buttons: {}, thread_buttons:", "= self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value = int(signal_value_text) # 获取次数 search_count_text =", "= NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED", "\"\"\" # 创建事件单选框 if self.row != 0: self.row += 1 index = 0", "create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row != 0: self.row += 1", "1 self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = Button(self,", "{entries}\") def __special_button_event(self, button_type: tuple): text_name, show_name = button_type 
self.buttons[text_name][\"state\"] = DISABLED try:", "sticky=E + W, columnspan=self.__max_line_count) self.row += 1 # ********** 创建信号检查部分 ********** self.__create_message_check() #", "self.row += 1 self.__create_message_signal_check() def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查", "!= 0: self.row += 1 index = 0 for key, value in self.__receive_buttons.items():", "# ********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E +", "SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import ExcelEnum class TabFrame(Frame): def __init__(self,", "column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row += 1", "+= 1 if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "function_name = params[1] if function_name == message_lost: value = self.entries[function_name].get() if value !=", "config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") # 按钮框配置 self.__buttons = config[buttons] if config[buttons]", "= SIGNAL_NAME[0] check_signal_name_text_name = CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name =", "self.column += 1 text_name, show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column +=", "sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断", "self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row,", "values = value.split(\",\") else: # 0x164 values = [value] for msg_id in values:", "== CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_MESSAGE: # 获取signal name", "try: self.buttons[function_name][\"state\"] = DISABLED param = self.__buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\")", "text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column", "1 def __thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"] = DISABLED param = self.__buttons[function_name] text_name =", "# 创建bool对象接收值 self.check_button_bool_vars[function_name] = BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True,", "1 self.row += 1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "self.thread_task[function_name] = task else: if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions):", "Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建输入框 self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row", "# 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) 
self.column += 1 # ********** 创建一个发送默认消息的按钮 button **********", "if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", "= self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count, exact=exact_search) show_message = \"成功\"", "time import sleep from tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton,", "= dict() # 单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典 self.thread_buttons = dict() #", "0: self.row += 1 index = 0 for key, value in self.__receive_buttons.items(): function_name", "= int(search_count_text) else: search_count = None # 获取是否精确查找 index = self.comboxs[exact_search_text_name].current() # 选中第一个则表示是True", "logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row != 0: self.row", "Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column = {self.column}, index", "Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self,", "value[TEXT] if index == 0: self.column = 0 elif index % self.__max_line_count ==", "self.column += 1 index += 1 self.row += 1 if len(self.__entries) != 0:", "SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name = button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes)", "0 for key, value in self.__entries.items(): function_name = key text_name = value[TEXT] if", "sticky=W, columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUES Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "for event_key in self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name): self.__entry_event(x, y)) self.column +=", "BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row,", "check_msgs try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value)", "signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name,", "sticky=W) self.column += 1 # ********** 创建一个发送默认消息的按钮 button ********** text_name, show_name = DEFAULT_MESSAGE", "= Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = SEARCH_COUNT Label(self,", "in new_actions: if len(action) == 2: msg_id, signals = action for name, value", "key, value in self.__receive_buttons.items(): function_name = key text_name = value[TEXT] if index ==", "= self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0: self.entries[signal_values_text_name][\"state\"] = NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0,", "TEXT, ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT,", 
"index = 0 for key, value in self.__entries.items(): function_name = key text_name =", "W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\ HORIZONTAL, E , PhotoImage, LEFT", "# 创建输入框 if self.row != 0: self.row += 1 index = 0 for", "= Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column =", "self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons]", "CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row,", "text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value", "获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=10) self.entries[text_name].grid(row=self.row,", "__thread_check_button_event(self, function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\")", "def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框 if self.row != 0: self.row +=", "self.row != 0: self.row += 1 # 创建单选框 index = 0 for key,", "logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name", "CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data()", "index % self.__max_line_count == 0: self.row += 1 self.column = 0 else: self.column", "= 8): \"\"\" :param excel_file: Excel文件路径 (必填项) :param dbc: 项目dbc文件路径 (必填项) :param can_box_device:(选填)", "1 # ********** 创建一个总线丢失的按钮 button ********** text_name, show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name]", "expect_value = check_msgs try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value,", "button logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row,", "else: raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row", "sticky=W) index += 1 self.row += 1 if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row,", "1) elif function_name == BUS_LOST: logger.info(\"can bus lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else:", "NORMAL logger.debug(f\"entries are {entries}\") def __special_button_event(self, button_type: tuple): text_name, show_name = button_type self.buttons[text_name][\"state\"]", "!= 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1", "0 for key, value in self.__check_buttons.items(): function_name = key text_name = value[TEXT] if", "+= 1 if len(self.__thread_buttons) != 0: Separator(self, 
orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "= Notebook(self.tk) # tab选项框对象字典 self.tabs = [] for key, value in tab_configs.items(): logger.info(f\"handle", "column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) # 等同于signal_value = Entry", "try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message", "= {self.column}, index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1", "Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) self.entries[text_name].grid(row=self.row, column=self.column,", "function_name): self.__combox_event(x, y)) logger.debug(f\"row = {self.row}, column = {self.column}\") self.column += 1 index", "= SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8)", "{self.column}, index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定下拉框事件", "获取次数 search_count_text = self.entries[search_count_text_name].get() if search_count_text != \"\": search_count = int(search_count_text) else: search_count", "总线丢失、丢失部分信号等按键 \"\"\" # ********** 创建打开设备按钮 check_button ********** text_name, show_name = OPEN_DEVICE # 创建Button对象", "self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for action", "else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x),", "1 self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name}", "后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1] combox_param = self.__comboxs[function_name] # 字典中定义的值列表 values = combox_param[VALUES]", "Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] =", "= self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") check_msgs = param[CHECK_MSGS] msg_id, signal_name,", "if value is None: logger.debug(f\"change {name} value to {entry_value}\") signals[name] = float(entry_value) self.__send_actions(new_actions)", ") self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\")", "0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def", "text_name, show_name = DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x))", "= self.entries[function_name].get() if value != \"\": # 0x152,0x153, 0x154 value.replace(\",\", \",\") if \",\"", "e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self, button_type: 
# --------------------------------------------------------
# Copyright (C), 2016-2021, lizhe, All rights reserved
# --------------------------------------------------------
# @Name:        gui.py.py
# @Author:      lizhe
# @Created:     2021/12/15 - 21:24
# --------------------------------------------------------
import copy
from time import sleep
from tkinter import Frame, Button, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \
    HORIZONTAL, E, W, NORMAL, DISABLED, PhotoImage, LEFT
from tkinter.ttk import Combobox, Notebook, Separator
from typing import List, Dict, Any, Union, Optional

from automotive.logger.logger import logger
from automotive.core.can.can_service import CANService
from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum
from .reader import ConfigReader
from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons
from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \
    MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \
    SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME
from ...utils.common.enums import ExcelEnum


class TabFrame(Frame):

    def __init__(self, master, can_service: CANService, config: Dict[str, Any], filter_nodes: List[str],
                 common_panel: bool = False, max_line_count: int = None):
        super().__init__(master)
        self.can_service = can_service
        self.thread_pool = can_service.can_bus.thread_pool
        self.__filter_nodes = filter_nodes
        # 单选框按钮配置
        self.__check_buttons = config[check_buttons] if config[check_buttons] else dict()
        logger.debug(f"check_buttons = {self.__check_buttons}")
        # 闪烁单选框按钮配置
        self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else dict()
        logger.debug(f"thread_buttons = {self.__thread_buttons}")
        # 下拉框按钮配置
        self.__comboxs = config[comboxs] if config[comboxs] else dict()
        logger.debug(f"comboxs = {self.__comboxs}")
        # 输入框按钮配置
        self.__entries = config[entries] if config[entries] else dict()
        logger.debug(f"entries = {self.__entries}")
        # 按钮框配置
        self.__buttons = config[buttons] if config[buttons] else dict()
        logger.debug(f"buttons = {self.__buttons}")
        # 接收按钮框配置
        self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict()
        logger.debug(f"receive_buttons = {self.__receive_buttons}")
        # 每行能够容纳的数量
        self.__max_line_count = max_line_count  # 36
        # 双行能够容纳的数量
        self.__max_double_line_count = int(self.__max_line_count / 2)
        # 设置标签(label)默认宽度
        self.__label_width = 25
        # 设置下拉框(comboxs)默认宽度
        self.__comboxs_width = 20
        # 设置单选按钮(checkBut)默认宽度
        self.__checkBut_width = 25
        # 设置多线程按钮框(thread_buttons)默认宽度
        self.__thread_buttons_width = 20
        # 设置按钮(button)默认宽度
        self.__buttons_width = 24
        # 设置输入框(entrie)默认宽度
        self.__entrie_width = 10
        # 输入框支持的事件列表
        self.support_event_keys = "<Return>",
        # 单选框值
        self.check_button_bool_vars = dict()
        # 闪烁单选框值
        self.thread_button_bool_vars = dict()
        # 按钮框对象字典
        self.buttons = dict()
        # 单选框对象字典
        self.check_buttons = dict()
        # 闪烁单选框对象字典
        self.thread_buttons = dict()
        # 下拉框对象字典
        self.comboxs = dict()
        # 输入框对象字典
        self.entries = dict()
        # 闪烁事件Task
        self.thread_task = dict()
        # 总线丢失按钮
        # 开始的行列
        self.row = 0
        self.column = 0
        # 布局显示
        self.pack()
        # todo 64*64 3 3比较合适
        # self.open_image = PhotoImage(file=rf"D:\Download\Chrome\打开 (1).png").subsample(3, 3)
        # 创建公共按钮
        if common_panel:
            self.create_common_widget()
        # 创建单选按钮
        self.create_check_buttons()
        # 创建下拉按钮
        self.create_comboxs()
        # 创建输入框
        self.create_entries()
        # 创建事件单选按钮
        self.create_thread_buttons()
        # 创建按钮框(多线程)
        self.create_buttons()
        # 创建接收检查按钮
        self.create_receive_buttons()

    def create_common_widget(self):
        """
        创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息),
        总线丢失、丢失部分信号等按键
        """
        # ********** 创建打开设备按钮 check_button **********
        text_name, show_name = OPEN_DEVICE
        # 创建Button对象
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=OPEN_DEVICE: self.__special_button_event(x))
        # 布局button
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.buttons[text_name]["state"] = NORMAL
        self.column += 1
        # ********** 创建关闭设备按钮 **********
        text_name, show_name = CLOSE_DEVICE
        # 创建Button对象
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=CLOSE_DEVICE: self.__special_button_event(x))
        # 布局button
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.buttons[text_name]["state"] = DISABLED
        self.column += 1
        # ********** 创建清除接收到的CAN信号按钮 **********
        text_name, show_name = CLEAR_STACK
        # 创建Button对象
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=CLEAR_STACK: self.__special_button_event(x))
        # 布局button
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        # ********** 创建一个发送默认消息的按钮 button **********
        text_name, show_name = DEFAULT_MESSAGE
        # 创建Button对象
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x))
        # 布局button
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        # ********** 创建一个总线丢失的按钮 button **********
        text_name, show_name = BUS_LOST
        # 创建CheckButton对象并放到check_buttons中方便调用
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=BUS_LOST: self.__special_button_event(x))
        # 布局checkbutton
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        # ********** 创建一个信号丢失的输入框 entry **********
        text_name, show_name = MESSAGE_LOST
        # 获取输入框的名称
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        self.entries[text_name] = Entry(self, width=10)
        self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)
        self.entries[text_name].bind(self.support_event_keys[0],
                                     lambda x, y=("", text_name): self.__entry_event(x, y))
        self.row += 1
        Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                columnspan=self.__max_line_count)
        self.row += 1
        # ********** 创建信号检查部分 **********
        self.__create_message_check()
        # ********** 创建检测信号是否之前发送值部分 *******
        self.row += 1
        Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                columnspan=self.__max_line_count)
        self.row += 1
        self.__create_message_signal_check()

    def __create_message_check(self):
        """
        创建信号检查部分
        帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查
        """
        self.column = 0
        text_name, show_name = SIGNAL_NAME
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        self.entries[text_name] = Entry(self, width=20)  # 等同于signal_name = Entry
        self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)
        self.column += 2
        text_name, show_name = SIGNAL_VALUE
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        self.entries[text_name] = Entry(self, width=8)  # 等同于signal_value = Entry
        self.entries[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        text_name, show_name = SEARCH_COUNT
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        self.entries[text_name] = Entry(self, width=8)
        self.entries[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        text_name, show_name = EXACT_SEARCH
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        # 创建下拉框
        self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state="readonly", width=5)
        # 设置下拉框初始值为第一个值
        self.comboxs[text_name].current(0)
        # 布局下拉框
        self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        text_name, show_name = CHECK_MESSAGE
        # 创建Button对象
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=CHECK_MESSAGE: self.__special_button_event(x))
        # 布局button
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.buttons[text_name]["state"] = NORMAL

    def __create_message_signal_check(self):
        """
        创建信号之前发送过那些值检测
        帧ID,信号名称 精确查找的等选择
        :return:
        """
        self.column = 0
        text_name, show_name = CHECK_SIGNAL_NAME
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        self.entries[text_name] = Entry(self, width=20)  # 等同于signal_name = Entry
        self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)
        self.column += 2
        text_name, show_name = SIGNAL_VALUES
        Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
        self.column += 1
        self.entries[text_name] = Entry(self, width=40, state=DISABLED)  # 等同于signal_value = Entry
        self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5)
        self.column += 5
        text_name, show_name = CHECK_SIGNAL
        # 创建Button对象
        self.buttons[text_name] = Button(self, text=show_name,
                                         command=lambda x=CHECK_SIGNAL: self.__special_button_event(x))
        # 布局button
        self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
        self.buttons[text_name]["state"] = NORMAL
        logger.debug(f"entries are {entries}")

    def __special_button_event(self, button_type: tuple):
        text_name, show_name = button_type
        self.buttons[text_name]["state"] = DISABLED
        try:
            self.__special_actions(button_type)
        except RuntimeError as e:
            messagebox.showerror("出错了", f"【{e}】")
            logger.error(e)
            self.buttons[text_name]["state"] = NORMAL

    def __special_actions(self, button_type: tuple):
        open_text_name = OPEN_DEVICE[0]
        close_text_name = CLOSE_DEVICE[0]
        signal_name_text_name = SIGNAL_NAME[0]
        check_signal_name_text_name = CHECK_SIGNAL_NAME[0]
        signal_value_text_name = SIGNAL_VALUE[0]
        signal_values_text_name = SIGNAL_VALUES[0]
        search_count_text_name = SEARCH_COUNT[0]
        exact_search_text_name = EXACT_SEARCH[0]
        text_name, show_name = button_type
        if button_type == DEFAULT_MESSAGE:
            self.can_service.send_default_messages(filter_sender=self.__filter_nodes)
            self.buttons[text_name]["state"] = NORMAL
        elif button_type == BUS_LOST:
            self.can_service.stop_transmit()
            self.buttons[text_name]["state"] = NORMAL
        elif button_type == OPEN_DEVICE:
            self.can_service.open_can()
            self.buttons[open_text_name]["state"] = DISABLED
            self.buttons[close_text_name]["state"] = NORMAL
        elif button_type == CLOSE_DEVICE:
            self.can_service.close_can()
            self.buttons[open_text_name]["state"] = NORMAL
            self.buttons[close_text_name]["state"] = DISABLED
        elif button_type == CLEAR_STACK:
            self.can_service.clear_stack_data()
            self.buttons[text_name]["state"] = NORMAL
        elif button_type == CHECK_MESSAGE:
            # 获取signal name
            signal_name = self.entries[signal_name_text_name].get().strip()
            # 获取signal value
            signal_value_text = self.entries[signal_value_text_name].get()
            if signal_value_text != "":
                signal_value = int(signal_value_text)
                # 获取次数
                search_count_text = self.entries[search_count_text_name].get()
                if search_count_text != "":
                    search_count = int(search_count_text)
                else:
                    search_count = None
                # 获取是否精确查找
                index = self.comboxs[exact_search_text_name].current()
                # 选中第一个则表示是True
                exact_search = (index == 0)
                stack = self.can_service.get_stack()
                result = self.can_service.check_signal_value(stack=stack, signal_name=signal_name,
                                                             expect_value=signal_value, count=search_count,
                                                             exact=exact_search)
                show_message = "成功" if result else "失败"
                exact_message = "精确" if exact_search else "不精确"
                message = f"检查信号【{signal_name}】值为【{signal_value}】收到次数" \
                          f"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】"
                if result:
                    messagebox.showinfo(title=show_message, message=message)
                else:
                    messagebox.showerror(title=show_message, message=message)
                self.buttons[text_name]["state"] = NORMAL
            else:
                messagebox.showerror(title="失败", message="请填写需要查询的信号值")
                self.buttons[text_name]["state"] = NORMAL
        elif button_type == CHECK_SIGNAL:
            # 获取signal name
            signal_name = self.entries[check_signal_name_text_name].get().strip()
            # 检测信号值是否已经发送过,并返回检测到的信号值 result
            stack = self.can_service.get_stack()
            result = self.can_service.get_receive_signal_values(stack, signal_name)
            if len(result) > 0:
                self.entries[signal_values_text_name]["state"] = NORMAL
                # 将之前的值先清空
                self.entries[signal_values_text_name].delete(0, "end")
                # 将返回的值插入到输入框中
                self.entries[signal_values_text_name].insert(0, result)
                self.entries[signal_values_text_name]["state"] = DISABLED
            else:
                messagebox.showerror(title="失败", message=f"{signal_name} is not received")
            self.buttons[text_name]["state"] = NORMAL

    def create_check_buttons(self):
        """
        创建选中框,适用于单选发送消息的情况
        """
        # 创建下拉框
        if self.row != 0:
            self.row += 1
        # 创建单选框
        index = 0
        for key, value in self.__check_buttons.items():
            function_name = key
            text_name = value[TEXT]
            if index == 0:
                self.column = 0
            elif index % self.__max_line_count == 0:
                self.row += 1
                self.column = 0
            else:
                self.column += 1
            # 创建bool对象接收值
            self.check_button_bool_vars[function_name] = BooleanVar()
            # 创建CheckButton对象并放到check_buttons中方便调用
            button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name],
                                 onvalue=True, offvalue=False,
                                 command=lambda x=function_name: self.__check_button_event(x),
                                 width=self.__checkBut_width, anchor="w", wraplength=150, justify="left")
            self.check_buttons[function_name] = button
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            # 布局checkbutton
            self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
            index += 1
        self.row += 1
        if len(self.__check_buttons) != 0:
            Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                    columnspan=self.__max_line_count)
            self.row += 1

    def __check_button_event(self, function_name):
        values = self.__check_buttons[function_name]
        text_name = values[TEXT]
        on_actions = values[ON]
        off_actions = values[OFF]
        if self.check_button_bool_vars[function_name].get():
            logger.debug(f"{text_name} ON")
            self.__send_actions(on_actions)
        else:
            logger.debug(f"{text_name} OFF")
            self.__send_actions(off_actions)

    def create_comboxs(self):
        """
        创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框
        """
        # 创建下拉框
        if self.row != 0:
            self.row += 1
        index = 0
        for key, value in self.__comboxs.items():
            function_name = key
            text_name = value[TEXT]
            if index == 0:
                self.column = 0
            else:
                self.column += 1
            # 获取下拉框的名称
            values = list(value[VALUES].keys())
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            # 创建Label框
            Label(self, text=text_name, width=self.__label_width, anchor="w", wraplength=180,
                  justify="left").grid(row=self.row, column=self.column, sticky=W)
            # 创建下拉框
            self.comboxs[function_name] = Combobox(self, values=values, state="readonly", width=self.__comboxs_width)
            # 设置下拉框初始值为第一个值
            self.comboxs[function_name].current(0)
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            # 布局下拉框
            self.comboxs[function_name].grid(row=self.row, column=self.column + 1, sticky=W)
            # 绑定下拉框事件
            self.comboxs[function_name].bind("<<ComboboxSelected>>",
                                             lambda x, y=("", function_name): self.__combox_event(x, y))
            logger.debug(f"row = {self.row}, column = {self.column}")
            self.column += 1
            index += 1
        self.row += 1
        if len(self.__comboxs) != 0:
            Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                    columnspan=self.__max_line_count)
            self.row += 1

    def __combox_event(self, event, function_name):
        """
        能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送
        """
        function_name = function_name[1]
        combox_param = self.__comboxs[function_name]
        # 字典中定义的值列表
        values = combox_param[VALUES]
        text_name = combox_param[TEXT]
        actual_values = list(values.keys())
        # 当前选中的是第几个
        combox_index = self.comboxs[function_name].current()
        select_name = actual_values[combox_index]
        actions = values[select_name]
        logger.debug(f"设置{text_name}为{select_name}")
        self.__send_actions(actions)
        logger.trace(event)

    def create_entries(self):
        if self.row != 0:
            self.row += 1
        index = 0
        for key, value in self.__entries.items():
            function_name = key
            text_name = value[TEXT]
            if index == 0:
                self.column = 0
            else:
                self.column += 1
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            # 获取输入框的名称
            Label(self, text=text_name, width=self.__label_width, anchor="w", wraplength=180,
                  justify="left").grid(row=self.row, column=self.column, sticky=W)
            # 创建输入框
            self.entries[function_name] = Entry(self, width=self.__entrie_width)
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W)
            # 绑定事件
            for event_key in self.support_event_keys:
                self.entries[function_name].bind(event_key,
                                                 lambda x, y=("", function_name): self.__entry_event(x, y))
            self.column += 1
            index += 1
        self.row += 1
        if len(self.__entries) != 0:
            Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                    columnspan=self.__max_line_count)
            self.row += 1

    def __entry_event(self, event, params):
        message_lost = MESSAGE_LOST[0]
        logger.trace(event)
        function_name = params[1]
        if function_name == message_lost:
            value = self.entries[function_name].get()
            if value != "":
                # 0x152,0x153, 0x154
                value.replace(",", ",")
                if "," in value:
                    values = value.split(",")
                else:
                    # 0x164
                    values = [value]
                for msg_id in values:
                    msg_id = msg_id.strip()
                    # 处理16进制
                    if "x" in msg_id:
                        # 把16进制转换成10进制
                        message_id = int(msg_id, 16)
                    else:
                        message_id = int(f"0x{msg_id}", 16)
                    logger.debug(f"message_id = {message_id}")
                    try:
                        self.can_service.stop_transmit(message_id)
                    except RuntimeError as e:
                        logger.error(e)
                        messagebox.showerror("出错了", f"【{e}】")
        else:
            entry_value = self.entries[function_name].get()
            params = self.__entries[function_name]
            actions = params[ACTIONS]
            text_name = params[TEXT]
            logger.debug(f"设置{text_name}值为{entry_value}")
            new_actions = copy.deepcopy(actions)
            for action in new_actions:
                if len(action) == 2:
                    msg_id, signals = action
                    for name, value in signals.items():
                        if value is None:
                            logger.debug(f"change {name} value to {entry_value}")
                            signals[name] = float(entry_value)
            self.__send_actions(new_actions)

    def create_thread_buttons(self):
        """
        创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别
        """
        # 创建事件单选框
        if self.row != 0:
            self.row += 1
        index = 0
        for key, value in self.__thread_buttons.items():
            function_name = key
            text_name = value[TEXT]
            if index == 0:
                self.column = 0
            elif index % self.__max_line_count == 0:
                self.row += 1
                self.column = 0
            else:
                self.column += 1
            # 创建bool对象接收值
            self.thread_button_bool_vars[text_name] = BooleanVar()
            # 创建CheckButton对象并放到thread_buttons中方便调用
            button = Checkbutton(self, text=f"【{text_name}】", variable=self.thread_button_bool_vars[text_name],
                                 onvalue=True, offvalue=False,
                                 command=lambda x=function_name: self.__thread_check_button_event(x),
                                 width=self.__thread_buttons_width, anchor="w", wraplength=180, justify="left")
            self.thread_buttons[function_name] = button
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
            index += 1
        self.row += 1
        if len(self.__thread_buttons) != 0:
            Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                    columnspan=self.__max_line_count)
            self.row += 1

    def __thread_check_button_event(self, function_name):
        if function_name == DEFAULT_MESSAGE:
            logger.info(f"send default messages filter nodes {self.__filter_nodes}")
            if self.thread_button_bool_vars[DEFAULT_MESSAGE].get():
                self.thread_pool.submit(self.__special_actions, 1)
        elif function_name == BUS_LOST:
            logger.info("can bus lost")
            if self.thread_button_bool_vars[BUS_LOST].get():
                self.thread_pool.submit(self.__special_actions, 2)
        else:
            param = self.__thread_buttons[function_name]
            text_name = param[TEXT]
            actions = param[ACTIONS]
            if self.thread_button_bool_vars[text_name].get():
                if function_name not in self.thread_task:
                    task = self.thread_pool.submit(self.__thread_method, text_name, actions)
                    self.thread_task[function_name] = task
            else:
                if function_name in self.thread_task:
                    self.thread_task.pop(function_name)

    def __thread_method(self, name, actions):
        logger.debug(actions)
        while self.thread_button_bool_vars[name].get():
            self.__send_actions(actions)

    def __send_actions(self, actions: List):
        for action in actions:
            if len(action) == 2:
                msg_id, signals = action
                logger.info(f"{hex(msg_id)} = {signals}")
                try:
                    self.can_service.send_can_signal_message(msg_id, signals)
                except RuntimeError as e:
                    logger.error(e)
                    messagebox.showerror("出错了", f"【{e}】")
            elif len(action) == 1:
                logger.debug(f"sleep {action} seconds")
                sleep_time = float(action[0])
                sleep(sleep_time)
            else:
                raise RuntimeError(f"value[{action}] incorrect")

    def create_buttons(self):
        """
        创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待
        """
        if self.row != 0:
            self.row += 1
        index = 0
        for key, value in self.__buttons.items():
            function_name = key
            text_name = value[TEXT]
            if index == 0:
                self.column = 0
            elif index % self.__max_line_count == 0:
                self.row += 1
                self.column = 0
            else:
                self.column += 1
            # 创建CheckButton对象并放到thread_buttons中方便调用
            self.buttons[function_name] = Button(self, text=text_name,
                                                 command=lambda x=function_name: self.__thread_button_event(x),
                                                 width=self.__buttons_width, wraplength=170, justify="left",
                                                 anchor="w")
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
            index += 1
        self.row += 1
        if len(self.__buttons) != 0:
            Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                    columnspan=self.__max_line_count)
            self.row += 1

    def __thread_button_event(self, function_name):
        try:
            self.buttons[function_name]["state"] = DISABLED
            param = self.__buttons[function_name]
            text_name = param[TEXT]
            logger.debug(f"press {text_name} button")
            actions = param[ACTIONS]
            self.thread_pool.submit(self.__send_actions, actions)
        except RuntimeError as e:
            logger.error(e)
            messagebox.showerror("出错了", f"【{e}】")
        finally:
            self.buttons[function_name]["state"] = NORMAL

    def create_receive_buttons(self):
        """
        创建接收检查按钮, 模拟其他ECU接收
        """
        if self.row != 0:
            self.row += 1
        index = 0
        for key, value in self.__receive_buttons.items():
            function_name = key
            text_name = value[TEXT]
            if index == 0:
                self.column = 0
            elif index % self.__max_line_count == 0:
                self.row += 1
                self.column = 0
            else:
                self.column += 1
            # 创建CheckButton对象并放到thread_buttons中方便调用
            logger.debug(f"add button {function_name} in buttons")
            self.buttons[function_name] = Button(self, text=f"【{text_name}】",
                                                 command=lambda x=function_name: self.__receive_button_event(x))
            logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
            self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
            index += 1
        self.row += 1
        if len(self.__receive_buttons) != 0:
            Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
                                                    columnspan=self.__max_line_count)
            self.row += 1

    def __receive_button_event(self, function_name):
        self.buttons[function_name]["state"] = DISABLED
        param = self.__receive_buttons[function_name]
        text_name = param[TEXT]
        logger.debug(f"press {text_name} button")
        check_msgs = param[CHECK_MSGS]
        msg_id, signal_name, signal_value, count, expect_value = check_msgs
        try:
            stack = self.can_service.get_stack()
            result = self.can_service.check_signal_value(stack=stack, signal_name=signal_name,
                                                         expect_value=signal_value, count=count,
                                                         exact=expect_value)
            show_message = "成功" if result else "失败"
            exact_message = "精确" if expect_value else "不精确"
            message = f"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数" \
                      f"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】"
            if result:
                messagebox.showinfo(title=show_message, message=message)
            else:
                messagebox.showerror(title=show_message, message=message)
        except RuntimeError as e:
            logger.error(e)
            messagebox.showerror(title="出错了", message=f"【{e}】")
        finally:
            self.can_service.clear_stack_data()
            self.buttons[function_name]["state"] = NORMAL


class Gui(object):

    def __init__(self, excel_file: str, dbc: str,
                 can_box_device: Union[CanBoxDeviceEnum, str, None] = None,
                 baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH,
                 data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA,
                 channel_index: int = 1,
                 filter_nodes: Optional[List[str]] = None,
                 can_fd: bool = False,
                 excel_type: ExcelEnum = ExcelEnum.OPENPYXL,
                 max_workers: int = 500,
                 max_line_count: int = 8):
        """
        :param excel_file: Excel文件路径 (必填项)
        :param dbc: 项目dbc文件路径 (必填项)
        :param can_box_device:(选填)
        :param filter_nodes:发送默认信号筛选器(默认值)
        :param can_fd:(选填)
        :param excel_type: (选填)
        :param max_workers:默认值就行(选填)
        """
        self.tk = Tk()
        # 初始化 CANService
        self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate,
                                      data_rate=data_rate, channel_index=channel_index,
                                      can_fd=can_fd, max_workers=max_workers)
        # 默认消息发送要过滤的节点
        self.__filter_nodes = filter_nodes
        # 获取按钮
        service = ConfigReader(can_service=self.can_service, type_=excel_type)
        tab_configs = dict()
        tab_configs[COMMON] = {check_buttons: {}, thread_buttons: {}, comboxs: {}, entries: {},
                               buttons: {}, receive_buttons: {}}
        config = service.read_from_file(excel_file)
        tab_configs.update(config)
        self.tab_control = Notebook(self.tk)
        # tab选项框对象字典
        self.tabs = []
        for key, value in tab_configs.items():
            logger.info(f"handle tab {key}")
            if key == COMMON:
                common_panel = True
            else:
                common_panel = False
            tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes,
                           config=value, common_panel=common_panel, max_line_count=max_line_count)
            self.tab_control.add(tab, text=key)
            self.tabs.append(tab)
        self.tab_control.pack(expand=1, fill="both")
        # 第一个tab
        self.tab_control.select(self.tabs[0])
        self.tk.protocol('WM_DELETE_WINDOW', self.exit_root)
        self.tk.mainloop()

    def exit_root(self):
        self.can_service.close_can()
        self.tk.destroy()
tab_configs.update(config) self.tab_control = Notebook(self.tk) # tab选项框对象字典 self.tabs =", "布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个发送默认消息的按钮 button ********** text_name,", "state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5) self.column += 5 text_name,", "= Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column +=", "设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 布局下拉框", "= dict() # 按钮框对象字典 self.buttons = dict() # 单选框对象字典 self.check_buttons = dict() #", "if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name = param[TEXT] actions =", "self.row += 1 index = 0 for key, value in self.__entries.items(): function_name =", "column=self.column, sticky=W) self.column += 1 # ********** 创建一个发送默认消息的按钮 button ********** text_name, show_name =", "输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task = dict() # 总线丢失按钮 = #", "ConfigReader from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants import", "= values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self):", "result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if result", "can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\")", "# 创建接收检查按钮 self.create_receive_buttons() def create_common_widget(self): \"\"\" 创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键 \"\"\" # ********** 创建打开设备按钮", "+= 1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row = {self.row}, column =", "if self.row != 0: self.row += 1 # 创建单选框 index = 0 for", "sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row", "__special_button_event(self, button_type: tuple): text_name, show_name = button_type self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type) except", "text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width)", "self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"]", "self.column = 0 else: self.column += 1 
logger.debug(f\"row = {self.row}, column = {self.column},", "1 self.entries[text_name] = Entry(self, width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)", "value in self.__entries.items(): function_name = key text_name = value[TEXT] if index == 0:", "= DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button", "= {self.row}, column = {self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index +=", "= check_msgs try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count,", "self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column},", "check_msgs = param[CHECK_MSGS] msg_id, signal_name, signal_value, count, expect_value = check_msgs try: stack =", "thread_buttons: {}, comboxs: {}, entries: {}, buttons: {}, receive_buttons: {}} config = service.read_from_file(excel_file)", "max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0]) self.tk.protocol('WM_DELETE_WINDOW', self.exit_root) self.tk.mainloop() def", "\"\": signal_value = int(signal_value_text) # 获取次数 search_count_text = self.entries[search_count_text_name].get() if search_count_text != \"\":", "# 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5) self.column += 5 text_name, show_name", "创建接收检查按钮, 模拟其他ECU接收 \"\"\" if self.row != 0: self.row += 1 index = 0", "actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框", "# 0x152,0x153, 0x154 value.replace(\",\", \",\") if \",\" in value: values = value.split(\",\") else:", "dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict()", "= BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name:", "text_name, show_name = OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x))", "创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons() def create_common_widget(self): \"\"\" 创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键 \"\"\" #", "columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column", "1 index = 0 for key, value in self.__buttons.items(): function_name = key text_name", "index = 0 for key, value in self.__check_buttons.items(): function_name = key text_name =", "= {self.column}, index = {index}\") # 获取输入框的名称 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W)", "onvalue=True, offvalue=False, command=lambda 
x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row =", "param[TEXT] actions = param[ACTIONS] if self.thread_button_bool_vars[text_name].get(): if function_name not in self.thread_task: task =", "key, value in self.__check_buttons.items(): function_name = key text_name = value[TEXT] if index ==", "values: msg_id = msg_id.strip() # 处理16进制 if \"x\" in msg_id or \"X\" in", "import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS,", "logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict() logger.debug(f\"receive_buttons", "EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 # 创建下拉框 self.comboxs[text_name] = Combobox(self,", "= {self.row}, column = {self.column}\") self.column += 1 index += 1 self.row +=", "if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit()", "Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 # **********", "# 单选框值 self.check_button_bool_vars = dict() # 闪烁单选框值 self.thread_button_bool_vars = dict() # 按钮框对象字典 self.buttons", "!= 0: self.row += 1 index = 0 for key, value in self.__buttons.items():", "YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import ExcelEnum class TabFrame(Frame): def __init__(self, master, can_service:", "+ W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name", "创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name} in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x))", "value to {entry_value}\") signals[name] = float(entry_value) self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送", "self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers)", "f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError as", "from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE,", "if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons = config[thread_buttons] if", "Entry, Label, Tk, messagebox, \\ HORIZONTAL, E , PhotoImage, LEFT from tkinter.ttk import", "text_name, show_name = button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif", "def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name = values[TEXT] 
on_actions = values[ON] off_actions", "self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框 if self.row !=", "master, can_service: CANService, config: Dict[str, Any], filter_nodes: List[str], common_panel: bool = False, max_line_count:", "self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons()", "= 0 elif index % self.__max_double_line_count == 0: self.row += 1 self.column =", "width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column}, index", "List[str], common_panel: bool = False, max_line_count: int = None): super().__init__(master) self.can_service = can_service", "event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if function_name == message_lost:", "self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if result else \"失败\"", "lambda x, y=(\"\", function_name): self.__combox_event(x, y)) logger.debug(f\"row = {self.row}, column = {self.column}\") self.column", "当前选中的是第几个 combox_index = self.comboxs[function_name].current() select_name = actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event)", "帧ID,信号名称 精确查找的等选择 :return: \"\"\" self.column = 0 text_name, show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row,", "expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else:", "msg_id = msg_id.strip() # 处理16进制 if \"x\" in msg_id or \"X\" in msg_id:", "self.entries = dict() # 闪烁事件Task self.thread_task = dict() # 总线丢失按钮 = # 开始的行列", "# @Created: 2021/12/15 - 21:24 # -------------------------------------------------------- import copy from time import sleep", "data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮 service =", "= 0 elif index % self.__max_line_count == 0: self.row += 1 self.column =", "+= 1 index = 0 for key, value in self.__buttons.items(): function_name = key", "else: param = self.__thread_buttons[function_name] text_name = param[TEXT] actions = param[ACTIONS] if self.thread_button_bool_vars[text_name].get(): if", "21:24 # -------------------------------------------------------- import copy from time import sleep from tkinter import Frame,", "common_panel = True else: common_panel = False tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value,", "= 0 else: self.column += 1 # 获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row =", "anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值", "function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions,", 
"can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮", "from tkinter.ttk import Combobox, Notebook, Separator from typing import List, Dict, Any, Union,", "from .reader import ConfigReader from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons", "x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个信号丢失的输入框", "self.column = 0 else: self.column += 1 # 创建bool对象接收值 self.check_button_bool_vars[function_name] = BooleanVar() #", "闪烁事件Task self.thread_task = dict() # 总线丢失按钮 = # 开始的行列 self.row = 0 self.column", "text_name = combox_param[TEXT] actual_values = list(values.keys()) # 当前选中的是第几个 combox_index = self.comboxs[function_name].current() select_name =", "0 for key, value in self.__buttons.items(): function_name = key text_name = value[TEXT] if", "x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL self.column += 1", "\"精确\" if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message,", "= {self.column}, index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row, column=self.column + 1, sticky=W) #", "NORMAL elif button_type == OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] = DISABLED self.buttons[close_text_name][\"state\"] = NORMAL elif", "x, y=(\"\", function_name): self.__combox_event(x, y)) logger.debug(f\"row = {self.row}, column = {self.column}\") self.column +=", "else dict() logger.debug(f\"comboxs = {self.__comboxs}\") # 输入框按钮配置 self.__entries = config[entries] if config[entries] else", "self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return: \"\"\" self.column =", "= float(entry_value) self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框", "values[TEXT] on_actions = values[ON] off_actions = values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else:", "task else: if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions) while", "{self.column}, index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件 for event_key", "self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = CHECK_MESSAGE", "SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=20) #", "OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row,", "Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, 
sticky=W) self.column += 1", "max_line_count # 36 # 双行能够容纳的数量 self.__max_double_line_count = int(self.__max_line_count / 2) # 设置标签(label)默认宽度 self.__label_width", "self.__entries.items(): function_name = key text_name = value[TEXT] if index == 0: self.column =", "if index == 0: self.column = 0 elif index % self.__max_line_count == 0:", "event_key in self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name): self.__entry_event(x, y)) self.column += 1", "logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 获取输入框的名称 Label(self, text=text_name,", "\"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return: \"\"\" self.column = 0 text_name, show_name = CHECK_SIGNAL_NAME", "0 else: self.column += 1 # 获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row = {self.row},", "= None, can_fd: bool = False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int =", "text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0]) self.tk.protocol('WM_DELETE_WINDOW', self.exit_root) self.tk.mainloop() def exit_root(self): self.can_service.close_can()", "= \"精确\" if exact_search else \"不精确\" message = f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result:", "BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\ HORIZONTAL, E , PhotoImage, LEFT from", "= task else: if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions)", "button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif button_type == OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"]", "25 # 设置下拉框(comboxs)默认宽度 self.__comboxs_width = 20 # 设置单选按钮(checkBut)默认宽度 self.__checkBut_width = 25 # 设置多线程按钮框(thread_buttons)默认宽度", "= list(values.keys()) # 当前选中的是第几个 combox_index = self.comboxs[function_name].current() select_name = actual_values[combox_index] actions = values[select_name]", "= dict() # 闪烁单选框对象字典 self.thread_buttons = dict() # 下拉框对象字典 self.comboxs = dict() #", "self.row += 1 self.column = 0 else: self.column += 1 logger.debug(f\"row = {self.row},", "= CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button", "elif function_name == BUS_LOST: logger.info(\"can bus lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param", "self.entries[text_name] = Entry(self, width=8) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column +=", "sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column += 1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name, show_name", "= config[thread_buttons] if config[thread_buttons] else dict() logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs =", "1 # 创建bool对象接收值 self.check_button_bool_vars[function_name] = BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name],", "1 if len(self.__thread_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count)", "result else \"失败\" exact_message = \"精确\" if exact_search else \"不精确\" message = 
f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\"", "new_actions: if len(action) == 2: msg_id, signals = action for name, value in", "value != \"\": # 0x152,0x153, 0x154 value.replace(\",\", \",\") if \",\" in value: values", "默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict() tab_configs[COMMON]", "common_panel: bool = False, max_line_count: int = None): super().__init__(master) self.can_service = can_service self.thread_pool", "button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"]", "self.column = 0 text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column +=", "sticky=W) self.column += 1 # ********** 创建一个总线丢失的按钮 button ********** text_name, show_name = BUS_LOST", "= False tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab)", "pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"]", "can_fd: bool = False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int = 500, max_line_count:", "config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons = config[thread_buttons]", "self.row += 1 if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E +", "= NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_SIGNAL: #", "msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if result else \"失败\" exact_message", "self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0: self.entries[signal_values_text_name][\"state\"] = NORMAL #", "messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self, button_type: tuple): open_text_name = OPEN_DEVICE[0]", "can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0])", "+ W, columnspan=self.__max_line_count) self.row += 1 self.__create_message_signal_check() def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称", "self.row += 1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E +", "# 输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task = dict() # 总线丢失按钮 =", "self.column = 0 else: self.column += 1 # 获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row", "finally: self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\" if self.row !=", "messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else: 
messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif button_type", "\"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框 if self.row != 0: self.row += 1 #", "from automotive.core.can.can_service import CANService from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum from .reader import ConfigReader", "Notebook, Separator from typing import List, Dict, Any, Union, Optional from automotive.logger.logger import", "= dict() # 输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task = dict() #", "COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from", "+= 1 index = 0 for key, value in self.__thread_buttons.items(): function_name = key", "value in self.__check_buttons.items(): function_name = key text_name = value[TEXT] if index == 0:", "filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict() tab_configs[COMMON] = {check_buttons: {},", "self.entries[signal_values_text_name][\"state\"] = NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] =", "+= 1 def __combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1]", "= self.comboxs[exact_search_text_name].current() # 选中第一个则表示是True exact_search = (index == 0) stack = self.can_service.get_stack() result", "\"不精确\" message = f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message)", "actions) self.thread_task[function_name] = task else: if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name,", "int = None): super().__init__(master) self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes", "1 self.row += 1 if len(self.__thread_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"] =", "self.row += 1 index = 0 for key, value in self.__comboxs.items(): function_name =", "self.__create_message_signal_check() def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column =", "# 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row},", "None: logger.debug(f\"change {name} value to {entry_value}\") signals[name] = float(entry_value) self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\"", "logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column,", "pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __entry_event(self, event, params): message_lost", "show_name = SIGNAL_NAME Label(self, 
text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self,", "Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] =", "name, value in signals.items(): if value is None: logger.debug(f\"change {name} value to {entry_value}\")", "self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row,", "column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__thread_buttons) != 0: Separator(self,", "{action} seconds\") sleep_time = float(action[0]) sleep(sleep_time) else: raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\"", "# todo 64*64 3 3比较合适 # self.open_image = PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3, 3) # 创建公共按钮", "else dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count = max_line_count # 36 #", "self.column += 1 # 获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row = {self.row}, column =", "columnspan=self.__max_line_count) self.row += 1 def __thread_check_button_event(self, function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send default", "self.row += 1 # ********** 创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row", "f\"【{e}】\") finally: self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\" if self.row", "dict() # 按钮框对象字典 self.buttons = dict() # 单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典", "show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) #", "y)) logger.debug(f\"row = {self.row}, column = {self.column}\") self.column += 1 index += 1", "self.thread_task: task = self.thread_pool.submit(self.__thread_method, text_name, actions) self.thread_task[function_name] = task else: if function_name in", "button ********** text_name, show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda", "logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index", "self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda", "message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data()", "import CANService from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum from .reader import ConfigReader from .reader", "show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self,", "text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, 
column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries", "CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=20) #", "= {self.row}, column = {self.column}, index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W)", "= service.read_from_file(excel_file) tab_configs.update(config) self.tab_control = Notebook(self.tk) # tab选项框对象字典 self.tabs = [] for key,", "1 def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if", "common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮", "self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions: List): for action in actions: if len(action) ==", "int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id = {message_id}\") try: self.can_service.stop_transmit(message_id) except", "= Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.entries[function_name].grid(row=self.row,", "list(value[VALUES].keys()) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 创建Label框 Label(self,", "= config[entries] if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") # 按钮框配置 self.__buttons =", "1 index = 0 for key, value in self.__thread_buttons.items(): function_name = key text_name", "text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 #", "DISABLED self.column += 1 # ********** 创建清除接收到的CAN信号按钮 ********** text_name, show_name = CLEAR_STACK #", "int = 500, max_line_count: int = 8): \"\"\" :param excel_file: Excel文件路径 (必填项) :param", "\"失败\" exact_message = \"精确\" if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\"", "signals.items(): if value is None: logger.debug(f\"change {name} value to {entry_value}\") signals[name] = float(entry_value)", "= param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"]", "Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=20) # 等同于signal_name", "self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value = int(signal_value_text) # 获取次数 search_count_text = self.entries[search_count_text_name].get()", "= 0 for key, value in self.__entries.items(): function_name = key text_name = value[TEXT]", "-*- coding:utf-8 -*- # -------------------------------------------------------- # Copyright (C), 2016-2021, lizhe, All rights reserved", "Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row,", "+= 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1", "= params[TEXT] 
logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for action in new_actions: if len(action) ==", "else dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else", "0: self.row += 1 self.column = 0 else: self.column += 1 logger.debug(f\"row =", "button\") check_msgs = param[CHECK_MSGS] msg_id, signal_name, signal_value, count, expect_value = check_msgs try: stack", "SIGNAL_VALUES Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED)", "创建打开设备按钮 check_button ********** text_name, show_name = OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name,", "\",\" in value: values = value.split(\",\") else: # 0x164 values = [value] for", "self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") check_msgs", "def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row != 0: self.row", "# 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个总线丢失的按钮 button **********", "创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row != 0: self.row += 1 index =", "+= 1 # 创建单选框 index = 0 for key, value in self.__check_buttons.items(): function_name", "Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\ HORIZONTAL, E", "self.column += 1 index += 1 self.row += 1 if len(self.__comboxs) != 0:", "text_name, show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] =", "{self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count = max_line_count # 36 # 双行能够容纳的数量 self.__max_double_line_count = int(self.__max_line_count", "column=self.column, sticky=W) self.column += 1 text_name, show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0]) self.tk.protocol('WM_DELETE_WINDOW', self.exit_root)", "+= 1 text_name, show_name = EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1", "-------------------------------------------------------- import copy from time import sleep from tkinter import Frame, Button, NORMAL,", "\"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row != 0: self.row += 1", "receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT, ON,", "key, value in self.__entries.items(): function_name = key text_name = value[TEXT] if index ==", "# 处理16进制 if \"x\" in msg_id or \"X\" in msg_id: # 把16进制转换成10进制 message_id", "= filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict() tab_configs[COMMON] = {check_buttons:", "+= 1 self.__create_message_signal_check() def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\"", "self.__check_buttons[function_name] text_name = values[TEXT] on_actions = values[ON] off_actions = values[OFF] if 
self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name}", "reserved # -------------------------------------------------------- # @Name: gui.py.py # @Author: lizhe # @Created: 2021/12/15 -", "for msg_id in values: msg_id = msg_id.strip() # 处理16进制 if \"x\" in msg_id", "button_type: tuple): open_text_name = OPEN_DEVICE[0] close_text_name = CLOSE_DEVICE[0] signal_name_text_name = SIGNAL_NAME[0] check_signal_name_text_name =", "logger.debug(f\"comboxs = {self.__comboxs}\") # 输入框按钮配置 self.__entries = config[entries] if config[entries] else dict() logger.debug(f\"entries", "message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class Gui(object): def __init__(self, excel_file: str, dbc:", "信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0 text_name, show_name = SIGNAL_NAME Label(self,", "OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] = DISABLED self.buttons[close_text_name][\"state\"] = NORMAL elif button_type == CLOSE_DEVICE: self.can_service.close_can()", "1, sticky=W) # 绑定事件 for event_key in self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name):", "self.row += 1 self.column = 0 else: self.column += 1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name]", "self.row += 1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name = values[TEXT] on_actions", ":param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service", "button_type self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e)", "0 else: self.column += 1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button", "self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row != 0:", "self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = Button(self, text=text_name,", "column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)", "= key text_name = value[TEXT] if index == 0: self.column = 0 elif", "\\ MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE,", "= True else: common_panel = False tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel,", "0: self.row += 1 self.column = 0 else: self.column += 1 # 获取下拉框的名称", "else: common_panel = False tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab,", "None, baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int", "comboxs, entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\", "self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 
self.__filter_nodes =", "CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import ExcelEnum class TabFrame(Frame): def __init__(self, master, can_service: CANService,", "# 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() #", "as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self, button_type: tuple): open_text_name", "= Entry(self, width=8) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1", "= {self.row}, column = {self.column}, index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W)", "= {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif", "show_name = EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 # 创建下拉框 self.comboxs[text_name]", "W, columnspan=self.__max_line_count) self.row += 1 def __thread_check_button_event(self, function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send", "self.row += 1 self.column = 0 else: self.column += 1 # 获取下拉框的名称 values", "max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs =", "column=self.column, sticky=W) self.column += 1 # 创建下拉框 self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5)", "can_service: CANService, config: Dict[str, Any], filter_nodes: List[str], common_panel: bool = False, max_line_count: int", "{function_name} in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row},", "\"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row != 0: self.row += 1 index", "sticky=W) index += 1 self.row += 1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row,", "1 self.row += 1 if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result =", "\"\"\" :param excel_file: Excel文件路径 (必填项) :param dbc: 项目dbc文件路径 (必填项) :param can_box_device:(选填) :param filter_nodes:发送默认信号筛选器(默认值)", "len(self.__receive_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row +=", "= dict() # 下拉框对象字典 self.comboxs = dict() # 输入框对象字典 self.entries = dict() #", "self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\" if self.row != 0:", "= ExcelEnum.OPENPYXL, max_workers: int = 500, max_line_count: int = 8): \"\"\" :param excel_file:", "1 # ********** 创建一个信号丢失的输入框 entry ********** text_name, show_name = MESSAGE_LOST # 获取输入框的名称 Label(self,", "column=self.column, sticky=W) self.column += 1 # ********** 创建一个信号丢失的输入框 entry ********** text_name, show_name =", "width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column += 2 text_name,", "if signal_value_text != \"\": signal_value = int(signal_value_text) # 
获取次数 search_count_text = self.entries[search_count_text_name].get() if", "if \"x\" in msg_id or \"X\" in msg_id: # 把16进制转换成10进制 message_id = int(msg_id,", "filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name == BUS_LOST: logger.info(\"can bus", "message=message) else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL", "excel_file: Excel文件路径 (必填项) :param dbc: 项目dbc文件路径 (必填项) :param can_box_device:(选填) :param filter_nodes:发送默认信号筛选器(默认值) :param can_fd:(选填)", "x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\")", "MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if function_name == message_lost: value = self.entries[function_name].get() if", "entry_value = self.entries[function_name].get() params = self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\")", "tab_configs[COMMON] = {check_buttons: {}, thread_buttons: {}, comboxs: {}, entries: {}, buttons: {}, receive_buttons:", "param = self.__thread_buttons[function_name] text_name = param[TEXT] actions = param[ACTIONS] if self.thread_button_bool_vars[text_name].get(): if function_name", "signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if result else \"失败\" exact_message =", "y=(\"\", function_name): self.__combox_event(x, y)) logger.debug(f\"row = {self.row}, column = {self.column}\") self.column += 1", "column=self.column, sticky=W, columnspan=5) self.column += 5 text_name, show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name]", "创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0,", "RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def __special_actions(self, button_type: tuple):", "import sleep from tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry,", "+= 1 self.column = 0 else: self.column += 1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name] =", "MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES,", "== 0: self.column = 0 elif index % self.__max_double_line_count == 0: self.row +=", "= {index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件 for event_key in self.support_event_keys:", "\"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1] combox_param = self.__comboxs[function_name] # 字典中定义的值列表 values", "ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int = 500, max_line_count: int = 8): \"\"\" :param", "self.__entry_event(x, y)) self.column += 1 index += 1 self.row += 1 if len(self.__entries)", "x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column =", "else \"不精确\" message = 
f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message,", "self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name} in buttons\") self.buttons[function_name] = Button(self,", "创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)", "sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row += 1 Separator(self,", "incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row != 0: self.row", "# 创建事件单选框 if self.row != 0: self.row += 1 index = 0 for", "def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return: \"\"\" self.column = 0 text_name, show_name", "self.row += 1 index = 0 for key, value in self.__thread_buttons.items(): function_name =", "if function_name == message_lost: value = self.entries[function_name].get() if value != \"\": # 0x152,0x153,", "columnspan=5) self.column += 5 text_name, show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self,", "@Name: gui.py.py # @Author: lizhe # @Created: 2021/12/15 - 21:24 # -------------------------------------------------------- import", "False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int = 500, max_line_count: int = 8):", "= {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs = config[comboxs] if config[comboxs] else dict() logger.debug(f\"comboxs =", "BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__check_button_event(x),", "= {self.column}, index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件 for", "self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个信号丢失的输入框 entry", "Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\ HORIZONTAL,", "= NORMAL self.column += 1 # ********** 创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE", "= 0 else: self.column += 1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用", "dict() # 下拉框对象字典 self.comboxs = dict() # 输入框对象字典 self.entries = dict() # 闪烁事件Task", "CLEAR_STACK # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row,", "= DISABLED param = self.__buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") actions =", "typing import List, Dict, Any, Union, Optional from automotive.logger.logger import logger from automotive.core.can.can_service", "{self.row}, column = {self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1", "+= 1 logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 获取输入框的名称", "text_name, show_name = CHECK_SIGNAL 
# 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x))", "self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_SIGNAL: # 获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip()", "filter_nodes:发送默认信号筛选器(默认值) :param can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk =", "if index == 0: self.column = 0 elif index % self.__max_double_line_count == 0:", "logger.trace(event) function_name = params[1] if function_name == message_lost: value = self.entries[function_name].get() if value", "sticky=W, columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "self.__entry_event(x, y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count)", "创建下拉框 self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row},", "messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] =", "from time import sleep from tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar,", "can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else dict()", "pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] =", "= SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8)", "message_lost: value = self.entries[function_name].get() if value != \"\": # 0x152,0x153, 0x154 value.replace(\",\", \",\")", "= self.__buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions)", "__send_actions(self, actions: List): for action in actions: if len(action) == 2: msg_id, signals", "{self.row}, column = {self.column}, index = {index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row,", "= int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id = {message_id}\") try: self.can_service.stop_transmit(message_id)", "if \",\" in value: values = value.split(\",\") else: # 0x164 values = [value]", "logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs = config[comboxs] if config[comboxs] else dict() logger.debug(f\"comboxs", "= None): super().__init__(master) self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes #", "len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row +=", "text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\")", 
"OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON,", "+= 2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1", "1 self.entries[text_name] = Entry(self, width=8) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name", "5 text_name, show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL:", "Optional from automotive.logger.logger import logger from automotive.core.can.can_service import CANService from automotive.core.can.common.enums import CanBoxDeviceEnum,", "if self.row != 0: self.row += 1 index = 0 for key, value", "if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\")", "width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name,", "self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate,", "0 text_name, show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name]", "self.buttons[text_name] = Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column", "int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int = 1, filter_nodes:", "等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5) self.column += 5 text_name, show_name =", "DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL", "text_name = value[TEXT] if index == 0: self.column = 0 elif index %", "1 # 创建下拉框 self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) #", "self.row != 0: self.row += 1 index = 0 for key, value in", "orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self, function_name):", "+= 1 self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button", "signals = action logger.info(f\"{hex(msg_id)} = {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e:", "text_name, show_name = SIGNAL_VALUES Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] =", "0: self.row += 1 index = 0 for key, value in self.__buttons.items(): function_name", "# 创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True,", "service = 
ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict() tab_configs[COMMON] = {check_buttons: {}, thread_buttons: {}, comboxs:", "\\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import ExcelEnum class", "if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name == BUS_LOST: logger.info(\"can bus lost\") if self.thread_button_bool_vars[BUS_LOST].get():", "column=self.column, sticky=W) self.column += 1 text_name, show_name = EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "= {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__thread_buttons)", "2: msg_id, signals = action for name, value in signals.items(): if value is", "value in signals.items(): if value is None: logger.debug(f\"change {name} value to {entry_value}\") signals[name]", "self.open_image = PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3, 3) # 创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons()", "= 0 for key, value in self.__thread_buttons.items(): function_name = key text_name = value[TEXT]", "{}, receive_buttons: {}} config = service.read_from_file(excel_file) tab_configs.update(config) self.tab_control = Notebook(self.tk) # tab选项框对象字典 self.tabs", "state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1", "每行能够容纳的数量 self.__max_line_count = max_line_count # 36 # 双行能够容纳的数量 self.__max_double_line_count = int(self.__max_line_count / 2)", "from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum from .reader import ConfigReader from .reader import check_buttons,", "width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W)", "command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL self.column +=", "messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class Gui(object): def __init__(self, excel_file: str,", "= Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index,", "Entry(self, width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column += 2", "signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name)", "== OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] = DISABLED self.buttons[close_text_name][\"state\"] = NORMAL elif button_type == CLOSE_DEVICE:", "logger.debug(f\"sleep {action} seconds\") sleep_time = float(action[0]) sleep(sleep_time) else: raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self):", "+= 1 # ********** 创建一个发送默认消息的按钮 button 
********** text_name, show_name = DEFAULT_MESSAGE # 创建Button对象", "text_name = param[TEXT] logger.debug(f\"press {text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError", "# 单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典 self.thread_buttons = dict() # 下拉框对象字典 self.comboxs", "message_id = int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id = {message_id}\") try:", "self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column},", "result stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result) > 0: self.entries[signal_values_text_name][\"state\"]", "column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __entry_event(self, event, params):", "and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name == BUS_LOST: logger.info(\"can", "# -*- coding:utf-8 -*- # -------------------------------------------------------- # Copyright (C), 2016-2021, lizhe, All rights", "+= 1 if len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "self.entries[function_name].get() params = self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions =", "NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\ HORIZONTAL, E ,", "Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column", "self.__thread_buttons_width = 20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度 self.__entrie_width = 10", "self.create_receive_buttons() def create_common_widget(self): \"\"\" 创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键 \"\"\" # ********** 创建打开设备按钮 check_button **********", "self.column += 1 # 创建bool对象接收值 self.thread_button_bool_vars[text_name] = BooleanVar() # 创建CheckButton对象并放到thread_buttons中方便调用 button = Checkbutton(self,", "column = {self.column}\") self.column += 1 index += 1 self.row += 1 if", "value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value = int(signal_value_text) # 获取次数", "self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0,", "try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"] = NORMAL def", "NORMAL elif button_type == CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal", "self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name = param[TEXT] actions = param[ACTIONS]", "rights reserved # -------------------------------------------------------- # @Name: gui.py.py # @Author: lizhe # @Created: 2021/12/15", "from typing import List, Dict, Any, Union, Optional 
from automotive.logger.logger import logger from", "self.entries[text_name] = Entry(self, width=8) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name =", "len(action) == 2: msg_id, signals = action logger.info(f\"{hex(msg_id)} = {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals)", "copy from time import sleep from tkinter import Frame, Button, NORMAL, DISABLED, W,", "(1).png\").subsample(3, 3) # 创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs()", "else \"不精确\" message = f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message,", "command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column +=", "1 self.row += 1 if len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "获取signal value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value = int(signal_value_text) #", "self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框", "column=self.column, sticky=W) self.column += 1 # ********** 创建一个总线丢失的按钮 button ********** text_name, show_name =", "+= 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column,", "buttons: {}, receive_buttons: {}} config = service.read_from_file(excel_file) tab_configs.update(config) self.tab_control = Notebook(self.tk) # tab选项框对象字典", "param[TEXT] logger.debug(f\"press {text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e:", "param[TEXT] logger.debug(f\"press {text_name} button\") check_msgs = param[CHECK_MSGS] msg_id, signal_name, signal_value, count, expect_value =", "text_name, show_name = button_type self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type) except RuntimeError as e:", "index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定下拉框事件 self.comboxs[function_name].bind(\"<<ComboboxSelected>>\",", "text_name, show_name = CLOSE_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x))", "# 获取signal value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value = int(signal_value_text)", "DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_MESSAGE:", "sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"] =", "logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row !=", "2016-2021, lizhe, All rights reserved # 
-------------------------------------------------------- # @Name: gui.py.py # @Author: lizhe", "\"\"\" # ********** 创建打开设备按钮 check_button ********** text_name, show_name = OPEN_DEVICE # 创建Button对象 self.buttons[text_name]", "= SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name = button_type if button_type == DEFAULT_MESSAGE:", "# 创建输入框 self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column}, index", "logger from automotive.core.can.can_service import CANService from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum from .reader import", "function_name): self.__entry_event(x, y)) self.column += 1 index += 1 self.row += 1 if", "创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)", "text_name, show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_MESSAGE: self.__special_button_event(x))", "36 # 双行能够容纳的数量 self.__max_double_line_count = int(self.__max_line_count / 2) # 设置标签(label)默认宽度 self.__label_width = 25", "sticky=W) self.column += 1 text_name, show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self,", "1 index = 0 for key, value in self.__entries.items(): function_name = key text_name", "index += 1 self.row += 1 if len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0,", "[] for key, value in tab_configs.items(): logger.info(f\"handle tab {key}\") if key == COMMON:", "show_message = \"成功\" if result else \"失败\" exact_message = \"精确\" if exact_search else", "/ 2) # 设置标签(label)默认宽度 self.__label_width = 25 # 设置下拉框(comboxs)默认宽度 self.__comboxs_width = 20 #", "text_name, show_name = CLEAR_STACK # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x))", "处理16进制 if \"x\" in msg_id or \"X\" in msg_id: # 把16进制转换成10进制 message_id =", "gui.py.py # @Author: lizhe # @Created: 2021/12/15 - 21:24 # -------------------------------------------------------- import copy", "= 0 self.column = 0 # 布局显示 self.pack() # todo 64*64 3 3比较合适", "= {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row +=", "{index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件 for event_key in self.support_event_keys: self.entries[function_name].bind(event_key,", "Optional[List[str]] = None, can_fd: bool = False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int", "self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self, actions:", "try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) ==", "else: search_count = None # 获取是否精确查找 index = self.comboxs[exact_search_text_name].current() # 选中第一个则表示是True exact_search =", "index = {index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框", "self.buttons[open_text_name][\"state\"] = DISABLED 
self.buttons[close_text_name][\"state\"] = NORMAL elif button_type == CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] =", "# -------------------------------------------------------- import copy from time import sleep from tkinter import Frame, Button,", ":param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc,", "# 字典中定义的值列表 values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values = list(values.keys()) # 当前选中的是第几个", "orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name):", "msg_id in values: msg_id = msg_id.strip() # 处理16进制 if \"x\" in msg_id or", "offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row},", "# 下拉框按钮配置 self.__comboxs = config[comboxs] if config[comboxs] else dict() logger.debug(f\"comboxs = {self.__comboxs}\") #", "text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] =", "0: self.row += 1 # 创建单选框 index = 0 for key, value in", "# 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED else: messagebox.showerror(title=\"失败\",", "# 按钮框对象字典 self.buttons = dict() # 单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典 self.thread_buttons", "# 等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name =", "params[1] if function_name == message_lost: value = self.entries[function_name].get() if value != \"\": #", "self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name]", "from ...utils.common.enums import ExcelEnum class TabFrame(Frame): def __init__(self, master, can_service: CANService, config: Dict[str,", "创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries()", "sleep(sleep_time) else: raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if", "== DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1)", "self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED else: messagebox.showerror(title=\"失败\", message=f\"{signal_name} is", "lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5,", "1 # 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name} in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda", "as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value = 
self.entries[function_name].get() params = self.__entries[function_name] actions", "= DISABLED else: messagebox.showerror(title=\"失败\", message=f\"{signal_name} is not received\") self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self):", "logger.debug(f\"press {text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e)", "8): \"\"\" :param excel_file: Excel文件路径 (必填项) :param dbc: 项目dbc文件路径 (必填项) :param can_box_device:(选填) :param", "self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"]", "width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column}, index", "search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name = button_type if button_type ==", "= [value] for msg_id in values: msg_id = msg_id.strip() # 处理16进制 if \"x\"", "按钮框配置 self.__buttons = config[buttons] if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置", "int = 1, filter_nodes: Optional[List[str]] = None, can_fd: bool = False, excel_type: ExcelEnum", "\"\": search_count = int(search_count_text) else: search_count = None # 获取是否精确查找 index = self.comboxs[exact_search_text_name].current()", "if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", "Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self,", "DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME,", "10 # 输入框支持的事件列表 self.support_event_keys = \"<Return>\", # 单选框值 self.check_button_bool_vars = dict() # 闪烁单选框值", "创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count)", "function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\")", "# 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name} in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name:", "= SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name = button_type if", "index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if", "# 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=10)", "function_name == message_lost: value = self.entries[function_name].get() if value != \"\": # 0x152,0x153, 0x154", "BaudRateEnum from .reader import ConfigReader from .reader import check_buttons, thread_buttons, comboxs, entries, buttons,", "in self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name): self.__entry_event(x, y)) self.column += 1 index", "+ W, columnspan=self.__max_line_count) self.row += 1 def 
__combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送", "# 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() #", "# 输入框按钮配置 self.__entries = config[entries] if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") #", "+= 1 self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] =", "if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message)", "Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) #", "精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0 text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "int(self.__max_line_count / 2) # 设置标签(label)默认宽度 self.__label_width = 25 # 设置下拉框(comboxs)默认宽度 self.__comboxs_width = 20", "copy.deepcopy(actions) for action in new_actions: if len(action) == 2: msg_id, signals = action", "Combobox, Notebook, Separator from typing import List, Dict, Any, Union, Optional from automotive.logger.logger", "创建输入框 if self.row != 0: self.row += 1 index = 0 for key,", "SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import ExcelEnum", "{self.__entries}\") # 按钮框配置 self.__buttons = config[buttons] if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\")", "max_line_count: int = 8): \"\"\" :param excel_file: Excel文件路径 (必填项) :param dbc: 项目dbc文件路径 (必填项)", "+= 1 self.entries[text_name] = Entry(self, width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W,", "not received\") self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框 if", "else: if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get():", "Union, Optional from automotive.logger.logger import logger from automotive.core.can.can_service import CANService from automotive.core.can.common.enums import", "= \"成功\" if result else \"失败\" exact_message = \"精确\" if exact_search else \"不精确\"", "count=count, exact=expect_value) show_message = \"成功\" if result else \"失败\" exact_message = \"精确\" if", "= copy.deepcopy(actions) for action in new_actions: if len(action) == 2: msg_id, signals =", "\"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if self.row != 0: self.row", "CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text = self.entries[signal_value_text_name].get()", "signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name = button_type", "self.__buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except", "width=8) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = EXACT_SEARCH Label(self, 
text=show_name).grid(row=self.row,", "0: self.row += 1 index = 0 for key, value in self.__thread_buttons.items(): function_name", "NORMAL def __special_actions(self, button_type: tuple): open_text_name = OPEN_DEVICE[0] close_text_name = CLOSE_DEVICE[0] signal_name_text_name =", "self.row = 0 self.column = 0 # 布局显示 self.pack() # todo 64*64 3", "{index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if", "self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif", "NORMAL self.column += 1 # ********** 创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE #", "self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result = self.can_service.get_receive_signal_values(stack, signal_name) if len(result)", "# ********** 创建一个信号丢失的输入框 entry ********** text_name, show_name = MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row,", "__combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1] combox_param = self.__comboxs[function_name]", "param = self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") check_msgs = param[CHECK_MSGS] msg_id,", "(选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService", "= PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3, 3) # 创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() #", "self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column = {self.column},", "except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) == 1: logger.debug(f\"sleep {action}", "self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"]", "show_name = OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) #", "= Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x,", "create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row != 0: self.row += 1", "actual_values = list(values.keys()) # 当前选中的是第几个 combox_index = self.comboxs[function_name].current() select_name = actual_values[combox_index] actions =", ".reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE,", "if search_count_text != \"\": search_count = int(search_count_text) else: search_count = None # 获取是否精确查找", "self.row += 1 self.column = 0 else: self.column += 1 # 创建bool对象接收值 self.check_button_bool_vars[function_name]", "+= 1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,", "sticky=E + W, 
columnspan=self.__max_line_count) self.row += 1 self.__create_message_signal_check() def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID,", "OPEN_DEVICE[0] close_text_name = CLOSE_DEVICE[0] signal_name_text_name = SIGNAL_NAME[0] check_signal_name_text_name = CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0]", "actions = param[ACTIONS] self.thread_pool.submit(self.__send_actions, actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally:", "self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count", "if function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get():", "# 创建下拉框 if self.row != 0: self.row += 1 # 创建单选框 index =", "创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键 \"\"\" # ********** 创建打开设备按钮 check_button ********** text_name, show_name = OPEN_DEVICE", "text_name, show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] =", "创建单选框 index = 0 for key, value in self.__check_buttons.items(): function_name = key text_name", "text_name, show_name = EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 # 创建下拉框", "messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value = self.entries[function_name].get() params = self.__entries[function_name] actions = params[ACTIONS] text_name", "E , PhotoImage, LEFT from tkinter.ttk import Combobox, Notebook, Separator from typing import", "values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row", "self.row += 1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E +", "0: self.column = 0 elif index % self.__max_double_line_count == 0: self.row += 1", "key, value in self.__comboxs.items(): function_name = key text_name = value[TEXT] if index ==", "+= 1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name = values[TEXT] on_actions =", "NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框 if self.row != 0: self.row", "text_name = param[TEXT] logger.debug(f\"press {text_name} button\") check_msgs = param[CHECK_MSGS] msg_id, signal_name, signal_value, count,", "NORMAL elif button_type == CHECK_SIGNAL: # 获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值", "= value[TEXT] if index == 0: self.column = 0 elif index % self.__max_double_line_count", "self.thread_buttons = dict() # 下拉框对象字典 self.comboxs = dict() # 输入框对象字典 self.entries = dict()", "self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row +=", "Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_check_button_event(self,", "exact=expect_value) show_message = \"成功\" if result else \"失败\" exact_message = \"精确\" if expect_value", "= 0 else: self.column += 1 # 
创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = Button(self, text=text_name, command=lambda", "CHECK_SIGNAL: # 获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack()", "column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self, function_name): try:", "= Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column +=", "= CLOSE_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button", "# 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column += 1 # **********", "# @Name: gui.py.py # @Author: lizhe # @Created: 2021/12/15 - 21:24 # --------------------------------------------------------", "self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons()", "Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y))", "text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 #", "tkinter.ttk import Combobox, Notebook, Separator from typing import List, Dict, Any, Union, Optional", "anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row = {self.row}, column = {self.column}, index =", "+= 1 self.row += 1 if len(self.__receive_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5,", "= MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] =", "self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\") logger.error(e) self.buttons[text_name][\"state\"]", "+= 1 self.column = 0 else: self.column += 1 logger.debug(f\"row = {self.row}, column", "def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if function_name", "params = self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions)", "= DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type ==", "= params[1] if function_name == message_lost: value = self.entries[function_name].get() if value != \"\":", "index = 0 for key, value in self.__comboxs.items(): function_name = key text_name =", "sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry", "0 for key, value in self.__thread_buttons.items(): function_name = key text_name = value[TEXT] if", "orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __combox_event(self, event,", "= int(f\"0x{msg_id}\", 16) 
logger.debug(f\"message_id = {message_id}\") try: self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e)", "1 self.row += 1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "\\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError as e:", "in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) logger.debug(f\"row = {self.row}, column", "= NORMAL elif button_type == CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() #", "def __thread_check_button_event(self, function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes", "self.comboxs[function_name].bind(\"<<ComboboxSelected>>\", lambda x, y=(\"\", function_name): self.__combox_event(x, y)) logger.debug(f\"row = {self.row}, column = {self.column}\")", "+ 1, sticky=W) # 绑定事件 for event_key in self.support_event_keys: self.entries[function_name].bind(event_key, lambda x, y=(\"\",", "else: self.column += 1 logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\")", "message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_SIGNAL: # 获取signal name signal_name =", "lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name = param[TEXT] actions", "0: self.row += 1 self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用", "self.column += 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row,", "self.column += 2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column +=", "result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"]", "for key, value in self.__comboxs.items(): function_name = key text_name = value[TEXT] if index", "{}, entries: {}, buttons: {}, receive_buttons: {}} config = service.read_from_file(excel_file) tab_configs.update(config) self.tab_control =", "column = {self.column}, index = {index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column,", "= NORMAL elif button_type == CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED", "********** 创建打开设备按钮 check_button ********** text_name, show_name = OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self,", "else: # 0x164 values = [value] for msg_id in values: msg_id = msg_id.strip()", ":param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") #", "index += 1 self.row += 1 if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0,", "bool = False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, 
max_workers: int = 500, max_line_count: int", "elif button_type == CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type", "@Author: lizhe # @Created: 2021/12/15 - 21:24 # -------------------------------------------------------- import copy from time", "self.buttons[text_name] = Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"]", "1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count)", "= 0 text_name, show_name = SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1", "0: self.row += 1 index = 0 for key, value in self.__entries.items(): function_name", "value in tab_configs.items(): logger.info(f\"handle tab {key}\") if key == COMMON: common_panel = True", "# -------------------------------------------------------- # @Name: gui.py.py # @Author: lizhe # @Created: 2021/12/15 - 21:24", "+= 1 self.row += 1 if len(self.__check_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5,", "self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column,", "DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif", "1 # ********** 创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE # 创建Button对象 self.buttons[text_name] =", "show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self,", "= NORMAL class Gui(object): def __init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str,", "创建事件单选框 if self.row != 0: self.row += 1 index = 0 for key,", "= {self.__entries}\") # 按钮框配置 self.__buttons = config[buttons] if config[buttons] else dict() logger.debug(f\"buttons =", "logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class Gui(object): def __init__(self, excel_file:", "64*64 3 3比较合适 # self.open_image = PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3, 3) # 创建公共按钮 if common_panel:", "1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 self.__create_message_signal_check()", "1 index += 1 self.row += 1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row,", "max_line_count: int = None): super().__init__(master) self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes =", "logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 创建Label框 Label(self, text=text_name,", "W, columnspan=self.__max_line_count) self.row += 1 self.__create_message_signal_check() def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称 信号值,", "index += 1 self.row += 1 if len(self.__buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0,", "max_workers:默认值就行(选填) :param 
max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service =", "self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 logger.debug(f\"add button {function_name} in", "设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度 self.__entrie_width =", "设置输入框(entrie)默认宽度 self.__entrie_width = 10 # 输入框支持的事件列表 self.support_event_keys = \"<Return>\", # 单选框值 self.check_button_bool_vars =", "self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__buttons) != 0:", "{}, buttons: {}, receive_buttons: {}} config = service.read_from_file(excel_file) tab_configs.update(config) self.tab_control = Notebook(self.tk) #", "# 获取输入框的名称 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建输入框 self.entries[function_name] = Entry(self,", "y)) self.column += 1 index += 1 self.row += 1 if len(self.__entries) !=", "column=self.column, sticky=W, columnspan=2) self.column += 2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column,", "布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__check_buttons) !=", "1 def __thread_check_button_event(self, function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and filter", "= action logger.info(f\"{hex(msg_id)} = {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e: logger.error(e)", "\"\"\" 创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键 \"\"\" # ********** 创建打开设备按钮 check_button ********** text_name, show_name =", "index += 1 self.row += 1 if len(self.__comboxs) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0,", "= BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int = 1, filter_nodes: Optional[List[str]]", "dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None, baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH,", "self.row += 1 index = 0 for key, value in self.__buttons.items(): function_name =", "for key, value in self.__buttons.items(): function_name = key text_name = value[TEXT] if index", "= self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\"", "[value] for msg_id in values: msg_id = msg_id.strip() # 处理16进制 if \"x\" in", "\"X\" in msg_id: # 把16进制转换成10进制 message_id = int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\",", "+= 5 text_name, show_name = CHECK_SIGNAL # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda", "self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if self.row", "self.buttons[text_name][\"state\"] = NORMAL elif button_type == BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif button_type", "expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if result else \"失败\" exact_message = \"精确\"", "# 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda 
x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column,", "把16进制转换成10进制 message_id = int(msg_id, 16) else: message_id = int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id = {message_id}\")", "logger.debug(f\"row = {self.row}, column = {self.column}\") self.column += 1 index += 1 self.row", "if result else \"失败\" exact_message = \"精确\" if exact_search else \"不精确\" message =", "self.row += 1 self.column = 0 else: self.column += 1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name]", "event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name = function_name[1] combox_param = self.__comboxs[function_name] #", "max_workers: int = 500, max_line_count: int = 8): \"\"\" :param excel_file: Excel文件路径 (必填项)", "1 text_name, show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name]", "messagebox.showerror(title=\"失败\", message=f\"{signal_name} is not received\") self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\"", "key, value in self.__thread_buttons.items(): function_name = key text_name = value[TEXT] if index ==", "1 logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") # 获取输入框的名称 Label(self,", "创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)", "text_name = param[TEXT] actions = param[ACTIONS] if self.thread_button_bool_vars[text_name].get(): if function_name not in self.thread_task:", "self.entries[function_name].bind(event_key, lambda x, y=(\"\", function_name): self.__entry_event(x, y)) self.column += 1 index += 1", "self.__max_double_line_count == 0: self.row += 1 self.column = 0 else: self.column += 1", "self.__entries = config[entries] if config[entries] else dict() logger.debug(f\"entries = {self.__entries}\") # 按钮框配置 self.__buttons", "= can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else", "布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个信号丢失的输入框 entry ********** text_name,", "1 if len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count)", "= CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes", "check_button ********** text_name, show_name = OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda", "+= 1 # ********** 创建一个总线丢失的按钮 button ********** text_name, show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用", "sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __thread_check_button_event(self, function_name): if function_name ==", "2) else: param = self.__thread_buttons[function_name] text_name = param[TEXT] actions = param[ACTIONS] if self.thread_button_bool_vars[text_name].get():", "str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None, baud_rate: Union[BaudRateEnum, int] =", "__init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, 
str, None] = None, baud_rate: Union[BaudRateEnum,", "{text_name} button\") check_msgs = param[CHECK_MSGS] msg_id, signal_name, signal_value, count, expect_value = check_msgs try:", "布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = CHECK_MESSAGE # 创建Button对象", "expect_value=signal_value, count=search_count, exact=exact_search) show_message = \"成功\" if result else \"失败\" exact_message = \"精确\"", "self.column += 1 # ********** 创建一个总线丢失的按钮 button ********** text_name, show_name = BUS_LOST #", "command=lambda x=function_name: self.__check_button_event(x), width=self.__checkBut_width, anchor=\"w\",wraplength=150,justify=\"left\" ) self.check_buttons[function_name] = button logger.debug(f\"row = {self.row}, column", "= 0 for key, value in self.__receive_buttons.items(): function_name = key text_name = value[TEXT]", "== CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text =", "y)) self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", "# 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type) tab_configs = dict()", "+= 1 text_name, show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda", "is not received\") self.buttons[text_name][\"state\"] = NORMAL def create_check_buttons(self): \"\"\" 创建选中框,适用于单选发送消息的情况 \"\"\" # 创建下拉框", "+= 1 def __thread_check_button_event(self, function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and", "in actions: if len(action) == 2: msg_id, signals = action logger.info(f\"{hex(msg_id)} = {signals}\")", "+= 1 index = 0 for key, value in self.__entries.items(): function_name = key", "帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0 text_name, show_name = SIGNAL_NAME", "self.column += 1 # ********** 创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE # 创建Button对象", "else: entry_value = self.entries[function_name].get() params = self.__entries[function_name] actions = params[ACTIONS] text_name = params[TEXT]", "in self.__receive_buttons.items(): function_name = key text_name = value[TEXT] if index == 0: self.column", "config[comboxs] else dict() logger.debug(f\"comboxs = {self.__comboxs}\") # 输入框按钮配置 self.__entries = config[entries] if config[entries]", "= \"精确\" if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result:", "self.tab_control = Notebook(self.tk) # tab选项框对象字典 self.tabs = [] for key, value in tab_configs.items():", "= values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if", "else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if", "value = self.entries[function_name].get() if value != \"\": # 0x152,0x153, 0x154 value.replace(\",\", \",\") if", "= False, excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int = 500, max_line_count: int =", "if len(self.__receive_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", 
"index = 0 for key, value in self.__receive_buttons.items(): function_name = key text_name =", "self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else dict() logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs", "= config[buttons] if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons =", "list(values.keys()) # 当前选中的是第几个 combox_index = self.comboxs[function_name].current() select_name = actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\")", "def __create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0", "# ********** 创建一个发送默认消息的按钮 button ********** text_name, show_name = DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] =", "exact_search_text_name = EXACT_SEARCH[0] text_name, show_name = button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"]", "text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button", "字典中定义的值列表 values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values = list(values.keys()) # 当前选中的是第几个 combox_index", "self.entries[text_name] = Entry(self, width=20) # 等同于signal_name = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.column", "CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums", "signal_value = int(signal_value_text) # 获取次数 search_count_text = self.entries[search_count_text_name].get() if search_count_text != \"\": search_count", "width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name): self.__entry_event(x, y)) self.row", "onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row =", "# 单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") #", "# ********** 创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self,", "self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] =", "\\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] = NORMAL else:", "elif index % self.__max_double_line_count == 0: self.row += 1 self.column = 0 else:", "columnspan=self.__max_line_count) self.row += 1 def __combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\" function_name", "e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: 
self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收", "self.column += 1 # 创建bool对象接收值 self.check_button_bool_vars[function_name] = BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self,", "sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) # 等同于signal_value = Entry self.entries[text_name].grid(row=self.row,", "CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME from ...utils.common.enums import", "anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建输入框 self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column", "result) self.entries[signal_values_text_name][\"state\"] = DISABLED else: messagebox.showerror(title=\"失败\", message=f\"{signal_name} is not received\") self.buttons[text_name][\"state\"] = NORMAL", "!= \"\": # 0x152,0x153, 0x154 value.replace(\",\", \",\") if \",\" in value: values =", "pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 # ********** 创建信号检查部分 ********** self.__create_message_check()", "function_name): if function_name == DEFAULT_MESSAGE: logger.info(f\"send default messages and filter nodes {self.__filter_nodes}\") if", "sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0],", "W, columnspan=self.__max_line_count) self.row += 1 def __thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"] = DISABLED param", "Label, Tk, messagebox, \\ HORIZONTAL, E , PhotoImage, LEFT from tkinter.ttk import Combobox,", "actions = params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for action in", "int(f\"0x{msg_id}\", 16) logger.debug(f\"message_id = {message_id}\") try: self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\",", "tab {key}\") if key == COMMON: common_panel = True else: common_panel = False", "text_name, show_name = MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1", "+= 1 text_name, show_name = SEARCH_COUNT Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1", "sticky=W) self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\") def __special_button_event(self, button_type: tuple): text_name, show_name", "f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError as e: logger.error(e)", "2 text_name, show_name = SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name]", "def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row != 0: self.row +=", "msg_id or \"X\" in msg_id: # 把16进制转换成10进制 message_id = int(msg_id, 16) else: message_id", "设置单选按钮(checkBut)默认宽度 self.__checkBut_width = 25 # 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20 # 设置按钮(button)默认宽度 self.__buttons_width =", "self.entries[text_name] = Entry(self, width=10) 
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\", text_name):", "value[TEXT] if index == 0: self.column = 0 elif index % self.__max_double_line_count ==", "else: messagebox.showerror(title=\"失败\", message=\"请填写需要查询的信号值\") self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_SIGNAL: # 获取signal name", "# 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons() def", "x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self): \"\"\"", "MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self,", "actual_values[combox_index] actions = values[select_name] logger.debug(f\"设置{text_name}为{select_name}\") self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" #", "messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮, 模拟其他ECU接收 \"\"\" if", "from automotive.logger.logger import logger from automotive.core.can.can_service import CANService from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum", "create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row != 0: self.row +=", "= {self.column}, index = {index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row +=", "self.__comboxs[function_name] # 字典中定义的值列表 values = combox_param[VALUES] text_name = combox_param[TEXT] actual_values = list(values.keys()) #", "self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif button_type == CHECK_MESSAGE: # 获取signal name signal_name =", "# 每行能够容纳的数量 self.__max_line_count = max_line_count # 36 # 双行能够容纳的数量 self.__max_double_line_count = int(self.__max_line_count /", "= BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name, variable=self.check_button_bool_vars[function_name], onvalue=True, offvalue=False, command=lambda x=function_name:", "= 20 # 设置单选按钮(checkBut)默认宽度 self.__checkBut_width = 25 # 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20 #", "== message_lost: value = self.entries[function_name].get() if value != \"\": # 0x152,0x153, 0x154 value.replace(\",\",", "== BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif button_type == OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] =", "command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self):", "len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row +=", "RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class Gui(object):", "sticky=W) self.buttons[text_name][\"state\"] = NORMAL def 
__create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return: \"\"\" self.column", "Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate, channel_index=channel_index, can_fd=can_fd,", "text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 # 创建下拉框 self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state=\"readonly\",", "+= 1 # 创建bool对象接收值 self.check_button_bool_vars[function_name] = BooleanVar() # 创建CheckButton对象并放到check_buttons中方便调用 button = Checkbutton(self, text=text_name,", "message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except", "common_panel = False tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key)", "# 闪烁单选框按钮配置 self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else dict() logger.debug(f\"thread_buttons = {self.__thread_buttons}\") #", "Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self, values=values, state=\"readonly\",", "创建Label框 Label(self, text=text_name, width=self.__label_width, anchor=\"w\",wraplength=180,justify=\"left\").grid(row=self.row, column=self.column, sticky=W) # 创建下拉框 self.comboxs[function_name] = Combobox(self, values=values,", "\"\"\" # 创建输入框 if self.row != 0: self.row += 1 index = 0", "self.row += 1 def __thread_button_event(self, function_name): try: self.buttons[function_name][\"state\"] = DISABLED param = self.__buttons[function_name]", "# 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") #", "as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"] = NORMAL def create_receive_buttons(self): \"\"\" 创建接收检查按钮,", "logger.info(f\"{hex(msg_id)} = {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\")", "tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox,", "self.row += 1 if len(self.__thread_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E +", "self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E", "= value[TEXT] if index == 0: self.column = 0 elif index % self.__max_line_count", "= Combobox(self, values=YES_OR_NO, state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W)", "********** text_name, show_name = CLEAR_STACK # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK:", "W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name =", "16) logger.debug(f\"message_id = {message_id}\") try: 
self.can_service.stop_transmit(message_id) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\")", ".reader import ConfigReader from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons from", "= DISABLED param = self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") check_msgs =", "result else \"失败\" exact_message = \"精确\" if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\"", "# 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text = self.entries[signal_value_text_name].get() if", "W, columnspan=self.__max_line_count) self.row += 1 def __combox_event(self, event, function_name): \"\"\" 能够找到下拉框,并根据下拉框的内容进行判断 后续能够根据内容进行消息的发送 \"\"\"", "BUS_LOST: logger.info(\"can bus lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions, 2) else: param = self.__thread_buttons[function_name] text_name", "y=(\"\", function_name): self.__entry_event(x, y)) self.column += 1 index += 1 self.row += 1", "dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\") # 每行能够容纳的数量 self.__max_line_count = max_line_count # 36 # 双行能够容纳的数量", "1 self.column = 0 else: self.column += 1 # 创建bool对象接收值 self.check_button_bool_vars[function_name] = BooleanVar()", "signal_value, count, expect_value = check_msgs try: stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id,", "等同于signal_value = Entry self.entries[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 text_name, show_name = SEARCH_COUNT", "单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典 self.thread_buttons = dict() # 下拉框对象字典 self.comboxs =", "text_name, show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x))", "选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if self.row != 0: self.row += 1 index", "if function_name not in self.thread_task: task = self.thread_pool.submit(self.__thread_method, text_name, actions) self.thread_task[function_name] = task", "len(result) > 0: self.entries[signal_values_text_name][\"state\"] = NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0,", "\"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate,", "创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons() def create_common_widget(self):", "key text_name = value[TEXT] if index == 0: self.column = 0 elif index", "下拉框按钮配置 self.__comboxs = config[comboxs] if config[comboxs] else dict() logger.debug(f\"comboxs = {self.__comboxs}\") # 输入框按钮配置", "创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_SIGNAL: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)", "开始的行列 self.row = 0 self.column = 0 # 布局显示 self.pack() # todo 64*64", "if result else \"失败\" exact_message = \"精确\" if expect_value else \"不精确\" message =", "values[ON] off_actions = values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: 
logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions)", "self.column += 1 text_name, show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name,", "index = 0 for key, value in self.__thread_buttons.items(): function_name = key text_name =", "logger.debug(f\"press {text_name} button\") check_msgs = param[CHECK_MSGS] msg_id, signal_name, signal_value, count, expect_value = check_msgs", "= button_type self.buttons[text_name][\"state\"] = DISABLED try: self.__special_actions(button_type) except RuntimeError as e: messagebox.showerror(\"出错了\", f\"【{e}】\")", "+= 1 index = 0 for key, value in self.__receive_buttons.items(): function_name = key", "params[ACTIONS] text_name = params[TEXT] logger.debug(f\"设置{text_name}值为{entry_value}\") new_actions = copy.deepcopy(actions) for action in new_actions: if", "msg_id, signals = action for name, value in signals.items(): if value is None:", "= self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message = \"成功\" if result else", "dict() # 单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典 self.thread_buttons = dict() # 下拉框对象字典", "1 self.entries[text_name] = Entry(self, width=10) self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2) self.entries[text_name].bind(self.support_event_keys[0], lambda x, y=(\"\",", "def __init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None, baud_rate:", "self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个信号丢失的输入框 entry ********** text_name, show_name", "% self.__max_line_count == 0: self.row += 1 self.column = 0 else: self.column +=", ":return: \"\"\" self.column = 0 text_name, show_name = CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)", "signal_name_text_name = SIGNAL_NAME[0] check_signal_name_text_name = CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name", "单选框按钮配置 self.__check_buttons = config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置", "dict() # 输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task = dict() # 总线丢失按钮", "signals = action for name, value in signals.items(): if value is None: logger.debug(f\"change", "self.row += 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name", "if config[thread_buttons] else dict() logger.debug(f\"thread_buttons = {self.__thread_buttons}\") # 下拉框按钮配置 self.__comboxs = config[comboxs] if", "self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons() def create_common_widget(self): \"\"\" 创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键 \"\"\" # **********", "messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name == BUS_LOST:", "= 0 for key, value in self.__buttons.items(): function_name = key text_name = value[TEXT]", "self.__buttons = config[buttons] if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons", "CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON, 
CHECK_MSGS, CHECK_MESSAGE,", "NORMAL # 将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED else:", "self.row += 1 def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0] logger.trace(event) function_name =", "column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name): values", "Any, Union, Optional from automotive.logger.logger import logger from automotive.core.can.can_service import CANService from automotive.core.can.common.enums", "values=YES_OR_NO, state=\"readonly\", width=5) # 设置下拉框初始值为第一个值 self.comboxs[text_name].current(0) # 布局下拉框 self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W) self.column +=", "创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮 self.create_receive_buttons() def create_common_widget(self): \"\"\" 创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息),", "command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" ) self.thread_buttons[function_name] = button logger.debug(f\"row = {self.row}, column", "column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row", "def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if self.row !=", "sticky=E + W, columnspan=self.__max_line_count) self.row += 1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED", "SIGNAL_VALUE Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=8) #", "双行能够容纳的数量 self.__max_double_line_count = int(self.__max_line_count / 2) # 设置标签(label)默认宽度 self.__label_width = 25 # 设置下拉框(comboxs)默认宽度", "+ W, columnspan=self.__max_line_count) self.row += 1 def __entry_event(self, event, params): message_lost = MESSAGE_LOST[0]", "raise RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row !=", "{index}\") self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__thread_buttons) !=", "self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name == BUS_LOST: logger.info(\"can bus lost\") if self.thread_button_bool_vars[BUS_LOST].get(): self.thread_pool.submit(self.__special_actions,", "0x152,0x153, 0x154 value.replace(\",\", \",\") if \",\" in value: values = value.split(\",\") else: #", "= OPEN_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=OPEN_DEVICE: self.__special_button_event(x)) # 布局button", "= SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name = EXACT_SEARCH[0] text_name, show_name", "in self.__buttons.items(): function_name = key text_name = value[TEXT] if index == 0: self.column", "x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.buttons[function_name].grid(row=self.row,", "= None, 
baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index:", "self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件 for event_key in self.support_event_keys: self.entries[function_name].bind(event_key, lambda", "ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \\ SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH,", "len(action) == 1: logger.debug(f\"sleep {action} seconds\") sleep_time = float(action[0]) sleep(sleep_time) else: raise RuntimeError(f\"value[{action}]", "..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST, TEXT, ON, OFF, VALUES,", "actions) except RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") finally: self.buttons[function_name][\"state\"] = NORMAL def", "# 接收按钮框配置 self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\") #", "CLOSE_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLOSE_DEVICE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row,", "= 0 # 布局显示 self.pack() # todo 64*64 3 3比较合适 # self.open_image =", "\",\") if \",\" in value: values = value.split(\",\") else: # 0x164 values =", "coding:utf-8 -*- # -------------------------------------------------------- # Copyright (C), 2016-2021, lizhe, All rights reserved #", "if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons = config[receive_buttons] if", "common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab self.tab_control.select(self.tabs[0]) self.tk.protocol('WM_DELETE_WINDOW', self.exit_root) self.tk.mainloop()", "can_box_device:(选填) :param filter_nodes:发送默认信号筛选器(默认值) :param can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\"", "(index == 0) stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count,", "!= 0: self.row += 1 index = 0 for key, value in self.__entries.items():", "x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个发送默认消息的按钮", "= CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name = SIGNAL_VALUES[0] search_count_text_name = SEARCH_COUNT[0] exact_search_text_name =", "********** text_name, show_name = BUS_LOST # 创建CheckButton对象并放到check_buttons中方便调用 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST:", "3比较合适 # self.open_image = PhotoImage(file=rf\"D:\\Download\\Chrome\\打开 (1).png\").subsample(3, 3) # 创建公共按钮 if common_panel: self.create_common_widget() #", "= {self.row}, column = {self.column}, index = {index}\") # 创建Label框 Label(self, text=text_name, width=self.__label_width,", "stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value) show_message =", "column=self.column + 1, sticky=W) # 绑定事件 for event_key in self.support_event_keys: 
self.entries[function_name].bind(event_key, lambda x,", "Tk, messagebox, \\ HORIZONTAL, E , PhotoImage, LEFT from tkinter.ttk import Combobox, Notebook,", "exact_message = \"精确\" if expect_value else \"不精确\" message = f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if", "max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk() self.tk.title(\"CAN面板\") # 初始化 CANService self.can_service = CANService(dbc, can_box_device=can_box_device,", "= 25 # 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 #", "signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text != \"\": signal_value = int(signal_value_text) # 获取次数 search_count_text", "接收按钮框配置 self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict() logger.debug(f\"receive_buttons = {self.__receive_buttons}\") # 每行能够容纳的数量", "exact_search = (index == 0) stack = self.can_service.get_stack() result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name,", "self.buttons[text_name] = Button(self, text=show_name, command=lambda x=BUS_LOST: self.__special_button_event(x)) # 布局checkbutton self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column", "# 创建公共按钮 if common_panel: self.create_common_widget() # 创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框", "1 index = 0 for key, value in self.__receive_buttons.items(): function_name = key text_name", "{name} value to {entry_value}\") signals[name] = float(entry_value) self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯", "{}, comboxs: {}, entries: {}, buttons: {}, receive_buttons: {}} config = service.read_from_file(excel_file) tab_configs.update(config)", "self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons =", "创建关闭设备按钮 ********** text_name, show_name = CLOSE_DEVICE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda", "1 def __receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name = param[TEXT]", "if len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row", "excel_type: ExcelEnum = ExcelEnum.OPENPYXL, max_workers: int = 500, max_line_count: int = 8): \"\"\"", "self.check_button_bool_vars = dict() # 闪烁单选框值 self.thread_button_bool_vars = dict() # 按钮框对象字典 self.buttons = dict()", "RuntimeError as e: logger.error(e) messagebox.showerror(\"出错了\", f\"【{e}】\") else: entry_value = self.entries[function_name].get() params = self.__entries[function_name]", "logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\" 创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row", "1 self.column = 0 else: self.column += 1 logger.debug(f\"row = {self.row}, column =", "index == 0: self.column = 0 elif index % self.__max_line_count == 0: self.row", "Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column", "for key, value in self.__check_buttons.items(): function_name = key text_name = value[TEXT] if index", "import Frame, Button, 
NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \\", "on_actions = values[ON] off_actions = values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name}", "create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if self.row != 0:", "self.__check_buttons.items(): function_name = key text_name = value[TEXT] if index == 0: self.column =", "按钮框对象字典 self.buttons = dict() # 单选框对象字典 self.check_buttons = dict() # 闪烁单选框对象字典 self.thread_buttons =", "********** text_name, show_name = MESSAGE_LOST # 获取输入框的名称 Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column +=", "= 0 for key, value in self.__comboxs.items(): function_name = key text_name = value[TEXT]", "= action for name, value in signals.items(): if value is None: logger.debug(f\"change {name}", "logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else dict() logger.debug(f\"thread_buttons", "1 index += 1 self.row += 1 if len(self.__entries) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row,", "RuntimeError(f\"value[{action}] incorrect\") def create_buttons(self): \"\"\" 创建事件信号按钮,主要用于有时间延迟的部分,如长按或者短按方向盘按键, press release两种状态切换需要时间等待 \"\"\" if self.row != 0:", "columnspan=self.__max_line_count) self.row += 1 # ********** 创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 *******", "1 text_name, show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CHECK_MESSAGE:", "25 # 设置多线程按钮框(thread_buttons)默认宽度 self.__thread_buttons_width = 20 # 设置按钮(button)默认宽度 self.__buttons_width = 24 # 设置输入框(entrie)默认宽度", "1 text_name, show_name = EXACT_SEARCH Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 #", "self.entries[function_name] = Entry(self, width=self.__entrie_width) logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\")", "default messages and filter nodes {self.__filter_nodes}\") if self.thread_button_bool_vars[DEFAULT_MESSAGE].get(): self.thread_pool.submit(self.__special_actions, 1) elif function_name ==", "self.buttons[text_name][\"state\"] = NORMAL logger.debug(f\"entries are {entries}\") def __special_button_event(self, button_type: tuple): text_name, show_name =", "__create_message_check(self): \"\"\" 创建信号检查部分 帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查 \"\"\" self.column = 0 text_name,", "self.column += 1 logger.debug(f\"row = {self.row}, column = {self.column}, index = {index}\") #", "for key, value in self.__thread_buttons.items(): function_name = key text_name = value[TEXT] if index", "获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text = self.entries[signal_value_text_name].get() if signal_value_text", "import copy from time import sleep from tkinter import Frame, Button, NORMAL, DISABLED,", "sleep from tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label,", "= {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index += 1 self.row += 1", "logger.debug(f\"add button {function_name} in buttons\") self.buttons[function_name] = Button(self, text=f\"【{text_name}】\", command=lambda x=function_name: self.__receive_button_event(x)) 
logger.debug(f\"row", "else: messagebox.showerror(title=show_message, message=message) except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally: self.can_service.clear_stack_data() self.buttons[function_name][\"state\"]", "LEFT from tkinter.ttk import Combobox, Notebook, Separator from typing import List, Dict, Any,", "signals[name] = float(entry_value) self.__send_actions(new_actions) def create_thread_buttons(self): \"\"\" 创建周期交替变化或者有时间延迟的信号发送, 如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" #", "= config[check_buttons] if config[check_buttons] else dict() logger.debug(f\"check_buttons = {self.__check_buttons}\") # 闪烁单选框按钮配置 self.__thread_buttons =", "= Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row = {self.row}, column = {self.column},", "config: Dict[str, Any], filter_nodes: List[str], common_panel: bool = False, max_line_count: int = None):", "button = Checkbutton(self, text=f\"【{text_name}】\", variable=self.thread_button_bool_vars[text_name], onvalue=True, offvalue=False, command=lambda x=function_name: self.__thread_check_button_event(x), width=self.__thread_buttons_width, anchor=\"w\",wraplength=180,justify=\"left\" )", "如双闪灯 选中会发送,不选中则不发送 名字上以【】区别 \"\"\" # 创建事件单选框 if self.row != 0: self.row += 1", "{self.row}, column = {self.column}, index = {index}\") # 布局checkbutton self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W) index", "Button(self, text=show_name, command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1", "self.comboxs = dict() # 输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task = dict()", "Union[CanBoxDeviceEnum, str, None] = None, baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int]", "super().__init__(master) self.can_service = can_service self.thread_pool = can_service.can_bus.thread_pool self.__filter_nodes = filter_nodes # 单选框按钮配置 self.__check_buttons", "values = self.__check_buttons[function_name] text_name = values[TEXT] on_actions = values[ON] off_actions = values[OFF] if", "+= 1 # 获取下拉框的名称 values = list(value[VALUES].keys()) logger.debug(f\"row = {self.row}, column = {self.column},", "\"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED else: messagebox.showerror(title=\"失败\", message=f\"{signal_name} is not", "values[OFF] if self.check_button_bool_vars[function_name].get(): logger.debug(f\"{text_name} ON\") self.__send_actions(on_actions) else: logger.debug(f\"{text_name} OFF\") self.__send_actions(off_actions) def create_comboxs(self): \"\"\"", "__receive_button_event(self, function_name): self.buttons[function_name][\"state\"] = DISABLED param = self.__receive_buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name}", "= Button(self, text=show_name, command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] =", "self.can_service.clear_stack_data() self.buttons[function_name][\"state\"] = NORMAL class Gui(object): def __init__(self, excel_file: str, dbc: str, can_box_device:", "show_name = CHECK_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, 
command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) #", "baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int =", "== 2: msg_id, signals = action logger.info(f\"{hex(msg_id)} = {signals}\") try: self.can_service.send_can_signal_message(msg_id, signals) except", "适用于枚举类型的选中框 \"\"\" # 创建下拉框 if self.row != 0: self.row += 1 index =", "TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes, config=value, common_panel=common_panel, max_line_count=max_line_count) self.tab_control.add(tab, text=key) self.tabs.append(tab) self.tab_control.pack(expand=1, fill=\"both\") # 第一个tab", "BUS_LOST: self.can_service.stop_transmit() self.buttons[text_name][\"state\"] = NORMAL elif button_type == OPEN_DEVICE: self.can_service.open_can() self.buttons[open_text_name][\"state\"] = DISABLED", "self.buttons[text_name] = Button(self, text=show_name, command=lambda x=CLEAR_STACK: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column", "open_text_name = OPEN_DEVICE[0] close_text_name = CLOSE_DEVICE[0] signal_name_text_name = SIGNAL_NAME[0] check_signal_name_text_name = CHECK_SIGNAL_NAME[0] signal_value_text_name", "channel_index=channel_index, can_fd=can_fd, max_workers=max_workers) # 默认消息发送要过滤的节点 self.__filter_nodes = filter_nodes # 获取按钮 service = ConfigReader(can_service=self.can_service,type_=excel_type)", "function_name): values = self.__check_buttons[function_name] text_name = values[TEXT] on_actions = values[ON] off_actions = values[OFF]", "= CHECK_SIGNAL_NAME Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=20)", "button_type == CHECK_MESSAGE: # 获取signal name signal_name = self.entries[signal_name_text_name].get().strip() # 获取signal value signal_value_text", "column=self.column, sticky=W) index += 1 self.row += 1 if len(self.__receive_buttons) != 0: Separator(self,", "self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = DISABLED self.column += 1 #", "= f\"检查信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) self.buttons[text_name][\"state\"] =", "state=\"readonly\", width=self.__comboxs_width) # 设置下拉框初始值为第一个值 self.comboxs[function_name].current(0) logger.debug(f\"row = {self.row}, column = {self.column}, index =", "entries, buttons, receive_buttons from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \\ MESSAGE_LOST,", "# 获取signal name signal_name = self.entries[check_signal_name_text_name].get().strip() # 检测信号值是否已经发送过,并返回检测到的信号值 result stack = self.can_service.get_stack() result", "Any], filter_nodes: List[str], common_panel: bool = False, max_line_count: int = None): super().__init__(master) self.can_service", "创建一个发送默认消息的按钮 button ********** text_name, show_name = DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name,", "NORMAL self.buttons[close_text_name][\"state\"] = DISABLED elif button_type == CLEAR_STACK: self.can_service.clear_stack_data() self.buttons[text_name][\"state\"] = NORMAL elif", "...utils.common.enums import ExcelEnum class TabFrame(Frame): def __init__(self, 
master, can_service: CANService, config: Dict[str, Any],", "= None # 获取是否精确查找 index = self.comboxs[exact_search_text_name].current() # 选中第一个则表示是True exact_search = (index ==", "Button(self, text=show_name, command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL", "messagebox.showerror(\"出错了\", f\"【{e}】\") elif len(action) == 1: logger.debug(f\"sleep {action} seconds\") sleep_time = float(action[0]) sleep(sleep_time)", "index = {index}\") self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W) # 绑定事件 for event_key in", "text_name, actions) self.thread_task[function_name] = task else: if function_name in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self,", "= {check_buttons: {}, thread_buttons: {}, comboxs: {}, entries: {}, buttons: {}, receive_buttons: {}}", "********** 创建信号检查部分 ********** self.__create_message_check() # ********** 创建检测信号是否之前发送值部分 ******* self.row += 1 Separator(self, orient=HORIZONTAL).grid(row=self.row,", "创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程) self.create_buttons() # 创建接收检查按钮", "= dict() # 总线丢失按钮 = # 开始的行列 self.row = 0 self.column = 0", ":param can_box_device:(选填) :param filter_nodes:发送默认信号筛选器(默认值) :param can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改", "0 elif index % self.__max_double_line_count == 0: self.row += 1 self.column = 0", "button ********** text_name, show_name = DEFAULT_MESSAGE # 创建Button对象 self.buttons[text_name] = Button(self, text=show_name, command=lambda", "= button_type if button_type == DEFAULT_MESSAGE: self.can_service.send_default_messages(filter_sender=self.__filter_nodes) self.buttons[text_name][\"state\"] = NORMAL elif button_type ==", "= f\"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数\" \\ f\"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】\" if result: messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError", "= MESSAGE_LOST[0] logger.trace(event) function_name = params[1] if function_name == message_lost: value = self.entries[function_name].get()", "len(self.__thread_buttons) != 0: Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W, columnspan=self.__max_line_count) self.row +=", "# 创建单选框 index = 0 for key, value in self.__check_buttons.items(): function_name = key", "close_text_name = CLOSE_DEVICE[0] signal_name_text_name = SIGNAL_NAME[0] check_signal_name_text_name = CHECK_SIGNAL_NAME[0] signal_value_text_name = SIGNAL_VALUE[0] signal_values_text_name", "= {self.row}, column = {self.column}, index = {index}\") # 布局下拉框 self.comboxs[function_name].grid(row=self.row, column=self.column +", "= NORMAL def __special_actions(self, button_type: tuple): open_text_name = OPEN_DEVICE[0] close_text_name = CLOSE_DEVICE[0] signal_name_text_name", "将之前的值先清空 self.entries[signal_values_text_name].delete(0, \"end\") # 将返回的值插入到输入框中 self.entries[signal_values_text_name].insert(0, result) self.entries[signal_values_text_name][\"state\"] = DISABLED else: messagebox.showerror(title=\"失败\", message=f\"{signal_name}", "self.__buttons.items(): function_name = key text_name = value[TEXT] if index == 0: self.column =", "+= 1 # 创建CheckButton对象并放到thread_buttons中方便调用 self.buttons[function_name] = 
Button(self, text=text_name, command=lambda x=function_name: self.__thread_button_event(x), width=self.__buttons_width,wraplength=170,justify=\"left\",anchor=\"w\") logger.debug(f\"row", "BaudRateEnum.HIGH, data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA, channel_index: int = 1, filter_nodes: Optional[List[str]] =", "+= 1 self.column = 0 else: self.column += 1 # 获取下拉框的名称 values =", "column=self.column, sticky=W) self.column += 1 self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value =", "self.__send_actions(actions) logger.trace(event) def create_entries(self): \"\"\" 创建输入框,适用于车速类型的线性信号值 \"\"\" # 创建输入框 if self.row != 0:", "config[buttons] if config[buttons] else dict() logger.debug(f\"buttons = {self.__buttons}\") # 接收按钮框配置 self.__receive_buttons = config[receive_buttons]", "DISABLED self.buttons[close_text_name][\"state\"] = NORMAL elif button_type == CLOSE_DEVICE: self.can_service.close_can() self.buttons[open_text_name][\"state\"] = NORMAL self.buttons[close_text_name][\"state\"]", "count=search_count, exact=exact_search) show_message = \"成功\" if result else \"失败\" exact_message = \"精确\" if", "创建单选按钮 self.create_check_buttons() # 创建下拉按钮 self.create_comboxs() # 创建输入框 self.create_entries() # 创建事件单选按钮 self.create_thread_buttons() # 创建按钮框(多线程)", "in self.thread_task: self.thread_task.pop(function_name) def __thread_method(self, name, actions): logger.debug(actions) while self.thread_button_bool_vars[name].get(): self.__send_actions(actions) def __send_actions(self,", "in self.__entries.items(): function_name = key text_name = value[TEXT] if index == 0: self.column", ":param can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填) :param max_line_count:面板一行中显示的最大数量,默认值为8,如果显示不全可以自己修改 \"\"\" self.tk = Tk()", "index == 0: self.column = 0 elif index % self.__max_double_line_count == 0: self.row", "text=show_name, command=lambda x=CHECK_MESSAGE: self.__special_button_event(x)) # 布局button self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def", "self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.buttons[text_name][\"state\"] = NORMAL def __create_message_signal_check(self): \"\"\" 创建信号之前发送过那些值检测 帧ID,信号名称 精确查找的等选择 :return:", "# 下拉框对象字典 self.comboxs = dict() # 输入框对象字典 self.entries = dict() # 闪烁事件Task self.thread_task", "项目dbc文件路径 (必填项) :param can_box_device:(选填) :param filter_nodes:发送默认信号筛选器(默认值) :param can_fd:(选填) :param excel_type: (选填) :param max_workers:默认值就行(选填)", "messagebox.showinfo(title=show_message, message=message) else: messagebox.showerror(title=show_message, message=message) except RuntimeError as e: logger.error(e) messagebox.showerror(title=\"出错了\", message=f\"【{e}】\") finally:", "columnspan=self.__max_line_count) self.row += 1 def __check_button_event(self, function_name): values = self.__check_buttons[function_name] text_name = values[TEXT]", "self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W) self.column += 1 # ********** 创建一个总线丢失的按钮 button ********** text_name, show_name", "self.buttons[function_name][\"state\"] = DISABLED param = self.__buttons[function_name] text_name = param[TEXT] logger.debug(f\"press {text_name} button\") actions" ]
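
The bracketed list that closes above and the one that opens below are consecutive rows of the "ngram" column: the row above holds overlapping word-window fragments of a tkinter-based CAN-bus panel module (a TabFrame class and a Gui class built around CANService), and the row below begins the fragments of a subtitle-renaming script that uses is_language from subs2cia.sources, pycountry language lookup, and a get_args_subzipper argument parser. Each entry appears to be a fixed-width window of whitespace-separated tokens, stepped one token at a time, which is why neighbouring entries share all but one token. As a minimal, hypothetical sketch of how such lists could be produced (the window width of 12, the helper name word_ngrams, and the input path example.py are illustrative assumptions, not part of any tooling shown in this document):

from pathlib import Path
from typing import List

def word_ngrams(text: str, width: int = 12) -> List[str]:
    # Split the source into whitespace tokens and emit every overlapping
    # window of `width` tokens, advancing one token per step.
    tokens = text.split()
    if len(tokens) < width:
        return [" ".join(tokens)] if tokens else []
    return [" ".join(tokens[i:i + width]) for i in range(len(tokens) - width + 1)]

if __name__ == "__main__":
    source = Path("example.py").read_text(encoding="utf-8")  # hypothetical input file
    for window in word_ngrams(source)[:3]:
        print(window)
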
[ "in reffiles] lang = None if args['lang'] is not None: if is_language(args['lang']): lang", "{args['lang']} is not a ISO recognized language\") if args['no_sort']: logging.info(\"Not sorting inputs alphabetically,", "an existing file!\") exit(1) # todo: user-interactive question here if args['dry_run']: logging.info(\"Dry run", "len(reffiles) else len(reffiles)} \" f\"reference-subtitle pairs.\") # exit(1) # subfiles = [Path(s).absolute() for", "+ ('' if lang is None else f'.{lang}') + s.suffix) logging.info(f\"Renaming {s} to", "= None if args['lang'] is not None: if is_language(args['lang']): lang = pycountry.languages.lookup(args['lang']) lang", "f'.{lang}') + s.suffix) logging.info(f\"Renaming {s} to {newpath}...\") s.rename(newpath) logging.info(f\"...done\") if __name__ == '__main__':", "logging.critical(f\"Renaming subtitle to {newpath} will overwrite an existing file!\") exit(1) # todo: user-interactive", "if len(subfiles) != len(reffiles): logging.warning(f\"Mismatched number of subtitle and reference files! Got {len(subfiles)}", "ISO recognized language\") if args['no_sort']: logging.info(\"Not sorting inputs alphabetically, using as-is.\") else: subfiles.sort(key=lambda", "{newpath}\") if not s.exists(): logging.critical(f\"Subtitle file doesn't exist: {s}\") exit(1) if not r.exists():", "a ISO recognized language\") if args['no_sort']: logging.info(\"Not sorting inputs alphabetically, using as-is.\") else:", "[Path(r).absolute() for r in reffiles] subfiles = [Path(s) for s in subfiles] reffiles", "# todo: user-interactive question here if args['dry_run']: logging.info(\"Dry run mode, not writing changes.\")", "\" f\"{len(subfiles) if len(subfiles) < len(reffiles) else len(reffiles)} \" f\"reference-subtitle pairs.\") # exit(1)", "in reffiles] subfiles = [Path(s) for s in subfiles] reffiles = [Path(r) for", "if lang is None else f'.{lang}') + s.suffix) logging.info(f\"Renaming {s} to {newpath}...\") s.rename(newpath)", "logging from pathlib import Path import pycountry from pprint import pprint def start():", "will overwrite the reference file!\") exit(1) if newpath.exists(): logging.critical(f\"Renaming subtitle to {newpath} will", "get_args_subzipper from subs2cia.sources import is_language import logging from pathlib import Path import pycountry", "pprint import pprint def start(): args = get_args_subzipper() args = vars(args) if args['verbose']:", "files and \" f\"{len(reffiles)} reference files.\") logging.warning(f\"Will only process the first \" f\"{len(subfiles)", "{args}\") subfiles = args['subfiles'] reffiles = args['reffiles'] if len(subfiles) != len(reffiles): logging.warning(f\"Mismatched number", "for s, r in zip(subfiles, reffiles): newpath = r.parent / (r.stem + (''", "f'.{lang}') + s.suffix) logging.info(f\"Will rename {s} to {newpath}\") if not s.exists(): logging.critical(f\"Subtitle file", "todo: user-interactive question here if args['dry_run']: logging.info(\"Dry run mode, not writing changes.\") return", "# logging.basicConfig(level=logging.DEBUG) logging.debug(f\"Start arguments: {args}\") subfiles = args['subfiles'] reffiles = args['reffiles'] if len(subfiles)", "sorting inputs alphabetically, using as-is.\") else: subfiles.sort(key=lambda x: str(x)) reffiles.sort(key=lambda x: str(x)) for", "s.exists(): logging.critical(f\"Subtitle file doesn't exist: {s}\") exit(1) if not r.exists(): logging.warning(f\"Reference file doesn't", "# reffiles = [Path(r).absolute() for r in reffiles] subfiles = [Path(s) for s", "is_language import logging from pathlib 
from subs2cia.argparser import get_args_subzipper
from subs2cia.sources import is_language
import logging
from pathlib import Path
import pycountry
from pprint import pprint


def start():
    args = get_args_subzipper()
    args = vars(args)

    if args['verbose']:
        # if args['debug']:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    # elif args['debug']:
    #     logging.basicConfig(level=logging.DEBUG)

    logging.debug(f"Start arguments: {args}")

    subfiles = args['subfiles']
    reffiles = args['reffiles']

    if len(subfiles) != len(reffiles):
        logging.warning(f"Mismatched number of subtitle and reference files! Got {len(subfiles)} subtitle files and "
                        f"{len(reffiles)} reference files.")
        logging.warning(f"Will only process the first "
                        f"{len(subfiles) if len(subfiles) < len(reffiles) else len(reffiles)} "
                        f"reference-subtitle pairs.")
        # exit(1)

    # subfiles = [Path(s).absolute() for s in subfiles]
    # reffiles = [Path(r).absolute() for r in reffiles]
    subfiles = [Path(s) for s in subfiles]
    reffiles = [Path(r) for r in reffiles]

    lang = None
    if args['lang'] is not None:
        if is_language(args['lang']):
            lang = pycountry.languages.lookup(args['lang'])
            lang = lang.alpha_3
            logging.info(f'Appending language code {lang}')
        else:
            logging.error(f"Language lookup failure: {args['lang']} is not an ISO recognized language")

    if args['no_sort']:
        logging.info("Not sorting inputs alphabetically, using as-is.")
    else:
        subfiles.sort(key=lambda x: str(x))
        reffiles.sort(key=lambda x: str(x))

    for s, r in zip(subfiles, reffiles):
        newpath = r.parent / (r.stem + ('' if lang is None else f'.{lang}') + s.suffix)
        logging.info(f"Will rename {s} to {newpath}")
        if not s.exists():
            logging.critical(f"Subtitle file doesn't exist: {s}")
            exit(1)
        if not r.exists():
            logging.warning(f"Reference file doesn't exist: {r}")
        if newpath == r:
            logging.critical(f"Renaming subtitle to {newpath} will overwrite the reference file!")
            exit(1)
        if newpath.exists():
            logging.critical(f"Renaming subtitle to {newpath} will overwrite an existing file!")
            exit(1)
        # todo: user-interactive question here

    if args['dry_run']:
        logging.info("Dry run mode, not writing changes.")
        return

    for s, r in zip(subfiles, reffiles):
        newpath = r.parent / (r.stem + ('' if lang is None else f'.{lang}') + s.suffix)
        logging.info(f"Renaming {s} to {newpath}...")
        s.rename(newpath)
        logging.info(f"...done")


if __name__ == '__main__':
    start()
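As a quick illustration of the rename rule above (a minimal sketch, not part of the original script; the file names and the 'eng' code below are hypothetical), the target path is the reference file's stem, plus an optional ISO 639-3 language code, plus the subtitle's own extension:

from pathlib import Path

s = Path("episode 01.srt")            # hypothetical subtitle file
r = Path("Show.S01E01.1080p.mkv")     # hypothetical reference video file
lang = "eng"                          # e.g. pycountry.languages.lookup("en").alpha_3

newpath = r.parent / (r.stem + ('' if lang is None else f'.{lang}') + s.suffix)
print(newpath)  # Show.S01E01.1080p.eng.srt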
[ "diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2", "to global only attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value vectors for", "self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], ): batch_size = attn_probs.shape[0] #", "cfg, layer_id): super().__init__() if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise ValueError( \"The hidden", "overwritten hidden_states_padded = hidden_states_padded.view( hidden_states_padded.size(0), hidden_states_padded.size(1), hidden_states_padded.size(3), hidden_states_padded.size(2) ) return hidden_states_padded def _pad_and_diagonalize(self,", "import torch import math def nonzero_tuple(x): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return", "int): beginning_mask_2d = torch.ones(affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :,", "- 1 chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)] chunk_stride[1] = chunk_stride[1] // 2", "[torch.tensor(0)] is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local attention probs from", "ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float(\"inf\")) # `== 1` converts to bool", "# location of the padding values within global attention indices is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn", "attention_window // 2 def forward( self, hidden_states, attention_mask ): \"\"\" LongformerSelfAttention expects `len(hidden_states)`", "value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices,", "1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000,", "the chunks overlap with an overlap size = window_overlap chunk_size = list(hidden_states.size()) chunk_size[1]", "the padding values within global attention indices is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn == 0) return", "[hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)] chunk_stride[1] = chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) def", "chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) # copy parts", "torch import math def nonzero_tuple(x): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1)", "padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads,", "self.head_dim).transpose(0, 1) # compute local attention output with global attention value and add", "0: local attention +ve: global attention \"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index", "= self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multipication # bcxd: batch_size", "-1, 
:, window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:, : window_overlap + 1 ]", "1` converts to bool or uint8 ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len +", "attention probs attn_scores += diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size", "chunked_attention_scores = torch.einsum(\"bcxd,bcyd->bcxy\", (chunked_query, chunked_key)) # multiply # convert diagonals into columns diagonal_chunked_attention_scores", "global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states)", "compute local attn only attn_output = self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size()", "2 def forward( self, hidden_states, attention_mask ): \"\"\" LongformerSelfAttention expects `len(hidden_states)` to be", "diagonal_chunked_attention_scores into the combined matrix of attentions # - copying the main diagonal", "global_query_vectors_only_global /= math.sqrt(self.head_dim) # reshape global_query_vectors_only_global = ( global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices, batch_size * self.num_heads,", "= torch.nn.functional.pad( hidden_states_padded, padding ) # padding value is not important because it", "* self.num_heads, seq_len, head_dim) # compute attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))", "query.size() == key.size() chunks_count = seq_len // window_overlap - 1 # group batch_size", "= diagonal_chunked_attention_scores[ :, :, :window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1, :,", "overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap", "= chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap + 1) ) # total_num_heads", "because it will be overwritten hidden_states_padded = hidden_states_padded.view( hidden_states_padded.size(0), hidden_states_padded.size(1), hidden_states_padded.size(3), hidden_states_padded.size(2) )", "+ 1 chunks_count = seq_len // window_overlap - 1 # group batch_size and", "separate projection layers for tokens with global attention self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key_global", "splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained", "lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :, :, -(window_overlap + 1):", "None, None] # cast to fp32/fp16 then replace 1's with -inf float_mask =", "happens in LongformerModel.forward to avoid redoing the padding on each layer. 
The `attention_mask`", "torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) # separate projection", "overwrite to attention output # TODO: remove the redundant computation if is_global_attn: global_attn_output", "nonzero_tuple(is_local_index_global_attn) # location of the padding values within global attention indices is_local_index_no_global_attn_nonzero =", ") # batch_size * self.num_heads, seq_len, head_dim) # compute attn scores global_attn_scores =", "= attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices ).contiguous() # compute attn output with", "and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap", "if is_global_attn: global_attn_output = self._compute_global_attn_output( hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) #", "beginning of the sequence and another window overlap at the end padded_value =", "be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, -1 ) # total_num_heads x num_chunks", "= nonzero_tuple(is_local_index_global_attn == 0) return ( max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) def _concat_with_global_key_attn_probs(", "num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap + 1)", "f\"but is {global_attn_output.size(2)}.\" global_attn_output = global_attn_output.view( batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim ) return global_attn_output", "conding:utf-8 _*_ # Author : Nick # Time : 2020/9/15 3:21 下午 from", "(e.g. 
512 for pretrained Longformer) with an overlap of size window_overlap\"\"\" batch_size, seq_len,", "= -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None, :], -10000.0, ) global_attn_scores = global_attn_scores.view(batch_size", "= torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # compute", "chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value)) return", "hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size,", "and then flips rows and columns\"\"\" hidden_states_padded = torch.nn.functional.pad( hidden_states_padded, padding ) #", "1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn( self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap:", "self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) #", "# `== 1` converts to bool or uint8 def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key:", "tokens with global attention self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value_global", "= query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads,", "global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn", "global attn probs from global key global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero,", "padding value is not important because it will be overwritten hidden_states_padded = hidden_states_padded.view(", "2 + 1) columns. 
The first (window_overlap) columns are the window_overlap lower #", "2)) assert global_attn_scores.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores have the wrong size.", "max_num_global_attn_indices = num_global_attn_indices.max() # indices of global attn is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn) # helper", "= global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : ] = -10000.0", "ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask ==", "global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /=", "= ( global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads,", "probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices ).contiguous() # compute attn", "value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) # pad seq_len with w", "Nick # Time : 2020/9/15 3:21 下午 from typing import List, Tuple import", "x 2window_overlap x window_overlap chunked_attention_scores = torch.einsum(\"bcxd,bcyd->bcxy\", (chunked_query, chunked_key)) # multiply # convert", "batch_size * self.num_heads, \\ f\"global_attn_scores have the wrong size. size(0) should be {batch_size", "{self.head_dim}, \" \\ f\"but is {global_attn_output.size(2)}.\" global_attn_output = global_attn_output.view( batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim", "\\ f\"but is {global_attn_scores.size(1)}.\" assert global_attn_scores.size(2) == seq_len, \\ f\"global_attn_scores have the wrong", "window_overlap + 1 ).transpose(2, 1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn( self, attn_probs:", "= global_attn_scores.masked_fill(is_index_masked[:, None, None, :], -10000.0, ) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices,", "2 * window_overlap + 1 ).transpose(2, 1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn(", "implementation splits the input into overlapping chunks of size 2w (e.g. 512 for", "size 2w (e.g. 512 for pretrained Longformer) with an overlap of size window_overlap\"\"\"", "for attn_probs and value tensors. 
Returned tensor will be of the same shape", "# 待补充超参数 assert ( attention_window % 2 == 0 ), f\"`attention_window` for layer", ") key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum(\"blhd,bshd->blhs\", (query_vectors,", "= torch.einsum(\"bcxd,bcyd->bcxy\", (chunked_query, chunked_key)) # multiply # convert diagonals into columns diagonal_chunked_attention_scores =", "self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert", "key global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat", "probs global_attn_probs_float = torch.nn.functional.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) # use fp32 for numerical", "attn probs global_attn_probs_float = torch.nn.functional.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) # use fp32 for", "chunked_hidden_states def _chunk(self, hidden_states, window_overlap: int): \"\"\"convert into overlapping chunkings. Chunk size =", "def nonzero_tuple(x): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) class LongformerSelfAttention(torch.nn.Module): def", "0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584,", "0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372,", "overlap with an overlap size = window_overlap chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1]", "= cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数 attention_window = cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id] # 待补充超参数 assert", "batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size", "to bool or uint8 ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask", "to `attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer.", "def _compute_attn_output( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], ): batch_size =", "embed_dim == self.embed_dim ), f\"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}\"", "apply dropout attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0,", "num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad", "fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only global", "attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global = attn_probs.narrow(", "copying the main diagonal and the upper triangle diagonal_attention_scores[:, :-1, 
:, window_overlap:] =", "nonzero_tuple(is_index_global_attn) # helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) #", "0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000,", "compute global attn probs global_attn_probs_float = torch.nn.functional.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) # use", "torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id #", "has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the", "# max number of global attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() #", "with a sliding window attention pattern. This implementation splits the input into overlapping", "states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ is_index_global_attn_nonzero[::-1] ] # global", ":, 0, : window_overlap - 1, 1 - window_overlap: ] # separate batch_size", "value=-1.0) # chunk padded_value into chunks of size 3 window overlap and an", "f\"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}\" assert query.size()", "# triangles (attention from a word to window_overlap previous words). The following column", "0.2629 ] window_overlap = num_rows = 4 (pad & diagonilize) => [ 0.4983,", "columns. The first (window_overlap) columns are the window_overlap lower # triangles (attention from", "assert attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = seq_len // window_overlap", "class LongformerSelfAttention(torch.nn.Module): def __init__(self, cfg, layer_id): super().__init__() if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0:", "contact over window dim if is_global_attn: # compute global attn indices required through", "compute global attn indices required throughout forward pass \"\"\" # helper variable num_global_attn_indices", "# compute global attn probs global_attn_probs_float = torch.nn.functional.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) #", ":window_overlap] = diagonal_chunked_attention_scores[ :, :, -(window_overlap + 1): -1, window_overlap + 1: ]", "embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ), f\"hidden_states should have embed_dim", "] = -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None, :], -10000.0, ) global_attn_scores =", "not important because it will be overwritten hidden_states_padded = hidden_states_padded.view( hidden_states_padded.size(0), hidden_states_padded.size(1), hidden_states_padded.size(3),", "batch_size and num_heads dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size, num_heads, seq_len, 2 *", "# total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]", "2, hidden_states.size(2), ) # use `as_strided` to make the chunks overlap with an", "masked, replace them with 0 attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)", "= hidden_states_padded.view( hidden_states_padded.size(0), hidden_states_padded.size(1), 
hidden_states_padded.size(3), hidden_states_padded.size(2) ) return hidden_states_padded def _pad_and_diagonalize(self, chunked_hidden_states): \"\"\"shift", "numerical stability attn_probs = attn_probs_fp32.type_as(attn_scores) # free memory # if self.query.training: # del", "should have embed_dim = {self.embed_dim}, but has {embed_dim}\" # normalize query query_vectors /=", "beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask", "0.2629 ] \"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states,", "cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise ValueError( \"The hidden size (%d) is not a multiple", "for global only value_vectors_only_global = value_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) value_vectors_only_global[is_local_index_global_attn_nonzero] =", "{attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0) is_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)]", "indices of global attn is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn) # helper variable is_local_index_global_attn = torch.arange(", "global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim) # reshape global_query_vectors_only_global = (", "global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads,", "(window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) == 2", "not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, -1 )", "tensors using with a sliding window attention pattern. This implementation splits the input", "hidden_states.size(1) // (window_overlap * 2), window_overlap * 2, hidden_states.size(2), ) # use `as_strided`", "global_attn_scores.size(2) == seq_len, \\ f\"global_attn_scores have the wrong size. size(2) should be {seq_len},", "= torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of the non-padding values", "has to be an even value. Given {attention_window}\" assert ( attention_window > 0", "torch.Tensor, key: torch.Tensor, window_overlap: int): \"\"\"Matrix multiplication of query and key tensors using", "`attn_probs`\"\"\" batch_size, seq_len, num_heads, head_dim = value.size() assert seq_len % (window_overlap * 2)", "window_overlap lower # triangles (attention from a word to window_overlap previous words). The", "List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2] # prepare global hidden", "pretrained Longformer) with an overlap of size window_overlap\"\"\" batch_size, seq_len, num_heads, head_dim =", "size. 
size(2) should be {self.head_dim}, \" \\ f\"but is {global_attn_output.size(2)}.\" global_attn_output = global_attn_output.view(", "\" \\ f\"but is {global_attn_output.size(2)}.\" global_attn_output = global_attn_output.view( batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim )", "reshape global_query_vectors_only_global = ( global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) )", ":-1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, :, :window_overlap, : window_overlap + 1 ]", "for attention probs remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] # cast", "global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]] =", "= {self.embed_dim}, but has {embed_dim}\" # normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors =", "global_attn_scores.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices},", "+ 1) columns. The first (window_overlap) columns are the window_overlap lower # triangles", "= input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1,", "\"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked or global attention is_index_masked =", "value for global attention and overwrite to attention output # TODO: remove the", "= torch.einsum(\"blhd,bshd->blhs\", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return", "(0, window_overlap + 1) ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1).", "window_overlap + 1 ) # group batch_size and num_heads dimensions into one value", "head_dim) chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)] chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1],", "shape as `attn_probs`\"\"\" batch_size, seq_len, num_heads, head_dim = value.size() assert seq_len % (window_overlap", "# `== 1` converts to bool or uint8 ending_input = input_tensor[:, -affected_seq_len:, :,", "compute local attention probs from global attention keys and contact over window dim", ") # global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) == batch_size", "# diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask =", "hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] return chunked_hidden_states def _chunk(self, hidden_states, window_overlap:", "has to be positive. Given {attention_window}\" self.one_sided_attn_window_size = attention_window // 2 def forward(", "global attn indices required through out forward fn ret = self._get_global_attn_indices(is_index_global_attn) max_num_global_attn_indices =", "\\ f\"global_attn_scores have the wrong size. 
size(2) should be {self.head_dim}, \" \\ f\"but", "math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads,", "cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id] # 待补充超参数 assert ( attention_window % 2 == 0 ), f\"`attention_window` for", "max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], ): batch_size = attn_probs.shape[0] # cut local attn", "= num_rows = 4 (pad & diagonilize) => [ 0.4983, 2.6918, -0.0071, 1.0492,", "0 ), f\"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}\" self.one_sided_attn_window_size", "an overlap size = window_overlap chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2", "Example: chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206,", "2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600,", "seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one,", "chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads x", "None, None], 0.0) # apply dropout attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors =", "] window_overlap = num_rows = 4 (pad & diagonilize) => [ 0.4983, 2.6918,", "group batch_size and num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size * num_heads,", "0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\"", "0 ), f\"`attention_window` for layer {self.layer_id} has to be an even value. Given", "have the wrong size. 
size(0) should be {batch_size * self.num_heads}, \" \\ f\"but", "torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores have the wrong", "free memory # if self.query.training: # del attn_probs_fp32 # softmax sometimes inserts NaN", "\"heads (%d)\" % (cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) ) self.num_heads = cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] self.head_dim = int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] /", "# attn_probs = (batch_size, seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size", "global_key_attn_scores attn_probs_fp32 = torch.nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32) # use fp32 for numerical stability attn_probs", "(batch_size, seq_len, self.num_heads, self.head_dim), \"Unexpected size\" attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() #", "\"Unexpected size\" attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for global", "(chunked_query, chunked_key)) # multiply # convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores,", "super().__init__() if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise ValueError( \"The hidden size (%d)", "0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim =", "window_overlap: int): \"\"\"Matrix multiplication of query and key tensors using with a sliding", "value. Given {attention_window}\" assert ( attention_window > 0 ), f\"`attention_window` for layer {self.layer_id}", "global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn probs global_attn_probs_float", "x.nonzero().unbind(1) class LongformerSelfAttention(torch.nn.Module): def __init__(self, cfg, layer_id): super().__init__() if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] !=", "return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) def _mask_invalid_locations(self, input_tensor, affected_seq_len: int): beginning_mask_2d = torch.ones(affected_seq_len, affected_seq_len +", "), f\"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}\" self.one_sided_attn_window_size =", "= torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) # apply dropout attn_probs = torch.nn.functional.dropout(attn_probs,", "int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"],", "normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors", "window_overlap) # matrix multipication # bcxd: batch_size * num_heads x chunks x 2window_overlap", "seq_len with w at the beginning of the sequence and another window overlap", "2), window_overlap * 2, hidden_states.size(2), ) # use `as_strided` to make the chunks", "be multiple of `attention_window`. 
Padding to `attention_window` happens in LongformerModel.forward to avoid redoing", "hidden_states_padded.size(1), hidden_states_padded.size(3), hidden_states_padded.size(2) ) return hidden_states_padded def _pad_and_diagonalize(self, chunked_hidden_states): \"\"\"shift every row 1", "= value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) # pad seq_len with w at", "% (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) ==", "self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数 attention_window", "should be {batch_size * self.num_heads}, \" \\ f\"but is {global_attn_output.size(0)}.\" assert global_attn_output.size(1) ==", "chunks x 2window_overlap x head_dim # bcyd: batch_size * num_heads x chunks x", "is_local_index_no_global_attn_nonzero: List[torch.Tensor], is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2] # prepare global hidden states", "diagonal and the upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, :,", "assert global_attn_scores.size(2) == seq_len, \\ f\"global_attn_scores have the wrong size. size(2) should be", "cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数 attention_window = cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id] # 待补充超参数 assert (", "# compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size )", "each word to itself, then followed by window_overlap columns for the upper triangle.", "into overlapping chunks of size 2w (e.g. 
512 for pretrained Longformer) with an", "x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) class LongformerSelfAttention(torch.nn.Module): def __init__(self, cfg, layer_id):", "attn_probs_fp32 # softmax sometimes inserts NaN if all positions are masked, replace them", "-0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000,", "is_global_attn: # compute global attn indices required through out forward fn ret =", "and local attn attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else:", "-0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]", "2window_overlap x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap x", "(attention_mask != 0)[:, :, None, None] # cast to fp32/fp16 then replace 1's", "cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim)", "be {max_num_global_attn_indices}, \" \\ f\"but is {global_attn_output.size(1)}.\" assert global_attn_output.size(2) == self.head_dim, \\ f\"global_attn_scores", "% cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise ValueError( \"The hidden size (%d) is not a", "total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads x num_chunks, window_overlap x", "remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( remove_from_windowed_attention_mask, -10000.0 ) # diagonal mask with zeros everywhere and -inf inplace", "seq_len, num_heads, head_dim = query.size() assert ( seq_len % (window_overlap * 2) ==", "words). The following column is attention # score from each word to itself,", "variable num_global_attn_indices = is_index_global_attn.long().sum(dim=1) # max number of global attn indices in batch", "of the same shape as `attn_probs`\"\"\" batch_size, seq_len, num_heads, head_dim = value.size() assert", "* 2}. 
Given {seq_len}\" assert query.size() == key.size() chunks_count = seq_len // window_overlap", "window_overlap chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride =", "will be overwritten hidden_states_padded = hidden_states_padded.view( hidden_states_padded.size(0), hidden_states_padded.size(1), hidden_states_padded.size(3), hidden_states_padded.size(2) ) return hidden_states_padded", "torch.Tensor, window_overlap: int): \"\"\"Matrix multiplication of query and key tensors using with a", "4 (pad & diagonilize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000", "coding: utf-8 -*- # _*_ conding:utf-8 _*_ # Author : Nick # Time", ".view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads, max_num_global_attn_indices,", "value_vectors[is_index_global_attn_nonzero] # use `matmul` because `einsum` crashes sometimes with fp16 # attn =", ": window_overlap + 1 ] # - copying the lower triangle diagonal_attention_scores[:, 1:,", ":, :, -(window_overlap + 1): -1, window_overlap + 1: ] diagonal_attention_scores[:, 0, 1:window_overlap,", "= hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query,", "is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get only non zero global attn output nonzero_global_attn_output", ":-window_overlap ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks,", "2).reshape(batch_size * num_heads, seq_len, head_dim) # pad seq_len with w at the beginning", "key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) chunked_query = self._chunk(query, window_overlap) chunked_key", "def _get_global_attn_indices(self, is_index_global_attn): \"\"\" compute global attn indices required throughout forward pass \"\"\"", ") return attn_output_only_global + attn_output_without_global def _compute_global_attn_output( self, hidden_states, max_num_global_attn_indices, is_local_index_global_attn_nonzero: List[torch.Tensor], is_index_global_attn_nonzero:", ") # concat to attn_probs # (batch_size, seq_len, num_heads, extra attention count +", "+ 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:, :", "number of global attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of", "2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1)", "== 1, -float(\"inf\")) # `== 1` converts to bool or uint8 ending_input =", "-0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372,", "batch_size and num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len,", "extra attention count + 2*window+1) attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) # free memory", "use `matmul` because `einsum` crashes sometimes with fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs,", "_pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]): 
\"\"\"pads rows and then flips rows", "{global_attn_scores.size(1)}.\" assert global_attn_scores.size(2) == seq_len, \\ f\"global_attn_scores have the wrong size. size(2) should", "dtype=float_mask.dtype, device=float_mask.device), float_mask, self.one_sided_attn_window_size ) # pad local attention probs attn_scores += diagonal_mask", "for global attention and overwrite to attention output # TODO: remove the redundant", "# compute sum of global and local attn attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs,", "window_overlap + 1) ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). #", "but is of size {attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0) is_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_global_attn_nonzero =", "hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors =", "= ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float(\"inf\")) # `== 1` converts to bool or", "torch.tensor(0) is_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local", "with zeros everywhere and -inf inplace of padding diagonal_mask = self._sliding_chunks_query_key_matmul( torch.ones(size=float_mask.size(), dtype=float_mask.dtype,", "probs remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] # cast to fp32/fp16", ") # use `as_strided` to make the chunks overlap with an overlap size", "window_overlap + 1 chunks_count = seq_len // window_overlap - 1 # group batch_size", "attention_window = cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id] # 待补充超参数 assert ( attention_window % 2 == 0 ),", "chunks overlap with an overlap size = window_overlap chunk_size = list(hidden_states.size()) chunk_size[1] =", "values within global attention indices is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn) # location of the padding", "[ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f\"attn_probs should be", "variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of the", "torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global =", "torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) # separate projection layers for tokens with global attention self.query_global =", "* 2 + 1) ) # copy parts from diagonal_chunked_attention_scores into the combined", "# helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location", "ret[0] is_index_global_attn_nonzero = ret[1] is_local_index_global_attn_nonzero = ret[2] is_local_index_no_global_attn_nonzero = ret[3] # calculate global", "diagonilize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672,", "# if self.query.training: # del attn_probs_fp32 # softmax sometimes inserts NaN if all", "num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) #", "positive. 
Given {attention_window}\" self.one_sided_attn_window_size = attention_window // 2 def forward( self, hidden_states, attention_mask", "chunked_hidden_states.view( total_num_heads, num_chunks, -1 ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states =", ") chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value))", "f\"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, \" \\", "context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) def _get_global_attn_indices(self, is_index_global_attn): \"\"\" compute global attn indices", "num_global_attn_indices.unsqueeze(dim=-1) # location of the non-padding values within global attention indices is_local_index_global_attn_nonzero =", "% (cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) ) self.num_heads = cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] self.head_dim = int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim", "attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), \"Unexpected size\"", "the beginning of the sequence and another window overlap at the end padded_value", "is not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, -1", "= ( global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size", "(pad & diagonilize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000,", "affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask =", "value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], ): batch_size = attn_probs.shape[0] # cut", "x chunks x 2window_overlap x window_overlap chunked_attention_scores = torch.einsum(\"bcxd,bcyd->bcxy\", (chunked_query, chunked_key)) # multiply", "torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1,", "global attention value and add if is_global_attn: # compute sum of global and", "seq_len, batch_size = hidden_states.shape[:2] # prepare global hidden states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size,", "diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask = self._sliding_chunks_query_key_matmul(", "# if self.query.training: # del global_key_attn_scores attn_probs_fp32 = torch.nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32) # use", "(batch_size, seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values", "ret[2] is_local_index_no_global_attn_nonzero = ret[3] # calculate global attn probs from global key global_key_attn_scores", "1) return attn_output def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]): \"\"\"pads rows", 
"list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2),", "torch.einsum(\"blhd,bshd->blhs\", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key", "f\"but is {global_attn_output.size(0)}.\" assert global_attn_output.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size.", "& diagonilize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348,", "is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def _compute_attn_output( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero:", "self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim) # reshape global_query_vectors_only_global =", "= torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert global_attn_scores.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores have", "seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 ) # group batch_size", "global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat to", "-*- coding: utf-8 -*- # _*_ conding:utf-8 _*_ # Author : Nick #", "attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert global_attn_scores.size(0) == batch_size * self.num_heads,", "global key global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) #", "_sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be of the", "+ hidden_dim ) # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:,", "* num_heads, seq_len, head_dim) # pad seq_len with w at the beginning of", "Given {seq_len}\" assert query.size() == key.size() chunks_count = seq_len // window_overlap - 1", "seq_len, head_dim) chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multipication", "torch.nn.functional.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) # use fp32 for numerical stability global_attn_probs =", "+ 1): -1, window_overlap + 1: ] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[", "1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :, :, -(window_overlap + 1): -1, window_overlap +", "pad local attention probs attn_scores += diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len,", "0.5372, 0.2629 ] \"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad(", "output only global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) #", "This implementation splits the input into overlapping chunks of size 2w (e.g. 512", "None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len", "{global_attn_output.size(1)}.\" assert global_attn_output.size(2) == self.head_dim, \\ f\"global_attn_scores have the wrong size. size(2) should", "self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) # compute attn scores", "attn_output_only_global + attn_output_without_global def _compute_global_attn_output( self, hidden_states, max_num_global_attn_indices, is_local_index_global_attn_nonzero: List[torch.Tensor], is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero:", "torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap + 1) ) # total_num_heads x num_chunks x window_overlap", "+ 1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float(\"inf\")) # `== 1` converts", "{global_attn_output.size(0)}.\" assert global_attn_output.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size. size(1) should", "1) # compute local attention output with global attention value and add if", "is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2] # prepare global", "attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) # apply dropout attn_probs =", "attn_output_without_global = self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output(", "assert global_attn_scores.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores have the wrong size. 
size(0)", "= global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn probs global_attn_probs_float =", "batch_size = hidden_states.shape[:2] # prepare global hidden states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)", "is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get only non zero global attn output", "* self.num_heads, max_num_global_attn_indices, head_dim) global_key_vectors = ( global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)", "/= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors = key_vectors.view(seq_len, batch_size,", "\"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap", "attentions # - copying the main diagonal and the upper triangle diagonal_attention_scores[:, :-1,", "should be {seq_len}, but is {global_attn_scores.size(2)}.\" global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores[", "of global attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of global", "bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap chunked_attention_scores = torch.einsum(\"bcxd,bcyd->bcxy\",", "= self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim),", "head_dim) chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multipication #", "( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs", "{max_num_global_attn_indices}, \" \\ f\"but is {global_attn_output.size(1)}.\" assert global_attn_output.size(2) == self.head_dim, \\ f\"global_attn_scores have", ") global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn probs", "key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads,", "2 == 0 ), f\"`attention_window` for layer {self.layer_id} has to be an even", "chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)]", "# separate projection layers for tokens with global attention self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim)", "padding diagonal_mask = self._sliding_chunks_query_key_matmul( torch.ones(size=float_mask.size(), dtype=float_mask.dtype, device=float_mask.device), float_mask, self.one_sided_attn_window_size ) # pad local", "* self.num_heads, seq_len, head_dim) global_value_vectors = ( global_value_vectors.contiguous().view(-1, batch_size * 
self.num_heads, self.head_dim).transpose(0, 1)", "{seq_len}\" assert query.size() == key.size() chunks_count = seq_len // window_overlap - 1 #", "layer {self.layer_id} has to be an even value. Given {attention_window}\" assert ( attention_window", "num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x", "1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float(\"inf\")) # `== 1` converts to", "key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum(\"blhd,bshd->blhs\", (query_vectors, key_vectors_only_global))", "# total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap", "columns into diagonals. Example: chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986,", "is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local attention probs from global", "attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value vectors for global only value_vectors_only_global = value_vectors.new_zeros(", "self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim) global_key_vectors = (", "one, then chunk seq_len into chunks of size window_overlap * 2 query =", "should be {self.head_dim}, \" \\ f\"but is {global_attn_output.size(2)}.\" global_attn_output = global_attn_output.view( batch_size, self.num_heads,", "1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for global attention and overwrite to attention", "layer_id # 待补充超参数 attention_window = cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id] # 待补充超参数 assert ( attention_window % 2", "global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert global_attn_scores.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores", "2 + 1) ) # copy parts from diagonal_chunked_attention_scores into the combined matrix", "attn only attn_output = self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size,", ": ] = -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None, :], -10000.0, ) global_attn_scores", "= self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)", "), f\"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}\" assert", "return chunked_hidden_states def _chunk(self, hidden_states, window_overlap: int): \"\"\"convert into overlapping chunkings. 
Chunk size", "global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : ] = -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None,", "# prepare global hidden states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[", ":, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def _compute_attn_output( self, value_vectors, attn_probs,", "seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : ] = -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None,", "compute sum of global and local attn attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices,", "self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), \"Unexpected size\" attn_output =", "of query and key tensors using with a sliding window attention pattern. This", "within global attention indices is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn == 0) return ( max_num_global_attn_indices, is_index_global_attn_nonzero,", "_concat_with_global_key_attn_probs( self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ): batch_size", "num_chunks, -1 ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :,", "+ 1: ] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ :, 0, : window_overlap", "multiply # convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0, 0, 0,", "self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ), f\"hidden_states", "-10000.0 return attn_probs_from_global_key def _compute_attn_output( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor],", "is_local_index_global_attn_nonzero: List[torch.Tensor], is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2] #", ") # group batch_size and num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size", "dtype=torch.float32 ) # use fp32 for numerical stability global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout,", "wrong size. 
size(2) should be {self.head_dim}, \" \\ f\"but is {global_attn_output.size(2)}.\" global_attn_output =", "window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn( self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int ):", "# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcxy:", "value_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] # use `matmul` because", "{global_attn_scores.size(0)}.\" assert global_attn_scores.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size. size(1) should", "{self.layer_id} has to be an even value. Given {attention_window}\" assert ( attention_window >", "is_global_attn: global_attn_output = self._compute_global_attn_output( hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get", "overlap at the end padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1.0) #", "self.num_heads, max_num_global_attn_indices, head_dim) global_key_vectors = ( global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) )", "window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for", "= remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( remove_from_windowed_attention_mask, -10000.0 ) # diagonal mask with zeros everywhere and -inf", "# del attn_probs_fp32 # softmax sometimes inserts NaN if all positions are masked,", "value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), \"Unexpected size\" attn_output", "= global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]]", "num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) # copy", "window_overlap: ] # separate batch_size and num_heads dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size,", "torch.Tensor, value: torch.Tensor, window_overlap: int ): \"\"\"Same as _sliding_chunks_query_key_matmul but for attn_probs and", "hidden_states.size() assert ( embed_dim == self.embed_dim ), f\"hidden_states should have embed_dim = {self.embed_dim},", "if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) class LongformerSelfAttention(torch.nn.Module): def __init__(self, cfg,", "None], 0.0) # apply dropout attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len,", "0, 0, 1) ) # allocate space for the overall attention matrix where", "= nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) attn_output = attn_output.transpose(0, 1) return attn_output def _pad_and_transpose_last_two_dims(self,", "): \"\"\"Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will", "is {global_attn_scores.size(2)}.\" global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :", "= (attention_mask != 0)[:, :, None, None] # cast to fp32/fp16 then replace", "hidden_dim = chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap + 1) ) #", "is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def _compute_attn_output( self, value_vectors,", "self.num_heads}, \" \\ f\"but is {global_attn_scores.size(0)}.\" assert global_attn_scores.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have", ": window_overlap - 1, 1 - window_overlap: ] # separate batch_size and num_heads", "expects `len(hidden_states)` to be multiple of `attention_window`. Padding to `attention_window` happens in LongformerModel.forward", "= cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] self.head_dim = int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"],", "global attn_output_without_global = self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def", "p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # compute local attention", "{self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}\"", "List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ): batch_size = key_vectors.shape[0] # create only global key vectors", "`as_strided` to make the chunks overlap with an overlap size = window_overlap chunk_size", "* self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim) global_key_vectors", "has {embed_dim}\" # normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads,", "max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat to attn_probs # (batch_size, seq_len, num_heads,", "\\ f\"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, \"", "= self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs", "attn_probs.size(-1) - max_num_global_attn_indices ).contiguous() # compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn(", "\"\"\"Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. 
Returned tensor will be", "device=float_mask.device), float_mask, self.one_sided_attn_window_size ) # pad local attention probs attn_scores += diagonal_mask assert", "uint8 ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask", "] # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors", "value tensors. Returned tensor will be of the same shape as `attn_probs`\"\"\" batch_size,", "(batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride = [padded_value.stride(0),", "is_index_masked[:, :, None, None], 0.0) # apply dropout attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)", ": affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\")) # `==", "use fp32 for numerical stability global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) #", "(attention from a word to window_overlap previous words). The following column is attention", "# bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap chunked_attention_scores =", "= torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id", "2) def _get_global_attn_indices(self, is_index_global_attn): \"\"\" compute global attn indices required throughout forward pass", "_chunk(self, hidden_states, window_overlap: int): \"\"\"convert into overlapping chunkings. Chunk size = 2w, overlap", ":, :, :-window_overlap ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view(", "chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context =", "num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim )", "int): \"\"\"convert into overlapping chunkings. 
Chunk size = 2w, overlap size = w\"\"\"", "assert ( attention_window > 0 ), f\"`attention_window` for layer {self.layer_id} has to be", "attn_probs = attn_probs_fp32.type_as(attn_scores) # free memory # if self.query.training: # del attn_probs_fp32 #", "masked or global attention is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask >", "self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert ( embed_dim ==", "1, 3 * window_overlap, head_dim) chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)] chunked_value_stride = (", "index masked or global attention is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask", "in `BertModel.forward` from 0, 1, 2 to -ve: no attention 0: local attention", "self.num_heads, self.head_dim).transpose(0, 1) # attn_probs = (batch_size, seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul(", "[padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)] chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], )", "right, converting columns into diagonals. Example: chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348,", "chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs =", "to window_overlap previous words). The following column is attention # score from each", "= (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride =", "0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()", "window_overlap:] = diagonal_chunked_attention_scores[ :, :, :window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1,", "only value_vectors_only_global = value_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] #", "wrong size. size(1) should be {max_num_global_attn_indices}, \" \\ f\"but is {global_attn_scores.size(1)}.\" assert global_attn_scores.size(2)", "* num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) #", "query.size() assert ( seq_len % (window_overlap * 2) == 0 ), f\"Sequence length", "\\ f\"global_attn_scores have the wrong size. 
size(2) should be {seq_len}, but is {global_attn_scores.size(2)}.\"", "0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629 ] window_overlap =", "attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() hidden_states = hidden_states.transpose(0, 1) # project hidden", "probs attn_scores += diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size *", "= [torch.tensor(0)] is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local attention probs", "affected_seq_len: int): beginning_mask_2d = torch.ones(affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None,", "self.num_heads, seq_len, head_dim) # compute attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert", "self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim) # reshape global_query_vectors_only_global = ( global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices,", "for tokens with global attention self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim)", "typing import List, Tuple import torch import math def nonzero_tuple(x): if x.dim() ==", "window_overlap: int): \"\"\"convert into overlapping chunkings. Chunk size = 2w, overlap size =", ":, :, :window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] =", "* self.num_heads}, \" \\ f\"but is {global_attn_scores.size(0)}.\" assert global_attn_scores.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores", "input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float(\"inf\"))", "{seq_len}, but is {global_attn_scores.size(2)}.\" global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :,", "\" \\ f\"but is {global_attn_scores.size(1)}.\" assert global_attn_scores.size(2) == seq_len, \\ f\"global_attn_scores have the", "attn_output = self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads,", "_mask_invalid_locations(self, input_tensor, affected_seq_len: int): beginning_mask_2d = torch.ones(affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask", "-10000.0 ) # diagonal mask with zeros everywhere and -inf inplace of padding", "and another window overlap at the end padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap,", "seq_len, num_heads, window*2+1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to", "x window_overlap chunked_attention_scores = torch.einsum(\"bcxd,bcyd->bcxy\", (chunked_query, chunked_key)) # multiply # convert diagonals into", "* window_overlap + 1 ) # group batch_size and num_heads dimensions into one", "sliding window attention pattern. 
This implementation splits the input into overlapping chunks of", "first (window_overlap) columns are the window_overlap lower # triangles (attention from a word", "wrong size. size(2) should be {seq_len}, but is {global_attn_scores.size(2)}.\" global_attn_scores = global_attn_scores.view(batch_size, self.num_heads,", "an overlap of size window overlap chunked_value_size = (batch_size * num_heads, chunks_count +", "# compute local attn only attn_output = self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert", "global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores have", "head_dim) # compute attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert global_attn_scores.size(0) ==", "`einsum` crashes sometimes with fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute", "is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat to attn_probs # (batch_size, seq_len, num_heads, extra", "padded_value.stride(2)] chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value =", "`attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer. The", "use `as_strided` to make the chunks overlap with an overlap size = window_overlap", "ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]", "torch.ones(affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask", "hidden_dim ) # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:, :,", "to itself, then followed by window_overlap columns for the upper triangle. 
diagonal_attention_scores =", "concat to attn_probs # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores", "query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors = key_vectors.view(seq_len,", "self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ): batch_size =", "* num_heads x chunks x 2window_overlap x head_dim # bcyd: batch_size * num_heads", "= chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) def _mask_invalid_locations(self, input_tensor, affected_seq_len: int): beginning_mask_2d", "seq_len % (window_overlap * 2) == 0 ), f\"Sequence length should be multiple", "* window_overlap + 1 chunks_count = seq_len // window_overlap - 1 # group", "): seq_len, batch_size = hidden_states.shape[:2] # prepare global hidden states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices,", "states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size, embed_dim", "global_value_vectors) assert global_attn_output.size(0) == batch_size * self.num_heads, \\ f\"global_attn_scores have the wrong size.", "remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] # cast to fp32/fp16 then", "attn is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn) # helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device )", "should be {max_num_global_attn_indices}, \" \\ f\"but is {global_attn_output.size(1)}.\" assert global_attn_output.size(2) == self.head_dim, \\", "__init__(self, cfg, layer_id): super().__init__() if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise ValueError( \"The", "= chunk_size[1] * 2 - 1 chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)] chunk_stride[1]", "self.one_sided_attn_window_size = attention_window // 2 def forward( self, hidden_states, attention_mask ): \"\"\" LongformerSelfAttention", "self._chunk(key, window_overlap) # matrix multipication # bcxd: batch_size * num_heads x chunks x", "0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000,", "hidden_states, window_overlap: int): \"\"\"convert into overlapping chunkings. 
Chunk size = 2w, overlap size", "< num_global_attn_indices.unsqueeze(dim=-1) # location of the non-padding values within global attention indices is_local_index_global_attn_nonzero", "batch_size, embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ), f\"hidden_states should have", "of global and local attn attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,", "0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599,", "2w, overlap size = w\"\"\" # non-overlapping chunks of size = 2w hidden_states", "from each word to itself, then followed by window_overlap columns for the upper", "del global_key_attn_scores attn_probs_fp32 = torch.nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32) # use fp32 for numerical stability", "attn output only global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2)", "f\"{self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0)", "== 0) return ( max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) def _concat_with_global_key_attn_probs( self, key_vectors,", "converting columns into diagonals. Example: chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672,", "Tuple[int, int, int, int]): \"\"\"pads rows and then flips rows and columns\"\"\" hidden_states_padded", "total_num_heads, num_chunks, -1 ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[", "attn probs to global only attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value", "# multiply # convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0, 0,", "nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) attn_output = attn_output.transpose(0, 1) return attn_output def _pad_and_transpose_last_two_dims(self, hidden_states_padded,", "global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert", "window_overlap\"\"\" batch_size, seq_len, num_heads, head_dim = query.size() assert ( seq_len % (window_overlap *", "= diagonal_chunked_attention_scores[ :, 0, : window_overlap - 1, 1 - window_overlap: ] #", "indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of global attn is_index_global_attn_nonzero =", ") # use fp32 for numerical stability global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training", "2 + 1, ], f\"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, \"", "dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input", "if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise 
ValueError( \"The hidden size (%d) is", "w at the beginning of the sequence and another window overlap at the", "of the padding values within global attention indices is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn == 0)", "x 2window_overlap x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap", "< 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() hidden_states = hidden_states.transpose(0,", "last dimension # has (window_overlap * 2 + 1) columns. The first (window_overlap)", "self, hidden_states, max_num_global_attn_indices, is_local_index_global_attn_nonzero: List[torch.Tensor], is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], is_index_masked, ): seq_len, batch_size", "is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], ): batch_size = attn_probs.shape[0] # cut local attn probs", "# (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores = torch.cat((global_key_attn_scores, attn_scores),", "-float(\"inf\")) # `== 1` converts to bool or uint8 def _sliding_chunks_query_key_matmul(self, query: torch.Tensor,", "global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global attn output global_attn_output =", "seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f\"attn_probs should be of size", "0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ :, 0, : window_overlap - 1, 1 -", "-ve: no attention 0: local attention +ve: global attention \"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1)", "= ret[3] # calculate global attn probs from global key global_key_attn_scores = self._concat_with_global_key_attn_probs(", "= hidden_states.view( hidden_states.size(0), hidden_states.size(1) // (window_overlap * 2), window_overlap * 2, hidden_states.size(2), )", "1, window_overlap, window_overlap * 2 + 1) ) # copy parts from diagonal_chunked_attention_scores", "local attention +ve: global attention \"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked", "0 ), f\"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}\"", "self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout", "hidden_states_padded, padding: Tuple[int, int, int, int]): \"\"\"pads rows and then flips rows and", "all positions are masked, replace them with 0 attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :,", "self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) # compute attn", "Given {attention_window}\" assert ( attention_window > 0 ), f\"`attention_window` for layer {self.layer_id} has", "be positive. 
# -*- coding: utf-8 -*-
# _*_ coding:utf-8 _*_
# Author : Nick
# Time : 2020/9/15
import math
from typing import List, Tuple

import torch


def nonzero_tuple(x):
    if x.dim() == 0:
        return x.unsqueeze(0).nonzero().unbind(1)
    return x.nonzero().unbind(1)


class LongformerSelfAttention(torch.nn.Module):
    def __init__(self, cfg, layer_id):
        super().__init__()
        if cfg["CONFIG"]["HIDDEN_SIZE"] % cfg["CONFIG"]["NUM_ATTENTION_HEADS"] != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (cfg["CONFIG"]["HIDDEN_SIZE"], cfg["CONFIG"]["NUM_ATTENTION_HEADS"])
            )
        self.num_heads = cfg["CONFIG"]["NUM_ATTENTION_HEADS"]
        self.head_dim = int(cfg["CONFIG"]["HIDDEN_SIZE"] / cfg["CONFIG"]["NUM_ATTENTION_HEADS"])
        self.embed_dim = cfg["CONFIG"]["HIDDEN_SIZE"]

        self.query = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
        self.key = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
        self.value = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)

        # separate projection layers for tokens with global attention
        self.query_global = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
        self.key_global = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
        self.value_global = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)

        self.dropout = cfg["CONFIG"]["ATTENTION_PROBS_DROPOUT_PROB"]
        self.layer_id = layer_id

        # hyperparameter still to be filled in
        attention_window = cfg["CONFIG"]["ATTENTION_WINDOW"][self.layer_id]
        assert (
            attention_window % 2 == 0
        ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
        assert (
            attention_window > 0
        ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
        self.one_sided_attn_window_size = attention_window // 2

    def forward(self, hidden_states, attention_mask):
        """
        LongformerSelfAttention expects `len(hidden_states)` to be a multiple of `attention_window`.
        Padding to `attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer.
        The `attention_mask` is changed in `BertModel.forward` from 0, 1, 2 to
            -ve: no attention
              0: local attention
            +ve: global attention
        """
        attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1)

        # is index masked or global attention
        is_index_masked = attention_mask < 0
        is_index_global_attn = attention_mask > 0
        is_global_attn = is_index_global_attn.flatten().any().item()

        hidden_states = hidden_states.transpose(0, 1)

        # project hidden states
        query_vectors = self.query(hidden_states)
        key_vectors = self.key(hidden_states)
        value_vectors = self.value(hidden_states)

        seq_len, batch_size, embed_dim = hidden_states.size()
        assert (
            embed_dim == self.embed_dim
        ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"

        # normalize query
        query_vectors /= math.sqrt(self.head_dim)

        query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
        key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)

        # attn_probs = (batch_size, seq_len, num_heads, window*2+1)
        attn_scores = self._sliding_chunks_query_key_matmul(
            query_vectors, key_vectors, self.one_sided_attn_window_size
        )

        # values to pad for attention probs
        remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]

        # cast to fp32/fp16 then replace 1's with -inf
        float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
            remove_from_windowed_attention_mask, -10000.0
        )
        # diagonal mask with zeros everywhere and -inf inplace of padding
        diagonal_mask = self._sliding_chunks_query_key_matmul(
            torch.ones(size=float_mask.size(), dtype=float_mask.dtype, device=float_mask.device),
            float_mask,
            self.one_sided_attn_window_size,
        )

        # pad local attention probs
        attn_scores += diagonal_mask

        assert list(attn_scores.size()) == [
            batch_size,
            seq_len,
            self.num_heads,
            self.one_sided_attn_window_size * 2 + 1,
        ], f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, " \
           f"{self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"

        max_num_global_attn_indices = torch.tensor(0)
        is_index_global_attn_nonzero = [torch.tensor(0)]
        is_local_index_global_attn_nonzero = [torch.tensor(0)]
        is_local_index_no_global_attn_nonzero = [torch.tensor(0)]

        # compute local attention probs from global attention keys and concat over window dim
        if is_global_attn:
            # compute global attn indices required throughout the forward pass
            ret = self._get_global_attn_indices(is_index_global_attn)
            max_num_global_attn_indices = ret[0]
            is_index_global_attn_nonzero = ret[1]
            is_local_index_global_attn_nonzero = ret[2]
            is_local_index_no_global_attn_nonzero = ret[3]
            # calculate global attn probs from global key
            global_key_attn_scores = self._concat_with_global_key_attn_probs(
                query_vectors=query_vectors,
                key_vectors=key_vectors,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
            )
            # concat to attn_probs
            # (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
            attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)

            # free memory
            # if self.query.training:
            #     del global_key_attn_scores

        attn_probs_fp32 = torch.nn.functional.softmax(attn_scores, dim=-1, dtype=torch.float32)  # use fp32 for numerical stability
        attn_probs = attn_probs_fp32.type_as(attn_scores)

        # free memory
        # if self.query.training:
        #     del attn_probs_fp32

        # softmax sometimes inserts NaN if all positions are masked, replace them with 0
        attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)

        # apply dropout
        attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)

        value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)

        # compute local attention output with global attention value and add
        if is_global_attn:
            # compute sum of global and local attn
            attn_output = self._compute_attn_output(
                value_vectors=value_vectors,
                attn_probs=attn_probs,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
            )
        else:
            # compute local attn only
            attn_output = self._sliding_chunks_matmul_attn(
                attn_probs, value_vectors, self.one_sided_attn_window_size
            )

        assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
        attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()

        # compute value for global attention and overwrite to attention output
        # TODO: remove the redundant computation
        if is_global_attn:
            global_attn_output = self._compute_global_attn_output(
                hidden_states=hidden_states,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
                is_index_masked=is_index_masked,
            )
            # get only non zero global attn output
            nonzero_global_attn_output = global_attn_output[
                is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
            ]
            # overwrite values with global attention
            attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
                len(is_local_index_global_attn_nonzero[0]), -1
            )

        attn_output = attn_output.transpose(0, 1)
        return attn_output

    def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]):
        """pads rows and then flips rows and columns"""
        hidden_states_padded = torch.nn.functional.pad(
            hidden_states_padded, padding
        )  # padding value is not important because it will be overwritten
        hidden_states_padded = hidden_states_padded.view(
            *hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
        )
        return hidden_states_padded

    def _pad_and_diagonalize(self, chunked_hidden_states):
        """shift every row 1 step right, converting columns into diagonals.
        Example:
            chunked_hidden_states: [ 0.4983,  2.6918, -0.0071,  1.0492,
                                    -1.8348,  0.7672,  0.2986,  0.0285,
                                    -0.7584,  0.4206, -0.0405,  0.1599,
                                     2.0514, -1.1600,  0.5372,  0.2629 ]
            window_overlap = num_rows = 4
            (pad & diagonalize) =>
                                   [ 0.4983,  2.6918, -0.0071,  1.0492,  0.0000,  0.0000,  0.0000
                                     0.0000, -1.8348,  0.7672,  0.2986,  0.0285,  0.0000,  0.0000
                                     0.0000,  0.0000, -0.7584,  0.4206, -0.0405,  0.1599,  0.0000
                                     0.0000,  0.0000,  0.0000,  2.0514, -1.1600,  0.5372,  0.2629 ]
        """
        total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
        chunked_hidden_states = torch.nn.functional.pad(
            chunked_hidden_states, (0, window_overlap + 1)
        )  # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1).
        # Padding value is not important because it'll be overwritten
        chunked_hidden_states = chunked_hidden_states.view(
            total_num_heads, num_chunks, -1
        )  # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
        chunked_hidden_states = chunked_hidden_states[
            :, :, :-window_overlap
        ]  # total_num_heads x num_chunks x window_overlap*window_overlap
        chunked_hidden_states = chunked_hidden_states.view(
            total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
        )  # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap
        chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
        return chunked_hidden_states

    def _chunk(self, hidden_states, window_overlap: int):
        """convert into overlapping chunkings. Chunk size = 2w, overlap size = w"""
        # non-overlapping chunks of size = 2w
        hidden_states = hidden_states.view(
            hidden_states.size(0),
            hidden_states.size(1) // (window_overlap * 2),
            window_overlap * 2,
            hidden_states.size(2),
        )
        # use `as_strided` to make the chunks overlap with an overlap size = window_overlap
        chunk_size = list(hidden_states.size())
        chunk_size[1] = chunk_size[1] * 2 - 1
        chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)]
        chunk_stride[1] = chunk_stride[1] // 2
        return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)

    def _mask_invalid_locations(self, input_tensor, affected_seq_len: int):
        beginning_mask_2d = torch.ones(
            affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device
        ).tril().flip(dims=[0])
        beginning_mask = beginning_mask_2d[None, :, None, :]
        ending_mask = beginning_mask.flip(dims=(1, 3))
        beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
        beginning_mask = beginning_mask.expand(beginning_input.size())
        beginning_input.masked_fill_(beginning_mask == 1, -float("inf"))  # `== 1` converts to bool or uint8
        ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):]
        ending_mask = ending_mask.expand(ending_input.size())
        ending_input.masked_fill_(ending_mask == 1, -float("inf"))  # `== 1` converts to bool or uint8

    def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
        """Matrix multiplication of query and key tensors using a sliding window attention pattern.
        This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer)
        with an overlap of size window_overlap"""
        batch_size, seq_len, num_heads, head_dim = query.size()
        assert (
            seq_len % (window_overlap * 2) == 0
        ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
        assert query.size() == key.size()

        chunks_count = seq_len // window_overlap - 1

        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
        query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
        key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)

        chunked_query = self._chunk(query, window_overlap)
        chunked_key = self._chunk(key, window_overlap)

        # matrix multiplication
        # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap
        chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key))  # multiply

        # convert diagonals into columns
        diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
            chunked_attention_scores, padding=(0, 0, 0, 1)
        )

        # allocate space for the overall attention matrix where the chunks are combined. The last dimension
        # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower
        # triangles (attention from a word to window_overlap previous words). The following column is attention
        # score from each word to itself, then followed by window_overlap columns for the upper triangle.
        diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
            (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
        )

        # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
        # - copying the main diagonal and the upper triangle
        diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
            :, :, :window_overlap, : window_overlap + 1
        ]
        diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
            :, -1, window_overlap:, : window_overlap + 1
        ]
        # - copying the lower triangle
        diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
            :, :, -(window_overlap + 1): -1, window_overlap + 1:
        ]
        diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
            :, 0, : window_overlap - 1, 1 - window_overlap:
        ]

        # separate batch_size and num_heads dimensions again
        diagonal_attention_scores = diagonal_attention_scores.view(
            batch_size, num_heads, seq_len, 2 * window_overlap + 1
        ).transpose(2, 1)

        self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
        return diagonal_attention_scores

    def _sliding_chunks_matmul_attn(self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int):
        """Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors.
        Returned tensor will be of the same shape as `attn_probs`"""
        batch_size, seq_len, num_heads, head_dim = value.size()
        assert seq_len % (window_overlap * 2) == 0
        assert attn_probs.size()[:3] == value.size()[:3]
        assert attn_probs.size(3) == 2 * window_overlap + 1

        chunks_count = seq_len // window_overlap - 1
        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
        chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
            batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
        )

        # group batch_size and num_heads dimensions into one
        value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)

        # pad seq_len with w at the beginning of the sequence and another window overlap at the end
        padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1.0)

        # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
        chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
        chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)]
        chunked_value_stride = (
            chunked_value_stride[0],
            window_overlap * chunked_value_stride[1],
            chunked_value_stride[1],
            chunked_value_stride[2],
        )
        chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)

        chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)

        context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
        return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)

    def _get_global_attn_indices(self, is_index_global_attn):
        """compute global attn indices required throughout forward pass"""
        # helper variable
        num_global_attn_indices = is_index_global_attn.long().sum(dim=1)

        # max number of global attn indices in batch
        max_num_global_attn_indices = num_global_attn_indices.max()

        # indices of global attn
        is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn)

        # helper variable
        is_local_index_global_attn = torch.arange(
            max_num_global_attn_indices, device=is_index_global_attn.device
        ) < num_global_attn_indices.unsqueeze(dim=-1)

        # location of the non-padding values within global attention indices
        is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn)

        # location of the padding values within global attention indices
        is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn == 0)
        return (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        )

    def _concat_with_global_key_attn_probs(
        self,
        key_vectors,
        query_vectors,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_no_global_attn_nonzero: List[torch.Tensor],
    ):
        batch_size = key_vectors.shape[0]

        # create only global key vectors
        key_vectors_only_global = key_vectors.new_zeros(
            batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
        )
        key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]

        # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
        attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
        attn_probs_from_global_key[
            is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
        ] = -10000.0
        return attn_probs_from_global_key

    def _compute_attn_output(
        self,
        value_vectors,
        attn_probs,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_global_attn_nonzero: List[torch.Tensor],
    ):
        batch_size = attn_probs.shape[0]

        # cut local attn probs to global only
        attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
        # get value vectors for global only
        value_vectors_only_global = value_vectors.new_zeros(
            batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
        )
        value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]

        # use `matmul` because `einsum` crashes sometimes with fp16
        # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
        # compute attn output only global
        attn_output_only_global = torch.matmul(
            attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2)
        ).transpose(1, 2)

        # reshape attn probs
        attn_probs_without_global = attn_probs.narrow(
            -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
        ).contiguous()

        # compute attn output with global
        attn_output_without_global = self._sliding_chunks_matmul_attn(
            attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
        )
        return attn_output_only_global + attn_output_without_global

    def _compute_global_attn_output(
        self,
        hidden_states,
        max_num_global_attn_indices,
        is_local_index_global_attn_nonzero: List[torch.Tensor],
        is_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_no_global_attn_nonzero: List[torch.Tensor],
        is_index_masked,
    ):
        seq_len, batch_size = hidden_states.shape[:2]

        # prepare global hidden states
        global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
        global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
            is_index_global_attn_nonzero[::-1]
        ]

        # global key, query, value
        global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
        global_key_vectors = self.key_global(hidden_states)
        global_value_vectors = self.value_global(hidden_states)

        # normalize
        global_query_vectors_only_global /= math.sqrt(self.head_dim)

        # reshape
        global_query_vectors_only_global = (
            global_query_vectors_only_global.contiguous()
            .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )  # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
        global_key_vectors = (
            global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)
        global_value_vectors = (
            global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)

        # compute attn scores
        global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))

        assert global_attn_scores.size(0) == batch_size * self.num_heads, \
            f"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, " \
            f"but is {global_attn_scores.size(0)}."
        assert global_attn_scores.size(1) == max_num_global_attn_indices, \
            f"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, " \
            f"but is {global_attn_scores.size(1)}."
        assert global_attn_scores.size(2) == seq_len, \
            f"global_attn_scores have the wrong size. size(2) should be {seq_len}, but is {global_attn_scores.size(2)}."

        global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)

        global_attn_scores[
            is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
        ] = -10000.0

        global_attn_scores = global_attn_scores.masked_fill(
            is_index_masked[:, None, None, :],
            -10000.0,
        )

        global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)

        # compute global attn probs
        global_attn_probs_float = torch.nn.functional.softmax(
            global_attn_scores, dim=-1, dtype=torch.float32
        )  # use fp32 for numerical stability

        global_attn_probs = torch.nn.functional.dropout(
            global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
        )

        # global attn output
        global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)

        assert global_attn_output.size(0) == batch_size * self.num_heads, \
            f"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, " \
            f"but is {global_attn_output.size(0)}."
        assert global_attn_output.size(1) == max_num_global_attn_indices, \
            f"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, " \
            f"but is {global_attn_output.size(1)}."
        assert global_attn_output.size(2) == self.head_dim, \
            f"global_attn_scores have the wrong size. size(2) should be {self.head_dim}, " \
            f"but is {global_attn_output.size(2)}."

        global_attn_output = global_attn_output.view(
            batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
        )
        return global_attn_output
Padding to", "key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # attn_probs = (batch_size, seq_len, num_heads,", "0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600,", "combined. The last dimension # has (window_overlap * 2 + 1) columns. The", "attn attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: # compute", "affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\")) # `== 1`", "2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states", "score from each word to itself, then followed by window_overlap columns for the", "chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads x num_chunks, window_overlap", "window_overlap = num_rows = 4 (pad & diagonilize) => [ 0.4983, 2.6918, -0.0071,", "attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int ): \"\"\"Same as _sliding_chunks_query_key_matmul but for attn_probs", "max_num_global_attn_indices, head_dim) global_key_vectors = ( global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) #", "to attention output # TODO: remove the redundant computation if is_global_attn: global_attn_output =", "diagonal_chunked_attention_scores[ :, 0, : window_overlap - 1, 1 - window_overlap: ] # separate", "batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)", "== batch_size * self.num_heads, \\ f\"global_attn_scores have the wrong size. size(0) should be", "{self.layer_id} has to be positive. Given {attention_window}\" self.one_sided_attn_window_size = attention_window // 2 def", "size. size(1) should be {max_num_global_attn_indices}, \" \\ f\"but is {global_attn_scores.size(1)}.\" assert global_attn_scores.size(2) ==", "have the wrong size. 
size(2) should be {seq_len}, but is {global_attn_scores.size(2)}.\" global_attn_scores =", "+ 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1,", "self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask = (attention_mask !=", "device=input_tensor.device).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input =", "dtype=torch.float32) # use fp32 for numerical stability attn_probs = attn_probs_fp32.type_as(attn_scores) # free memory", "attention self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim)", "chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum(\"bcwd,bcdh->bcwh\",", "1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629", "size = 2w, overlap size = w\"\"\" # non-overlapping chunks of size =", "\"\"\"Matrix multiplication of query and key tensors using with a sliding window attention", "sometimes inserts NaN if all positions are masked, replace them with 0 attn_probs", "padding: Tuple[int, int, int, int]): \"\"\"pads rows and then flips rows and columns\"\"\"", "torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) def _get_global_attn_indices(self, is_index_global_attn): \"\"\"", "`BertModel.forward` from 0, 1, 2 to -ve: no attention 0: local attention +ve:", "with 0 attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) # apply dropout", "of size window overlap chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3", "= chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads x num_chunks,", "* 2), window_overlap * 2, hidden_states.size(2), ) # use `as_strided` to make the", "fp32 for numerical stability global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global", "attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) == batch_size * self.num_heads, \\", "fn ret = self._get_global_attn_indices(is_index_global_attn) max_num_global_attn_indices = ret[0] is_index_global_attn_nonzero = ret[1] is_local_index_global_attn_nonzero = ret[2]", "attention_window > 0 ), f\"`attention_window` for layer {self.layer_id} has to be positive. 
Given", "main diagonal and the upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ :,", "window overlap chunked_attn_probs = attn_probs.transpose(1, 2).reshape( batch_size * num_heads, seq_len // window_overlap, window_overlap,", "\\ f\"but is {global_attn_output.size(2)}.\" global_attn_output = global_attn_output.view( batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim ) return", "= [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local attention probs from global attention", "# batch_size * self.num_heads, seq_len, head_dim) global_value_vectors = ( global_value_vectors.contiguous().view(-1, batch_size * self.num_heads,", ":, :window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[", "is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of the non-padding", "* num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride = [padded_value.stride(0), padded_value.stride(1),", "bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcyd: batch_size", "with fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only", "triangles (attention from a word to window_overlap previous words). The following column is", "overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) attn_output =", "with global attn_output_without_global = self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global", ") # diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask", "the input into overlapping chunks of size 2w (e.g. 
512 for pretrained Longformer)", "value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global", "self.num_heads}, \" \\ f\"but is {global_attn_output.size(0)}.\" assert global_attn_output.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have", "attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only global attn_output_only_global =", "max_num_global_attn_indices, seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : ] = -10000.0 global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:,", "batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f\"attn_probs should be of", "get value vectors for global only value_vectors_only_global = value_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim", "self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask", "chunks of size = 2w hidden_states = hidden_states.view( hidden_states.size(0), hidden_states.size(1) // (window_overlap *", "window_overlap), value=-1.0) # chunk padded_value into chunks of size 3 window overlap and", "], f\"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size *", "attention and overwrite to attention output # TODO: remove the redundant computation if", "# normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)", "= diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 +", "def _pad_and_diagonalize(self, chunked_hidden_states): \"\"\"shift every row 1 step right, converting columns into diagonals.", "diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0, 0, 0, 1) ) #", ") self.num_heads = cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] self.head_dim = int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query", "chunked_key)) # multiply # convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0,", "query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask =", "torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数 attention_window = cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id]", "diagonal_attention_scores def _sliding_chunks_matmul_attn( self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int ): \"\"\"Same as", "local attn attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: #", "but has {embed_dim}\" # normalize query 
query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size,", "chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap + 1) ) # total_num_heads x num_chunks", "self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output( self, hidden_states,", "Tuple import torch import math def nonzero_tuple(x): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1)", ").transpose(1, 2) # reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) -", "# _*_ conding:utf-8 _*_ # Author : Nick # Time : 2020/9/15 3:21", "= beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\")) # `== 1` converts to bool or", "* 2 + 1}), but is of size {attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0) is_index_global_attn_nonzero", "Author : Nick # Time : 2020/9/15 3:21 下午 from typing import List,", "and value tensors. Returned tensor will be of the same shape as `attn_probs`\"\"\"", "size {attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0) is_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero =", "# global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) == batch_size *", "attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) #", "self, hidden_states, attention_mask ): \"\"\" LongformerSelfAttention expects `len(hidden_states)` to be multiple of `attention_window`.", "self.num_heads, self.head_dim).transpose(0, 1) # compute local attention output with global attention value and", "size 3 window overlap and an overlap of size window overlap chunked_value_size =", "is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item()", "int, int, int]): \"\"\"pads rows and then flips rows and columns\"\"\" hidden_states_padded =", "1 - window_overlap: ] # separate batch_size and num_heads dimensions again diagonal_attention_scores =", "(query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def", "# use `matmul` because `einsum` crashes sometimes with fp16 # attn = torch.einsum('blhs,bshd->blhd',", "batch_size * num_heads x chunks x 2window_overlap x window_overlap chunked_attention_scores = torch.einsum(\"bcxd,bcyd->bcxy\", (chunked_query,", "self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim) #", "size = w\"\"\" # non-overlapping chunks of size = 2w hidden_states = hidden_states.view(", "3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size())", "# pad seq_len with w at the beginning of the sequence and another", "x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap x 
window_overlap", "of padding diagonal_mask = self._sliding_chunks_query_key_matmul( torch.ones(size=float_mask.size(), dtype=float_mask.dtype, device=float_mask.device), float_mask, self.one_sided_attn_window_size ) # pad", "= self._chunk(key, window_overlap) # matrix multipication # bcxd: batch_size * num_heads x chunks", "(batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum(\"blhd,bshd->blhs\", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :,", "numerical stability global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global attn output", "1}), but is of size {attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0) is_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_global_attn_nonzero", "f\"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}\" # normalize query query_vectors", "window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads", "0.5372, 0.2629 ] window_overlap = num_rows = 4 (pad & diagonilize) => [", "pass \"\"\" # helper variable num_global_attn_indices = is_index_global_attn.long().sum(dim=1) # max number of global", "dim=-1, dtype=torch.float32) # use fp32 for numerical stability attn_probs = attn_probs_fp32.type_as(attn_scores) # free", ") # allocate space for the overall attention matrix where the chunks are", "# padding value is not important because it will be overwritten hidden_states_padded =", "-1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000", "attention value and add if is_global_attn: # compute sum of global and local", "-1 ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :,", "head_dim) global_value_vectors = ( global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size", "upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, :, :window_overlap, : window_overlap", "list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f\"attn_probs", "max_num_global_attn_indices ).contiguous() # compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors,", "attention \"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked or global attention is_index_masked", "into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0, 0, 0, 1) ) # allocate", "softmax sometimes inserts NaN if all positions are masked, replace them with 0", "= [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)] chunk_stride[1] = chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)", "-10000.0, ) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn", "local attention probs from global attention keys and contact over window dim if", "-inf inplace of padding diagonal_mask = 
self._sliding_chunks_query_key_matmul( torch.ones(size=float_mask.size(), dtype=float_mask.dtype, device=float_mask.device), float_mask, self.one_sided_attn_window_size )", "# Padding value is not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view(", "have embed_dim = {self.embed_dim}, but has {embed_dim}\" # normalize query query_vectors /= math.sqrt(self.head_dim)", "1, ], f\"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size", "window dim if is_global_attn: # compute global attn indices required through out forward", "attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global", "of size ({batch_size}, {seq_len}, {self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size * 2 + 1}), but", "chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)] chunked_value_stride", ":, is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]),", "stability global_attn_probs = torch.nn.functional.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global attn output global_attn_output", "triangle. diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap *", "max_num_global_attn_indices, self.num_heads, self.head_dim ) value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] # use `matmul` because `einsum` crashes", "itself, then followed by window_overlap columns for the upper triangle. 
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(", "window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:, : window_overlap + 1 ] # -", "* self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) # compute", "columns are the window_overlap lower # triangles (attention from a word to window_overlap", "hidden_states = hidden_states.transpose(0, 1) # project hidden states query_vectors = self.query(hidden_states) key_vectors =", "create only global key vectors key_vectors_only_global = key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim )", "# total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap ]", "# helper variable num_global_attn_indices = is_index_global_attn.long().sum(dim=1) # max number of global attn indices", "for pretrained Longformer) with an overlap of size window_overlap\"\"\" batch_size, seq_len, num_heads, head_dim", "separate batch_size and num_heads dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size, num_heads, seq_len, 2", "1): -1, window_overlap + 1: ] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ :,", "= torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only global attn_output_only_global = torch.matmul(", "batch_size, num_heads, seq_len, 2 * window_overlap + 1 ).transpose(2, 1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return", "forward( self, hidden_states, attention_mask ): \"\"\" LongformerSelfAttention expects `len(hidden_states)` to be multiple of", "f\"`attention_window` for layer {self.layer_id} has to be positive. 
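    # ------------------------------------------------------------------
    # Illustrative shape walk-through for forward() (added for clarity; the
    # concrete numbers below are assumptions, not values from the original
    # config). With HIDDEN_SIZE=768, NUM_ATTENTION_HEADS=12 and a per-layer
    # ATTENTION_WINDOW of 512 (so one_sided_attn_window_size = 256), an input
    # hidden_states of shape (batch=2, seq_len=1024, 768) flows as:
    #   query/key/value projections -> (2, 1024, 768)
    #   after view/transpose        -> (2, 1024, 12, 64)
    #   sliding-window scores       -> (2, 1024, 12, 2*256+1) = (2, 1024, 12, 513)
    # If any position has global attention, the scores against the
    # max_num_global_attn_indices global tokens are concatenated in front of
    # the 513 windowed columns before the softmax.
    # ------------------------------------------------------------------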
    def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]):
        """pads rows and then flips rows and columns"""
        hidden_states_padded = torch.nn.functional.pad(
            hidden_states_padded, padding
        )  # padding value is not important because it will be overwritten
        hidden_states_padded = hidden_states_padded.view(
            hidden_states_padded.size(0),
            hidden_states_padded.size(1),
            hidden_states_padded.size(3),
            hidden_states_padded.size(2)
        )
        return hidden_states_padded

    def _pad_and_diagonalize(self, chunked_hidden_states):
        """shift every row 1 step right, converting columns into diagonals.
        Example:
            chunked_hidden_states: [ 0.4983,  2.6918, -0.0071,  1.0492,
                                    -1.8348,  0.7672,  0.2986,  0.0285,
                                    -0.7584,  0.4206, -0.0405,  0.1599,
                                     2.0514, -1.1600,  0.5372,  0.2629 ]
            window_overlap = num_rows = 4
            (pad & diagonalize) =>
                                   [ 0.4983,  2.6918, -0.0071,  1.0492,  0.0000,  0.0000,  0.0000
                                     0.0000, -1.8348,  0.7672,  0.2986,  0.0285,  0.0000,  0.0000
                                     0.0000,  0.0000, -0.7584,  0.4206, -0.0405,  0.1599,  0.0000
                                     0.0000,  0.0000,  0.0000,  2.0514, -1.1600,  0.5372,  0.2629 ]
        """
        total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
        chunked_hidden_states = torch.nn.functional.pad(
            chunked_hidden_states, (0, window_overlap + 1)
        )  # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1).
        # Padding value is not important because it'll be overwritten
        chunked_hidden_states = chunked_hidden_states.view(
            total_num_heads, num_chunks, -1
        )  # total_num_heads x num_chunks x window_overlap*(hidden_dim+window_overlap+1)
        chunked_hidden_states = chunked_hidden_states[
            :, :, :-window_overlap
        ]  # total_num_heads x num_chunks x window_overlap*(hidden_dim+window_overlap)
        chunked_hidden_states = chunked_hidden_states.view(
            total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
        )
        chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
        return chunked_hidden_states

    def _chunk(self, hidden_states, window_overlap: int):
        """convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
        # non-overlapping chunks of size = 2w
        hidden_states = hidden_states.view(
            hidden_states.size(0),
            hidden_states.size(1) // (window_overlap * 2),
            window_overlap * 2,
            hidden_states.size(2),
        )
        # use `as_strided` to make the chunks overlap with an overlap size = window_overlap
        chunk_size = list(hidden_states.size())
        chunk_size[1] = chunk_size[1] * 2 - 1
        chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2), hidden_states.stride(3)]
        chunk_stride[1] = chunk_stride[1] // 2
        return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)

    def _mask_invalid_locations(self, input_tensor, affected_seq_len: int):
        beginning_mask_2d = torch.ones(
            affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device
        ).tril().flip(dims=[0])
        beginning_mask = beginning_mask_2d[None, :, None, :]
        ending_mask = beginning_mask.flip(dims=(1, 3))
        beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
        beginning_mask = beginning_mask.expand(beginning_input.size())
        beginning_input.masked_fill_(beginning_mask == 1, -float("inf"))  # `== 1` converts to bool or uint8
        ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):]
        ending_mask = ending_mask.expand(ending_input.size())
        ending_input.masked_fill_(ending_mask == 1, -float("inf"))  # `== 1` converts to bool or uint8

    def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
        """Matrix multiplication of query and key tensors using a sliding window attention pattern.
        This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for
        pretrained Longformer) with an overlap of size window_overlap"""
        batch_size, seq_len, num_heads, head_dim = query.size()
        assert (
            seq_len % (window_overlap * 2) == 0
        ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
        assert query.size() == key.size()

        chunks_count = seq_len // window_overlap - 1

        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
        query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
        key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)

        chunked_query = self._chunk(query, window_overlap)
        chunked_key = self._chunk(key, window_overlap)

        # matrix multiplication
        # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
        chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key))  # multiply

        # convert diagonals into columns
        diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
            chunked_attention_scores, padding=(0, 0, 0, 1)
        )

        # allocate space for the overall attention matrix where the chunks are combined. The last dimension
        # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower
        # triangles (attention from a word to window_overlap previous words). The following column is attention
        # score from each word to itself, then followed by window_overlap columns for the upper triangle.
        diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
            (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
        )

        # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
        # - copying the main diagonal and the upper triangle
        diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
            :, :, :window_overlap, : window_overlap + 1
        ]
        diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
            :, -1, window_overlap:, : window_overlap + 1
        ]
        # - copying the lower triangle
        diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
            :, :, -(window_overlap + 1): -1, window_overlap + 1:
        ]
        diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
            :, 0, : window_overlap - 1, 1 - window_overlap:
        ]

        # separate batch_size and num_heads dimensions again
        diagonal_attention_scores = diagonal_attention_scores.view(
            batch_size, num_heads, seq_len, 2 * window_overlap + 1
        ).transpose(2, 1)

        self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
        return diagonal_attention_scores

    def _sliding_chunks_matmul_attn(
        self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
    ):
        """Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors.
        Returned tensor will be of the same shape as `attn_probs`"""
        batch_size, seq_len, num_heads, head_dim = value.size()
        assert seq_len % (window_overlap * 2) == 0
        assert attn_probs.size()[:3] == value.size()[:3]
        assert attn_probs.size(3) == 2 * window_overlap + 1

        chunks_count = seq_len // window_overlap - 1

        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
        chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
            batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
        )
        # group batch_size and num_heads dimensions into one
        value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
        # pad seq_len with w at the beginning of the sequence and another window overlap at the end
        padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1.0)
        # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
        chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
        chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)]
        chunked_value_stride = (
            chunked_value_stride[0],
            window_overlap * chunked_value_stride[1],
            chunked_value_stride[1],
            chunked_value_stride[2],
        )
        chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)

        chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)

        context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
        return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
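    # ------------------------------------------------------------------
    # Illustrative note on the overlapping chunks built by _chunk() (added for
    # clarity; the numbers are assumptions). For a sequence of length 8 and
    # window_overlap = 2, the non-overlapping view has chunks of size 4
    # ([0:4], [4:8]); as_strided then halves the chunk stride, producing
    # seq_len // window_overlap - 1 = 3 overlapping chunks [0:4], [2:6], [4:8]
    # without copying any memory. _sliding_chunks_query_key_matmul multiplies
    # query and key chunk by chunk, so each token only attends within its
    # 2 * window_overlap neighbourhood instead of the full sequence.
    # ------------------------------------------------------------------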
    def _get_global_attn_indices(self, is_index_global_attn):
        """compute global attn indices required throughout forward pass"""
        # helper variable
        num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
        # max number of global attn indices in batch
        max_num_global_attn_indices = num_global_attn_indices.max()
        # indices of global attn
        is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn)
        # helper variable
        is_local_index_global_attn = torch.arange(
            max_num_global_attn_indices, device=is_index_global_attn.device
        ) < num_global_attn_indices.unsqueeze(dim=-1)
        # location of the non-padding values within global attention indices
        is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn)
        # location of the padding values within global attention indices
        is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn == 0)
        return (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        )

    def _concat_with_global_key_attn_probs(
        self,
        key_vectors,
        query_vectors,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_no_global_attn_nonzero: List[torch.Tensor],
    ):
        batch_size = key_vectors.shape[0]
        # create only global key vectors
        key_vectors_only_global = key_vectors.new_zeros(
            batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
        )
        key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
        # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
        attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
        attn_probs_from_global_key[
            is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
        ] = -10000.0
        return attn_probs_from_global_key

    def _compute_attn_output(
        self,
        value_vectors,
        attn_probs,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_global_attn_nonzero: List[torch.Tensor],
    ):
        batch_size = attn_probs.shape[0]
        # cut local attn probs to global only
        attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
        # get value vectors for global only
        value_vectors_only_global = value_vectors.new_zeros(
            batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
        )
        value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
        # use `matmul` because `einsum` crashes sometimes with fp16
        # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
        # compute attn output only global
        attn_output_only_global = torch.matmul(
            attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2)
        ).transpose(1, 2)
        # reshape attn probs
        attn_probs_without_global = attn_probs.narrow(
            -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
        ).contiguous()
        # compute attn output with global
        attn_output_without_global = self._sliding_chunks_matmul_attn(
            attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
        )
        return attn_output_only_global + attn_output_without_global

    def _compute_global_attn_output(
        self,
        hidden_states,
        max_num_global_attn_indices,
        is_local_index_global_attn_nonzero: List[torch.Tensor],
        is_index_global_attn_nonzero: List[torch.Tensor],
        is_local_index_no_global_attn_nonzero: List[torch.Tensor],
        is_index_masked,
    ):
        seq_len, batch_size = hidden_states.shape[:2]
        # prepare global hidden states
        global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
        global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
            is_index_global_attn_nonzero[::-1]
        ]
        # global key, query, value
        global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
        global_key_vectors = self.key_global(hidden_states)
        global_value_vectors = self.value_global(hidden_states)
        # normalize
        global_query_vectors_only_global /= math.sqrt(self.head_dim)
        # reshape
        global_query_vectors_only_global = (
            global_query_vectors_only_global.contiguous()
            .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )  # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
        global_key_vectors = (
            global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)
        global_value_vectors = (
            global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)

        # compute attn scores
        global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))

        assert global_attn_scores.size(0) == batch_size * self.num_heads, \
            f"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, " \
            f"but is {global_attn_scores.size(0)}."
        assert global_attn_scores.size(1) == max_num_global_attn_indices, \
            f"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, " \
            f"but is {global_attn_scores.size(1)}."
        assert global_attn_scores.size(2) == seq_len, \
            f"global_attn_scores have the wrong size. size(2) should be {seq_len}, but is {global_attn_scores.size(2)}."

        global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
        global_attn_scores[
            is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
        ] = -10000.0
        global_attn_scores = global_attn_scores.masked_fill(
            is_index_masked[:, None, None, :],
            -10000.0,
        )
        global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)

        # compute global attn probs
        global_attn_probs_float = torch.nn.functional.softmax(
            global_attn_scores, dim=-1, dtype=torch.float32
        )  # use fp32 for numerical stability
        global_attn_probs = torch.nn.functional.dropout(
            global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
        )

        # global attn output
        global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)

        assert global_attn_output.size(0) == batch_size * self.num_heads, \
            f"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, " \
            f"but is {global_attn_output.size(0)}."
        assert global_attn_output.size(1) == max_num_global_attn_indices, \
            f"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, " \
            f"but is {global_attn_output.size(1)}."
        assert global_attn_output.size(2) == self.head_dim, \
            f"global_attn_scores have the wrong size. size(2) should be {self.head_dim}, " \
            f"but is {global_attn_output.size(2)}."

        global_attn_output = global_attn_output.view(
            batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
        )
        return global_attn_output
The `attention_mask` is changed", "attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs", "window_overlap columns for the upper triangle. diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count", "return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) def _get_global_attn_indices(self, is_index_global_attn): \"\"\" compute global attn", "padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1.0) # chunk padded_value into chunks", "and num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)", "1) key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # attn_probs = (batch_size, seq_len,", "non zero global attn output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] #", "attn output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with", "2) # reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices", "self.embed_dim) self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数", "size ({batch_size}, {seq_len}, {self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size * 2 + 1}), but is", "# TODO: remove the redundant computation if is_global_attn: global_attn_output = self._compute_global_attn_output( hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices,", "+= diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 +", "attn_output.transpose(0, 1) return attn_output def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]): \"\"\"pads", "): batch_size = key_vectors.shape[0] # create only global key vectors key_vectors_only_global = key_vectors.new_zeros(", "<filename>lichee/module/torch/layer/longformer_multi_headed_attn.py # -*- coding: utf-8 -*- # _*_ conding:utf-8 _*_ # Author :", "\" \\ f\"{self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}\" max_num_global_attn_indices", "= [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)] chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2],", "+ attn_output_without_global def _compute_global_attn_output( self, hidden_states, max_num_global_attn_indices, is_local_index_global_attn_nonzero: List[torch.Tensor], is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor],", "embed_dim = {self.embed_dim}, but has {embed_dim}\" # normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors", "every row 1 step right, converting columns into diagonals. 
Example: chunked_hidden_states: [ 0.4983,", "= self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ),", "1) # project hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors =", "+ 1) ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). # Padding", ":affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\"))", "0, 1, 2 to -ve: no attention 0: local attention +ve: global attention", "non-padding values within global attention indices is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn) # location of the", "0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\" total_num_heads, num_chunks, window_overlap, hidden_dim", "= ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)", "window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = torch.nn.functional.pad( chunked_hidden_states, (0, window_overlap + 1) )", "0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629 ] window_overlap = num_rows = 4", "-affected_seq_len:, :, -(affected_seq_len + 1):] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float(\"inf\")) #", "values to pad for attention probs remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None,", "2).reshape(batch_size * num_heads, seq_len, head_dim) chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap)", "hidden_states.size(2), ) # use `as_strided` to make the chunks overlap with an overlap", "matrix where the chunks are combined. The last dimension # has (window_overlap *", "size window overlap chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 *", "stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len,", "( attention_window > 0 ), f\"`attention_window` for layer {self.layer_id} has to be positive.", "= self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat to attn_probs", "hidden_states.stride(3)] chunk_stride[1] = chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) def _mask_invalid_locations(self, input_tensor, affected_seq_len:", "of {window_overlap * 2}. 
Given {seq_len}\" assert query.size() == key.size() chunks_count = seq_len", "// 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) def _mask_invalid_locations(self, input_tensor, affected_seq_len: int): beginning_mask_2d = torch.ones(affected_seq_len,", "List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ): batch_size = key_vectors.shape[0] # create only global", ": window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, -1,", "= torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global", "max_num_global_attn_indices, is_local_index_global_attn_nonzero: List[torch.Tensor], is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2]", "0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405,", "not a multiple of the number of attention \" \"heads (%d)\" % (cfg[\"CONFIG\"][\"HIDDEN_SIZE\"],", "= beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len,", "nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention", "count + 2*window+1) attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) # free memory # if", "value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # compute local attention output with", "return attn_probs_from_global_key def _compute_attn_output( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], ):", "= chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states", "is_index_global_attn_nonzero[::-1] ] # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states)", "[torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local attention probs from global attention keys", ") attn_output = attn_output.transpose(0, 1) return attn_output def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int,", "int ): \"\"\"Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor", "zero global attn output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite", "diagonals. 
Example: chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584,", "global_attn_scores, dim=-1, dtype=torch.float32 ) # use fp32 for numerical stability global_attn_probs = torch.nn.functional.dropout(", "hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query, value", "num_chunks, window_overlap, window_overlap + hidden_dim ) # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap", "def _chunk(self, hidden_states, window_overlap: int): \"\"\"convert into overlapping chunkings. Chunk size = 2w,", ": 2020/9/15 3:21 下午 from typing import List, Tuple import torch import math", "into overlapping chunkings. Chunk size = 2w, overlap size = w\"\"\" # non-overlapping", "chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405,", "cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) ) self.num_heads = cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] self.head_dim = int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"]", "is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1", "query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size *", "global attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of global attn", "self.one_sided_attn_window_size * 2 + 1, ], f\"attn_probs should be of size ({batch_size}, {seq_len},", "chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap chunked_hidden_states =", "/ cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim)", "1 # group batch_size and num_heads dimensions into one, then chunk seq_len into", "only attn_output = self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len,", "-0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629 ] window_overlap = num_rows =", ") # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap", "] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ :, 0, : window_overlap - 1,", "diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1,", "# compute local attention output with global attention value and add if is_global_attn:", ") # pad local attention probs attn_scores += diagonal_mask assert list(attn_scores.size()) == [", "1's with -inf float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( remove_from_windowed_attention_mask, -10000.0 ) # diagonal mask with", "over window dim if is_global_attn: # compute global attn indices required through out", "vectors key_vectors_only_global = key_vectors.new_zeros( batch_size, 
max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] #", "int): \"\"\"Matrix multiplication of query and key tensors using with a sliding window", "batch_size, embed_dim).contiguous() # compute value for global attention and overwrite to attention output", "padding ) # padding value is not important because it will be overwritten", "# reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices ).contiguous()", "cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value", "window_overlap, window_overlap, 2 * window_overlap + 1 ) # group batch_size and num_heads", "attention pattern. This implementation splits the input into overlapping chunks of size 2w", "the upper triangle. diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap,", "context = torch.einsum(\"bcwd,bcdh->bcwh\", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) def _get_global_attn_indices(self,", "probs from global key global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,", "max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ): batch_size = key_vectors.shape[0] # create", "1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\")) # `== 1` converts to", "word to itself, then followed by window_overlap columns for the upper triangle. diagonal_attention_scores", "is_index_masked=is_index_masked, ) # get only non zero global attn output nonzero_global_attn_output = global_attn_output[", "= key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len,", "self.head_dim, \\ f\"global_attn_scores have the wrong size. size(2) should be {self.head_dim}, \" \\", "upper triangle. 
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap", "training=self.training ) # global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) ==", "key.size() chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads", "- copying the lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :, :,", "self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key =", "window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk", "of the sequence and another window overlap at the end padded_value = torch.nn.functional.pad(value,", "space for the overall attention matrix where the chunks are combined. The last", "dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size, num_heads, seq_len, 2 * window_overlap + 1", "the chunks are combined. The last dimension # has (window_overlap * 2 +", "size(0) should be {batch_size * self.num_heads}, \" \\ f\"but is {global_attn_output.size(0)}.\" assert global_attn_output.size(1)", "chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride", ") return hidden_states_padded def _pad_and_diagonalize(self, chunked_hidden_states): \"\"\"shift every row 1 step right, converting", "for layer {self.layer_id} has to be positive. Given {attention_window}\" self.one_sided_attn_window_size = attention_window //", "global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) attn_output = attn_output.transpose(0, 1) return", "# apply dropout attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads,", "dimension # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns", "= torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数 attention_window =", "seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) chunked_query = self._chunk(query,", "chunked_query = self._chunk(query, window_overlap) chunked_key = self._chunk(key, window_overlap) # matrix multipication # bcxd:", "global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn probs global_attn_probs_float = torch.nn.functional.softmax(", "2}. 
Given {seq_len}\" assert query.size() == key.size() chunks_count = seq_len // window_overlap -", "output with global attention value and add if is_global_attn: # compute sum of", "hidden_states_padded def _pad_and_diagonalize(self, chunked_hidden_states): \"\"\"shift every row 1 step right, converting columns into", "the non-padding values within global attention indices is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn) # location of", "# Time : 2020/9/15 3:21 下午 from typing import List, Tuple import torch", "seq_len, batch_size, embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ), f\"hidden_states should", "window attention pattern. This implementation splits the input into overlapping chunks of size", "is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn) # helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) <", "another window overlap at the end padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap),", "and an overlap of size window overlap chunked_value_size = (batch_size * num_heads, chunks_count", "size\" attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for global attention", "global_attn_scores.masked_fill(is_index_masked[:, None, None, :], -10000.0, ) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)", "by window_overlap columns for the upper triangle. diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads,", "is_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_global_attn_nonzero = [torch.tensor(0)] is_local_index_no_global_attn_nonzero = [torch.tensor(0)] # compute local attention", "2*window+1) attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) # free memory # if self.query.training: #", "convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( chunked_attention_scores, padding=(0, 0, 0, 1) )", "self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: # compute local attn only", "max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum(\"blhd,bshd->blhs\", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] =", "self._sliding_chunks_matmul_attn( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), \"Unexpected", "= int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.key =", "2 * window_overlap + 1 ) # group batch_size and num_heads dimensions into", "attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def _compute_attn_output( self,", "torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of the 
non-padding values within", "0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629 ] window_overlap", "# compute attn output only global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2)", "f\"but is {global_attn_scores.size(0)}.\" assert global_attn_scores.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size.", "copying the lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :, :, -(window_overlap", "del attn_probs_fp32 # softmax sometimes inserts NaN if all positions are masked, replace", "chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size,", "self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id = layer_id # 待补充超参数 attention_window = cfg[\"CONFIG\"][\"ATTENTION_WINDOW\"][self.layer_id] # 待补充超参数", "{seq_len}, {self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size * 2 + 1}), but is of size", "hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get only non zero global", "待补充超参数 assert ( attention_window % 2 == 0 ), f\"`attention_window` for layer {self.layer_id}", "-0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\" total_num_heads,", "None, :], -10000.0, ) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute", "into chunks of size window_overlap * 2 query = query.transpose(1, 2).reshape(batch_size * num_heads,", "1) ) # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions", "diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)", "== max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size. 
size(1) should be {max_num_global_attn_indices}, \"", "is_global_attn = is_index_global_attn.flatten().any().item() hidden_states = hidden_states.transpose(0, 1) # project hidden states query_vectors =", "global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert global_attn_output.size(0) == batch_size * self.num_heads,", "chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = [hidden_states.stride(0),", "1, -float(\"inf\")) # `== 1` converts to bool or uint8 def _sliding_chunks_query_key_matmul(self, query:", "self.embed_dim) # separate projection layers for tokens with global attention self.query_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"],", "of the non-padding values within global attention indices is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn) # location", "# - copying the lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :,", "1 ) # group batch_size and num_heads dimensions into one value = value.transpose(1,", "key vectors key_vectors_only_global = key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]", "end padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1.0) # chunk padded_value into", "diagonal_chunked_attention_scores[ :, :, -(window_overlap + 1): -1, window_overlap + 1: ] diagonal_attention_scores[:, 0,", "window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:,", "of size 2 window overlap chunked_attn_probs = attn_probs.transpose(1, 2).reshape( batch_size * num_heads, seq_len", "x num_chunks x window_overlap x (hidden_dim+window_overlap+1). 
# Padding value is not important because", "chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, -1 ) # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap", "assert ( seq_len % (window_overlap * 2) == 0 ), f\"Sequence length should", "== 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) class LongformerSelfAttention(torch.nn.Module): def __init__(self, cfg, layer_id): super().__init__()", "Padding value is not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads,", ":, -(window_overlap + 1): -1, window_overlap + 1: ] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap]", "] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:, : window_overlap +", "= attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked or global attention is_index_masked = attention_mask <", "+ve: global attention \"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked or global", "hidden_states_padded.size(0), hidden_states_padded.size(1), hidden_states_padded.size(3), hidden_states_padded.size(2) ) return hidden_states_padded def _pad_and_diagonalize(self, chunked_hidden_states): \"\"\"shift every row", "+ 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\")) # `== 1` converts", "The following column is attention # score from each word to itself, then", "from typing import List, Tuple import torch import math def nonzero_tuple(x): if x.dim()", "batch_size * self.num_heads, seq_len, head_dim) # compute attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1,", "= attn_probs.shape[0] # cut local attn probs to global only attn_probs_only_global = attn_probs.narrow(-1,", "attention +ve: global attention \"\"\" attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1) # is index masked or", "\"\"\"shift every row 1 step right, converting columns into diagonals. Example: chunked_hidden_states: [", "+ 1}), but is of size {attn_scores.size()}\" max_num_global_attn_indices = torch.tensor(0) is_index_global_attn_nonzero = [torch.tensor(0)]", "= self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: # compute local attn", "def _concat_with_global_key_attn_probs( self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ):", "input into overlapping chunks of size 2w (e.g. 
512 for pretrained Longformer) with", "1 ] # - copying the lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] =", "is not a multiple of the number of attention \" \"heads (%d)\" %", "indices required through out forward fn ret = self._get_global_attn_indices(is_index_global_attn) max_num_global_attn_indices = ret[0] is_index_global_attn_nonzero", "= hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query, value global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors", "x window_overlapL+window_overlapwindow_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x num_chunks", "add if is_global_attn: # compute sum of global and local attn attn_output =", "value is not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks,", "+ 1, 3 * window_overlap, head_dim) chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)] chunked_value_stride =", "_*_ # Author : Nick # Time : 2020/9/15 3:21 下午 from typing", ") # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions #", "attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = seq_len // window_overlap -", "global key vectors key_vectors_only_global = key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] =", "\" \\ f\"but is {global_attn_scores.size(0)}.\" assert global_attn_scores.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the", "0 attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) # apply dropout attn_probs", "# separate batch_size and num_heads dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size, num_heads, seq_len,", "# cut local attn probs to global only attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)", "length should be multiple of {window_overlap * 2}. 
Given {seq_len}\" assert query.size() ==", "attn_output = self._compute_attn_output( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: # compute local", "number of attention \" \"heads (%d)\" % (cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) ) self.num_heads = cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]", "is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor], ): batch_size = key_vectors.shape[0] # create only", "batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) global_value_vectors", "LongformerSelfAttention(torch.nn.Module): def __init__(self, cfg, layer_id): super().__init__() if cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] % cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] != 0: raise", "# use fp32 for numerical stability attn_probs = attn_probs_fp32.type_as(attn_scores) # free memory #", "of size 3 window overlap and an overlap of size window overlap chunked_value_size", "def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]): \"\"\"pads rows and then flips", "Time : 2020/9/15 3:21 下午 from typing import List, Tuple import torch import", "assert seq_len % (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert", "(chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) def _get_global_attn_indices(self, is_index_global_attn): \"\"\" compute", "# total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). # Padding value is not", "1, 2 to -ve: no attention 0: local attention +ve: global attention \"\"\"", "# is index masked or global attention is_index_masked = attention_mask < 0 is_index_global_attn", ") def _concat_with_global_key_attn_probs( self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero: List[torch.Tensor], is_local_index_global_attn_nonzero: List[torch.Tensor], is_local_index_no_global_attn_nonzero: List[torch.Tensor],", "key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum(\"blhd,bshd->blhs\", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0],", "self.key_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.value_global = torch.nn.Linear(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"], self.embed_dim) self.dropout = cfg[\"CONFIG\"][\"ATTENTION_PROBS_DROPOUT_PROB\"] self.layer_id =", "x hidden_dim+window_overlap chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] return chunked_hidden_states def _chunk(self, hidden_states,", "# Author : Nick # Time : 2020/9/15 3:21 下午 from typing import", "at the beginning of the sequence and another window overlap at the end", "attn_probs_fp32.type_as(attn_scores) # free memory # if self.query.training: # del attn_probs_fp32 # softmax sometimes", "word to window_overlap previous words). 
The following column is attention # score from", "+ 1) ) # copy parts from diagonal_chunked_attention_scores into the combined matrix of", "attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for global attention and overwrite to", "w\"\"\" # non-overlapping chunks of size = 2w hidden_states = hidden_states.view( hidden_states.size(0), hidden_states.size(1)", ") value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] # use `matmul` because `einsum` crashes sometimes with fp16", "\"\"\" LongformerSelfAttention expects `len(hidden_states)` to be multiple of `attention_window`. Padding to `attention_window` happens", "self.num_heads = cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"] self.head_dim = int(cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] / cfg[\"CONFIG\"][\"NUM_ATTENTION_HEADS\"]) self.embed_dim = cfg[\"CONFIG\"][\"HIDDEN_SIZE\"] self.query =", "= attn_probs.transpose(1, 2).reshape( batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap", "= self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim)", "value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = seq_len //", "- copying the main diagonal and the upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:]", "{self.embed_dim}, but has {embed_dim}\" # normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len,", "num_heads, seq_len, head_dim).transpose(1, 2) def _get_global_attn_indices(self, is_index_global_attn): \"\"\" compute global attn indices required", "x window_overlapL+window_overlapwindow_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) #", "window_overlap:, : window_overlap + 1 ] # - copying the lower triangle diagonal_attention_scores[:,", "max_num_global_attn_indices, \\ f\"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, \" \\", "global attn output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values", "with global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) attn_output = attn_output.transpose(0, 1)", ":window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :,", "= self._compute_global_attn_output( hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get only non", "the same shape as `attn_probs`\"\"\" batch_size, seq_len, num_heads, head_dim = value.size() assert seq_len", "2 * window_overlap + 1 chunks_count = seq_len // window_overlap - 1 #", "attn_output = attn_output.transpose(0, 1) return attn_output def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int,", "columns for the upper triangle. 
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count +", ":, :, :-1] return chunked_hidden_states def _chunk(self, hidden_states, window_overlap: int): \"\"\"convert into overlapping", "should be {batch_size * self.num_heads}, \" \\ f\"but is {global_attn_scores.size(0)}.\" assert global_attn_scores.size(1) ==", "input_tensor, affected_seq_len: int): beginning_mask_2d = torch.ones(affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype, device=input_tensor.device).tril().flip(dims=[0]) beginning_mask =", "head_dim) global_key_vectors = ( global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size", ": Nick # Time : 2020/9/15 3:21 下午 from typing import List, Tuple", "3:21 下午 from typing import List, Tuple import torch import math def nonzero_tuple(x):", "def forward( self, hidden_states, attention_mask ): \"\"\" LongformerSelfAttention expects `len(hidden_states)` to be multiple", "the main diagonal and the upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[", "or global attention is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0", "\" \\ f\"but is {global_attn_output.size(1)}.\" assert global_attn_output.size(2) == self.head_dim, \\ f\"global_attn_scores have the", "assert ( embed_dim == self.embed_dim ), f\"hidden_states should have embed_dim = {self.embed_dim}, but", "== [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f\"attn_probs should", "= attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value vectors for global only value_vectors_only_global =", "helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of", "self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn( self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int", "output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with global", "2 query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size", "attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of global attn is_index_global_attn_nonzero", "\" \\ f\"but is {global_attn_output.size(0)}.\" assert global_attn_output.size(1) == max_num_global_attn_indices, \\ f\"global_attn_scores have the", "== value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = seq_len", "window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)", "0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] \"\"\" total_num_heads, num_chunks,", "assert global_attn_output.size(2) == self.head_dim, \\ f\"global_attn_scores have the wrong size. 
size(2) should be", "= hidden_states.transpose(0, 1) # project hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states)", "to attn_probs # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores =", "beginning_input.masked_fill_(beginning_mask == 1, -float(\"inf\")) # `== 1` converts to bool or uint8 ending_input", "them with 0 attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) # apply", "attention probs remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] # cast to", "is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() hidden_states = hidden_states.transpose(0, 1) #", "=> [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986,", "Longformer) with an overlap of size window_overlap\"\"\" batch_size, seq_len, num_heads, head_dim = query.size()", "= attn_probs_fp32.type_as(attn_scores) # free memory # if self.query.training: # del attn_probs_fp32 # softmax", "is_index_global_attn_nonzero = ret[1] is_local_index_global_attn_nonzero = ret[2] is_local_index_no_global_attn_nonzero = ret[3] # calculate global attn", "\"The hidden size (%d) is not a multiple of the number of attention", "only attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value vectors for global only", "* num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 ) #", "f\"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, \" \\ f\"{self.one_sided_attn_window_size * 2", "max number of global attn indices in batch max_num_global_attn_indices = num_global_attn_indices.max() # indices", "multiple of {window_overlap * 2}. Given {seq_len}\" assert query.size() == key.size() chunks_count =", "== self.head_dim, \\ f\"global_attn_scores have the wrong size. size(2) should be {self.head_dim}, \"", "self.head_dim), \"Unexpected size\" attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for", "!= 0)[:, :, None, None] # cast to fp32/fp16 then replace 1's with", "should be multiple of {window_overlap * 2}. 
Overlapping fragments of a PyTorch LongformerSelfAttention module (sliding-window self-attention with optional global attention), adapted to read its sizes from a cfg["CONFIG"] dict. The recoverable structure:

__init__(cfg, layer_id) raises a ValueError when HIDDEN_SIZE is not a multiple of NUM_ATTENTION_HEADS; sets num_heads, head_dim and embed_dim; builds query / key / value linear projections plus separate query_global / key_global / value_global projections for tokens with global attention; reads the dropout probability from ATTENTION_PROBS_DROPOUT_PROB; asserts that attention_window is even and positive; and stores one_sided_attn_window_size = attention_window // 2.

forward(hidden_states, attention_mask) expects len(hidden_states) to be a multiple of attention_window; padding to attention_window happens in LongformerModel.forward to avoid redoing the padding on each layer. The attention_mask is changed in BertModel.forward from 0, 1, 2 to: negative = no attention, 0 = local attention, positive = global attention. The method projects the hidden states, normalizes the query, computes windowed attention scores with _sliding_chunks_query_key_matmul, masks padded positions with a -10000.0 float mask, concatenates attention scores against the global keys when any token has global attention, takes the softmax in fp32 for numerical stability, zeroes masked positions, applies dropout, and computes the output with _sliding_chunks_matmul_attn plus _compute_attn_output for the global part. When global attention is present, _compute_global_attn_output recomputes the globally attending positions and overwrites them in the output.

Helper methods:
- _pad_and_transpose_last_two_dims(hidden_states_padded, padding): pads rows and then flips rows and columns; the padding value is not important because it will be overwritten.
- _pad_and_diagonalize(chunked_hidden_states): shifts every row one step right, converting columns into diagonals (illustrated in the docstring with a 4-row example and window_overlap = num_rows = 4).
- _chunk(hidden_states, window_overlap): converts into overlapping chunkings with chunk size 2w and overlap w, by first viewing the sequence as non-overlapping chunks of size 2w and then using as_strided to make the chunks overlap.
- _mask_invalid_locations(input_tensor, affected_seq_len): builds beginning and ending masks from a flipped lower-triangular matrix and fills the invalid positions with -inf (masked_fill_ on a "== 1" mask, which converts it to bool or uint8).
- _sliding_chunks_query_key_matmul(query, key, window_overlap): matrix multiplication of query and key tensors using a sliding-window attention pattern. It splits the input into overlapping chunks, groups the batch and head dimensions, computes torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key)), converts diagonals into columns, and copies the main diagonal plus the upper and lower triangles into a (batch_size, seq_len, num_heads, 2 * window_overlap + 1) score tensor, whose first window_overlap columns are the attention from a word to the window_overlap previous words and whose following column is the attention from each word to itself.
- _sliding_chunks_matmul_attn(attn_probs, value, window_overlap): same as _sliding_chunks_query_key_matmul but for the attention probabilities and value tensors; the returned tensor has the same shape as attn_probs. It pads value with -1.0 on both sides of the sequence, chunks it into overlapping chunks of size 3 * window_overlap via as_strided, diagonalizes the attention probabilities with _pad_and_diagonalize, and contracts with torch.einsum("bcwd,bcdh->bcwh", ...).
- _get_global_attn_indices(is_index_global_attn): computes the global-attention indices required throughout the forward pass: the maximum number of global indices in the batch plus nonzero index tuples for the global positions, the non-padding positions within the global block, and the padding positions.
- _concat_with_global_key_attn_probs(...): builds key vectors for the global positions only and computes torch.einsum("blhd,bshd->blhs", ...) attention probabilities against them, filling padding positions with -10000.0.
- _compute_attn_output(...): gathers value vectors for the global positions, computes the global part with torch.matmul (used because einsum sometimes crashes with fp16), and adds the local part from _sliding_chunks_matmul_attn applied to the remaining probabilities.
- _compute_global_attn_output(...): projects the globally attending tokens with query_global, key_global and value_global, normalizes the global query by sqrt(head_dim), computes full-sequence scores with torch.bmm, masks padding and masked indices with -10000.0, applies an fp32 softmax and dropout, and writes the result back into the attention output at the global positions.
Overlapping fragments of a CuPy test module, reassembled below (originally tagged tests/cupyx_tests/scipy_tests/special_tests/test_basic.py):

# tests/cupyx_tests/scipy_tests/special_tests/test_basic.py
import math

import cupy
import numpy
import pytest
import scipy.special  # NOQA

import cupyx.scipy.special
from cupy import testing
from cupy.testing import (
    assert_array_equal,
    assert_array_almost_equal,
)
from cupy.testing import numpy_cupy_allclose

rtol = {'default': 1e-5, cupy.float64: 1e-12}


@testing.gpu
@testing.with_requires("scipy")
class TestLegendreFunctions:

    def test_lpmv_basic(self):
        # specific values tested in the SciPy test suite
        scp = cupyx.scipy
        lp = scp.special.lpmv(0, 2, 0.5)
        assert_array_almost_equal(lp, -0.125, 7)
        lp = scp.special.lpmv(0, 40, 0.001)
        assert_array_almost_equal(lp, 0.1252678976534484, 7)

        # XXX: this is outside the domain of the current implementation,
        #      so ensure it returns a NaN rather than a wrong answer.
        olderr = numpy.seterr(all="ignore")
        try:
            lp = scp.special.lpmv(-1, -1, 0.001)
        finally:
            numpy.seterr(**olderr)
        assert lp != 0 or cupy.isnan(lp)

    @pytest.mark.parametrize("order", [0, 1, 2, 3, 4])
    @pytest.mark.parametrize("degree", [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50])
    @testing.for_dtypes(["e", "f", "d"])
    @numpy_cupy_allclose(scipy_name="scp", atol=1e-12)
    def test_lpmv(self, xp, scp, dtype, order, degree):
        vals = xp.linspace(-1, 1, 100, dtype=dtype)
        return scp.special.lpmv(order, degree, vals)


@testing.gpu
@testing.with_requires("scipy")
class TestBasic:

    @testing.for_dtypes(["e", "f", "d"])
    @numpy_cupy_allclose(scipy_name="scp")
    def test_gammasgn(self, xp, scp, dtype):
        vals = xp.linspace(-4, 4, 100, dtype=dtype)
        return scp.special.gammasgn(vals)

    @testing.for_dtypes(["e", "f", "d"])
    @numpy_cupy_allclose(scipy_name="scp", rtol=rtol)
    def test_log1p_(self, xp, scp, dtype):
        # only test with values > 0 to avoid NaNs
        vals = xp.logspace(-10, 10, 10000, dtype=dtype)
        return scp.special.log1p(vals)

    @testing.for_dtypes(["e", "f", "d"])
    @numpy_cupy_allclose(scipy_name="scp", rtol=rtol)
    def test_log1p_path2(self, xp, scp, dtype):
        # test values for code path corresponding to range [1/sqrt(2), sqrt(2)]
        vals = xp.linspace(1 / math.sqrt(2), math.sqrt(2), 1000, dtype=dtype)
        return scp.special.log1p(vals)

    def test_log1p_real(self):
        log1p = cupyx.scipy.special.log1p
        inf = cupy.inf
        nan = cupy.nan
        assert_array_equal(log1p(0), 0.0)
        assert_array_equal(log1p(-1), -inf)
        assert_array_equal(log1p(-2), nan)
        assert_array_equal(log1p(inf), inf)

    def test_log1p_complex(self):
        # complex-valued log1p not yet implemented
        with pytest.raises(TypeError):
            cupyx.scipy.special.log1p(0 + 0j)

    @pytest.mark.parametrize("function", ["xlogy", "xlog1py"])
    @testing.for_dtypes('efdFD')
    @numpy_cupy_allclose(scipy_name="scp", rtol={'default': 1e-3, cupy.float64: 1e-12})
    def test_xlogy(self, xp, scp, dtype, function):
        # only test with values > 0 to avoid NaNs
        x = xp.linspace(-100, 100, 1000, dtype=dtype)
        y = xp.linspace(0.001, 100, 1000, dtype=dtype)
        if x.dtype.kind == 'c':
            x -= 1j * x
            y += 1j * y
        return getattr(scp.special, function)(x, y)

    @pytest.mark.parametrize("function", ["xlogy", "xlog1py"])
    @testing.for_dtypes('efdFD')
    @numpy_cupy_allclose(scipy_name="scp", rtol={'default': 1e-3, cupy.float64: 1e-12})
    def test_xlogy_zeros(self, xp, scp, dtype, function):
        # only test with values > 0 to avoid NaNs
        x = xp.zeros((1, 100), dtype=dtype)
        y = xp.linspace(-10, 10, 100, dtype=dtype)
        if y.dtype.kind == 'c':
            y += 1j * y
        return getattr(scp.special, function)(x, y)

    @pytest.mark.parametrize("function", ["xlogy", "xlog1py"])
    @testing.for_all_dtypes()
    def test_xlogy_nonfinite(self, dtype, function):
        func = getattr(cupyx.scipy.special, function)
        y = cupy.ones((5,), dtype=dtype)
        assert cupy.all(cupy.isnan(func(cupy.nan, y)))
        assert cupy.all(cupy.isnan(func(y, cupy.nan)))
[0, 1, 2, 3, 4]) @pytest.mark.parametrize(\"degree\", [0, 1, 2, 3, 4,", "is outside the domain of the current implementation, # so ensure it returns", "vals = xp.logspace(-10, 10, 10000, dtype=dtype) return scp.special.log1p(vals) @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol)", "# only test with values > 0 to avoid NaNs x = xp.linspace(-100,", "dtype=dtype) return scp.special.gammasgn(vals) @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol) def test_log1p_(self, xp, scp, dtype):", "1e-3, cupy.float64: 1e-12}) def test_xlogy_zeros(self, xp, scp, dtype, function): # only test with", "rtol={'default': 1e-3, cupy.float64: 1e-12}) def test_xlogy_zeros(self, xp, scp, dtype, function): # only test", "test values for code path corresponding to range [1/sqrt(2), sqrt(2)] vals = xp.linspace(1", "0 to avoid NaNs x = xp.zeros((1, 100), dtype=dtype) y = xp.linspace(-10, 10,", "xp, scp, dtype): # only test with values > 0 to avoid NaNs", "rtol=rtol) def test_log1p_path2(self, xp, scp, dtype): # test values for code path corresponding", "{'default': 1e-5, cupy.float64: 1e-12} @testing.gpu @testing.with_requires(\"scipy\") class TestLegendreFunctions: def test_lpmv_basic(self): # specific values", "= scp.special.lpmv(0, 2, 0.5) assert_array_almost_equal(lp, -0.125, 7) lp = scp.special.lpmv(0, 40, 0.001) assert_array_almost_equal(lp,", "xp.linspace(-100, 100, 1000, dtype=dtype) y = xp.linspace(0.001, 100, 1000, dtype=dtype) if x.dtype.kind ==", "@testing.for_all_dtypes() def test_xlogy_nonfinite(self, dtype, function): func = getattr(cupyx.scipy.special, function) y = cupy.ones((5,), dtype=dtype)", "y = xp.linspace(0.001, 100, 1000, dtype=dtype) if x.dtype.kind == 'c': x -= 1j", "getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\", rtol={'default': 1e-3, cupy.float64: 1e-12}) def", "0.0) assert_array_equal(log1p(-1), -inf) assert_array_equal(log1p(-2), nan) assert_array_equal(log1p(inf), inf) def test_log1p_complex(self): # complex-valued log1p not", "xp, scp, dtype, function): # only test with values > 0 to avoid", "2, 3, 4]) @pytest.mark.parametrize(\"degree\", [0, 1, 2, 3, 4, 5, 10, 20, 30,", "only test with values > 0 to avoid NaNs x = xp.linspace(-100, 100,", "scp.special.lpmv(order, degree, vals) @testing.gpu @testing.with_requires(\"scipy\") class TestBasic: @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\") def test_gammasgn(self,", "yet implemented with pytest.raises(TypeError): cupyx.scipy.special.log1p(0 + 0j) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\", rtol={'default':", "-= 1j * x y += 1j * y return getattr(scp.special, function)(x, y)", "= scp.special.lpmv(-1, -1, 0.001) finally: numpy.seterr(**olderr) assert lp != 0 or cupy.isnan(lp) @pytest.mark.parametrize(\"order\",", "0 to avoid NaNs vals = xp.logspace(-10, 10, 10000, dtype=dtype) return scp.special.log1p(vals) @testing.for_dtypes([\"e\",", "[\"xlogy\", \"xlog1py\"]) @testing.for_all_dtypes() def test_xlogy_nonfinite(self, dtype, function): func = getattr(cupyx.scipy.special, function) y =", "import cupyx.scipy.special from cupy import testing from cupy.testing import ( assert_array_equal, assert_array_almost_equal, )", "@testing.with_requires(\"scipy\") class TestLegendreFunctions: def 
test_lpmv_basic(self): # specific values tested in the SciPy test", "return scp.special.log1p(vals) def test_log1p_real(self): log1p = cupyx.scipy.special.log1p inf = cupy.inf nan = cupy.nan", "assert_array_almost_equal, ) from cupy.testing import numpy_cupy_allclose rtol = {'default': 1e-5, cupy.float64: 1e-12} @testing.gpu", "test with values > 0 to avoid NaNs x = xp.zeros((1, 100), dtype=dtype)", "import cupy import numpy import pytest import scipy.special # NOQA import cupyx.scipy.special from", "to avoid NaNs x = xp.linspace(-100, 100, 1000, dtype=dtype) y = xp.linspace(0.001, 100,", "corresponding to range [1/sqrt(2), sqrt(2)] vals = xp.linspace(1 / math.sqrt(2), math.sqrt(2), 1000, dtype=dtype)", "range [1/sqrt(2), sqrt(2)] vals = xp.linspace(1 / math.sqrt(2), math.sqrt(2), 1000, dtype=dtype) return scp.special.log1p(vals)", "test_xlogy_nonfinite(self, dtype, function): func = getattr(cupyx.scipy.special, function) y = cupy.ones((5,), dtype=dtype) assert cupy.all(cupy.isnan(func(cupy.nan,", "cupy.testing import ( assert_array_equal, assert_array_almost_equal, ) from cupy.testing import numpy_cupy_allclose rtol = {'default':", "@testing.gpu @testing.with_requires(\"scipy\") class TestBasic: @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\") def test_gammasgn(self, xp, scp, dtype):", "olderr = numpy.seterr(all=\"ignore\") try: lp = scp.special.lpmv(-1, -1, 0.001) finally: numpy.seterr(**olderr) assert lp", "scp, dtype): vals = xp.linspace(-4, 4, 100, dtype=dtype) return scp.special.gammasgn(vals) @testing.for_dtypes([\"e\", \"f\", \"d\"])", "path corresponding to range [1/sqrt(2), sqrt(2)] vals = xp.linspace(1 / math.sqrt(2), math.sqrt(2), 1000,", "y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\", rtol={'default': 1e-3, cupy.float64: 1e-12}) def test_xlogy_zeros(self, xp,", "# NOQA import cupyx.scipy.special from cupy import testing from cupy.testing import ( assert_array_equal,", "suite scp = cupyx.scipy lp = scp.special.lpmv(0, 2, 0.5) assert_array_almost_equal(lp, -0.125, 7) lp", "scp, dtype, function): # only test with values > 0 to avoid NaNs", "import ( assert_array_equal, assert_array_almost_equal, ) from cupy.testing import numpy_cupy_allclose rtol = {'default': 1e-5,", "@numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol) def test_log1p_path2(self, xp, scp, dtype): # test values for code path", "@pytest.mark.parametrize(\"degree\", [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50]) @testing.for_dtypes([\"e\",", "100), dtype=dtype) y = xp.linspace(-10, 10, 100, dtype=dtype) if y.dtype.kind == 'c': y", "nan = cupy.nan assert_array_equal(log1p(0), 0.0) assert_array_equal(log1p(-1), -inf) assert_array_equal(log1p(-2), nan) assert_array_equal(log1p(inf), inf) def test_log1p_complex(self):", "7) # XXX: this is outside the domain of the current implementation, #", "test_log1p_(self, xp, scp, dtype): # only test with values > 0 to avoid", "if y.dtype.kind == 'c': y += 1j * y return getattr(scp.special, function)(x, y)", "+= 1j * y return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\",", "assert_array_almost_equal(lp, 0.1252678976534484, 7) # XXX: this is outside the domain of the current", "numpy.seterr(**olderr) assert lp != 0 or cupy.isnan(lp) @pytest.mark.parametrize(\"order\", [0, 1, 2, 3, 4])", "1, 2, 3, 4, 5, 10, 20, 30, 40, 50]) 
@testing.for_dtypes([\"e\", \"f\", \"d\"])", "func = getattr(cupyx.scipy.special, function) y = cupy.ones((5,), dtype=dtype) assert cupy.all(cupy.isnan(func(cupy.nan, y))) assert cupy.all(cupy.isnan(func(y,", "y += 1j * y return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD')", "1, 2, 3, 4]) @pytest.mark.parametrize(\"degree\", [0, 1, 2, 3, 4, 5, 10, 20,", "XXX: this is outside the domain of the current implementation, # so ensure", "scp.special.lpmv(0, 40, 0.001) assert_array_almost_equal(lp, 0.1252678976534484, 7) # XXX: this is outside the domain", "= cupy.inf nan = cupy.nan assert_array_equal(log1p(0), 0.0) assert_array_equal(log1p(-1), -inf) assert_array_equal(log1p(-2), nan) assert_array_equal(log1p(inf), inf)", "( assert_array_equal, assert_array_almost_equal, ) from cupy.testing import numpy_cupy_allclose rtol = {'default': 1e-5, cupy.float64:", "y += 1j * y return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_all_dtypes()", "x = xp.linspace(-100, 100, 1000, dtype=dtype) y = xp.linspace(0.001, 100, 1000, dtype=dtype) if", "test_lpmv(self, xp, scp, dtype, order, degree): vals = xp.linspace(-1, 1, 100, dtype=dtype) return", "scp.special.lpmv(-1, -1, 0.001) finally: numpy.seterr(**olderr) assert lp != 0 or cupy.isnan(lp) @pytest.mark.parametrize(\"order\", [0,", "function): # only test with values > 0 to avoid NaNs x =", "scipy.special # NOQA import cupyx.scipy.special from cupy import testing from cupy.testing import (", "return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\", rtol={'default': 1e-3, cupy.float64: 1e-12})", "TestLegendreFunctions: def test_lpmv_basic(self): # specific values tested in the SciPy test suite scp", "test_xlogy(self, xp, scp, dtype, function): # only test with values > 0 to", "not yet implemented with pytest.raises(TypeError): cupyx.scipy.special.log1p(0 + 0j) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\",", "the SciPy test suite scp = cupyx.scipy lp = scp.special.lpmv(0, 2, 0.5) assert_array_almost_equal(lp,", "scp, dtype, order, degree): vals = xp.linspace(-1, 1, 100, dtype=dtype) return scp.special.lpmv(order, degree,", "rather than a wrong answer. 
olderr = numpy.seterr(all=\"ignore\") try: lp = scp.special.lpmv(-1, -1,", "cupy.nan assert_array_equal(log1p(0), 0.0) assert_array_equal(log1p(-1), -inf) assert_array_equal(log1p(-2), nan) assert_array_equal(log1p(inf), inf) def test_log1p_complex(self): # complex-valued", "= scp.special.lpmv(0, 40, 0.001) assert_array_almost_equal(lp, 0.1252678976534484, 7) # XXX: this is outside the", "scp.special.lpmv(0, 2, 0.5) assert_array_almost_equal(lp, -0.125, 7) lp = scp.special.lpmv(0, 40, 0.001) assert_array_almost_equal(lp, 0.1252678976534484,", "/ math.sqrt(2), math.sqrt(2), 1000, dtype=dtype) return scp.special.log1p(vals) def test_log1p_real(self): log1p = cupyx.scipy.special.log1p inf", "1e-5, cupy.float64: 1e-12} @testing.gpu @testing.with_requires(\"scipy\") class TestLegendreFunctions: def test_lpmv_basic(self): # specific values tested", "= numpy.seterr(all=\"ignore\") try: lp = scp.special.lpmv(-1, -1, 0.001) finally: numpy.seterr(**olderr) assert lp !=", "def test_gammasgn(self, xp, scp, dtype): vals = xp.linspace(-4, 4, 100, dtype=dtype) return scp.special.gammasgn(vals)", "20, 30, 40, 50]) @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", atol=1e-12) def test_lpmv(self, xp, scp,", "# XXX: this is outside the domain of the current implementation, # so", "@testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol) def test_log1p_path2(self, xp, scp, dtype): # test values", "\"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", atol=1e-12) def test_lpmv(self, xp, scp, dtype, order, degree): vals = xp.linspace(-1,", "# only test with values > 0 to avoid NaNs vals = xp.logspace(-10,", "assert_array_equal(log1p(0), 0.0) assert_array_equal(log1p(-1), -inf) assert_array_equal(log1p(-2), nan) assert_array_equal(log1p(inf), inf) def test_log1p_complex(self): # complex-valued log1p", "1000, dtype=dtype) return scp.special.log1p(vals) def test_log1p_real(self): log1p = cupyx.scipy.special.log1p inf = cupy.inf nan", "only test with values > 0 to avoid NaNs x = xp.zeros((1, 100),", "lp = scp.special.lpmv(-1, -1, 0.001) finally: numpy.seterr(**olderr) assert lp != 0 or cupy.isnan(lp)", "@numpy_cupy_allclose(scipy_name=\"scp\") def test_gammasgn(self, xp, scp, dtype): vals = xp.linspace(-4, 4, 100, dtype=dtype) return", "* x y += 1j * y return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\",", "dtype=dtype) y = xp.linspace(-10, 10, 100, dtype=dtype) if y.dtype.kind == 'c': y +=", "0.5) assert_array_almost_equal(lp, -0.125, 7) lp = scp.special.lpmv(0, 40, 0.001) assert_array_almost_equal(lp, 0.1252678976534484, 7) #", "@pytest.mark.parametrize(\"order\", [0, 1, 2, 3, 4]) @pytest.mark.parametrize(\"degree\", [0, 1, 2, 3, 4, 5,", "@testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol) def test_log1p_(self, xp, scp, dtype): # only test", "class TestLegendreFunctions: def test_lpmv_basic(self): # specific values tested in the SciPy test suite", "of the current implementation, # so ensure it returns a NaN rather than", "= xp.zeros((1, 100), dtype=dtype) y = xp.linspace(-10, 10, 100, dtype=dtype) if y.dtype.kind ==", "inf) def test_log1p_complex(self): # complex-valued log1p not yet implemented with pytest.raises(TypeError): cupyx.scipy.special.log1p(0 +", "1j * y return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_all_dtypes() def test_xlogy_nonfinite(self,", "pytest.raises(TypeError): 
cupyx.scipy.special.log1p(0 + 0j) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\", rtol={'default': 1e-3, cupy.float64: 1e-12})", "vals) @testing.gpu @testing.with_requires(\"scipy\") class TestBasic: @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\") def test_gammasgn(self, xp, scp,", "test_log1p_complex(self): # complex-valued log1p not yet implemented with pytest.raises(TypeError): cupyx.scipy.special.log1p(0 + 0j) @pytest.mark.parametrize(\"function\",", "0.1252678976534484, 7) # XXX: this is outside the domain of the current implementation,", "\"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol) def test_log1p_path2(self, xp, scp, dtype): # test values for", "dtype, order, degree): vals = xp.linspace(-1, 1, 100, dtype=dtype) return scp.special.lpmv(order, degree, vals)", "0 to avoid NaNs x = xp.linspace(-100, 100, 1000, dtype=dtype) y = xp.linspace(0.001,", "0 or cupy.isnan(lp) @pytest.mark.parametrize(\"order\", [0, 1, 2, 3, 4]) @pytest.mark.parametrize(\"degree\", [0, 1, 2,", "y = xp.linspace(-10, 10, 100, dtype=dtype) if y.dtype.kind == 'c': y += 1j", "degree): vals = xp.linspace(-1, 1, 100, dtype=dtype) return scp.special.lpmv(order, degree, vals) @testing.gpu @testing.with_requires(\"scipy\")", "assert_array_equal(log1p(-2), nan) assert_array_equal(log1p(inf), inf) def test_log1p_complex(self): # complex-valued log1p not yet implemented with", "values tested in the SciPy test suite scp = cupyx.scipy lp = scp.special.lpmv(0,", "40, 0.001) assert_array_almost_equal(lp, 0.1252678976534484, 7) # XXX: this is outside the domain of", "# test values for code path corresponding to range [1/sqrt(2), sqrt(2)] vals =", "scp.special.log1p(vals) def test_log1p_real(self): log1p = cupyx.scipy.special.log1p inf = cupy.inf nan = cupy.nan assert_array_equal(log1p(0),", "test with values > 0 to avoid NaNs x = xp.linspace(-100, 100, 1000,", "xp.linspace(-1, 1, 100, dtype=dtype) return scp.special.lpmv(order, degree, vals) @testing.gpu @testing.with_requires(\"scipy\") class TestBasic: @testing.for_dtypes([\"e\",", "avoid NaNs x = xp.zeros((1, 100), dtype=dtype) y = xp.linspace(-10, 10, 100, dtype=dtype)", "nan) assert_array_equal(log1p(inf), inf) def test_log1p_complex(self): # complex-valued log1p not yet implemented with pytest.raises(TypeError):", "ensure it returns a NaN rather than a wrong answer. olderr = numpy.seterr(all=\"ignore\")", "1j * y return getattr(scp.special, function)(x, y) @pytest.mark.parametrize(\"function\", [\"xlogy\", \"xlog1py\"]) @testing.for_dtypes('efdFD') @numpy_cupy_allclose(scipy_name=\"scp\", rtol={'default':", "current implementation, # so ensure it returns a NaN rather than a wrong", "dtype): # only test with values > 0 to avoid NaNs vals =", "xp.linspace(0.001, 100, 1000, dtype=dtype) if x.dtype.kind == 'c': x -= 1j * x", "\"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\", rtol=rtol) def test_log1p_(self, xp, scp, dtype): # only test with", "2, 3, 4, 5, 10, 20, 30, 40, 50]) @testing.for_dtypes([\"e\", \"f\", \"d\"]) @numpy_cupy_allclose(scipy_name=\"scp\"," ]
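For reference, below is a minimal standalone sketch (not part of the test suite above, and assuming CuPy, SciPy, and a CUDA-capable GPU are available) of the kind of device-versus-host comparison that numpy_cupy_allclose automates in these tests, shown here for log1p only; the array sizes and tolerance simply mirror test_log1p_.

# Standalone sketch: evaluate the CuPy kernel on the device, the SciPy
# reference on the host, and compare the results numerically.
import cupy
import numpy
import scipy.special
import cupyx.scipy.special

vals = numpy.logspace(-10, 10, 10000)                      # positive values, as in test_log1p_
gpu_result = cupyx.scipy.special.log1p(cupy.asarray(vals))  # computed on the GPU
cpu_result = scipy.special.log1p(vals)                      # reference on the CPU
assert numpy.allclose(cupy.asnumpy(gpu_result), cpu_result, rtol=1e-12)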