content
stringlengths
7
1.05M
fixed_cases
stringlengths
1
1.28M
def slices(num_string,slice_size): if slice_size>len(num_string): raise ValueError list_of_slices = [] for i in range(len(num_string)-slice_size+1): temp_answer = [] for j in range(slice_size): temp_answer.append(int(num_string[i+j])) list_of_slices.append(temp_answer) return list_of_slices def largest_product(num_string,slice_size): slices_to_test = slices(num_string,slice_size) answer = 1 for i in range(len(slices_to_test)): working_value = 1 for j in range(slice_size): working_value*=slices_to_test[i][j] if working_value > answer: answer = working_value return answer
def slices(num_string, slice_size): if slice_size > len(num_string): raise ValueError list_of_slices = [] for i in range(len(num_string) - slice_size + 1): temp_answer = [] for j in range(slice_size): temp_answer.append(int(num_string[i + j])) list_of_slices.append(temp_answer) return list_of_slices def largest_product(num_string, slice_size): slices_to_test = slices(num_string, slice_size) answer = 1 for i in range(len(slices_to_test)): working_value = 1 for j in range(slice_size): working_value *= slices_to_test[i][j] if working_value > answer: answer = working_value return answer
global LAYER_UNKNOWN LAYER_UNKNOWN = 'unknown' class Design(object): def __init__(self, layers, smells) -> None: self.layers = layers self.smells = smells super().__init__()
global LAYER_UNKNOWN layer_unknown = 'unknown' class Design(object): def __init__(self, layers, smells) -> None: self.layers = layers self.smells = smells super().__init__()
""" Initialization file for tweets library module. These exist here in lib as some of them are useful as help functions of other scripts (such as getting available campaigns). However, these could be moved to utils/reporting/ as individual scripts. And they could be called directly or with make, to avoid having multiple ways of calling something. """
""" Initialization file for tweets library module. These exist here in lib as some of them are useful as help functions of other scripts (such as getting available campaigns). However, these could be moved to utils/reporting/ as individual scripts. And they could be called directly or with make, to avoid having multiple ways of calling something. """
""" Coin change given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin. Input: amount = 25, coins = [1, 2, 5] """ class Solution: def change(self, amount: int, coins: List[int]) -> int: dic = {0:1} for coin in coins: for i in range(amount+1): dic[i] =dic.get(i,0) + dic.get(i-coin,0) return dic.get(amount,0) amount = 25 coins = [1, 2, 5] a = Solution() a.change(amount, coins) # 42
""" Coin change given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin. Input: amount = 25, coins = [1, 2, 5] """ class Solution: def change(self, amount: int, coins: List[int]) -> int: dic = {0: 1} for coin in coins: for i in range(amount + 1): dic[i] = dic.get(i, 0) + dic.get(i - coin, 0) return dic.get(amount, 0) amount = 25 coins = [1, 2, 5] a = solution() a.change(amount, coins)
class RecentCounter: def __init__(self): self.slide_window = deque() def ping(self, t: int) -> int: self.slide_window.append(t) # invalidate the outdated pings while self.slide_window: if self.slide_window[0] < t - 3000: self.slide_window.popleft() else: break return len(self.slide_window) # Your RecentCounter object will be instantiated and called as such: # obj = RecentCounter() # param_1 = obj.ping(t)
class Recentcounter: def __init__(self): self.slide_window = deque() def ping(self, t: int) -> int: self.slide_window.append(t) while self.slide_window: if self.slide_window[0] < t - 3000: self.slide_window.popleft() else: break return len(self.slide_window)
# -------------- # Code starts here class_1=['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio'] class_2=['Hilary Mason','Carla Gentry','Corinna Cortes'] new_class=class_1+class_2 print(new_class) new_class.append('Peter Warden') print(new_class) new_class.remove("Carla Gentry") print(new_class) # Code ends here # -------------- # Code starts here courses = {'Math':65,'English':70,'History':80,'French':70,'Science':60} marks=[] for i in courses: marks.append(courses[i]) print(marks) print(courses['Math']) print(courses['English']) print(courses['History']) print(courses['French']) print(courses['Science']) total =courses['Math'] + courses['English'] +courses['History'] + courses['French'] +courses['Science'] print(total) percentage=(total*100/500) print(percentage) # Code ends here # -------------- # Code starts here mathematics= {'Geoffrey Hinton': 78,'Andrew Ng':95,'Sebastian Raschka':65,'Yoshua Benjio':50,'Hilary Mason':70,'Corinna Cortes':66,'Peter Warden':75} max_marks_scored =max(mathematics,key=mathematics.get) topper=max_marks_scored print(topper) # Code ends here # -------------- # Given string topper = 'andrew ng' # Code starts here # Create variable first_name first_name = (topper.split()[0]) print (first_name) # Create variable Last_name and store last two element in the list last_name = (topper.split()[1]) print (last_name) # Concatenate the string full_name = last_name + ' ' + first_name # print the full_name print (full_name) # print the name in upper case certificate_name = full_name.upper() print (certificate_name) # Code ends here
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio'] class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes'] new_class = class_1 + class_2 print(new_class) new_class.append('Peter Warden') print(new_class) new_class.remove('Carla Gentry') print(new_class) courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60} marks = [] for i in courses: marks.append(courses[i]) print(marks) print(courses['Math']) print(courses['English']) print(courses['History']) print(courses['French']) print(courses['Science']) total = courses['Math'] + courses['English'] + courses['History'] + courses['French'] + courses['Science'] print(total) percentage = total * 100 / 500 print(percentage) mathematics = {'Geoffrey Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65, 'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66, 'Peter Warden': 75} max_marks_scored = max(mathematics, key=mathematics.get) topper = max_marks_scored print(topper) topper = 'andrew ng' first_name = topper.split()[0] print(first_name) last_name = topper.split()[1] print(last_name) full_name = last_name + ' ' + first_name print(full_name) certificate_name = full_name.upper() print(certificate_name)
widget = WidgetDefault() widget.border = "None" widget.background = "None" commonDefaults["RadialMenuWidget"] = widget def generateRadialMenuWidget(file, screen, menu, parentName): name = menu.getName() file.write(" %s = leRadialMenuWidget_New();" % (name)) generateBaseWidget(file, screen, menu) writeSetInt(file, name, "NumberOfItemsShown", menu.getItemsShown(), 5) writeSetBoolean(file, name, "HighlightProminent", menu.getHighlightProminent(), False) writeSetInt(file, name, "Theta", menu.getTheta(), 0) width = menu.getSize().width height = menu.getSize().height touchX = menu.getTouchX() touchY = menu.getTouchY() touchWidth = menu.getTouchWidth() touchHeight = menu.getTouchHeight() file.write(" %s->fn->setTouchArea(%s, %d, %d, %d, %d);" % (name, name, touchX, touchY, width * touchWidth / 100, height * touchHeight / 100)) ellipseType = menu.getEllipseType().toString() if ellipseType == "Default": ellipseType = "LE_RADIAL_MENU_ELLIPSE_TYPE_DEFAULT" elif ellipseType == "Orbital": ellipseType = "LE_RADIAL_MENU_ELLIPSE_TYPE_OBITAL" else: ellipseType = "LE_RADIAL_MENU_ELLIPSE_TYPE_ROLLODEX" writeSetLiteralString(file, name, "EllipseType", ellipseType, "LE_RADIAL_MENU_ELLIPSE_TYPE_DEFAULT") writeSetBoolean(file, name, "DrawEllipse", menu.getEllipseVisible(), True) scaleMode = menu.getSizeScale().toString() if scaleMode == "Off": scaleMode = "LE_RADIAL_MENU_INTERPOLATE_OFF" elif scaleMode == "Gradual": scaleMode = "LE_RADIAL_MENU_INTERPOLATE_GRADUAL" else: scaleMode = "LE_RADIAL_MENU_INTERPOLATE_PROMINENT" writeSetLiteralString(file, name, "ScaleMode", scaleMode, "LE_RADIAL_MENU_INTERPOLATE_GRADUAL") blendMode = menu.getAlphaScale().toString() if blendMode == "Off": blendMode = "LE_RADIAL_MENU_INTERPOLATE_OFF" elif blendMode == "Gradual": blendMode = "LE_RADIAL_MENU_INTERPOLATE_GRADUAL" else: blendMode = "LE_RADIAL_MENU_INTERPOLATE_PROMINENT" writeSetLiteralString(file, name, "BlendMode", blendMode, "LE_RADIAL_MENU_INTERPOLATE_GRADUAL") min = menu.getMinSizePercent() max = menu.getMaxSizePercent() if min != 30 or max != 100: file.write(" %s->fn->setScaleRange(%s, %d, %d);" % (name, name, min, max)) min = menu.getMinAlphaAmount() max = menu.getMaxAlphaAmount() if min != 128 or max != 255: file.write(" %s->fn->setBlendRange(%s, %d, %d);" % (name, name, min, max)) touchX = menu.getTouchX() touchY = menu.getTouchY() touchWidth = menu.getTouchWidth() touchHeight = menu.getTouchHeight() if touchX != 0 or touchY != 75 or touchWidth != 100 or touchHeight != 50: file.write(" %s->fn->setTouchArea(%s, %d, %d, %d, %d);" % (name, name, touchX, touchY, touchWidth, touchHeight)) x = menu.getLocation(False).x y = menu.getLocation(False).y width = menu.getSize().width height = menu.getSize().height xp = x + width / 2; yp = y + height / 2; items = menu.getItemList() if len(items) > 0: for idx, item in enumerate(items): varName = "%s_image_%d" % (name, idx) file.write(" %s = leImageScaleWidget_New();" % (varName)) imageName = craftAssetName(item.image) if imageName != "NULL": file.write(" %s->fn->setImage(%s, %s);" % (varName, varName, imageName)) file.write(" %s->fn->setTransformWidth(%s, %d);" % (varName, varName, item.currentSize.width)) file.write(" %s->fn->setTransformHeight(%s, %s);" % (varName, varName, item.currentSize.height)) file.write(" %s->fn->setStretchEnabled(%s, LE_TRUE);" % (varName, varName)) file.write(" %s->fn->setPreserveAspectEnabled(%s, LE_TRUE);" % (varName, varName)) else: file.write(" %s->fn->setBackgroundType(%s, LE_WIDGET_BACKGROUND_FILL);" % (varName, varName)) file.write(" %s->fn->setBorderType(%s, LE_WIDGET_BORDER_LINE);" % (varName, varName)) if not (item.t == 270 and menu.getItemsShown() < len(items)): file.write(" %s->fn->setVisible(%s, LE_FALSE);" % (varName, varName)) file.write(" %s->fn->setPosition(%s, %d, %d);" % (varName, varName, xp, yp)) file.write(" %s->fn->setSize(%s, %d, %d);" % (varName, varName, item.originalSize.width, item.originalSize.height)) if item.originalAlphaAmount != 255: file.write(" %s->fn->setAlphaAmount(%s, %d);" % (varName, varName, item.originalAlphaAmount)); file.write(" %s->fn->addWidget(%s, (leWidget*)%s);" % (name, name, varName)) writeEvent(file, name, menu, "ItemSelectedEvent", "ItemSelectedEventCallback", "OnItemSelected") writeEvent(file, name, menu, "ItemProminenceChangedEvent", "ItemProminenceChangedEvent", "OnItemProminenceChanged") file.write(" %s->fn->addChild(%s, (leWidget*)%s);" % (parentName, parentName, name)) file.writeNewLine() def generateRadialMenuEvent(screen, widget, event, genActions): text = "" if event.name == "ItemSelectedEvent": text += "void %s_OnItemSelected(%s)\n" % (widget.getName(), getWidgetVariableName(widget)) if event.name == "ItemProminenceChangedEvent": text += "void %s_OnItemProminenceChanged(%s)\n" % (widget.getName(), getWidgetVariableName(widget)) text += generateActions(widget, event, genActions, None, None) return text def generateRadialMenuAction(text, variables, owner, event, action): i = 0
widget = widget_default() widget.border = 'None' widget.background = 'None' commonDefaults['RadialMenuWidget'] = widget def generate_radial_menu_widget(file, screen, menu, parentName): name = menu.getName() file.write(' %s = leRadialMenuWidget_New();' % name) generate_base_widget(file, screen, menu) write_set_int(file, name, 'NumberOfItemsShown', menu.getItemsShown(), 5) write_set_boolean(file, name, 'HighlightProminent', menu.getHighlightProminent(), False) write_set_int(file, name, 'Theta', menu.getTheta(), 0) width = menu.getSize().width height = menu.getSize().height touch_x = menu.getTouchX() touch_y = menu.getTouchY() touch_width = menu.getTouchWidth() touch_height = menu.getTouchHeight() file.write(' %s->fn->setTouchArea(%s, %d, %d, %d, %d);' % (name, name, touchX, touchY, width * touchWidth / 100, height * touchHeight / 100)) ellipse_type = menu.getEllipseType().toString() if ellipseType == 'Default': ellipse_type = 'LE_RADIAL_MENU_ELLIPSE_TYPE_DEFAULT' elif ellipseType == 'Orbital': ellipse_type = 'LE_RADIAL_MENU_ELLIPSE_TYPE_OBITAL' else: ellipse_type = 'LE_RADIAL_MENU_ELLIPSE_TYPE_ROLLODEX' write_set_literal_string(file, name, 'EllipseType', ellipseType, 'LE_RADIAL_MENU_ELLIPSE_TYPE_DEFAULT') write_set_boolean(file, name, 'DrawEllipse', menu.getEllipseVisible(), True) scale_mode = menu.getSizeScale().toString() if scaleMode == 'Off': scale_mode = 'LE_RADIAL_MENU_INTERPOLATE_OFF' elif scaleMode == 'Gradual': scale_mode = 'LE_RADIAL_MENU_INTERPOLATE_GRADUAL' else: scale_mode = 'LE_RADIAL_MENU_INTERPOLATE_PROMINENT' write_set_literal_string(file, name, 'ScaleMode', scaleMode, 'LE_RADIAL_MENU_INTERPOLATE_GRADUAL') blend_mode = menu.getAlphaScale().toString() if blendMode == 'Off': blend_mode = 'LE_RADIAL_MENU_INTERPOLATE_OFF' elif blendMode == 'Gradual': blend_mode = 'LE_RADIAL_MENU_INTERPOLATE_GRADUAL' else: blend_mode = 'LE_RADIAL_MENU_INTERPOLATE_PROMINENT' write_set_literal_string(file, name, 'BlendMode', blendMode, 'LE_RADIAL_MENU_INTERPOLATE_GRADUAL') min = menu.getMinSizePercent() max = menu.getMaxSizePercent() if min != 30 or max != 100: file.write(' %s->fn->setScaleRange(%s, %d, %d);' % (name, name, min, max)) min = menu.getMinAlphaAmount() max = menu.getMaxAlphaAmount() if min != 128 or max != 255: file.write(' %s->fn->setBlendRange(%s, %d, %d);' % (name, name, min, max)) touch_x = menu.getTouchX() touch_y = menu.getTouchY() touch_width = menu.getTouchWidth() touch_height = menu.getTouchHeight() if touchX != 0 or touchY != 75 or touchWidth != 100 or (touchHeight != 50): file.write(' %s->fn->setTouchArea(%s, %d, %d, %d, %d);' % (name, name, touchX, touchY, touchWidth, touchHeight)) x = menu.getLocation(False).x y = menu.getLocation(False).y width = menu.getSize().width height = menu.getSize().height xp = x + width / 2 yp = y + height / 2 items = menu.getItemList() if len(items) > 0: for (idx, item) in enumerate(items): var_name = '%s_image_%d' % (name, idx) file.write(' %s = leImageScaleWidget_New();' % varName) image_name = craft_asset_name(item.image) if imageName != 'NULL': file.write(' %s->fn->setImage(%s, %s);' % (varName, varName, imageName)) file.write(' %s->fn->setTransformWidth(%s, %d);' % (varName, varName, item.currentSize.width)) file.write(' %s->fn->setTransformHeight(%s, %s);' % (varName, varName, item.currentSize.height)) file.write(' %s->fn->setStretchEnabled(%s, LE_TRUE);' % (varName, varName)) file.write(' %s->fn->setPreserveAspectEnabled(%s, LE_TRUE);' % (varName, varName)) else: file.write(' %s->fn->setBackgroundType(%s, LE_WIDGET_BACKGROUND_FILL);' % (varName, varName)) file.write(' %s->fn->setBorderType(%s, LE_WIDGET_BORDER_LINE);' % (varName, varName)) if not (item.t == 270 and menu.getItemsShown() < len(items)): file.write(' %s->fn->setVisible(%s, LE_FALSE);' % (varName, varName)) file.write(' %s->fn->setPosition(%s, %d, %d);' % (varName, varName, xp, yp)) file.write(' %s->fn->setSize(%s, %d, %d);' % (varName, varName, item.originalSize.width, item.originalSize.height)) if item.originalAlphaAmount != 255: file.write(' %s->fn->setAlphaAmount(%s, %d);' % (varName, varName, item.originalAlphaAmount)) file.write(' %s->fn->addWidget(%s, (leWidget*)%s);' % (name, name, varName)) write_event(file, name, menu, 'ItemSelectedEvent', 'ItemSelectedEventCallback', 'OnItemSelected') write_event(file, name, menu, 'ItemProminenceChangedEvent', 'ItemProminenceChangedEvent', 'OnItemProminenceChanged') file.write(' %s->fn->addChild(%s, (leWidget*)%s);' % (parentName, parentName, name)) file.writeNewLine() def generate_radial_menu_event(screen, widget, event, genActions): text = '' if event.name == 'ItemSelectedEvent': text += 'void %s_OnItemSelected(%s)\n' % (widget.getName(), get_widget_variable_name(widget)) if event.name == 'ItemProminenceChangedEvent': text += 'void %s_OnItemProminenceChanged(%s)\n' % (widget.getName(), get_widget_variable_name(widget)) text += generate_actions(widget, event, genActions, None, None) return text def generate_radial_menu_action(text, variables, owner, event, action): i = 0
def find_divisor(numbers): for index, number in enumerate(numbers): print("len", len(numbers[index + 1:])) for divider in reversed(numbers[index + 1:]): if number % divider == 0: print("found {} and {}. Rest: {}".format( number, divider, number % divider)) return int(number / divider) return 0 with open("input", "r") as f: input = f.read() lines = input.split("\n") sum = 0 for line in lines: if not len(line): continue numbers = sorted([int(x) for x in line.split("\t")], reverse=True) sum = sum + find_divisor(numbers) print("CS", sum)
def find_divisor(numbers): for (index, number) in enumerate(numbers): print('len', len(numbers[index + 1:])) for divider in reversed(numbers[index + 1:]): if number % divider == 0: print('found {} and {}. Rest: {}'.format(number, divider, number % divider)) return int(number / divider) return 0 with open('input', 'r') as f: input = f.read() lines = input.split('\n') sum = 0 for line in lines: if not len(line): continue numbers = sorted([int(x) for x in line.split('\t')], reverse=True) sum = sum + find_divisor(numbers) print('CS', sum)
class University: def __init__(self, name, country, world_rank): self.name = name self.country = country self.world_rank = world_rank
class University: def __init__(self, name, country, world_rank): self.name = name self.country = country self.world_rank = world_rank
#!/usr/bin/env python class Solution: def copyRandomList(self, head: 'Node') -> 'Node': curr = head while curr: node = Node(curr.val, curr.next, None) curr.next, curr = node, curr.next curr = head while curr: copy = curr.next copy.random = curr.random.next curr.next = copy.next ret = copy = head.next while copy.next: copy = copy.next = copy.next.next return ret
class Solution: def copy_random_list(self, head: 'Node') -> 'Node': curr = head while curr: node = node(curr.val, curr.next, None) (curr.next, curr) = (node, curr.next) curr = head while curr: copy = curr.next copy.random = curr.random.next curr.next = copy.next ret = copy = head.next while copy.next: copy = copy.next = copy.next.next return ret
def _responses_path( config: "Config", sim_runner: "FEMRunner", sim_params: "SimParams", response_type: "ResponseType", ) -> str: """Path to fem that were generated with given parameters.""" return sim_runner.sim_out_path( config=config, sim_params=sim_params, ext="npy", response_types=[response_type] ) # determinant of matrix a def det(a): return ( a[0][0] * a[1][1] * a[2][2] + a[0][1] * a[1][2] * a[2][0] + a[0][2] * a[1][0] * a[2][1] - a[0][2] * a[1][1] * a[2][0] - a[0][1] * a[1][0] * a[2][2] - a[0][0] * a[1][2] * a[2][1] ) # unit normal vector of plane defined by points a, b, and c def unit_normal(a, b, c): x = det([[1, a[1], a[2]], [1, b[1], b[2]], [1, c[1], c[2]]]) y = det([[a[0], 1, a[2]], [b[0], 1, b[2]], [c[0], 1, c[2]]]) z = det([[a[0], a[1], 1], [b[0], b[1], 1], [c[0], c[1], 1]]) magnitude = (x ** 2 + y ** 2 + z ** 2) ** 0.5 return x / magnitude, y / magnitude, z / magnitude # dot product of vectors a and b def dot(a, b): return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] # cross product of vectors a and b def cross(a, b): x = a[1] * b[2] - a[2] * b[1] y = a[2] * b[0] - a[0] * b[2] z = a[0] * b[1] - a[1] * b[0] return x, y, z # area of polygon poly def poly_area(poly): if len(poly) < 3: # not a plane - no area raise ValueError("Not a plane, need >= 3 points") total = [0, 0, 0] for i in range(len(poly)): vi1 = poly[i] if i is len(poly) - 1: vi2 = poly[0] else: vi2 = poly[i + 1] prod = cross(vi1, vi2) total[0] += prod[0] total[1] += prod[1] total[2] += prod[2] result = dot(total, unit_normal(poly[0], poly[1], poly[2])) return abs(result / 2)
def _responses_path(config: 'Config', sim_runner: 'FEMRunner', sim_params: 'SimParams', response_type: 'ResponseType') -> str: """Path to fem that were generated with given parameters.""" return sim_runner.sim_out_path(config=config, sim_params=sim_params, ext='npy', response_types=[response_type]) def det(a): return a[0][0] * a[1][1] * a[2][2] + a[0][1] * a[1][2] * a[2][0] + a[0][2] * a[1][0] * a[2][1] - a[0][2] * a[1][1] * a[2][0] - a[0][1] * a[1][0] * a[2][2] - a[0][0] * a[1][2] * a[2][1] def unit_normal(a, b, c): x = det([[1, a[1], a[2]], [1, b[1], b[2]], [1, c[1], c[2]]]) y = det([[a[0], 1, a[2]], [b[0], 1, b[2]], [c[0], 1, c[2]]]) z = det([[a[0], a[1], 1], [b[0], b[1], 1], [c[0], c[1], 1]]) magnitude = (x ** 2 + y ** 2 + z ** 2) ** 0.5 return (x / magnitude, y / magnitude, z / magnitude) def dot(a, b): return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] def cross(a, b): x = a[1] * b[2] - a[2] * b[1] y = a[2] * b[0] - a[0] * b[2] z = a[0] * b[1] - a[1] * b[0] return (x, y, z) def poly_area(poly): if len(poly) < 3: raise value_error('Not a plane, need >= 3 points') total = [0, 0, 0] for i in range(len(poly)): vi1 = poly[i] if i is len(poly) - 1: vi2 = poly[0] else: vi2 = poly[i + 1] prod = cross(vi1, vi2) total[0] += prod[0] total[1] += prod[1] total[2] += prod[2] result = dot(total, unit_normal(poly[0], poly[1], poly[2])) return abs(result / 2)
KEY_PRESS = 0 MOUSE_DOWN = 1 MOUSE_UP = 2 MOUSE_DOUBLE_CLICK = 3 MOUSE_MOVE = 5 SCROLL_DOWN = 6 SCROLL_UP = 7 SCROLL_STEP = 1 CTRL = 'ctrl' SHIFT = 'shift' ALT = 'alt' MODIFIER_KEYS = (CTRL, SHIFT, ALT,) MODIFIER_KEYS_PRESS_DELAY = .4 EVENTS_DELAY = .05 LEFT = "left" MIDDLE = "middle" RIGHT = "right" HIGH_QUALITY = 75 MEDIUM_QUALITY = 60 LOW_QUALITY = 40 HIGH_SCALE = 70/100 MEDIUM_SCALE = 50/100 LOW_SCALE = 40/100
key_press = 0 mouse_down = 1 mouse_up = 2 mouse_double_click = 3 mouse_move = 5 scroll_down = 6 scroll_up = 7 scroll_step = 1 ctrl = 'ctrl' shift = 'shift' alt = 'alt' modifier_keys = (CTRL, SHIFT, ALT) modifier_keys_press_delay = 0.4 events_delay = 0.05 left = 'left' middle = 'middle' right = 'right' high_quality = 75 medium_quality = 60 low_quality = 40 high_scale = 70 / 100 medium_scale = 50 / 100 low_scale = 40 / 100
#Celsius to Fahrenheit conversion #F = C *9/5 +32 F= 0 print("Give the Number of Celcius: ") c=float(input()) print("The result is: ") F=c*9/5+32 print(F)
f = 0 print('Give the Number of Celcius: ') c = float(input()) print('The result is: ') f = c * 9 / 5 + 32 print(F)
# -*- coding: utf-8 -*- """ ParaMol MM_engines subpackage. Contains modules related to the ParaMol representation of MM engines. """ __all__ = ['openmm', 'resp']
""" ParaMol MM_engines subpackage. Contains modules related to the ParaMol representation of MM engines. """ __all__ = ['openmm', 'resp']
#Software By AwesomeWithRex def read_file(filename): with open(filename) as f: filename = f.readlines() return filename def get_template(): template = '' with open('template.html', 'r') as f: template = f.readlines() return template def put_in_body(file, template): count = 0 body_tag = 0 for i in template: count += 1 if '|b|' in i: body_tag = count - 1 text_to_append = "" for line in file: text_to_append += line formatted_text = "" for word in text_to_append: formatted_text += word if '\n' in word: formatted_text += word.replace('\n', '<br/>\t') template[body_tag] = template[body_tag].replace('|b|', formatted_text) for i in template: print(i) return template def save_template(name_of_doc, saved_doc_file): with open(name_of_doc, 'w') as f: f.writelines(saved_doc_file) def put_in_title(): pass def main(): content = read_file('text.txt') template = get_template() formatted_template = put_in_body(content, template) save_template('the.html',formatted_template) if __name__=='__main__': main()
def read_file(filename): with open(filename) as f: filename = f.readlines() return filename def get_template(): template = '' with open('template.html', 'r') as f: template = f.readlines() return template def put_in_body(file, template): count = 0 body_tag = 0 for i in template: count += 1 if '|b|' in i: body_tag = count - 1 text_to_append = '' for line in file: text_to_append += line formatted_text = '' for word in text_to_append: formatted_text += word if '\n' in word: formatted_text += word.replace('\n', '<br/>\t') template[body_tag] = template[body_tag].replace('|b|', formatted_text) for i in template: print(i) return template def save_template(name_of_doc, saved_doc_file): with open(name_of_doc, 'w') as f: f.writelines(saved_doc_file) def put_in_title(): pass def main(): content = read_file('text.txt') template = get_template() formatted_template = put_in_body(content, template) save_template('the.html', formatted_template) if __name__ == '__main__': main()
class Node: def __init__(self, data): self.data = data self.next = None class LinkedList: def __init__(self): self.head = None def print_list(self): cur_node=self.head while cur_node: print(cur_node.data) cur_node = cur_node.next def append(self, data): new_node = Node(data) if self.head is None: self.head = new_node return last_node = self.head while last_node.next: last_node = last_node.next last_node.next = new_node def prepend(self,data): new_node = Node(data) new_node.next = self.head self.head = new_node def insert_after_node(self, prev_node, data): if not prev_node: print("previous Node not in the list") return new_node = Node(data) new_node.next = prev_node.next prev_node.next = new_node def delete_node(self, key): current_node = self.head if current_node and current_node.data==key: self.head = current_node.next current_node = None return prev = None while current_node and current_node.data != key: prev = current_node current_node = current_node.next if current_node is None: return prev.next = current_node.next current_node = None llist = LinkedList() llist.append("A") llist.append("B") llist.append("C") llist.append("D") #llist.prepend("E") llist.delete_node("A") llist.insert_after_node(llist.head.next,"E") #print(llist.head.data) llist.print_list()
class Node: def __init__(self, data): self.data = data self.next = None class Linkedlist: def __init__(self): self.head = None def print_list(self): cur_node = self.head while cur_node: print(cur_node.data) cur_node = cur_node.next def append(self, data): new_node = node(data) if self.head is None: self.head = new_node return last_node = self.head while last_node.next: last_node = last_node.next last_node.next = new_node def prepend(self, data): new_node = node(data) new_node.next = self.head self.head = new_node def insert_after_node(self, prev_node, data): if not prev_node: print('previous Node not in the list') return new_node = node(data) new_node.next = prev_node.next prev_node.next = new_node def delete_node(self, key): current_node = self.head if current_node and current_node.data == key: self.head = current_node.next current_node = None return prev = None while current_node and current_node.data != key: prev = current_node current_node = current_node.next if current_node is None: return prev.next = current_node.next current_node = None llist = linked_list() llist.append('A') llist.append('B') llist.append('C') llist.append('D') llist.delete_node('A') llist.insert_after_node(llist.head.next, 'E') llist.print_list()
# -*- coding: utf-8 -*- def in_segregation(x0, R, n, N=None): """ return the actual indium concentration in th nth layer Params ------ x0 : float the indium concentration between 0 and 1 R : float the segregation coefficient n : int the current layer N : int number of layers in the well """ if N: return x0*(1-R**N)*R**(n-N) return x0*(1-R**n)
def in_segregation(x0, R, n, N=None): """ return the actual indium concentration in th nth layer Params ------ x0 : float the indium concentration between 0 and 1 R : float the segregation coefficient n : int the current layer N : int number of layers in the well """ if N: return x0 * (1 - R ** N) * R ** (n - N) return x0 * (1 - R ** n)
class Solution: def angleClock(self, hour: int, minutes: int) -> float: hdeg = ((hour*30) + (minutes*0.5))%360 mdeg = (minutes * 6) angle = abs(hdeg-mdeg) return min(angle, 360-angle)
class Solution: def angle_clock(self, hour: int, minutes: int) -> float: hdeg = (hour * 30 + minutes * 0.5) % 360 mdeg = minutes * 6 angle = abs(hdeg - mdeg) return min(angle, 360 - angle)
# Copyright (c) 2010 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'chromium_code': 1, 'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out', }, 'targets': [ { # Protobuf compiler / generate rule for sync.proto. This is used by # test code in net, which is why it's isolated into its own .gyp file. 'target_name': 'sync_proto', 'type': 'none', 'sources': [ 'sync.proto', 'encryption.proto', 'app_specifics.proto', 'autofill_specifics.proto', 'bookmark_specifics.proto', 'extension_specifics.proto', 'nigori_specifics.proto', 'password_specifics.proto', 'preference_specifics.proto', 'session_specifics.proto', 'test.proto', 'theme_specifics.proto', 'typed_url_specifics.proto', ], 'rules': [ { 'rule_name': 'genproto', 'extension': 'proto', 'inputs': [ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', ], 'outputs': [ '<(PRODUCT_DIR)/pyproto/sync_pb/<(RULE_INPUT_ROOT)_pb2.py', '<(protoc_out_dir)/chrome/browser/sync/protocol/<(RULE_INPUT_ROOT).pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/<(RULE_INPUT_ROOT).pb.cc', ], 'action': [ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', '--proto_path=.', './<(RULE_INPUT_ROOT)<(RULE_INPUT_EXT)', '--cpp_out=<(protoc_out_dir)/chrome/browser/sync/protocol', '--python_out=<(PRODUCT_DIR)/pyproto/sync_pb', ], 'message': 'Generating C++ and Python code from <(RULE_INPUT_PATH)', }, ], 'dependencies': [ '../../../../third_party/protobuf/protobuf.gyp:protoc#host', ], }, { 'target_name': 'sync_proto_cpp', 'type': '<(library)', 'sources': [ '<(protoc_out_dir)/chrome/browser/sync/protocol/sync.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/sync.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/encryption.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/encryption.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/app_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/app_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/autofill_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/autofill_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/bookmark_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/bookmark_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/extension_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/extension_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/nigori_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/nigori_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/password_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/password_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/preference_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/preference_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/session_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/session_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/theme_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/theme_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/typed_url_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/typed_url_specifics.pb.h', ], 'export_dependent_settings': [ '../../../../third_party/protobuf/protobuf.gyp:protobuf_lite', 'sync_proto', ], 'dependencies': [ '../../../../third_party/protobuf/protobuf.gyp:protobuf_lite', 'sync_proto', ], 'direct_dependent_settings': { 'include_dirs': [ '<(protoc_out_dir)', ], }, # This target exports a hard dependency because it includes generated # header files. 'hard_dependency': 1, }, ], } # Local Variables: # tab-width:2 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=2 shiftwidth=2:
{'variables': {'chromium_code': 1, 'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out'}, 'targets': [{'target_name': 'sync_proto', 'type': 'none', 'sources': ['sync.proto', 'encryption.proto', 'app_specifics.proto', 'autofill_specifics.proto', 'bookmark_specifics.proto', 'extension_specifics.proto', 'nigori_specifics.proto', 'password_specifics.proto', 'preference_specifics.proto', 'session_specifics.proto', 'test.proto', 'theme_specifics.proto', 'typed_url_specifics.proto'], 'rules': [{'rule_name': 'genproto', 'extension': 'proto', 'inputs': ['<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)'], 'outputs': ['<(PRODUCT_DIR)/pyproto/sync_pb/<(RULE_INPUT_ROOT)_pb2.py', '<(protoc_out_dir)/chrome/browser/sync/protocol/<(RULE_INPUT_ROOT).pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/<(RULE_INPUT_ROOT).pb.cc'], 'action': ['<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)', '--proto_path=.', './<(RULE_INPUT_ROOT)<(RULE_INPUT_EXT)', '--cpp_out=<(protoc_out_dir)/chrome/browser/sync/protocol', '--python_out=<(PRODUCT_DIR)/pyproto/sync_pb'], 'message': 'Generating C++ and Python code from <(RULE_INPUT_PATH)'}], 'dependencies': ['../../../../third_party/protobuf/protobuf.gyp:protoc#host']}, {'target_name': 'sync_proto_cpp', 'type': '<(library)', 'sources': ['<(protoc_out_dir)/chrome/browser/sync/protocol/sync.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/sync.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/encryption.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/encryption.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/app_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/app_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/autofill_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/autofill_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/bookmark_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/bookmark_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/extension_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/extension_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/nigori_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/nigori_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/password_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/password_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/preference_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/preference_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/session_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/session_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/theme_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/theme_specifics.pb.h', '<(protoc_out_dir)/chrome/browser/sync/protocol/typed_url_specifics.pb.cc', '<(protoc_out_dir)/chrome/browser/sync/protocol/typed_url_specifics.pb.h'], 'export_dependent_settings': ['../../../../third_party/protobuf/protobuf.gyp:protobuf_lite', 'sync_proto'], 'dependencies': ['../../../../third_party/protobuf/protobuf.gyp:protobuf_lite', 'sync_proto'], 'direct_dependent_settings': {'include_dirs': ['<(protoc_out_dir)']}, 'hard_dependency': 1}]}
#!/usr/bin/env python # encoding: utf-8 def run(whatweb, pluginname): whatweb.recog_from_file(pluginname, "sysImages/css/PagesCSS.css", "foosun") whatweb.recog_from_file(pluginname, "Tags.html", "Foosun")
def run(whatweb, pluginname): whatweb.recog_from_file(pluginname, 'sysImages/css/PagesCSS.css', 'foosun') whatweb.recog_from_file(pluginname, 'Tags.html', 'Foosun')
# reading withdrawal amount and account balance x,y=map(float,input().split()) # this will check if account balance is less than the withdrawal amount or # withdrawal amount is multiple of 5 and print the current account balance if(x+0.5>=y or x%5!=0 or y<=0): # printing the result upto two decimals print("%.2f"%y) # otherwise transaction will take place and print updated account balance else: y=y-x-0.50 # printing the result upto two decimals print("%.2f"%y)
(x, y) = map(float, input().split()) if x + 0.5 >= y or x % 5 != 0 or y <= 0: print('%.2f' % y) else: y = y - x - 0.5 print('%.2f' % y)
#-------------------------------------- # Open and Parse BF File #-------------------------------------- fileName = input("Enter name of Brainf*** file here: ") file = open(fileName, "r") programCode = [] validCommands = [">", "<", "+", "-", ".", ",", "[", "]"] for x in file: for y in x: if y in validCommands: programCode.append(y) file.close() #-------------------------------------- # Find Indexes of Matching Brackets #-------------------------------------- bracketPositions = [] loopIndex = 0 openIndex = [] for x in programCode: if x == "[": openIndex.append(loopIndex) if x == "]": openPosition = openIndex.pop() bracketPositions.append([openPosition, loopIndex]) loopIndex += 1 #-------------------------------------- # Set Up BF Cells and Pointers #-------------------------------------- memCells = [] memPointer = 0 instructionPointer = 0 memCellsStepper = 0 maxCells = 5000 while memCellsStepper < maxCells: memCells.append(0) memCellsStepper += 1 #-------------------------------------- # Define BF Commands #-------------------------------------- def moveRight(): global memPointer memPointer += 1 if memPointer >= maxCells: memPointer = 0 def moveLeft(): global memPointer memPointer -= 1 if memPointer < 0: memPointer = maxCells - 1 def incrementCell(): memCells[memPointer] += 1 def decrementCell(): memCells[memPointer] -= 1 def outputValue(): print(chr(memCells[memPointer]), end="") def takeInput(): print() value = input(">") memCells[memPointer] = ord(value[0]) def openBracket(): global instructionPointer if memCells[memPointer] == 0: for x in bracketPositions: if x[0] == instructionPointer: instructionPointer = x[1] def closeBracket(): global instructionPointer if memCells[memPointer] != 0: for x in bracketPositions: if x[1] == instructionPointer: instructionPointer = x[0] #-------------------------------------- # Execute BF Code #-------------------------------------- while instructionPointer != len(programCode): x = programCode[instructionPointer] if x == ">": moveRight() if x == "<": moveLeft() if x == "+": incrementCell() if x == "-": decrementCell() if x == ".": outputValue() if x == ",": takeInput() if x == "[": openBracket() if x == "]": closeBracket() instructionPointer += 1
file_name = input('Enter name of Brainf*** file here: ') file = open(fileName, 'r') program_code = [] valid_commands = ['>', '<', '+', '-', '.', ',', '[', ']'] for x in file: for y in x: if y in validCommands: programCode.append(y) file.close() bracket_positions = [] loop_index = 0 open_index = [] for x in programCode: if x == '[': openIndex.append(loopIndex) if x == ']': open_position = openIndex.pop() bracketPositions.append([openPosition, loopIndex]) loop_index += 1 mem_cells = [] mem_pointer = 0 instruction_pointer = 0 mem_cells_stepper = 0 max_cells = 5000 while memCellsStepper < maxCells: memCells.append(0) mem_cells_stepper += 1 def move_right(): global memPointer mem_pointer += 1 if memPointer >= maxCells: mem_pointer = 0 def move_left(): global memPointer mem_pointer -= 1 if memPointer < 0: mem_pointer = maxCells - 1 def increment_cell(): memCells[memPointer] += 1 def decrement_cell(): memCells[memPointer] -= 1 def output_value(): print(chr(memCells[memPointer]), end='') def take_input(): print() value = input('>') memCells[memPointer] = ord(value[0]) def open_bracket(): global instructionPointer if memCells[memPointer] == 0: for x in bracketPositions: if x[0] == instructionPointer: instruction_pointer = x[1] def close_bracket(): global instructionPointer if memCells[memPointer] != 0: for x in bracketPositions: if x[1] == instructionPointer: instruction_pointer = x[0] while instructionPointer != len(programCode): x = programCode[instructionPointer] if x == '>': move_right() if x == '<': move_left() if x == '+': increment_cell() if x == '-': decrement_cell() if x == '.': output_value() if x == ',': take_input() if x == '[': open_bracket() if x == ']': close_bracket() instruction_pointer += 1
_registered_input_modules_types = {} def register(name, class_type): if name in _registered_input_modules_types: raise RuntimeError("Dublicate input module name: " + name) _registered_input_modules_types[name] = class_type def load_modules(agent, input_link_config): input_modules = [] # get input modules configuration from Parameter Server if not isinstance(input_link_config, dict): raise RuntimeError("Input link configuration is not valid.") # process configuration for module_name, module_config in input_link_config.iteritems(): module_type = _registered_input_modules_types.get(module_name) if module_type: input_modules.append( module_type(agent, module_config) ) else: raise RuntimeError("Input module {} type is unknown." % module_name) return input_modules
_registered_input_modules_types = {} def register(name, class_type): if name in _registered_input_modules_types: raise runtime_error('Dublicate input module name: ' + name) _registered_input_modules_types[name] = class_type def load_modules(agent, input_link_config): input_modules = [] if not isinstance(input_link_config, dict): raise runtime_error('Input link configuration is not valid.') for (module_name, module_config) in input_link_config.iteritems(): module_type = _registered_input_modules_types.get(module_name) if module_type: input_modules.append(module_type(agent, module_config)) else: raise runtime_error('Input module {} type is unknown.' % module_name) return input_modules
def find_missing(array): return [x for x in range(array[0], array[-1] + 1) if x not in array] lst = [2, 4, 1, 7, 10] print(find_missing(lst))
def find_missing(array): return [x for x in range(array[0], array[-1] + 1) if x not in array] lst = [2, 4, 1, 7, 10] print(find_missing(lst))
def trigger(): return """ CREATE OR REPLACE FUNCTION trg_mensagem_ticket_solucao() RETURNS TRIGGER AS $$ BEGIN IF (NEW.solucao) THEN UPDATE ticket SET solucionado_id = NEW.id, data_solucao = NOW(), hora_solucao = NOW() WHERE id = NEW.ticket_id; END IF; RETURN NEW; END $$ LANGUAGE plpgsql; DROP TRIGGER IF EXISTS trg_mensagem_ticket_solucao ON mensagem_ticket; CREATE TRIGGER trg_mensagem_ticket_solucao AFTER INSERT ON mensagem_ticket FOR EACH ROW EXECUTE PROCEDURE trg_mensagem_ticket_solucao(); """
def trigger(): return '\n CREATE OR REPLACE FUNCTION trg_mensagem_ticket_solucao()\n RETURNS TRIGGER AS $$\n BEGIN\n IF (NEW.solucao) THEN\n UPDATE ticket SET solucionado_id = NEW.id, data_solucao = NOW(), hora_solucao = NOW() WHERE id = NEW.ticket_id;\n END IF;\n \n RETURN NEW;\n END\n $$ LANGUAGE plpgsql;\n \n DROP TRIGGER IF EXISTS trg_mensagem_ticket_solucao ON mensagem_ticket;\n CREATE TRIGGER trg_mensagem_ticket_solucao\n AFTER INSERT ON mensagem_ticket\n FOR EACH ROW EXECUTE PROCEDURE trg_mensagem_ticket_solucao();\n '
S1 = "Hello Python" print(S1) # Prints complete string print(S1[0]) # Prints first character of the string print(S1[2:5]) # Prints character starting from 3rd t 5th print(S1[2:]) # Prints string starting from 3rd character print(S1 * 2) # Prints string two times print(S1 + "Thanks") # Prints concatenated string
s1 = 'Hello Python' print(S1) print(S1[0]) print(S1[2:5]) print(S1[2:]) print(S1 * 2) print(S1 + 'Thanks')
def onSpawn(): while True: pet.moveXY(48, 8) pet.moveXY(12, 8) pet.on("spawn", onSpawn) while True: hero.say("Run!!!") hero.say("Faster!")
def on_spawn(): while True: pet.moveXY(48, 8) pet.moveXY(12, 8) pet.on('spawn', onSpawn) while True: hero.say('Run!!!') hero.say('Faster!')
def valid_parentheses(parens): """Are the parentheses validly balanced? >>> valid_parentheses("()") True >>> valid_parentheses("()()") True >>> valid_parentheses("(()())") True >>> valid_parentheses(")()") False >>> valid_parentheses("())") False >>> valid_parentheses("((())") False >>> valid_parentheses(")()(") False """ d = {"(" : 1, ")" : -1} s = 0 for c in parens: s = s + d[c] if s < 0: return False return s == 0
def valid_parentheses(parens): """Are the parentheses validly balanced? >>> valid_parentheses("()") True >>> valid_parentheses("()()") True >>> valid_parentheses("(()())") True >>> valid_parentheses(")()") False >>> valid_parentheses("())") False >>> valid_parentheses("((())") False >>> valid_parentheses(")()(") False """ d = {'(': 1, ')': -1} s = 0 for c in parens: s = s + d[c] if s < 0: return False return s == 0
# TO print Fibonacci Series upto n numbers and replace all prime numbers and multiples of 5 by 0 # Checking for prime numbers def isprime(numb): if numb == 2: return True elif numb == 3: return True else : for i in range(2, numb // 2 + 1): if (numb % i) == 0: return False else: return True # Finding out the fibonacci numbers def fibonacci_series(n): flag = 0 a,b = 1,1 if n == 1: print(a) else: print(a, end = " ") print(b, end = " ") while flag <= n: c = a + b a,b = b,c flag += 1 if c % 5 == 0 or isprime(c): print(0, end = " ") else: print(c, end = " ") # The number of fibonacci terms required n1 = int(input("Enter the value of n: ")) n = n1 - 3 fibonacci_series(n)
def isprime(numb): if numb == 2: return True elif numb == 3: return True else: for i in range(2, numb // 2 + 1): if numb % i == 0: return False else: return True def fibonacci_series(n): flag = 0 (a, b) = (1, 1) if n == 1: print(a) else: print(a, end=' ') print(b, end=' ') while flag <= n: c = a + b (a, b) = (b, c) flag += 1 if c % 5 == 0 or isprime(c): print(0, end=' ') else: print(c, end=' ') n1 = int(input('Enter the value of n: ')) n = n1 - 3 fibonacci_series(n)
class Solution: def reorderLogFiles(self, logs: List[str]) -> List[str]: def corder(log): identifier, detail = log.split(None, 1) return (0, detail, identifier) if detail[0].isalpha() else (1,) return sorted(logs, key=corder)
class Solution: def reorder_log_files(self, logs: List[str]) -> List[str]: def corder(log): (identifier, detail) = log.split(None, 1) return (0, detail, identifier) if detail[0].isalpha() else (1,) return sorted(logs, key=corder)
# -*- coding: utf-8 -*- """ reV Econ utilities """ def lcoe_fcr(fixed_charge_rate, capital_cost, fixed_operating_cost, annual_energy_production, variable_operating_cost): """Calculate the Levelized Cost of Electricity (LCOE) using the fixed-charge-rate method: LCOE = ((fixed_charge_rate * capital_cost + fixed_operating_cost) / annual_energy_production + variable_operating_cost) Parameters ---------- fixed_charge_rate : float | np.ndarray Fixed charge rage (unitless) capital_cost : float | np.ndarray Capital cost (aka Capital Expenditures) ($) fixed_operating_cost : float | np.ndarray Fixed annual operating cost ($/year) annual_energy_production : float | np.ndarray Annual energy production (kWh for year) (can be calculated as capacity * cf * 8760) variable_operating_cost : float | np.ndarray Variable operating cost ($/kWh) Returns ------- lcoe : float | np.ndarray LCOE in $/MWh """ lcoe = ((fixed_charge_rate * capital_cost + fixed_operating_cost) / annual_energy_production + variable_operating_cost) lcoe *= 1000 # convert $/kWh to $/MWh return lcoe
""" reV Econ utilities """ def lcoe_fcr(fixed_charge_rate, capital_cost, fixed_operating_cost, annual_energy_production, variable_operating_cost): """Calculate the Levelized Cost of Electricity (LCOE) using the fixed-charge-rate method: LCOE = ((fixed_charge_rate * capital_cost + fixed_operating_cost) / annual_energy_production + variable_operating_cost) Parameters ---------- fixed_charge_rate : float | np.ndarray Fixed charge rage (unitless) capital_cost : float | np.ndarray Capital cost (aka Capital Expenditures) ($) fixed_operating_cost : float | np.ndarray Fixed annual operating cost ($/year) annual_energy_production : float | np.ndarray Annual energy production (kWh for year) (can be calculated as capacity * cf * 8760) variable_operating_cost : float | np.ndarray Variable operating cost ($/kWh) Returns ------- lcoe : float | np.ndarray LCOE in $/MWh """ lcoe = (fixed_charge_rate * capital_cost + fixed_operating_cost) / annual_energy_production + variable_operating_cost lcoe *= 1000 return lcoe
""" Base Exception MLApp Exception - inherit from Base Exception """ class MLAppBaseException(Exception): def __init__(self, message): self.message = message class FrameworkException(MLAppBaseException): def __init__(self, message=None): if message is not None and isinstance(message, str): self.message = message def __str__(self): return "[ML APP ERROR] %s\n" % str(self.message) class UserException(MLAppBaseException): def __init__(self, message): self.message = message def __str__(self): return "[USER ERROR] %s\n" % str(self.message) class FlowManagerException(UserException): def __init__(self, message): self.message = message def __str__(self): return "[FLOW MANAGER ERROR] %s\n" % str(self.message) class DataManagerException(UserException): def __init__(self, message): self.message = message def __str__(self): return "[DATA MANAGER ERROR] %s\n" % str(self.message) class ModelManagerException(UserException): def __init__(self, message): self.message = message def __str__(self): return "[MODEL MANAGER ERROR] %s\n" % str(self.message) class JobManagerException(UserException): def __init__(self, message): self.message = message def __str__(self): return "[JOB MANAGER ERROR] %s\n" % str(self.message) class PipelineManagerException(UserException): def __init__(self, message): self.message = message def __str__(self): return "[PIPELINE MANAGER ERROR] %s\n" % str(self.message) class EnvironmentException(UserException): def __init__(self, message): self.message = message def __str__(self): return "[ENVIRONMENT ERROR] %s\n" % str(self.message) class IoManagerException(FlowManagerException, DataManagerException, ModelManagerException, JobManagerException): def __init__(self, message): self.message = message def __str__(self): return "[IO MANAGER ERROR] %s\n" % str(self.message) class ConfigError(FlowManagerException, DataManagerException, ModelManagerException, JobManagerException): def __init__(self, message): self.message = message def __str__(self): return "[CONFIG ERROR] %s\n" % str(self.message) class ConfigKeyError(ConfigError): def __init__(self, message): self.message = message def __str__(self): return "[KEY ERROR] %s\n" % str(self.message) class ConfigValueError(ConfigError): def __init__(self, message): self.message = message def __str__(self): return "[VALUE ERROR] %s\n" % str(self.message)
""" Base Exception MLApp Exception - inherit from Base Exception """ class Mlappbaseexception(Exception): def __init__(self, message): self.message = message class Frameworkexception(MLAppBaseException): def __init__(self, message=None): if message is not None and isinstance(message, str): self.message = message def __str__(self): return '[ML APP ERROR] %s\n' % str(self.message) class Userexception(MLAppBaseException): def __init__(self, message): self.message = message def __str__(self): return '[USER ERROR] %s\n' % str(self.message) class Flowmanagerexception(UserException): def __init__(self, message): self.message = message def __str__(self): return '[FLOW MANAGER ERROR] %s\n' % str(self.message) class Datamanagerexception(UserException): def __init__(self, message): self.message = message def __str__(self): return '[DATA MANAGER ERROR] %s\n' % str(self.message) class Modelmanagerexception(UserException): def __init__(self, message): self.message = message def __str__(self): return '[MODEL MANAGER ERROR] %s\n' % str(self.message) class Jobmanagerexception(UserException): def __init__(self, message): self.message = message def __str__(self): return '[JOB MANAGER ERROR] %s\n' % str(self.message) class Pipelinemanagerexception(UserException): def __init__(self, message): self.message = message def __str__(self): return '[PIPELINE MANAGER ERROR] %s\n' % str(self.message) class Environmentexception(UserException): def __init__(self, message): self.message = message def __str__(self): return '[ENVIRONMENT ERROR] %s\n' % str(self.message) class Iomanagerexception(FlowManagerException, DataManagerException, ModelManagerException, JobManagerException): def __init__(self, message): self.message = message def __str__(self): return '[IO MANAGER ERROR] %s\n' % str(self.message) class Configerror(FlowManagerException, DataManagerException, ModelManagerException, JobManagerException): def __init__(self, message): self.message = message def __str__(self): return '[CONFIG ERROR] %s\n' % str(self.message) class Configkeyerror(ConfigError): def __init__(self, message): self.message = message def __str__(self): return '[KEY ERROR] %s\n' % str(self.message) class Configvalueerror(ConfigError): def __init__(self, message): self.message = message def __str__(self): return '[VALUE ERROR] %s\n' % str(self.message)
# initiate empty list to hold user input and sum value of zero user_list = [] list_sum = 0 # seek user input for ten numbers for i in range(10): userInput = input("Enter any 2-digit number: ") # check to see if number is even and if yes, add to list_sum # print incorrect value warning when ValueError exception occurs try: number = int(userInput) user_list.append(number) if number % 2 == 0: list_sum += number except ValueError: print("Incorrect value. That's not an int!") print("user_list: {}".format(user_list)) print("The sum of the even numbers in user_list is: {}.".format(list_sum))
user_list = [] list_sum = 0 for i in range(10): user_input = input('Enter any 2-digit number: ') try: number = int(userInput) user_list.append(number) if number % 2 == 0: list_sum += number except ValueError: print("Incorrect value. That's not an int!") print('user_list: {}'.format(user_list)) print('The sum of the even numbers in user_list is: {}.'.format(list_sum))
load("@io_bazel_rules_docker//container:pull.bzl", "container_pull") def containers(): container_pull( name = "alpine_linux_amd64", registry = "index.docker.io", repository = "library/alpine", tag = "3.14.2", )
load('@io_bazel_rules_docker//container:pull.bzl', 'container_pull') def containers(): container_pull(name='alpine_linux_amd64', registry='index.docker.io', repository='library/alpine', tag='3.14.2')
BUILD_STATE = ( ('triggered', 'Triggered'), ('building', 'Building'), ('finished', 'Finished'), ) BUILD_TYPES = ( ('html', 'HTML'), ('pdf', 'PDF'), ('epub', 'Epub'), ('man', 'Manpage'), )
build_state = (('triggered', 'Triggered'), ('building', 'Building'), ('finished', 'Finished')) build_types = (('html', 'HTML'), ('pdf', 'PDF'), ('epub', 'Epub'), ('man', 'Manpage'))
class Solution: def getDecimalValue(self, head: ListNode) -> int: return self.getDecimalValueHelper(head)[0] def getDecimalValueHelper(self, head: ListNode) -> int: if head is None: return (0, 0) total, exp = self.getDecimalValueHelper(head.next) currbit = head.val total += currbit * (2**exp) return (total, exp+1)
class Solution: def get_decimal_value(self, head: ListNode) -> int: return self.getDecimalValueHelper(head)[0] def get_decimal_value_helper(self, head: ListNode) -> int: if head is None: return (0, 0) (total, exp) = self.getDecimalValueHelper(head.next) currbit = head.val total += currbit * 2 ** exp return (total, exp + 1)
# Copyright (C) 2009 Duncan McGreggor <duncan@canonical.com> # Copyright (C) 2009 Robert Collins <robertc@robertcollins.net> # Copyright (C) 2012 New Dream Network, LLC (DreamHost) # Licenced under the txaws licence available at /LICENSE in the txaws source. __all__ = ["REGION_US", "REGION_EU", "EC2_US_EAST", "EC2_US_WEST", "EC2_ASIA_PACIFIC", "EC2_EU_WEST", "EC2_SOUTH_AMERICA_EAST", "EC2_ALL_REGIONS"] # These old EC2 variable names are maintained for backwards compatibility. REGION_US = "US" REGION_EU = "EU" EC2_ENDPOINT_US = "https://us-east-1.ec2.amazonaws.com/" EC2_ENDPOINT_EU = "https://eu-west-1.ec2.amazonaws.com/" SQS_ENDPOINT_US = "https://sqs.us-east-1.amazonaws.com/" # These are the new EC2 variables. EC2_US_EAST = [ {"region": "US East (Northern Virginia) Region", "endpoint": "https://ec2.us-east-1.amazonaws.com"}] EC2_US_WEST = [ {"region": "US West (Oregon) Region", "endpoint": "https://ec2.us-west-2.amazonaws.com"}, {"region": "US West (Northern California) Region", "endpoint": "https://ec2.us-west-1.amazonaws.com"}] EC2_US = EC2_US_EAST + EC2_US_WEST EC2_ASIA_PACIFIC = [ {"region": "Asia Pacific (Singapore) Region", "endpoint": "https://ec2.ap-southeast-1.amazonaws.com"}, {"region": "Asia Pacific (Tokyo) Region", "endpoint": "https://ec2.ap-northeast-1.amazonaws.com"}] EC2_EU_WEST = [ {"region": "EU (Ireland) Region", "endpoint": "https://ec2.eu-west-1.amazonaws.com"}] EC2_EU = EC2_EU_WEST EC2_SOUTH_AMERICA_EAST = [ {"region": "South America (Sao Paulo) Region", "endpoint": "https://ec2.sa-east-1.amazonaws.com"}] EC2_SOUTH_AMERICA = EC2_SOUTH_AMERICA_EAST EC2_ALL_REGIONS = EC2_US + EC2_ASIA_PACIFIC + EC2_EU + EC2_SOUTH_AMERICA # This old S3 variable is maintained for backwards compatibility. S3_ENDPOINT = "https://s3.amazonaws.com/" # These are the new S3 variables. S3_US_DEFAULT = [ {"region": "US Standard *", "endpoint": "https://s3.amazonaws.com"}] S3_US_WEST = [ {"region": "US West (Oregon) Region", "endpoint": "https://s3-us-west-2.amazonaws.com"}, {"region": "US West (Northern California) Region", "endpoint": "https://s3-us-west-1.amazonaws.com"}] S3_ASIA_PACIFIC = [ {"region": "Asia Pacific (Singapore) Region", "endpoint": "https://s3-ap-southeast-1.amazonaws.com"}, {"region": "Asia Pacific (Tokyo) Region", "endpoint": "https://s3-ap-northeast-1.amazonaws.com"}] S3_US = S3_US_DEFAULT + S3_US_WEST S3_EU_WEST = [ {"region": "EU (Ireland) Region", "endpoint": "https://s3-eu-west-1.amazonaws.com"}] S3_EU = S3_EU_WEST S3_SOUTH_AMERICA_EAST = [ {"region": "South America (Sao Paulo) Region", "endpoint": "s3-sa-east-1.amazonaws.com"}] S3_SOUTH_AMERICA = S3_SOUTH_AMERICA_EAST S3_ALL_REGIONS = S3_US + S3_ASIA_PACIFIC + S3_EU + S3_SOUTH_AMERICA
__all__ = ['REGION_US', 'REGION_EU', 'EC2_US_EAST', 'EC2_US_WEST', 'EC2_ASIA_PACIFIC', 'EC2_EU_WEST', 'EC2_SOUTH_AMERICA_EAST', 'EC2_ALL_REGIONS'] region_us = 'US' region_eu = 'EU' ec2_endpoint_us = 'https://us-east-1.ec2.amazonaws.com/' ec2_endpoint_eu = 'https://eu-west-1.ec2.amazonaws.com/' sqs_endpoint_us = 'https://sqs.us-east-1.amazonaws.com/' ec2_us_east = [{'region': 'US East (Northern Virginia) Region', 'endpoint': 'https://ec2.us-east-1.amazonaws.com'}] ec2_us_west = [{'region': 'US West (Oregon) Region', 'endpoint': 'https://ec2.us-west-2.amazonaws.com'}, {'region': 'US West (Northern California) Region', 'endpoint': 'https://ec2.us-west-1.amazonaws.com'}] ec2_us = EC2_US_EAST + EC2_US_WEST ec2_asia_pacific = [{'region': 'Asia Pacific (Singapore) Region', 'endpoint': 'https://ec2.ap-southeast-1.amazonaws.com'}, {'region': 'Asia Pacific (Tokyo) Region', 'endpoint': 'https://ec2.ap-northeast-1.amazonaws.com'}] ec2_eu_west = [{'region': 'EU (Ireland) Region', 'endpoint': 'https://ec2.eu-west-1.amazonaws.com'}] ec2_eu = EC2_EU_WEST ec2_south_america_east = [{'region': 'South America (Sao Paulo) Region', 'endpoint': 'https://ec2.sa-east-1.amazonaws.com'}] ec2_south_america = EC2_SOUTH_AMERICA_EAST ec2_all_regions = EC2_US + EC2_ASIA_PACIFIC + EC2_EU + EC2_SOUTH_AMERICA s3_endpoint = 'https://s3.amazonaws.com/' s3_us_default = [{'region': 'US Standard *', 'endpoint': 'https://s3.amazonaws.com'}] s3_us_west = [{'region': 'US West (Oregon) Region', 'endpoint': 'https://s3-us-west-2.amazonaws.com'}, {'region': 'US West (Northern California) Region', 'endpoint': 'https://s3-us-west-1.amazonaws.com'}] s3_asia_pacific = [{'region': 'Asia Pacific (Singapore) Region', 'endpoint': 'https://s3-ap-southeast-1.amazonaws.com'}, {'region': 'Asia Pacific (Tokyo) Region', 'endpoint': 'https://s3-ap-northeast-1.amazonaws.com'}] s3_us = S3_US_DEFAULT + S3_US_WEST s3_eu_west = [{'region': 'EU (Ireland) Region', 'endpoint': 'https://s3-eu-west-1.amazonaws.com'}] s3_eu = S3_EU_WEST s3_south_america_east = [{'region': 'South America (Sao Paulo) Region', 'endpoint': 's3-sa-east-1.amazonaws.com'}] s3_south_america = S3_SOUTH_AMERICA_EAST s3_all_regions = S3_US + S3_ASIA_PACIFIC + S3_EU + S3_SOUTH_AMERICA
""" 224. Basic Calculator Example 1: Input: "1 + 1" Output: 2 Example 2: Input: " 2-1 + 2 " Output: 3 Example 3: Input: "(1+(4+5+2)-3)+(6+8)" Output: 23 """ class Solution: def calculate(self, s): """ :type s: str :rtype: int """ self.stack = [] i, res, n, sign = 0, 0, len(s), 1 while i < n: if s[i] == '+' or s[i] == '-': sign = 1 if s[i] == '+' else -1 elif s[i] == '(': self.stack.append(res) self.stack.append(sign) sign, res = 1, 0 elif s[i] == ')': res = self.stack.pop()*res res += self.stack.pop() elif s[i].isdigit(): val = 0 while i< n and s[i].isdigit(): val = val*10 + int(s[i]) i+=1 res += sign*val i-=1 i+=1 return res class Solution: def calculate(self, s): total = 0 i, signs, n = 0, [1,1], len(s) while i < n: if s[i].isdigit(): start = i while i<n and s[i].isdigit(): i+=1 total += signs.pop()*int(s[start:i]) continue if s[i] in '+-(': signs.append(signs[-1]*(1,-1)[s[i] == '-']) elif s[i] == ')': signs.pop() i += 1 return total
""" 224. Basic Calculator Example 1: Input: "1 + 1" Output: 2 Example 2: Input: " 2-1 + 2 " Output: 3 Example 3: Input: "(1+(4+5+2)-3)+(6+8)" Output: 23 """ class Solution: def calculate(self, s): """ :type s: str :rtype: int """ self.stack = [] (i, res, n, sign) = (0, 0, len(s), 1) while i < n: if s[i] == '+' or s[i] == '-': sign = 1 if s[i] == '+' else -1 elif s[i] == '(': self.stack.append(res) self.stack.append(sign) (sign, res) = (1, 0) elif s[i] == ')': res = self.stack.pop() * res res += self.stack.pop() elif s[i].isdigit(): val = 0 while i < n and s[i].isdigit(): val = val * 10 + int(s[i]) i += 1 res += sign * val i -= 1 i += 1 return res class Solution: def calculate(self, s): total = 0 (i, signs, n) = (0, [1, 1], len(s)) while i < n: if s[i].isdigit(): start = i while i < n and s[i].isdigit(): i += 1 total += signs.pop() * int(s[start:i]) continue if s[i] in '+-(': signs.append(signs[-1] * (1, -1)[s[i] == '-']) elif s[i] == ')': signs.pop() i += 1 return total
n = int(input()) for i in range(0, n): line = input() b, p = line.split() b = int(b) p = float(p) calc = (60 * b) / p var = 60 / p min = calc - var max = calc + var print(min, calc, max)
n = int(input()) for i in range(0, n): line = input() (b, p) = line.split() b = int(b) p = float(p) calc = 60 * b / p var = 60 / p min = calc - var max = calc + var print(min, calc, max)
a = int(input('First number')) b = int(input('Second number')) if a>b: print(a) else: print(b)
a = int(input('First number')) b = int(input('Second number')) if a > b: print(a) else: print(b)
# Copyright (c) Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. async def test_should_clear_cookies(context, page, server): await page.goto(server.EMPTY_PAGE) await context.addCookies( [{"url": server.EMPTY_PAGE, "name": "cookie1", "value": "1"}] ) assert await page.evaluate("document.cookie") == "cookie1=1" await context.clearCookies() assert await context.cookies() == [] await page.reload() assert await page.evaluate("document.cookie") == "" async def test_should_isolate_cookies_when_clearing(context, server, browser): another_context = await browser.newContext() await context.addCookies( [{"url": server.EMPTY_PAGE, "name": "page1cookie", "value": "page1value"}] ) await another_context.addCookies( [{"url": server.EMPTY_PAGE, "name": "page2cookie", "value": "page2value"}] ) assert len(await context.cookies()) == 1 assert len(await another_context.cookies()) == 1 await context.clearCookies() assert len(await context.cookies()) == 0 assert len(await another_context.cookies()) == 1 await another_context.clearCookies() assert len(await context.cookies()) == 0 assert len(await another_context.cookies()) == 0 await another_context.close()
async def test_should_clear_cookies(context, page, server): await page.goto(server.EMPTY_PAGE) await context.addCookies([{'url': server.EMPTY_PAGE, 'name': 'cookie1', 'value': '1'}]) assert await page.evaluate('document.cookie') == 'cookie1=1' await context.clearCookies() assert await context.cookies() == [] await page.reload() assert await page.evaluate('document.cookie') == '' async def test_should_isolate_cookies_when_clearing(context, server, browser): another_context = await browser.newContext() await context.addCookies([{'url': server.EMPTY_PAGE, 'name': 'page1cookie', 'value': 'page1value'}]) await another_context.addCookies([{'url': server.EMPTY_PAGE, 'name': 'page2cookie', 'value': 'page2value'}]) assert len(await context.cookies()) == 1 assert len(await another_context.cookies()) == 1 await context.clearCookies() assert len(await context.cookies()) == 0 assert len(await another_context.cookies()) == 1 await another_context.clearCookies() assert len(await context.cookies()) == 0 assert len(await another_context.cookies()) == 0 await another_context.close()
""" RedPocket Exceptions """ class RedPocketException(Exception): """Base API Exception""" def __init__(self, message: str = ""): self.message = message class RedPocketAuthError(RedPocketException): """Invalid Account Credentials""" class RedPocketAPIError(RedPocketException): """Error returned from API Call""" def __init__(self, message: str = "", return_code: int = -1): super().__init__(message=message) self.return_code = return_code
""" RedPocket Exceptions """ class Redpocketexception(Exception): """Base API Exception""" def __init__(self, message: str=''): self.message = message class Redpocketautherror(RedPocketException): """Invalid Account Credentials""" class Redpocketapierror(RedPocketException): """Error returned from API Call""" def __init__(self, message: str='', return_code: int=-1): super().__init__(message=message) self.return_code = return_code
# pylint: disable=C0111 __all__ = ["test_dataset", "test_label_smoother", "test_noam_optimizer", "test_tokenizer", "test_transformer", "test_transformer_data_batching", "test_transformer_dataset", "test_transformer_positional_encoder", "test_vocabulary", "test_word2vec", "test_data", "test_cnn"]
__all__ = ['test_dataset', 'test_label_smoother', 'test_noam_optimizer', 'test_tokenizer', 'test_transformer', 'test_transformer_data_batching', 'test_transformer_dataset', 'test_transformer_positional_encoder', 'test_vocabulary', 'test_word2vec', 'test_data', 'test_cnn']
distancia1: float; distancia2: float; distancia3: float; maiorD: float print("Digite as tres distancias: ") distancia1 = float(input()) distancia2 = float(input()) distancia3 = float(input()) if distancia1 > distancia2 and distancia1 > distancia3: maiorD = distancia1 elif distancia2 > distancia3: maiorD = distancia2 else: maiorD = distancia3 print(f"MAIOR DISTANCIA = {maiorD:.2f}")
distancia1: float distancia2: float distancia3: float maior_d: float print('Digite as tres distancias: ') distancia1 = float(input()) distancia2 = float(input()) distancia3 = float(input()) if distancia1 > distancia2 and distancia1 > distancia3: maior_d = distancia1 elif distancia2 > distancia3: maior_d = distancia2 else: maior_d = distancia3 print(f'MAIOR DISTANCIA = {maiorD:.2f}')
# # PySNMP MIB module ALTIGA-GLOBAL-REG (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALTIGA-GLOBAL-REG # Produced by pysmi-0.3.4 at Wed May 1 11:21:16 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") Gauge32, ModuleIdentity, Bits, NotificationType, ObjectIdentity, TimeTicks, MibIdentifier, iso, Integer32, Counter32, Counter64, Unsigned32, IpAddress, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ModuleIdentity", "Bits", "NotificationType", "ObjectIdentity", "TimeTicks", "MibIdentifier", "iso", "Integer32", "Counter32", "Counter64", "Unsigned32", "IpAddress", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") altigaGlobalRegModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 1, 1, 1)) altigaGlobalRegModule.setRevisions(('2005-01-05 00:00', '2003-10-20 00:00', '2003-04-25 00:00', '2002-07-10 00:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: altigaGlobalRegModule.setRevisionsDescriptions(('Added the new MIB Modules(65 to 67)', 'Added the new MIB Modules(58 to 64)', 'Added the new MIB Modules(54 to 57)', 'Updated with new header',)) if mibBuilder.loadTexts: altigaGlobalRegModule.setLastUpdated('200501050000Z') if mibBuilder.loadTexts: altigaGlobalRegModule.setOrganization('Cisco Systems, Inc.') if mibBuilder.loadTexts: altigaGlobalRegModule.setContactInfo('Cisco Systems 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-cvpn3000@cisco.com') if mibBuilder.loadTexts: altigaGlobalRegModule.setDescription('The Altiga Networks central registration module. Acronyms The following acronyms are used in this document: ACE: Access Control Encryption BwMgmt: Bandwidth Management CTCP: Cisco Transmission Control Protocol DHCP: Dynamic Host Configuration Protocol DNS: Domain Name Service FTP: File Transfer Protocol FW: Firewall HTTP: HyperText Transfer Protocol ICMP: Internet Control Message Protocol IKE: Internet Key Exchange IP: Internet Protocol LBSSF: Load Balance Secure Session Failover L2TP: Layer-2 Tunneling Protocol MIB: Management Information Base NAT: Network Address Translation NTP: Network Time Protocol PPP: Point-to-Point Protocol PPTP: Point-to-Point Tunneling Protocol SEP: Scalable Encryption Processor SNMP: Simple Network Management Protocol SSH: Secure Shell Protocol SSL: Secure Sockets Layer UDP: User Datagram Protocol VPN: Virtual Private Network NAC: Network Admission Control ') altigaRoot = MibIdentifier((1, 3, 6, 1, 4, 1, 3076)) altigaReg = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1)) altigaModules = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1)) alGlobalRegModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 1)) alCapModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 2)) alMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 3)) alComplModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 4)) alVersionMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 6)) alAccessMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 7)) alEventMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 8)) alAuthMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 9)) alPptpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 10)) alPppMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 11)) alHttpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 12)) alIpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 13)) alFilterMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 14)) alUserMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 15)) alTelnetMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 16)) alFtpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 17)) alTftpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 18)) alSnmpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 19)) alIpSecMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 20)) alL2tpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 21)) alSessionMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 22)) alDnsMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 23)) alAddressMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 24)) alDhcpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 25)) alWatchdogMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 26)) alHardwareMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 27)) alNatMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 28)) alLan2LanMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 29)) alGeneralMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30)) alSslMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 31)) alCertMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 32)) alNtpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 33)) alNetworkListMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 34)) alSepMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 35)) alIkeMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 36)) alSyncMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 37)) alT1E1MibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 38)) alMultiLinkMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 39)) alSshMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 40)) alLBSSFMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 41)) alDhcpServerMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 42)) alAutoUpdateMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 43)) alAdminAuthMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 44)) alPPPoEMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 45)) alXmlMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 46)) alCtcpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 47)) alFwMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 48)) alGroupMatchMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 49)) alACEServerMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 50)) alNatTMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 51)) alBwMgmtMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 52)) alIpSecPreFragMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 53)) alFipsMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 54)) alBackupL2LMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 55)) alNotifyMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 56)) alRebootStatusMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 57)) alAuthorizationModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 58)) alWebPortalMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 59)) alWebEmailMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 60)) alPortForwardMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 61)) alRemoteServerMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 62)) alWebvpnAclMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 63)) alNbnsMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 64)) alSecureDesktopMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 65)) alSslTunnelClientMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 66)) alNacMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 67)) altigaGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 2)) altigaProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 3)) altigaCaps = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 4)) altigaReqs = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 5)) altigaExpr = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 6)) altigaHw = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2)) altigaVpnHw = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1)) altigaVpnChassis = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1)) altigaVpnIntf = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 2)) altigaVpnEncrypt = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 3)) vpnConcentrator = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1)) vpnRemote = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 2)) vpnClient = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 3)) vpnConcentratorRev1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1, 1)) if mibBuilder.loadTexts: vpnConcentratorRev1.setStatus('current') if mibBuilder.loadTexts: vpnConcentratorRev1.setDescription("The first revision of Altiga's VPN Concentrator hardware. 603e PPC processor. C10/15/20/30/50/60.") vpnConcentratorRev2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1, 2)) if mibBuilder.loadTexts: vpnConcentratorRev2.setStatus('current') if mibBuilder.loadTexts: vpnConcentratorRev2.setDescription("The second revision of Altiga's VPN Concentrator hardware. 740 PPC processor. C10/15/20/30/50/60.") vpnRemoteRev1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 2, 1)) if mibBuilder.loadTexts: vpnRemoteRev1.setStatus('current') if mibBuilder.loadTexts: vpnRemoteRev1.setDescription("The first revision of Altiga's VPN Concentrator (Remote) hardware. 8240 PPC processor.") vpnClientRev1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 3, 1)) if mibBuilder.loadTexts: vpnClientRev1.setStatus('current') if mibBuilder.loadTexts: vpnClientRev1.setDescription("The first revision of Altiga's VPN Hardware Client hardware. 8260 PPC processor.") mibBuilder.exportSymbols("ALTIGA-GLOBAL-REG", PYSNMP_MODULE_ID=altigaGlobalRegModule, alNatTMibModule=alNatTMibModule, alWebEmailMibModule=alWebEmailMibModule, alEventMibModule=alEventMibModule, alPptpMibModule=alPptpMibModule, alAccessMibModule=alAccessMibModule, alDhcpMibModule=alDhcpMibModule, alIkeMibModule=alIkeMibModule, alHttpMibModule=alHttpMibModule, alSepMibModule=alSepMibModule, alMibModule=alMibModule, altigaVpnHw=altigaVpnHw, altigaExpr=altigaExpr, alHardwareMibModule=alHardwareMibModule, altigaGeneric=altigaGeneric, alRebootStatusMibModule=alRebootStatusMibModule, alSslMibModule=alSslMibModule, alVersionMibModule=alVersionMibModule, altigaVpnChassis=altigaVpnChassis, alSyncMibModule=alSyncMibModule, altigaHw=altigaHw, alPppMibModule=alPppMibModule, vpnRemote=vpnRemote, alGroupMatchMibModule=alGroupMatchMibModule, alNotifyMibModule=alNotifyMibModule, alCapModule=alCapModule, altigaReg=altigaReg, altigaRoot=altigaRoot, altigaReqs=altigaReqs, vpnClient=vpnClient, alIpSecPreFragMibModule=alIpSecPreFragMibModule, alL2tpMibModule=alL2tpMibModule, alAutoUpdateMibModule=alAutoUpdateMibModule, alSshMibModule=alSshMibModule, alSslTunnelClientMibModule=alSslTunnelClientMibModule, alAddressMibModule=alAddressMibModule, alLan2LanMibModule=alLan2LanMibModule, alSecureDesktopMibModule=alSecureDesktopMibModule, alDhcpServerMibModule=alDhcpServerMibModule, altigaVpnEncrypt=altigaVpnEncrypt, alPortForwardMibModule=alPortForwardMibModule, alT1E1MibModule=alT1E1MibModule, alAuthorizationModule=alAuthorizationModule, vpnRemoteRev1=vpnRemoteRev1, vpnConcentratorRev1=vpnConcentratorRev1, alFwMibModule=alFwMibModule, altigaProducts=altigaProducts, alPPPoEMibModule=alPPPoEMibModule, alFilterMibModule=alFilterMibModule, alCertMibModule=alCertMibModule, alTelnetMibModule=alTelnetMibModule, alGlobalRegModule=alGlobalRegModule, alWebPortalMibModule=alWebPortalMibModule, alNacMibModule=alNacMibModule, alCtcpMibModule=alCtcpMibModule, vpnClientRev1=vpnClientRev1, vpnConcentrator=vpnConcentrator, alGeneralMibModule=alGeneralMibModule, alAuthMibModule=alAuthMibModule, alACEServerMibModule=alACEServerMibModule, alNetworkListMibModule=alNetworkListMibModule, altigaCaps=altigaCaps, alWebvpnAclMibModule=alWebvpnAclMibModule, altigaVpnIntf=altigaVpnIntf, alSessionMibModule=alSessionMibModule, alIpSecMibModule=alIpSecMibModule, alFipsMibModule=alFipsMibModule, alTftpMibModule=alTftpMibModule, vpnConcentratorRev2=vpnConcentratorRev2, alSnmpMibModule=alSnmpMibModule, alFtpMibModule=alFtpMibModule, alBackupL2LMibModule=alBackupL2LMibModule, alAdminAuthMibModule=alAdminAuthMibModule, alXmlMibModule=alXmlMibModule, alLBSSFMibModule=alLBSSFMibModule, alWatchdogMibModule=alWatchdogMibModule, alDnsMibModule=alDnsMibModule, alBwMgmtMibModule=alBwMgmtMibModule, altigaModules=altigaModules, alMultiLinkMibModule=alMultiLinkMibModule, alNtpMibModule=alNtpMibModule, alNbnsMibModule=alNbnsMibModule, alRemoteServerMibModule=alRemoteServerMibModule, alNatMibModule=alNatMibModule, altigaGlobalRegModule=altigaGlobalRegModule, alComplModule=alComplModule, alIpMibModule=alIpMibModule, alUserMibModule=alUserMibModule)
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier') (named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues') (single_value_constraint, value_size_constraint, constraints_union, value_range_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint', 'ValueSizeConstraint', 'ConstraintsUnion', 'ValueRangeConstraint', 'ConstraintsIntersection') (notification_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance') (gauge32, module_identity, bits, notification_type, object_identity, time_ticks, mib_identifier, iso, integer32, counter32, counter64, unsigned32, ip_address, enterprises, mib_scalar, mib_table, mib_table_row, mib_table_column) = mibBuilder.importSymbols('SNMPv2-SMI', 'Gauge32', 'ModuleIdentity', 'Bits', 'NotificationType', 'ObjectIdentity', 'TimeTicks', 'MibIdentifier', 'iso', 'Integer32', 'Counter32', 'Counter64', 'Unsigned32', 'IpAddress', 'enterprises', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn') (display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention') altiga_global_reg_module = module_identity((1, 3, 6, 1, 4, 1, 3076, 1, 1, 1, 1)) altigaGlobalRegModule.setRevisions(('2005-01-05 00:00', '2003-10-20 00:00', '2003-04-25 00:00', '2002-07-10 00:00')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: altigaGlobalRegModule.setRevisionsDescriptions(('Added the new MIB Modules(65 to 67)', 'Added the new MIB Modules(58 to 64)', 'Added the new MIB Modules(54 to 57)', 'Updated with new header')) if mibBuilder.loadTexts: altigaGlobalRegModule.setLastUpdated('200501050000Z') if mibBuilder.loadTexts: altigaGlobalRegModule.setOrganization('Cisco Systems, Inc.') if mibBuilder.loadTexts: altigaGlobalRegModule.setContactInfo('Cisco Systems 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-cvpn3000@cisco.com') if mibBuilder.loadTexts: altigaGlobalRegModule.setDescription('The Altiga Networks central registration module. Acronyms The following acronyms are used in this document: ACE: Access Control Encryption BwMgmt: Bandwidth Management CTCP: Cisco Transmission Control Protocol DHCP: Dynamic Host Configuration Protocol DNS: Domain Name Service FTP: File Transfer Protocol FW: Firewall HTTP: HyperText Transfer Protocol ICMP: Internet Control Message Protocol IKE: Internet Key Exchange IP: Internet Protocol LBSSF: Load Balance Secure Session Failover L2TP: Layer-2 Tunneling Protocol MIB: Management Information Base NAT: Network Address Translation NTP: Network Time Protocol PPP: Point-to-Point Protocol PPTP: Point-to-Point Tunneling Protocol SEP: Scalable Encryption Processor SNMP: Simple Network Management Protocol SSH: Secure Shell Protocol SSL: Secure Sockets Layer UDP: User Datagram Protocol VPN: Virtual Private Network NAC: Network Admission Control ') altiga_root = mib_identifier((1, 3, 6, 1, 4, 1, 3076)) altiga_reg = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1)) altiga_modules = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1)) al_global_reg_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 1)) al_cap_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 2)) al_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 3)) al_compl_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 4)) al_version_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 6)) al_access_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 7)) al_event_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 8)) al_auth_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 9)) al_pptp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 10)) al_ppp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 11)) al_http_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 12)) al_ip_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 13)) al_filter_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 14)) al_user_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 15)) al_telnet_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 16)) al_ftp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 17)) al_tftp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 18)) al_snmp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 19)) al_ip_sec_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 20)) al_l2tp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 21)) al_session_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 22)) al_dns_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 23)) al_address_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 24)) al_dhcp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 25)) al_watchdog_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 26)) al_hardware_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 27)) al_nat_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 28)) al_lan2_lan_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 29)) al_general_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30)) al_ssl_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 31)) al_cert_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 32)) al_ntp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 33)) al_network_list_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 34)) al_sep_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 35)) al_ike_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 36)) al_sync_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 37)) al_t1_e1_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 38)) al_multi_link_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 39)) al_ssh_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 40)) al_lbssf_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 41)) al_dhcp_server_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 42)) al_auto_update_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 43)) al_admin_auth_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 44)) al_pp_po_e_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 45)) al_xml_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 46)) al_ctcp_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 47)) al_fw_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 48)) al_group_match_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 49)) al_ace_server_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 50)) al_nat_t_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 51)) al_bw_mgmt_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 52)) al_ip_sec_pre_frag_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 53)) al_fips_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 54)) al_backup_l2_l_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 55)) al_notify_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 56)) al_reboot_status_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 57)) al_authorization_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 58)) al_web_portal_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 59)) al_web_email_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 60)) al_port_forward_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 61)) al_remote_server_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 62)) al_webvpn_acl_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 63)) al_nbns_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 64)) al_secure_desktop_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 65)) al_ssl_tunnel_client_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 66)) al_nac_mib_module = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 67)) altiga_generic = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 2)) altiga_products = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 3)) altiga_caps = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 4)) altiga_reqs = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 5)) altiga_expr = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 6)) altiga_hw = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2)) altiga_vpn_hw = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1)) altiga_vpn_chassis = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1)) altiga_vpn_intf = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 2)) altiga_vpn_encrypt = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 3)) vpn_concentrator = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1)) vpn_remote = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 2)) vpn_client = mib_identifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 3)) vpn_concentrator_rev1 = object_identity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1, 1)) if mibBuilder.loadTexts: vpnConcentratorRev1.setStatus('current') if mibBuilder.loadTexts: vpnConcentratorRev1.setDescription("The first revision of Altiga's VPN Concentrator hardware. 603e PPC processor. C10/15/20/30/50/60.") vpn_concentrator_rev2 = object_identity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1, 2)) if mibBuilder.loadTexts: vpnConcentratorRev2.setStatus('current') if mibBuilder.loadTexts: vpnConcentratorRev2.setDescription("The second revision of Altiga's VPN Concentrator hardware. 740 PPC processor. C10/15/20/30/50/60.") vpn_remote_rev1 = object_identity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 2, 1)) if mibBuilder.loadTexts: vpnRemoteRev1.setStatus('current') if mibBuilder.loadTexts: vpnRemoteRev1.setDescription("The first revision of Altiga's VPN Concentrator (Remote) hardware. 8240 PPC processor.") vpn_client_rev1 = object_identity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 3, 1)) if mibBuilder.loadTexts: vpnClientRev1.setStatus('current') if mibBuilder.loadTexts: vpnClientRev1.setDescription("The first revision of Altiga's VPN Hardware Client hardware. 8260 PPC processor.") mibBuilder.exportSymbols('ALTIGA-GLOBAL-REG', PYSNMP_MODULE_ID=altigaGlobalRegModule, alNatTMibModule=alNatTMibModule, alWebEmailMibModule=alWebEmailMibModule, alEventMibModule=alEventMibModule, alPptpMibModule=alPptpMibModule, alAccessMibModule=alAccessMibModule, alDhcpMibModule=alDhcpMibModule, alIkeMibModule=alIkeMibModule, alHttpMibModule=alHttpMibModule, alSepMibModule=alSepMibModule, alMibModule=alMibModule, altigaVpnHw=altigaVpnHw, altigaExpr=altigaExpr, alHardwareMibModule=alHardwareMibModule, altigaGeneric=altigaGeneric, alRebootStatusMibModule=alRebootStatusMibModule, alSslMibModule=alSslMibModule, alVersionMibModule=alVersionMibModule, altigaVpnChassis=altigaVpnChassis, alSyncMibModule=alSyncMibModule, altigaHw=altigaHw, alPppMibModule=alPppMibModule, vpnRemote=vpnRemote, alGroupMatchMibModule=alGroupMatchMibModule, alNotifyMibModule=alNotifyMibModule, alCapModule=alCapModule, altigaReg=altigaReg, altigaRoot=altigaRoot, altigaReqs=altigaReqs, vpnClient=vpnClient, alIpSecPreFragMibModule=alIpSecPreFragMibModule, alL2tpMibModule=alL2tpMibModule, alAutoUpdateMibModule=alAutoUpdateMibModule, alSshMibModule=alSshMibModule, alSslTunnelClientMibModule=alSslTunnelClientMibModule, alAddressMibModule=alAddressMibModule, alLan2LanMibModule=alLan2LanMibModule, alSecureDesktopMibModule=alSecureDesktopMibModule, alDhcpServerMibModule=alDhcpServerMibModule, altigaVpnEncrypt=altigaVpnEncrypt, alPortForwardMibModule=alPortForwardMibModule, alT1E1MibModule=alT1E1MibModule, alAuthorizationModule=alAuthorizationModule, vpnRemoteRev1=vpnRemoteRev1, vpnConcentratorRev1=vpnConcentratorRev1, alFwMibModule=alFwMibModule, altigaProducts=altigaProducts, alPPPoEMibModule=alPPPoEMibModule, alFilterMibModule=alFilterMibModule, alCertMibModule=alCertMibModule, alTelnetMibModule=alTelnetMibModule, alGlobalRegModule=alGlobalRegModule, alWebPortalMibModule=alWebPortalMibModule, alNacMibModule=alNacMibModule, alCtcpMibModule=alCtcpMibModule, vpnClientRev1=vpnClientRev1, vpnConcentrator=vpnConcentrator, alGeneralMibModule=alGeneralMibModule, alAuthMibModule=alAuthMibModule, alACEServerMibModule=alACEServerMibModule, alNetworkListMibModule=alNetworkListMibModule, altigaCaps=altigaCaps, alWebvpnAclMibModule=alWebvpnAclMibModule, altigaVpnIntf=altigaVpnIntf, alSessionMibModule=alSessionMibModule, alIpSecMibModule=alIpSecMibModule, alFipsMibModule=alFipsMibModule, alTftpMibModule=alTftpMibModule, vpnConcentratorRev2=vpnConcentratorRev2, alSnmpMibModule=alSnmpMibModule, alFtpMibModule=alFtpMibModule, alBackupL2LMibModule=alBackupL2LMibModule, alAdminAuthMibModule=alAdminAuthMibModule, alXmlMibModule=alXmlMibModule, alLBSSFMibModule=alLBSSFMibModule, alWatchdogMibModule=alWatchdogMibModule, alDnsMibModule=alDnsMibModule, alBwMgmtMibModule=alBwMgmtMibModule, altigaModules=altigaModules, alMultiLinkMibModule=alMultiLinkMibModule, alNtpMibModule=alNtpMibModule, alNbnsMibModule=alNbnsMibModule, alRemoteServerMibModule=alRemoteServerMibModule, alNatMibModule=alNatMibModule, altigaGlobalRegModule=altigaGlobalRegModule, alComplModule=alComplModule, alIpMibModule=alIpMibModule, alUserMibModule=alUserMibModule)
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module is part of the nmeta2 suite . It defines a custom traffic classifier . To create your own custom classifier, copy this example to a new file in the same directory and update the code as required. Call it from nmeta by specifying the name of the file (without the .py) in main_policy.yaml . Classifiers are called per packet, so performance is important . """ class Classifier(object): """ A custom classifier module for import by nmeta2 """ def __init__(self, logger): """ Initialise the classifier """ self.logger = logger def classifier(self, flow): """ A really basic statistical classifier to demonstrate ability to differentiate 'bandwidth hog' flows from ones that are more interactive so that appropriate classification metadata can be passed to QoS for differential treatment. . This method is passed a Flow class object that holds the current context of the flow . It returns a dictionary specifying a key/value of QoS treatment to take (or not if no classification determination made). . Only works on TCP. """ #*** Maximum packets to accumulate in a flow before making a #*** classification: _max_packets = 7 #*** Thresholds used in calculations: _max_packet_size_threshold = 1200 _interpacket_ratio_threshold = 0.3 #*** Dictionary to hold classification results: _results = {} if flow.packet_count >= _max_packets and not flow.finalised: #*** Reached our maximum packet count so do some classification: self.logger.debug("Reached max packets count, finalising") flow.finalised = 1 #*** Call functions to get statistics to make decisions on: _max_packet_size = flow.max_packet_size() _max_interpacket_interval = flow.max_interpacket_interval() _min_interpacket_interval = flow.min_interpacket_interval() #*** Avoid possible divide by zero error: if _max_interpacket_interval and _min_interpacket_interval: #*** Ratio between largest directional interpacket delta and #*** smallest. Use a ratio as it accounts for base RTT: _interpacket_ratio = float(_min_interpacket_interval) / \ float(_max_interpacket_interval) else: _interpacket_ratio = 0 self.logger.debug("max_packet_size=%s interpacket_ratio=%s", _max_packet_size, _interpacket_ratio) #*** Decide actions based on the statistics: if (_max_packet_size > _max_packet_size_threshold and _interpacket_ratio < _interpacket_ratio_threshold): #*** This traffic looks like a bandwidth hog so constrain it: _results['qos_treatment'] = 'constrained_bw' else: #*** Doesn't look like bandwidth hog so default priority: _results['qos_treatment'] = 'default_priority' self.logger.debug("Decided on results %s", _results) return _results
""" This module is part of the nmeta2 suite . It defines a custom traffic classifier . To create your own custom classifier, copy this example to a new file in the same directory and update the code as required. Call it from nmeta by specifying the name of the file (without the .py) in main_policy.yaml . Classifiers are called per packet, so performance is important . """ class Classifier(object): """ A custom classifier module for import by nmeta2 """ def __init__(self, logger): """ Initialise the classifier """ self.logger = logger def classifier(self, flow): """ A really basic statistical classifier to demonstrate ability to differentiate 'bandwidth hog' flows from ones that are more interactive so that appropriate classification metadata can be passed to QoS for differential treatment. . This method is passed a Flow class object that holds the current context of the flow . It returns a dictionary specifying a key/value of QoS treatment to take (or not if no classification determination made). . Only works on TCP. """ _max_packets = 7 _max_packet_size_threshold = 1200 _interpacket_ratio_threshold = 0.3 _results = {} if flow.packet_count >= _max_packets and (not flow.finalised): self.logger.debug('Reached max packets count, finalising') flow.finalised = 1 _max_packet_size = flow.max_packet_size() _max_interpacket_interval = flow.max_interpacket_interval() _min_interpacket_interval = flow.min_interpacket_interval() if _max_interpacket_interval and _min_interpacket_interval: _interpacket_ratio = float(_min_interpacket_interval) / float(_max_interpacket_interval) else: _interpacket_ratio = 0 self.logger.debug('max_packet_size=%s interpacket_ratio=%s', _max_packet_size, _interpacket_ratio) if _max_packet_size > _max_packet_size_threshold and _interpacket_ratio < _interpacket_ratio_threshold: _results['qos_treatment'] = 'constrained_bw' else: _results['qos_treatment'] = 'default_priority' self.logger.debug('Decided on results %s', _results) return _results
# Since any modulus should lay between 0 and 101, we can record all # possible modulus at any given point in the calculation. The possible # set of values of next step can be calculated using the previous set. # Since there's guaranteed to be an answer, we will eventually make # modulus 0 possible. We then backtrack to fill in all these operators. N = int(input()) A = list(map(int, input().split())) op = ['*'] * (N - 1) possible = [[None] * 101 for i in range(N)] possible[0][A[0]] = True end = N - 1 for i in range(N - 1): if possible[i][0]: end = i break for x in range(101): if possible[i][x]: possible[i + 1][(x + A[i + 1]) % 101] = ('+', x) possible[i + 1][(x + 101 - A[i + 1]) % 101] = ('-', x) possible[i + 1][(x * A[i + 1]) % 101] = ('*', x) x = 0 for i in range(end, 0, -1): op[i - 1] = possible[i][x][0] x = possible[i][x][1] print(''.join(str(x) for t in zip(A, op) for x in t) + str(A[-1]))
n = int(input()) a = list(map(int, input().split())) op = ['*'] * (N - 1) possible = [[None] * 101 for i in range(N)] possible[0][A[0]] = True end = N - 1 for i in range(N - 1): if possible[i][0]: end = i break for x in range(101): if possible[i][x]: possible[i + 1][(x + A[i + 1]) % 101] = ('+', x) possible[i + 1][(x + 101 - A[i + 1]) % 101] = ('-', x) possible[i + 1][x * A[i + 1] % 101] = ('*', x) x = 0 for i in range(end, 0, -1): op[i - 1] = possible[i][x][0] x = possible[i][x][1] print(''.join((str(x) for t in zip(A, op) for x in t)) + str(A[-1]))
# File: koodous_consts.py # # Copyright (c) 2018-2021 Splunk Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific language governing permissions # and limitations under the License. PHANTOM_ERR_CODE_UNAVAILABLE = "Error code unavailable" PHANTOM_ERR_MSG_UNAVAILABLE = "Unknown error occurred. Please check the asset configuration and|or action parameters." VAULT_ERR_INVALID_VAULT_ID = "Invalid Vault ID" VAULT_ERR_FILE_NOT_FOUND = "Vault file could not be found with supplied Vault ID" KOODOUS_BASE_URL = 'https://api.koodous.com' KOODOUS_SUCC_TEST_CONNECTIVITY = "Test connectivity passed" KOODOUS_ERR_TEST_CONNECTIVITY = "Test Connectivity Failed" KOODOUS_ERR_INVALID_ATTEMPT_PARAM = "Attempts must be integer number. Error: {0}" KOODOUS_ERR_GET_REPORT_PARAMS = "Must specify either 'sha256' or 'vault_id'" KOODOUS_ERR_UPLOADING_URL = "Error retrieving upload URL"
phantom_err_code_unavailable = 'Error code unavailable' phantom_err_msg_unavailable = 'Unknown error occurred. Please check the asset configuration and|or action parameters.' vault_err_invalid_vault_id = 'Invalid Vault ID' vault_err_file_not_found = 'Vault file could not be found with supplied Vault ID' koodous_base_url = 'https://api.koodous.com' koodous_succ_test_connectivity = 'Test connectivity passed' koodous_err_test_connectivity = 'Test Connectivity Failed' koodous_err_invalid_attempt_param = 'Attempts must be integer number. Error: {0}' koodous_err_get_report_params = "Must specify either 'sha256' or 'vault_id'" koodous_err_uploading_url = 'Error retrieving upload URL'
""" Conditional expression Evaluated to one of two expressions depending on a boolean. e.g: result = true_value if condition else false_value """ def sequence_class(immutable): return tuple if immutable else list seq = sequence_class(immutable=True) t = seq("OrHasson") print(t) print(type(t))
""" Conditional expression Evaluated to one of two expressions depending on a boolean. e.g: result = true_value if condition else false_value """ def sequence_class(immutable): return tuple if immutable else list seq = sequence_class(immutable=True) t = seq('OrHasson') print(t) print(type(t))
def print_two(*args): arg1, arg2 =args print(f"arg1 : {arg1},arg2 : {arg2}") def print_two_again(arg1,arg2): print(f"arg1:{arg1},arg2:{arg2}") def print_one(arg1): print(f"arg1:{arg1}") def print_none(): print("I got nothing") print_two("Zed","Shaw") print_two_again("Zed","Shaw") print_one("First!") print_none()
def print_two(*args): (arg1, arg2) = args print(f'arg1 : {arg1},arg2 : {arg2}') def print_two_again(arg1, arg2): print(f'arg1:{arg1},arg2:{arg2}') def print_one(arg1): print(f'arg1:{arg1}') def print_none(): print('I got nothing') print_two('Zed', 'Shaw') print_two_again('Zed', 'Shaw') print_one('First!') print_none()
''' Provide transmission-daemon RPC credentials ''' rpc_ip = '' rpc_port = '' rpc_username = '' rpc_password = ''
""" Provide transmission-daemon RPC credentials """ rpc_ip = '' rpc_port = '' rpc_username = '' rpc_password = ''
''' Kattis - secretchamber Without much execution time pressure along with nodes being characters, we opt to use python with a dict of dicts as our adjacency matrix. This is basically just floyd warshall transitive closure. Time: O(V^3), Mem: O(V^2) ''' n, q = input().split() n = int(n) q = int(q) edges = [] node_names = set() for i in range(n): u, v = input().split() edges.append((u,v)) node_names.add(u) node_names.add(v) adjmat = {} for i in node_names: adjmat[i] = {} for j in node_names: adjmat[i][j] = 0 for u, v in edges: adjmat[u][v] = 1 for k in node_names: for i in node_names: for j in node_names: adjmat[i][j] |= adjmat[i][k] & adjmat[k][j] for _ in range(q): a, b = input().split() if len(a) != len(b): print("no") continue no = 0 for i in range(len(a)): if (a[i] == b[i]): continue if not(a[i] in node_names and b[i] in node_names): no = 1 break if (adjmat[a[i]][b[i]] == 0): no = 1 break if no: print("no") else: print("yes")
""" Kattis - secretchamber Without much execution time pressure along with nodes being characters, we opt to use python with a dict of dicts as our adjacency matrix. This is basically just floyd warshall transitive closure. Time: O(V^3), Mem: O(V^2) """ (n, q) = input().split() n = int(n) q = int(q) edges = [] node_names = set() for i in range(n): (u, v) = input().split() edges.append((u, v)) node_names.add(u) node_names.add(v) adjmat = {} for i in node_names: adjmat[i] = {} for j in node_names: adjmat[i][j] = 0 for (u, v) in edges: adjmat[u][v] = 1 for k in node_names: for i in node_names: for j in node_names: adjmat[i][j] |= adjmat[i][k] & adjmat[k][j] for _ in range(q): (a, b) = input().split() if len(a) != len(b): print('no') continue no = 0 for i in range(len(a)): if a[i] == b[i]: continue if not (a[i] in node_names and b[i] in node_names): no = 1 break if adjmat[a[i]][b[i]] == 0: no = 1 break if no: print('no') else: print('yes')
class Person: def __init__(self, name, age): self.name = name self.age = age maria = Person("Maria Popova", 25) print(hasattr(maria,"name")) print(hasattr(maria,"surname")) print(getattr(maria, "age")) setattr(maria, "surname", "Popova") print(getattr(maria, "surname"))
class Person: def __init__(self, name, age): self.name = name self.age = age maria = person('Maria Popova', 25) print(hasattr(maria, 'name')) print(hasattr(maria, 'surname')) print(getattr(maria, 'age')) setattr(maria, 'surname', 'Popova') print(getattr(maria, 'surname'))
def spiral(steps): dx = 1 dy = 0 dd = 1 x = 0 y = 0 d = 0 for _ in range(steps - 1): x += dx y += dy d += 1 if d == dd: d = 0 tmp = dx dx = -dy dy = tmp if dy == 0: dd += 1 yield x, y def aoc(data): *_, (x, y) = spiral(int(data)) return abs(x) + abs(y)
def spiral(steps): dx = 1 dy = 0 dd = 1 x = 0 y = 0 d = 0 for _ in range(steps - 1): x += dx y += dy d += 1 if d == dd: d = 0 tmp = dx dx = -dy dy = tmp if dy == 0: dd += 1 yield (x, y) def aoc(data): (*_, (x, y)) = spiral(int(data)) return abs(x) + abs(y)
with open('do-plecaka.txt', 'r') as f: dane = [] # getting and cleaning data for line in f: dane.append([int(x) for x in line.split()]) # printing for x in dane: print(x)
with open('do-plecaka.txt', 'r') as f: dane = [] for line in f: dane.append([int(x) for x in line.split()]) for x in dane: print(x)
# measurements in inches ball_radius = 3 goal_top = 50 goal_width = 58 goal_half = 29 angle_threshold = .1 class L_params(object): horizontal_offset = 14.5 vertical_offset = 18.5 min_y = ball_radius - vertical_offset+3 # in robot coords max_y = goal_top - vertical_offset min_x = -14.5 max_x = 14.0 l1 = 11 l2 = 11 shoulder_offset = -60 elbow_offset = 0 angle_threshold = angle_threshold class R_params(object): horizontal_offset = 43.5 vertical_offset = 18.5 min_y = ball_radius - vertical_offset+2 # in robot coords max_y = goal_top - vertical_offset min_x = -14.0 max_x = 14.5 l1 = 11 l2 = 11 shoulder_offset = 0 elbow_offset = 0 angle_threshold = angle_threshold left_arm = L_params() right_arm = R_params() windows_port = "COM8" unix_port = "/dev/tty.usbserial-A4012B2H" ubuntu_port = "/dev/ttyUSB0" num_servos = 4 servo_speed = 500 baudrate = 400000
ball_radius = 3 goal_top = 50 goal_width = 58 goal_half = 29 angle_threshold = 0.1 class L_Params(object): horizontal_offset = 14.5 vertical_offset = 18.5 min_y = ball_radius - vertical_offset + 3 max_y = goal_top - vertical_offset min_x = -14.5 max_x = 14.0 l1 = 11 l2 = 11 shoulder_offset = -60 elbow_offset = 0 angle_threshold = angle_threshold class R_Params(object): horizontal_offset = 43.5 vertical_offset = 18.5 min_y = ball_radius - vertical_offset + 2 max_y = goal_top - vertical_offset min_x = -14.0 max_x = 14.5 l1 = 11 l2 = 11 shoulder_offset = 0 elbow_offset = 0 angle_threshold = angle_threshold left_arm = l_params() right_arm = r_params() windows_port = 'COM8' unix_port = '/dev/tty.usbserial-A4012B2H' ubuntu_port = '/dev/ttyUSB0' num_servos = 4 servo_speed = 500 baudrate = 400000
class Solution: def numDecodings(self, s: str) -> int: if s[0] == '0' or '00' in s: return 0 for idx, _ in enumerate(s): if idx == 0: pre, cur = 1, 1 else: tmp = cur if _ != '0': if s[idx - 1] == '0': cur = tmp pre = tmp elif 0 < int(s[idx - 1] + _) < 27: cur = pre + tmp pre = tmp else: cur = tmp pre = tmp else: if s[idx - 1] > '2': return 0 else: cur = pre pre = tmp return cur
class Solution: def num_decodings(self, s: str) -> int: if s[0] == '0' or '00' in s: return 0 for (idx, _) in enumerate(s): if idx == 0: (pre, cur) = (1, 1) else: tmp = cur if _ != '0': if s[idx - 1] == '0': cur = tmp pre = tmp elif 0 < int(s[idx - 1] + _) < 27: cur = pre + tmp pre = tmp else: cur = tmp pre = tmp elif s[idx - 1] > '2': return 0 else: cur = pre pre = tmp return cur
def find_skew_value(text): length_of_text = len(text) skew_value = 0 skew_value_list = [] for i in range(0, length_of_text): if text[i] == 'C': skew_value = skew_value - 1 elif text[i] == 'G': skew_value = skew_value + 1 skew_value_list.append(skew_value) return text, skew_value_list
def find_skew_value(text): length_of_text = len(text) skew_value = 0 skew_value_list = [] for i in range(0, length_of_text): if text[i] == 'C': skew_value = skew_value - 1 elif text[i] == 'G': skew_value = skew_value + 1 skew_value_list.append(skew_value) return (text, skew_value_list)
# -*- coding: utf-8 -*- """ Created on Wed May 22 10:46:35 2019 @author: SPAD-FCS """ class correlations: pass def selectG(G, selection='average'): """ Return a selection of the autocorrelations ========== =============================================================== Input Meaning ---------- --------------------------------------------------------------- G Object with all autocorrelations, i.e. output of e.g. FCS2CorrSplit selection Default value 'average': select only the autocorrelations that are averaged over multiple time traces. E.g. if FCS2CorrSplit splits a time trace in 10 pieces, calculates G for each trace and then calculates the average G, all autocorrelations are stored in G. This function removes all of them except for the average G. ========== =============================================================== ========== =============================================================== Output Meaning ---------- --------------------------------------------------------------- G Autocorrelation object with only the pixel dwell time and the average autocorrelations stored. All other autocorrelations are removed. ========== =============================================================== """ # get all attributes of G Glist = list(G.__dict__.keys()) if selection == 'average': # make a new list containing only 'average' attributes Glist2 = [s for s in Glist if "average" in s] else: Glist2 = Glist # make a new object with the average attributes Gout = correlations() for i in Glist2: setattr(Gout, i, getattr(G, i)) # add dwell time Gout.dwellTime = G.dwellTime return(Gout)
""" Created on Wed May 22 10:46:35 2019 @author: SPAD-FCS """ class Correlations: pass def select_g(G, selection='average'): """ Return a selection of the autocorrelations ========== =============================================================== Input Meaning ---------- --------------------------------------------------------------- G Object with all autocorrelations, i.e. output of e.g. FCS2CorrSplit selection Default value 'average': select only the autocorrelations that are averaged over multiple time traces. E.g. if FCS2CorrSplit splits a time trace in 10 pieces, calculates G for each trace and then calculates the average G, all autocorrelations are stored in G. This function removes all of them except for the average G. ========== =============================================================== ========== =============================================================== Output Meaning ---------- --------------------------------------------------------------- G Autocorrelation object with only the pixel dwell time and the average autocorrelations stored. All other autocorrelations are removed. ========== =============================================================== """ glist = list(G.__dict__.keys()) if selection == 'average': glist2 = [s for s in Glist if 'average' in s] else: glist2 = Glist gout = correlations() for i in Glist2: setattr(Gout, i, getattr(G, i)) Gout.dwellTime = G.dwellTime return Gout
#!/usr/bin/env python3 etape = 1 compteur = 0 n = 0 while True: print(f"{etape:4d} : {n:5d} { -2+(etape)*(etape+2):6d} ; ", end="") for _ in range(3 + etape): n += 1 print(n, end=" ") compteur += 1 if compteur == 500000: print(n) exit() print() n += etape etape += 1
etape = 1 compteur = 0 n = 0 while True: print(f'{etape:4d} : {n:5d} {-2 + etape * (etape + 2):6d} ; ', end='') for _ in range(3 + etape): n += 1 print(n, end=' ') compteur += 1 if compteur == 500000: print(n) exit() print() n += etape etape += 1
class EPIconst: class FeatureName: pseknc = "pseknc" cksnap = "cksnap" dpcp = "dpcp" eiip = "eiip" kmer = "kmer" tpcp = "tpcp" all = sorted([pseknc, cksnap, dpcp, eiip, kmer, tpcp]) class CellName: K562 = "K562" NHEK = "NHEK" IMR90 = "IMR90" HeLa_S3 = "HeLa-S3" HUVEC = "HUVEC" GM12878 = "GM12878" all = sorted([GM12878, HeLa_S3, HUVEC, IMR90, K562, NHEK]) class MethodName: ensemble = "meta" xgboost = "xgboost" svm = "svm" deepforest = "deepforest" lightgbm = "lightgbm" rf = "rf" all = sorted([lightgbm, rf, xgboost, svm, deepforest]) class ModelInitParams: logistic = {"n_jobs": 13, } mlp = {} deepforest = {"n_jobs": 13, "use_predictor": False, "random_state": 1, "predictor": 'forest', "verbose": 0} lightgbm = {"n_jobs": 13, 'max_depth': -1, 'num_leaves': 31, 'min_child_samples': 20, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'objective': None, 'n_estimators': 100, 'learning_rate': 0.1, 'device': 'gpu', 'boosting_type': 'gbdt', 'class_weight': None, 'importance_type': 'split', 'min_child_weight': 0.001, 'random_state': None, 'subsample_for_bin': 200000, 'silent': True} rf = {"n_jobs": 13, 'n_estimators': 100, "max_depth": None, 'min_samples_split': 2, "min_samples_leaf": 1, 'max_features': 'auto'} svm = {"probability": True} xgboost = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0, 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1, 'use_label_encoder': False, 'eval_metric': 'logloss', 'tree_method': 'gpu_hist'} class BaseModelParams: GM12878_cksnap_deepforest = {"max_layers": 20, "n_estimators": 5, "n_trees": 250} GM12878_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 125, 'min_child_samples': 90, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} GM12878_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} GM12878_cksnap_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} GM12878_cksnap_rf = {'n_estimators': 340, 'max_depth': 114, 'min_samples_leaf': 3, 'min_samples_split': 2, 'max_features': 'sqrt'} "----------------------------------------------" GM12878_dpcp_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 300} GM12878_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 331, 'max_bin': 135, 'min_child_samples': 190, 'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.9, 'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} GM12878_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'} GM12878_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3, 'learning_rate': 0.1} GM12878_dpcp_rf = {'n_estimators': 150, 'max_depth': 88, 'min_samples_leaf': 1, 'min_samples_split': 3, 'max_features': "sqrt"} "----------------------------------------------" GM12878_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} GM12878_eiip_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} GM12878_eiip_rf = {'n_estimators': 280, 'max_depth': None, 'min_samples_leaf': 1, 'min_samples_split': 7, 'max_features': "sqrt"} GM12878_eiip_svm = {'C': 1.0, 'gamma': 2048.0, 'kernel': 'rbf'} GM12878_eiip_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" GM12878_kmer_deepforest = {'max_layers': 25, 'n_estimators': 5, 'n_trees': 400} GM12878_kmer_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40, 'colsample_bytree': 1.0, 'subsample': 0.8, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} GM12878_kmer_rf = {'n_estimators': 170, 'max_depth': 41, 'min_samples_leaf': 3, 'min_samples_split': 2, 'max_features': 'sqrt'} GM12878_kmer_svm = {'C': 2.0, 'gamma': 128.0, 'kernel': 'rbf'} GM12878_kmer_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" GM12878_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400} GM12878_pseknc_lightgbm = {'max_depth': 11, 'num_leaves': 291, 'max_bin': 185, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} GM12878_pseknc_rf = {'n_estimators': 250, 'max_depth': 41, 'min_samples_leaf': 2, 'min_samples_split': 6, 'max_features': 'log2'} GM12878_pseknc_svm = {'C': 0.5, 'gamma': 1024.0, 'kernel': 'rbf'} GM12878_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 1, 'gamma': 0.1, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.1} "----------------------------------------------" GM12878_tpcp_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 100} GM12878_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 175, 'min_child_samples': 80, 'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 20, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} GM12878_tpcp_rf = {'n_estimators': 250, 'max_depth': 89, 'min_samples_leaf': 2, 'min_samples_split': 9, 'max_features': "log2"} GM12878_tpcp_svm = {'C': 16.0, 'gamma': 64.0, 'kernel': 'rbf'} GM12878_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "==============================================" HeLa_S3_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 300} HeLa_S3_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 105, 'min_child_samples': 80, 'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.1, 'reg_lambda': 0.1, 'min_split_gain': 0.4, 'learning_rate': 0.1, 'n_estimators': 150} HeLa_S3_cksnap_svm = {'C': 128.0, 'gamma': 128.0, 'kernel': 'rbf'} HeLa_S3_cksnap_rf = {'n_estimators': 340, 'max_depth': 44, 'min_samples_leaf': 1, 'min_samples_split': 5, 'max_features': 'sqrt'} HeLa_S3_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 3, 'reg_lambda': 0.5, 'learning_rate': 0.1} "----------------------------------------------" HeLa_S3_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 400} HeLa_S3_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 221, 'max_bin': 155, 'min_child_samples': 180, 'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 200} HeLa_S3_dpcp_rf = {'n_estimators': 70, 'max_depth': 32, 'min_samples_leaf': 1, 'min_samples_split': 8, 'max_features': 'sqrt'} HeLa_S3_dpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'} HeLa_S3_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" HeLa_S3_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200} HeLa_S3_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 5, 'min_child_samples': 110, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100} HeLa_S3_eiip_rf = {'n_estimators': 180, 'max_depth': 138, 'min_samples_leaf': 6, 'min_samples_split': 10, 'max_features': 'sqrt'} HeLa_S3_eiip_svm = {'C': 2.0, 'gamma': 1024.0, 'kernel': 'rbf'} HeLa_S3_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" HeLa_S3_kmer_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200} HeLa_S3_kmer_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 165, 'min_child_samples': 90, 'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 0.001, 'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125} HeLa_S3_kmer_rf = {'n_estimators': 240, 'max_depth': 77, 'min_samples_leaf': 2, 'min_samples_split': 2, 'max_features': 'sqrt'} HeLa_S3_kmer_svm = {'C': 8.0, 'gamma': 128.0, 'kernel': 'rbf'} HeLa_S3_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" HeLa_S3_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200} HeLa_S3_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 261, 'max_bin': 25, 'min_child_samples': 90, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} HeLa_S3_pseknc_rf = {'n_estimators': 330, 'max_depth': 118, 'min_samples_leaf': 1, 'min_samples_split': 8, 'max_features': 'log2'} HeLa_S3_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'} HeLa_S3_pseknc_xgboost = {'n_estimators': 750, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.1, 'reg_lambda': 2, 'learning_rate': 0.1} "----------------------------------------------" HeLa_S3_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 250} HeLa_S3_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 341, 'max_bin': 45, 'min_child_samples': 10, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 250} HeLa_S3_tpcp_rf = {'n_estimators': 320, 'max_depth': 99, 'min_samples_leaf': 1, 'min_samples_split': 10, 'max_features': 'sqrt'} HeLa_S3_tpcp_svm = {'C': 4.0, 'gamma': 32.0, 'kernel': 'rbf'} HeLa_S3_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "==============================================" HUVEC_cksnap_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 200} HUVEC_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 271, 'max_bin': 45, 'min_child_samples': 10, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.5, 'reg_lambda': 0.5, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175} HUVEC_cksnap_rf = {'n_estimators': 270, 'max_depth': 38, 'min_samples_leaf': 2, 'min_samples_split': 2, 'max_features': "auto"} HUVEC_cksnap_svm = {'C': 8.0, 'gamma': 64.0, 'kernel': 'rbf'} HUVEC_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" HUVEC_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 400} HUVEC_dpcp_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 245, 'min_child_samples': 30, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.5, 'reg_lambda': 0.3, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} HUVEC_dpcp_rf = {'n_estimators': 300, 'max_depth': 61, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'log2'} HUVEC_dpcp_svm = {'C': 4.0, 'gamma': 16.0, 'kernel': 'rbf'} HUVEC_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3, 'learning_rate': 0.1} "----------------------------------------------" HUVEC_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 300} HUVEC_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} HUVEC_eiip_rf = {'n_estimators': 310, 'max_depth': 28, 'min_samples_leaf': 1, 'min_samples_split': 2, 'max_features': 'sqrt'} HUVEC_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'} HUVEC_eiip_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} "----------------------------------------------" HUVEC_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} HUVEC_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 5, 'min_child_samples': 170, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.5, 'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125} HUVEC_kmer_rf = {'n_estimators': 230, 'max_depth': 59, 'min_samples_leaf': 1, 'min_samples_split': 4, 'max_features': 'auto'} HUVEC_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} HUVEC_kmer_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} "----------------------------------------------" HUVEC_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400} HUVEC_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 115, 'min_child_samples': 190, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175} HUVEC_pseknc_rf = {'n_estimators': 310, 'max_depth': 42, 'min_samples_leaf': 2, 'min_samples_split': 7, 'max_features': 'sqrt'} HUVEC_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'} HUVEC_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" HUVEC_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150} HUVEC_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 35, 'min_child_samples': 190, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} HUVEC_tpcp_rf = {'n_estimators': 330, 'max_depth': 121, 'min_samples_leaf': 2, 'min_samples_split': 5, 'max_features': "sqrt"} HUVEC_tpcp_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'} HUVEC_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "==============================================" IMR90_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 250} IMR90_cksnap_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 95, 'min_child_samples': 60, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.3, 'learning_rate': 0.1, 'n_estimators': 225} IMR90_cksnap_rf = {'n_estimators': 280, 'max_depth': 124, 'min_samples_leaf': 1, 'min_samples_split': 2, 'max_features': 'auto'} IMR90_cksnap_svm = {'C': 16.0, 'gamma': 16.0, 'kernel': 'rbf'} IMR90_cksnap_xgboost = {'n_estimators': 900, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0.4, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0.1, 'learning_rate': 0.1} "----------------------------------------------" IMR90_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} IMR90_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 281, 'max_bin': 115, 'min_child_samples': 20, 'colsample_bytree': 0.7, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.5, 'learning_rate': 0.1, 'n_estimators': 125} IMR90_dpcp_rf = {'n_estimators': 70, 'max_depth': 116, 'min_samples_leaf': 1, 'min_samples_split': 9, 'max_features': 'log2'} IMR90_dpcp_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'} IMR90_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.05, 'reg_lambda': 0.1, 'learning_rate': 0.1} "----------------------------------------------" IMR90_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 350} IMR90_eiip_lightgbm = {'max_depth': 13, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 50, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 80, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.4, 'learning_rate': 0.2, 'n_estimators': 200} IMR90_eiip_rf = {'n_estimators': 240, 'max_depth': 78, 'min_samples_leaf': 1, 'min_samples_split': 2, 'max_features': 'auto'} IMR90_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'} IMR90_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" IMR90_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 250} IMR90_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 175, 'min_child_samples': 120, 'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 0.7, 'reg_lambda': 0.9, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} IMR90_kmer_rf = {'n_estimators': 280, 'max_depth': 79, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'auto'} IMR90_kmer_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'} IMR90_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0.2, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" IMR90_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} IMR90_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 15, 'min_child_samples': 50, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} IMR90_pseknc_rf = {'n_estimators': 240, 'max_depth': 96, 'min_samples_leaf': 3, 'min_samples_split': 4, 'max_features': 'auto'} IMR90_pseknc_svm = {'C': 4.0, 'gamma': 1024.0, 'kernel': 'rbf'} IMR90_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0.2, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" IMR90_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} IMR90_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 35, 'min_child_samples': 60, 'colsample_bytree': 0.6, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 100} IMR90_tpcp_rf = {'n_estimators': 290, 'max_depth': 71, 'min_samples_leaf': 5, 'min_samples_split': 4, 'max_features': 'auto'} IMR90_tpcp_svm = {'C': 1.0, 'gamma': 512.0, 'kernel': 'rbf'} IMR90_tpcp_xgboost = {'n_estimators': 950, 'max_depth': 7, 'min_child_weight': 5, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.5, 'learning_rate': 0.1} "==============================================" K562_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 400} K562_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 225, 'min_child_samples': 60, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 250} K562_cksnap_rf = {'n_estimators': 330, 'max_depth': 109, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'sqrt'} K562_cksnap_svm = {'C': 16.0, 'gamma': 32.0, 'kernel': 'rbf'} K562_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 0.05, 'learning_rate': 0.1} "----------------------------------------------" K562_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 150} K562_dpcp_lightgbm = {'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 1e-05, 'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 225} K562_dpcp_rf = {'n_estimators': 240, 'max_depth': 127, 'min_samples_leaf': 1, 'min_samples_split': 6, 'max_features': 'sqrt'} K562_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'} K562_dpcp_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.05, 'learning_rate': 0.1} "----------------------------------------------" K562_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 150} K562_eiip_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 225, 'min_child_samples': 110, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 150} K562_eiip_rf = {'n_estimators': 120, 'max_depth': 93, 'min_samples_leaf': 3, 'min_samples_split': 3, 'max_features': 'auto'} K562_eiip_svm = {'C': 2.0, 'gamma': 1024.0, 'kernel': 'rbf'} K562_eiip_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0, 'learning_rate': 0.1} "----------------------------------------------" K562_kmer_deepforest = {'max_layers': 15, 'n_estimators': 5, 'n_trees': 150} K562_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 5, 'min_child_samples': 70, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} K562_kmer_rf = {'n_estimators': 290, 'max_depth': 137, 'min_samples_leaf': 10, 'min_samples_split': 7, 'max_features': "auto"} K562_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} K562_kmer_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0, 'learning_rate': 0.1} "----------------------------------------------" K562_pseknc_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 300} K562_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 65, 'min_child_samples': 200, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} K562_pseknc_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 1, 'min_samples_split': 6, 'max_features': 'log2'} K562_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'} K562_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1, 'learning_rate': 0.1} "----------------------------------------------" K562_tpcp_deepforest = {'max_layers': 20, 'n_estimators': 2, 'n_trees': 300} K562_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 105, 'min_child_samples': 130, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} K562_tpcp_rf = {'n_estimators': 280, 'max_depth': 143, 'min_samples_leaf': 5, 'min_samples_split': 2, 'max_features': 'sqrt'} K562_tpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'} K562_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 1, 'learning_rate': 0.1} "==============================================" NHEK_cksnap_deepforest = {"max_layers": 20, "n_estimators": 5, "n_trees": 400} NHEK_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 205, 'min_child_samples': 90, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75} NHEK_cksnap_rf = {'n_estimators': 300, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 3, 'max_features': 'auto'} NHEK_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} NHEK_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 5, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" NHEK_dpcp_deepforest = {"max_layers": 10, "n_estimators": 8, "n_trees": 200} NHEK_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 301, 'max_bin': 145, 'min_child_samples': 70, 'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.9, 'reg_lambda': 1.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} NHEK_dpcp_rf = {'n_estimators': 300, 'max_depth': 138, 'min_samples_leaf': 1, 'min_samples_split': 5, 'max_features': 'auto'} NHEK_dpcp_svm = {'C': 8.0, 'gamma': 16.0, 'kernel': 'rbf'} NHEK_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 9, 'min_child_weight': 3, 'gamma': 0.5, 'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} "----------------------------------------------" NHEK_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 100} NHEK_eiip_lightgbm = {'max_depth': 11, 'num_leaves': 231, 'max_bin': 255, 'min_child_samples': 70, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} NHEK_eiip_rf = {'n_estimators': 230, 'max_depth': 56, 'min_samples_leaf': 2, 'min_samples_split': 6, 'max_features': 'log2'} NHEK_eiip_svm = {'C': 8.0, 'gamma': 512.0, 'kernel': 'rbf'} NHEK_eiip_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1, 'learning_rate': 0.1} "----------------------------------------------" NHEK_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} NHEK_kmer_lightgbm = {'max_depth': 13, 'num_leaves': 261, 'max_bin': 115, 'min_child_samples': 60, 'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.001, 'min_split_gain': 1.0, 'learning_rate': 0.1, 'n_estimators': 150} NHEK_kmer_rf = {'n_estimators': 60, 'max_depth': 117, 'min_samples_leaf': 3, 'min_samples_split': 3, 'max_features': "auto"} NHEK_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} NHEK_kmer_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1, 'learning_rate': 0.1} "----------------------------------------------" NHEK_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150} NHEK_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 271, 'max_bin': 155, 'min_child_samples': 20, 'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 60, 'reg_alpha': 0.1, 'reg_lambda': 1e-05, 'min_split_gain': 0.7, 'learning_rate': 0.1, 'n_estimators': 75} NHEK_pseknc_rf = {'n_estimators': 190, 'max_depth': 85, 'min_samples_leaf': 1, 'min_samples_split': 10, 'max_features': 'auto'} NHEK_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'} NHEK_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.1, 'reg_lambda': 3, 'learning_rate': 0.1} "----------------------------------------------" NHEK_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} NHEK_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 241, 'max_bin': 15, 'min_child_samples': 90, 'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 40, 'reg_alpha': 0.001, 'reg_lambda': 0.001, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100} NHEK_tpcp_rf = {'n_estimators': 120, 'max_depth': 115, 'min_samples_leaf': 1, 'min_samples_split': 4, 'max_features': 'auto'} NHEK_tpcp_svm = {'C': 1.0, 'gamma': 128.0, 'kernel': 'rbf'} NHEK_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.01, 'learning_rate': 0.1} class MetaModelParams: ################# GM12878 ###################### GM12878_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 32} GM12878_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 8} GM12878_6f5m_prob_logistic = {'C': 2.900000000000001} GM12878_4f2m_prob_logistic = {'C': 0.9000000000000001} GM12878_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 400} GM12878_4f2m_prob_deepforest = {'max_layers': 20, 'n_estimators': 10, 'n_trees': 200} GM12878_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 200, 'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 30, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 50} GM12878_4f2m_prob_lightgbm = {'max_depth': 11, 'num_leaves': 311, 'max_bin': 85, 'min_child_samples': 150, 'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75} GM12878_6f5m_prob_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 9, 'min_samples_split': 5, 'max_features': 'auto'} GM12878_4f2m_prob_rf = {'n_estimators': 140, 'max_depth': 53, 'min_samples_leaf': 6, 'min_samples_split': 7, 'max_features': 'log2'} GM12878_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'} GM12878_4f2m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'} GM12878_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} GM12878_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.05} ################# HeLa_S3 ###################### HeLa_S3_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'relu', 'hidden_layer_sizes': 32} HeLa_S3_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': (16, 32)} HeLa_S3_6f5m_prob_logistic = {'C': 1.9000000000000004} HeLa_S3_4f2m_prob_logistic = {'C': 0.5000000000000001} HeLa_S3_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 10, 'n_trees': 400} HeLa_S3_4f2m_prob_deepforest = {'max_layers': 15, 'n_estimators': 13, 'n_trees': 400} HeLa_S3_6f5m_prob_lightgbm = {'max_depth': 5, 'num_leaves': 281, 'max_bin': 175, 'min_child_samples': 180, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 150} HeLa_S3_4f2m_prob_lightgbm = {'max_depth': 3, 'num_leaves': 311, 'max_bin': 35, 'min_child_samples': 20, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 1.0, 'n_estimators': 125} HeLa_S3_6f5m_prob_rf = {'n_estimators': 130, 'max_depth': 20, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'sqrt'} HeLa_S3_4f2m_prob_rf = {'n_estimators': 210, 'max_depth': 117, 'min_samples_leaf': 2, 'min_samples_split': 5, 'max_features': 'auto'} HeLa_S3_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'} HeLa_S3_4f2m_prob_svm = {'C': 0.25, 'gamma': 0.0625, 'kernel': 'rbf'} HeLa_S3_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.05, 'learning_rate': 0.1} HeLa_S3_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.05, 'learning_rate': 0.1} ################# HUVEC ######################## HUVEC_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': 8} HUVEC_4f2m_prob_mlp = {'batch_size': 128, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': (8, 16)} HUVEC_6f5m_prob_logistic = {'C': 2.900000000000001} HUVEC_4f2m_prob_logistic = {'C': 0.9000000000000001} HUVEC_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 250} HUVEC_4f2m_prob_deepforest = {'max_layers': 15, 'n_estimators': 13, 'n_trees': 400} HUVEC_6f5m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 311, 'max_bin': 45, 'min_child_samples': 170, 'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 10, 'reg_alpha': 0.0, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} HUVEC_4f2m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 261, 'max_bin': 45, 'min_child_samples': 180, 'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 10, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 200} HUVEC_6f5m_prob_rf = {'n_estimators': 290, 'max_depth': 105, 'min_samples_leaf': 5, 'min_samples_split': 2, 'max_features': 'log2'} HUVEC_4f2m_prob_rf = {'n_estimators': 140, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 2, 'max_features': 'log2'} HUVEC_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'} HUVEC_4f2m_prob_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'} HUVEC_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.02, 'learning_rate': 0.05} HUVEC_4f2m_prob_xgboost = {'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.02, 'learning_rate': 0.01} ################# IMR90 ######################## IMR90_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'identity', 'hidden_layer_sizes': (16, 32)} IMR90_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': (8, 16)} IMR90_6f5m_prob_logistic = {'C': 2.5000000000000004} IMR90_4f2m_prob_logistic = {'C': 2.5000000000000004} IMR90_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 8, 'n_trees': 300} IMR90_4f2m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 200} IMR90_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 85, 'min_child_samples': 70, 'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} IMR90_4f2m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 55, 'min_child_samples': 60, 'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 30, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 175} IMR90_6f5m_prob_rf = {'n_estimators': 340, 'max_depth': 9, 'min_samples_leaf': 7, 'min_samples_split': 3, 'max_features': 'log2'} IMR90_4f2m_prob_rf = {'n_estimators': 270, 'max_depth': 120, 'min_samples_leaf': 10, 'min_samples_split': 7, 'max_features': 'log2'} IMR90_6f5m_prob_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'} IMR90_4f2m_prob_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'} IMR90_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.9, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.05} IMR90_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.07} ################# K562 ######################### K562_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'logistic', 'hidden_layer_sizes': (8, 16)} K562_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': 8} K562_6f5m_prob_logistic = {'C': 2.900000000000001} K562_4f2m_prob_logistic = {'C': 0.1} K562_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 400} K562_4f2m_prob_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 300} K562_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 65, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 75} K562_4f2m_prob_lightgbm = {'max_depth': 13, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 60, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.75, 'n_estimators': 175} K562_6f5m_prob_rf = {'n_estimators': 180, 'max_depth': 35, 'min_samples_leaf': 7, 'min_samples_split': 5, 'max_features': 'log2'} K562_4f2m_prob_rf = {'n_estimators': 80, 'max_depth': 130, 'min_samples_leaf': 6, 'min_samples_split': 5, 'max_features': 'log2'} K562_6f5m_prob_svm = {'C': 0.5, 'gamma': 0.0625, 'kernel': 'rbf'} K562_4f2m_prob_svm = {'C': 1.0, 'gamma': 0.0625, 'kernel': 'rbf'} K562_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.1} K562_4f2m_prob_xgboost = {'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.01} ################# NHEK ######################### NHEK_6f5m_prob_mlp = {'batch_size': 128, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 32} NHEK_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': (16, 32)} NHEK_6f5m_prob_logistic = {'C': 0.9000000000000001} NHEK_4f2m_prob_logistic = {'C': 0.1} NHEK_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 50} NHEK_4f2m_prob_deepforest = {'max_layers': 20, 'n_estimators': 10, 'n_trees': 50} NHEK_6f5m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 291, 'max_bin': 45, 'min_child_samples': 140, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 1.0, 'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} NHEK_4f2m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 331, 'max_bin': 35, 'min_child_samples': 100, 'colsample_bytree': 0.8, 'subsample': 0.9, 'subsample_freq': 60, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 100} NHEK_6f5m_prob_rf = {'n_estimators': 70, 'max_depth': 106, 'min_samples_leaf': 10, 'min_samples_split': 9, 'max_features': 'log2'} NHEK_4f2m_prob_rf = {'n_estimators': 130, 'max_depth': 9, 'min_samples_leaf': 7, 'min_samples_split': 4, 'max_features': 'sqrt'} NHEK_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'} NHEK_4f2m_prob_svm = {'C': 2.0, 'gamma': 16.0, 'kernel': 'rbf'} NHEK_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.07} NHEK_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0.4, 'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0.05, 'reg_lambda': 1, 'learning_rate': 1.0} if __name__ == '_main_': print(getattr(EPIconst.BaseModelParams, "NHEK_tpcp_deepforest"))
class Epiconst: class Featurename: pseknc = 'pseknc' cksnap = 'cksnap' dpcp = 'dpcp' eiip = 'eiip' kmer = 'kmer' tpcp = 'tpcp' all = sorted([pseknc, cksnap, dpcp, eiip, kmer, tpcp]) class Cellname: k562 = 'K562' nhek = 'NHEK' imr90 = 'IMR90' he_la_s3 = 'HeLa-S3' huvec = 'HUVEC' gm12878 = 'GM12878' all = sorted([GM12878, HeLa_S3, HUVEC, IMR90, K562, NHEK]) class Methodname: ensemble = 'meta' xgboost = 'xgboost' svm = 'svm' deepforest = 'deepforest' lightgbm = 'lightgbm' rf = 'rf' all = sorted([lightgbm, rf, xgboost, svm, deepforest]) class Modelinitparams: logistic = {'n_jobs': 13} mlp = {} deepforest = {'n_jobs': 13, 'use_predictor': False, 'random_state': 1, 'predictor': 'forest', 'verbose': 0} lightgbm = {'n_jobs': 13, 'max_depth': -1, 'num_leaves': 31, 'min_child_samples': 20, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'objective': None, 'n_estimators': 100, 'learning_rate': 0.1, 'device': 'gpu', 'boosting_type': 'gbdt', 'class_weight': None, 'importance_type': 'split', 'min_child_weight': 0.001, 'random_state': None, 'subsample_for_bin': 200000, 'silent': True} rf = {'n_jobs': 13, 'n_estimators': 100, 'max_depth': None, 'min_samples_split': 2, 'min_samples_leaf': 1, 'max_features': 'auto'} svm = {'probability': True} xgboost = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0, 'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1, 'use_label_encoder': False, 'eval_metric': 'logloss', 'tree_method': 'gpu_hist'} class Basemodelparams: gm12878_cksnap_deepforest = {'max_layers': 20, 'n_estimators': 5, 'n_trees': 250} gm12878_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 125, 'min_child_samples': 90, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} gm12878_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} gm12878_cksnap_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} gm12878_cksnap_rf = {'n_estimators': 340, 'max_depth': 114, 'min_samples_leaf': 3, 'min_samples_split': 2, 'max_features': 'sqrt'} '----------------------------------------------' gm12878_dpcp_deepforest = {'max_layers': 20, 'n_estimators': 2, 'n_trees': 300} gm12878_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 331, 'max_bin': 135, 'min_child_samples': 190, 'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.9, 'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} gm12878_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'} gm12878_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3, 'learning_rate': 0.1} gm12878_dpcp_rf = {'n_estimators': 150, 'max_depth': 88, 'min_samples_leaf': 1, 'min_samples_split': 3, 'max_features': 'sqrt'} '----------------------------------------------' gm12878_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} gm12878_eiip_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} gm12878_eiip_rf = {'n_estimators': 280, 'max_depth': None, 'min_samples_leaf': 1, 'min_samples_split': 7, 'max_features': 'sqrt'} gm12878_eiip_svm = {'C': 1.0, 'gamma': 2048.0, 'kernel': 'rbf'} gm12878_eiip_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' gm12878_kmer_deepforest = {'max_layers': 25, 'n_estimators': 5, 'n_trees': 400} gm12878_kmer_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40, 'colsample_bytree': 1.0, 'subsample': 0.8, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} gm12878_kmer_rf = {'n_estimators': 170, 'max_depth': 41, 'min_samples_leaf': 3, 'min_samples_split': 2, 'max_features': 'sqrt'} gm12878_kmer_svm = {'C': 2.0, 'gamma': 128.0, 'kernel': 'rbf'} gm12878_kmer_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' gm12878_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400} gm12878_pseknc_lightgbm = {'max_depth': 11, 'num_leaves': 291, 'max_bin': 185, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} gm12878_pseknc_rf = {'n_estimators': 250, 'max_depth': 41, 'min_samples_leaf': 2, 'min_samples_split': 6, 'max_features': 'log2'} gm12878_pseknc_svm = {'C': 0.5, 'gamma': 1024.0, 'kernel': 'rbf'} gm12878_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 1, 'gamma': 0.1, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.1} '----------------------------------------------' gm12878_tpcp_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 100} gm12878_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 175, 'min_child_samples': 80, 'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 20, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} gm12878_tpcp_rf = {'n_estimators': 250, 'max_depth': 89, 'min_samples_leaf': 2, 'min_samples_split': 9, 'max_features': 'log2'} gm12878_tpcp_svm = {'C': 16.0, 'gamma': 64.0, 'kernel': 'rbf'} gm12878_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '==============================================' he_la_s3_cksnap_deepforest = {'max_layers': 20, 'n_estimators': 2, 'n_trees': 300} he_la_s3_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 105, 'min_child_samples': 80, 'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.1, 'reg_lambda': 0.1, 'min_split_gain': 0.4, 'learning_rate': 0.1, 'n_estimators': 150} he_la_s3_cksnap_svm = {'C': 128.0, 'gamma': 128.0, 'kernel': 'rbf'} he_la_s3_cksnap_rf = {'n_estimators': 340, 'max_depth': 44, 'min_samples_leaf': 1, 'min_samples_split': 5, 'max_features': 'sqrt'} he_la_s3_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 3, 'reg_lambda': 0.5, 'learning_rate': 0.1} '----------------------------------------------' he_la_s3_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400} he_la_s3_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 221, 'max_bin': 155, 'min_child_samples': 180, 'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 200} he_la_s3_dpcp_rf = {'n_estimators': 70, 'max_depth': 32, 'min_samples_leaf': 1, 'min_samples_split': 8, 'max_features': 'sqrt'} he_la_s3_dpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'} he_la_s3_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' he_la_s3_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200} he_la_s3_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 5, 'min_child_samples': 110, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100} he_la_s3_eiip_rf = {'n_estimators': 180, 'max_depth': 138, 'min_samples_leaf': 6, 'min_samples_split': 10, 'max_features': 'sqrt'} he_la_s3_eiip_svm = {'C': 2.0, 'gamma': 1024.0, 'kernel': 'rbf'} he_la_s3_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' he_la_s3_kmer_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200} he_la_s3_kmer_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 165, 'min_child_samples': 90, 'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 0.001, 'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125} he_la_s3_kmer_rf = {'n_estimators': 240, 'max_depth': 77, 'min_samples_leaf': 2, 'min_samples_split': 2, 'max_features': 'sqrt'} he_la_s3_kmer_svm = {'C': 8.0, 'gamma': 128.0, 'kernel': 'rbf'} he_la_s3_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' he_la_s3_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200} he_la_s3_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 261, 'max_bin': 25, 'min_child_samples': 90, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} he_la_s3_pseknc_rf = {'n_estimators': 330, 'max_depth': 118, 'min_samples_leaf': 1, 'min_samples_split': 8, 'max_features': 'log2'} he_la_s3_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'} he_la_s3_pseknc_xgboost = {'n_estimators': 750, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.1, 'reg_lambda': 2, 'learning_rate': 0.1} '----------------------------------------------' he_la_s3_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 250} he_la_s3_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 341, 'max_bin': 45, 'min_child_samples': 10, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 250} he_la_s3_tpcp_rf = {'n_estimators': 320, 'max_depth': 99, 'min_samples_leaf': 1, 'min_samples_split': 10, 'max_features': 'sqrt'} he_la_s3_tpcp_svm = {'C': 4.0, 'gamma': 32.0, 'kernel': 'rbf'} he_la_s3_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '==============================================' huvec_cksnap_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} huvec_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 271, 'max_bin': 45, 'min_child_samples': 10, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.5, 'reg_lambda': 0.5, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175} huvec_cksnap_rf = {'n_estimators': 270, 'max_depth': 38, 'min_samples_leaf': 2, 'min_samples_split': 2, 'max_features': 'auto'} huvec_cksnap_svm = {'C': 8.0, 'gamma': 64.0, 'kernel': 'rbf'} huvec_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' huvec_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400} huvec_dpcp_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 245, 'min_child_samples': 30, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.5, 'reg_lambda': 0.3, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} huvec_dpcp_rf = {'n_estimators': 300, 'max_depth': 61, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'log2'} huvec_dpcp_svm = {'C': 4.0, 'gamma': 16.0, 'kernel': 'rbf'} huvec_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3, 'learning_rate': 0.1} '----------------------------------------------' huvec_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 300} huvec_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} huvec_eiip_rf = {'n_estimators': 310, 'max_depth': 28, 'min_samples_leaf': 1, 'min_samples_split': 2, 'max_features': 'sqrt'} huvec_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'} huvec_eiip_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} '----------------------------------------------' huvec_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} huvec_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 5, 'min_child_samples': 170, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.5, 'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125} huvec_kmer_rf = {'n_estimators': 230, 'max_depth': 59, 'min_samples_leaf': 1, 'min_samples_split': 4, 'max_features': 'auto'} huvec_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} huvec_kmer_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} '----------------------------------------------' huvec_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400} huvec_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 115, 'min_child_samples': 190, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175} huvec_pseknc_rf = {'n_estimators': 310, 'max_depth': 42, 'min_samples_leaf': 2, 'min_samples_split': 7, 'max_features': 'sqrt'} huvec_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'} huvec_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' huvec_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150} huvec_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 35, 'min_child_samples': 190, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} huvec_tpcp_rf = {'n_estimators': 330, 'max_depth': 121, 'min_samples_leaf': 2, 'min_samples_split': 5, 'max_features': 'sqrt'} huvec_tpcp_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'} huvec_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '==============================================' imr90_cksnap_deepforest = {'max_layers': 20, 'n_estimators': 2, 'n_trees': 250} imr90_cksnap_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 95, 'min_child_samples': 60, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.3, 'learning_rate': 0.1, 'n_estimators': 225} imr90_cksnap_rf = {'n_estimators': 280, 'max_depth': 124, 'min_samples_leaf': 1, 'min_samples_split': 2, 'max_features': 'auto'} imr90_cksnap_svm = {'C': 16.0, 'gamma': 16.0, 'kernel': 'rbf'} imr90_cksnap_xgboost = {'n_estimators': 900, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0.4, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0.1, 'learning_rate': 0.1} '----------------------------------------------' imr90_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} imr90_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 281, 'max_bin': 115, 'min_child_samples': 20, 'colsample_bytree': 0.7, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.5, 'learning_rate': 0.1, 'n_estimators': 125} imr90_dpcp_rf = {'n_estimators': 70, 'max_depth': 116, 'min_samples_leaf': 1, 'min_samples_split': 9, 'max_features': 'log2'} imr90_dpcp_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'} imr90_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.05, 'reg_lambda': 0.1, 'learning_rate': 0.1} '----------------------------------------------' imr90_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 350} imr90_eiip_lightgbm = {'max_depth': 13, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 50, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 80, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.4, 'learning_rate': 0.2, 'n_estimators': 200} imr90_eiip_rf = {'n_estimators': 240, 'max_depth': 78, 'min_samples_leaf': 1, 'min_samples_split': 2, 'max_features': 'auto'} imr90_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'} imr90_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' imr90_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 250} imr90_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 175, 'min_child_samples': 120, 'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 0.7, 'reg_lambda': 0.9, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} imr90_kmer_rf = {'n_estimators': 280, 'max_depth': 79, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'auto'} imr90_kmer_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'} imr90_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0.2, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' imr90_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} imr90_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 15, 'min_child_samples': 50, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} imr90_pseknc_rf = {'n_estimators': 240, 'max_depth': 96, 'min_samples_leaf': 3, 'min_samples_split': 4, 'max_features': 'auto'} imr90_pseknc_svm = {'C': 4.0, 'gamma': 1024.0, 'kernel': 'rbf'} imr90_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0.2, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' imr90_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300} imr90_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 35, 'min_child_samples': 60, 'colsample_bytree': 0.6, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 100} imr90_tpcp_rf = {'n_estimators': 290, 'max_depth': 71, 'min_samples_leaf': 5, 'min_samples_split': 4, 'max_features': 'auto'} imr90_tpcp_svm = {'C': 1.0, 'gamma': 512.0, 'kernel': 'rbf'} imr90_tpcp_xgboost = {'n_estimators': 950, 'max_depth': 7, 'min_child_weight': 5, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.5, 'learning_rate': 0.1} '==============================================' k562_cksnap_deepforest = {'max_layers': 20, 'n_estimators': 2, 'n_trees': 400} k562_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 225, 'min_child_samples': 60, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 250} k562_cksnap_rf = {'n_estimators': 330, 'max_depth': 109, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'sqrt'} k562_cksnap_svm = {'C': 16.0, 'gamma': 32.0, 'kernel': 'rbf'} k562_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 0.05, 'learning_rate': 0.1} '----------------------------------------------' k562_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150} k562_dpcp_lightgbm = {'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 1e-05, 'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 225} k562_dpcp_rf = {'n_estimators': 240, 'max_depth': 127, 'min_samples_leaf': 1, 'min_samples_split': 6, 'max_features': 'sqrt'} k562_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'} k562_dpcp_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.05, 'learning_rate': 0.1} '----------------------------------------------' k562_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 150} k562_eiip_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 225, 'min_child_samples': 110, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 150} k562_eiip_rf = {'n_estimators': 120, 'max_depth': 93, 'min_samples_leaf': 3, 'min_samples_split': 3, 'max_features': 'auto'} k562_eiip_svm = {'C': 2.0, 'gamma': 1024.0, 'kernel': 'rbf'} k562_eiip_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0, 'learning_rate': 0.1} '----------------------------------------------' k562_kmer_deepforest = {'max_layers': 15, 'n_estimators': 5, 'n_trees': 150} k562_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 5, 'min_child_samples': 70, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} k562_kmer_rf = {'n_estimators': 290, 'max_depth': 137, 'min_samples_leaf': 10, 'min_samples_split': 7, 'max_features': 'auto'} k562_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} k562_kmer_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0, 'learning_rate': 0.1} '----------------------------------------------' k562_pseknc_deepforest = {'max_layers': 15, 'n_estimators': 2, 'n_trees': 300} k562_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 65, 'min_child_samples': 200, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} k562_pseknc_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 1, 'min_samples_split': 6, 'max_features': 'log2'} k562_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'} k562_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1, 'learning_rate': 0.1} '----------------------------------------------' k562_tpcp_deepforest = {'max_layers': 20, 'n_estimators': 2, 'n_trees': 300} k562_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 105, 'min_child_samples': 130, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} k562_tpcp_rf = {'n_estimators': 280, 'max_depth': 143, 'min_samples_leaf': 5, 'min_samples_split': 2, 'max_features': 'sqrt'} k562_tpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'} k562_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 4, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 1, 'learning_rate': 0.1} '==============================================' nhek_cksnap_deepforest = {'max_layers': 20, 'n_estimators': 5, 'n_trees': 400} nhek_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 205, 'min_child_samples': 90, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75} nhek_cksnap_rf = {'n_estimators': 300, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 3, 'max_features': 'auto'} nhek_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} nhek_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 5, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' nhek_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 8, 'n_trees': 200} nhek_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 301, 'max_bin': 145, 'min_child_samples': 70, 'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.9, 'reg_lambda': 1.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150} nhek_dpcp_rf = {'n_estimators': 300, 'max_depth': 138, 'min_samples_leaf': 1, 'min_samples_split': 5, 'max_features': 'auto'} nhek_dpcp_svm = {'C': 8.0, 'gamma': 16.0, 'kernel': 'rbf'} nhek_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 9, 'min_child_weight': 3, 'gamma': 0.5, 'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1, 'learning_rate': 0.1} '----------------------------------------------' nhek_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 100} nhek_eiip_lightgbm = {'max_depth': 11, 'num_leaves': 231, 'max_bin': 255, 'min_child_samples': 70, 'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} nhek_eiip_rf = {'n_estimators': 230, 'max_depth': 56, 'min_samples_leaf': 2, 'min_samples_split': 6, 'max_features': 'log2'} nhek_eiip_svm = {'C': 8.0, 'gamma': 512.0, 'kernel': 'rbf'} nhek_eiip_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1, 'learning_rate': 0.1} '----------------------------------------------' nhek_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} nhek_kmer_lightgbm = {'max_depth': 13, 'num_leaves': 261, 'max_bin': 115, 'min_child_samples': 60, 'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.001, 'min_split_gain': 1.0, 'learning_rate': 0.1, 'n_estimators': 150} nhek_kmer_rf = {'n_estimators': 60, 'max_depth': 117, 'min_samples_leaf': 3, 'min_samples_split': 3, 'max_features': 'auto'} nhek_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'} nhek_kmer_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1, 'learning_rate': 0.1} '----------------------------------------------' nhek_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150} nhek_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 271, 'max_bin': 155, 'min_child_samples': 20, 'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 60, 'reg_alpha': 0.1, 'reg_lambda': 1e-05, 'min_split_gain': 0.7, 'learning_rate': 0.1, 'n_estimators': 75} nhek_pseknc_rf = {'n_estimators': 190, 'max_depth': 85, 'min_samples_leaf': 1, 'min_samples_split': 10, 'max_features': 'auto'} nhek_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'} nhek_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.1, 'reg_lambda': 3, 'learning_rate': 0.1} '----------------------------------------------' nhek_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 200} nhek_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 241, 'max_bin': 15, 'min_child_samples': 90, 'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 40, 'reg_alpha': 0.001, 'reg_lambda': 0.001, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100} nhek_tpcp_rf = {'n_estimators': 120, 'max_depth': 115, 'min_samples_leaf': 1, 'min_samples_split': 4, 'max_features': 'auto'} nhek_tpcp_svm = {'C': 1.0, 'gamma': 128.0, 'kernel': 'rbf'} nhek_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.01, 'learning_rate': 0.1} class Metamodelparams: gm12878_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 32} gm12878_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 8} gm12878_6f5m_prob_logistic = {'C': 2.900000000000001} gm12878_4f2m_prob_logistic = {'C': 0.9000000000000001} gm12878_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 400} gm12878_4f2m_prob_deepforest = {'max_layers': 20, 'n_estimators': 10, 'n_trees': 200} gm12878_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 200, 'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 30, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 50} gm12878_4f2m_prob_lightgbm = {'max_depth': 11, 'num_leaves': 311, 'max_bin': 85, 'min_child_samples': 150, 'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75} gm12878_6f5m_prob_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 9, 'min_samples_split': 5, 'max_features': 'auto'} gm12878_4f2m_prob_rf = {'n_estimators': 140, 'max_depth': 53, 'min_samples_leaf': 6, 'min_samples_split': 7, 'max_features': 'log2'} gm12878_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'} gm12878_4f2m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'} gm12878_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.1} gm12878_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.05} he_la_s3_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'relu', 'hidden_layer_sizes': 32} he_la_s3_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': (16, 32)} he_la_s3_6f5m_prob_logistic = {'C': 1.9000000000000004} he_la_s3_4f2m_prob_logistic = {'C': 0.5000000000000001} he_la_s3_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 10, 'n_trees': 400} he_la_s3_4f2m_prob_deepforest = {'max_layers': 15, 'n_estimators': 13, 'n_trees': 400} he_la_s3_6f5m_prob_lightgbm = {'max_depth': 5, 'num_leaves': 281, 'max_bin': 175, 'min_child_samples': 180, 'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 150} he_la_s3_4f2m_prob_lightgbm = {'max_depth': 3, 'num_leaves': 311, 'max_bin': 35, 'min_child_samples': 20, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 1.0, 'n_estimators': 125} he_la_s3_6f5m_prob_rf = {'n_estimators': 130, 'max_depth': 20, 'min_samples_leaf': 2, 'min_samples_split': 3, 'max_features': 'sqrt'} he_la_s3_4f2m_prob_rf = {'n_estimators': 210, 'max_depth': 117, 'min_samples_leaf': 2, 'min_samples_split': 5, 'max_features': 'auto'} he_la_s3_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'} he_la_s3_4f2m_prob_svm = {'C': 0.25, 'gamma': 0.0625, 'kernel': 'rbf'} he_la_s3_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.05, 'learning_rate': 0.1} he_la_s3_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.05, 'learning_rate': 0.1} huvec_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': 8} huvec_4f2m_prob_mlp = {'batch_size': 128, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': (8, 16)} huvec_6f5m_prob_logistic = {'C': 2.900000000000001} huvec_4f2m_prob_logistic = {'C': 0.9000000000000001} huvec_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 250} huvec_4f2m_prob_deepforest = {'max_layers': 15, 'n_estimators': 13, 'n_trees': 400} huvec_6f5m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 311, 'max_bin': 45, 'min_child_samples': 170, 'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 10, 'reg_alpha': 0.0, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100} huvec_4f2m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 261, 'max_bin': 45, 'min_child_samples': 180, 'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 10, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 200} huvec_6f5m_prob_rf = {'n_estimators': 290, 'max_depth': 105, 'min_samples_leaf': 5, 'min_samples_split': 2, 'max_features': 'log2'} huvec_4f2m_prob_rf = {'n_estimators': 140, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 2, 'max_features': 'log2'} huvec_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'} huvec_4f2m_prob_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'} huvec_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.02, 'learning_rate': 0.05} huvec_4f2m_prob_xgboost = {'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.02, 'learning_rate': 0.01} imr90_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'identity', 'hidden_layer_sizes': (16, 32)} imr90_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': (8, 16)} imr90_6f5m_prob_logistic = {'C': 2.5000000000000004} imr90_4f2m_prob_logistic = {'C': 2.5000000000000004} imr90_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 8, 'n_trees': 300} imr90_4f2m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 200} imr90_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 85, 'min_child_samples': 70, 'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250} imr90_4f2m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 55, 'min_child_samples': 60, 'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 30, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 175} imr90_6f5m_prob_rf = {'n_estimators': 340, 'max_depth': 9, 'min_samples_leaf': 7, 'min_samples_split': 3, 'max_features': 'log2'} imr90_4f2m_prob_rf = {'n_estimators': 270, 'max_depth': 120, 'min_samples_leaf': 10, 'min_samples_split': 7, 'max_features': 'log2'} imr90_6f5m_prob_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'} imr90_4f2m_prob_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'} imr90_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.9, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.05} imr90_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.07} k562_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'logistic', 'hidden_layer_sizes': (8, 16)} k562_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'tanh', 'hidden_layer_sizes': 8} k562_6f5m_prob_logistic = {'C': 2.900000000000001} k562_4f2m_prob_logistic = {'C': 0.1} k562_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 400} k562_4f2m_prob_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 300} k562_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 65, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 1e-05, 'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 75} k562_4f2m_prob_lightgbm = {'max_depth': 13, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 60, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.75, 'n_estimators': 175} k562_6f5m_prob_rf = {'n_estimators': 180, 'max_depth': 35, 'min_samples_leaf': 7, 'min_samples_split': 5, 'max_features': 'log2'} k562_4f2m_prob_rf = {'n_estimators': 80, 'max_depth': 130, 'min_samples_leaf': 6, 'min_samples_split': 5, 'max_features': 'log2'} k562_6f5m_prob_svm = {'C': 0.5, 'gamma': 0.0625, 'kernel': 'rbf'} k562_4f2m_prob_svm = {'C': 1.0, 'gamma': 0.0625, 'kernel': 'rbf'} k562_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 6, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.1} k562_4f2m_prob_xgboost = {'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0, 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 0.01, 'learning_rate': 0.01} nhek_6f5m_prob_mlp = {'batch_size': 128, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs', 'activation': 'identity', 'hidden_layer_sizes': 32} nhek_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd', 'activation': 'relu', 'hidden_layer_sizes': (16, 32)} nhek_6f5m_prob_logistic = {'C': 0.9000000000000001} nhek_4f2m_prob_logistic = {'C': 0.1} nhek_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 50} nhek_4f2m_prob_deepforest = {'max_layers': 20, 'n_estimators': 10, 'n_trees': 50} nhek_6f5m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 291, 'max_bin': 45, 'min_child_samples': 140, 'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 1.0, 'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200} nhek_4f2m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 331, 'max_bin': 35, 'min_child_samples': 100, 'colsample_bytree': 0.8, 'subsample': 0.9, 'subsample_freq': 60, 'reg_alpha': 0.0, 'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 100} nhek_6f5m_prob_rf = {'n_estimators': 70, 'max_depth': 106, 'min_samples_leaf': 10, 'min_samples_split': 9, 'max_features': 'log2'} nhek_4f2m_prob_rf = {'n_estimators': 130, 'max_depth': 9, 'min_samples_leaf': 7, 'min_samples_split': 4, 'max_features': 'sqrt'} nhek_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'} nhek_4f2m_prob_svm = {'C': 2.0, 'gamma': 16.0, 'kernel': 'rbf'} nhek_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0, 'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0, 'learning_rate': 0.07} nhek_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0.4, 'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0.05, 'reg_lambda': 1, 'learning_rate': 1.0} if __name__ == '_main_': print(getattr(EPIconst.BaseModelParams, 'NHEK_tpcp_deepforest'))
# Functions can encapsulate functionality you want to reuse: def even_odd(x): if x % 2 == 0: print("even") else: print("odd") # reused even_odd(2) even_odd(4) even_odd(7) even_odd(22) even_odd(8) # output should be >>> even, even, odd, even, even
def even_odd(x): if x % 2 == 0: print('even') else: print('odd') even_odd(2) even_odd(4) even_odd(7) even_odd(22) even_odd(8)
version = '0.11.0' version_cmd = 'confd -version' download_url = 'https://github.com/kelseyhightower/confd/releases/download/vVERSION/confd-VERSION-linux-amd64' install_script = """ chmod +x confd-VERSION-linux-amd64 mv -f confd-VERSION-linux-amd64 /usr/local/bin/confd """
version = '0.11.0' version_cmd = 'confd -version' download_url = 'https://github.com/kelseyhightower/confd/releases/download/vVERSION/confd-VERSION-linux-amd64' install_script = '\nchmod +x confd-VERSION-linux-amd64\nmv -f confd-VERSION-linux-amd64 /usr/local/bin/confd\n'
class Sensors: def __init__(self, **kwargs): self.sensor_data_dictionary = kwargs def update(self, **kwargs): self.sensor_data_dictionary = kwargs def get_value(self, key): return (self.sensor_data_dictionary.get(key))
class Sensors: def __init__(self, **kwargs): self.sensor_data_dictionary = kwargs def update(self, **kwargs): self.sensor_data_dictionary = kwargs def get_value(self, key): return self.sensor_data_dictionary.get(key)
# https://www.google.com/webhp?sourceid=chrome- # instant&ion=1&espv=2&ie=UTF-8#q=dp%20coin%20change def coin_change_recur(coins,n,change_sum): # If sum is 0 there exists a solution with no coins if change_sum == 0: return 1 # if sum is less then 0 no solution exists if change_sum < 0: return 0 # if there is no coins left and sum is not 0 then no solution # exists if n <= 0 and change_sum > 0: return 0 # counts the solution including the coins[n-1] and excluding the coins[n-1] return (coin_change_recur(coins,n-1,change_sum) + coin_change_recur(coins,n,change_sum - coins[n-1])) # To hold the results that has been already computed memo_dict = {} def coin_change_memo(coins,n,change_sum): # Check if we have already computed for the current change_sum if change_sum in memo_dict: return memo_dict[change_sum] # If sum is 0 there exists a solution with no coins if change_sum == 0: return 1 # if sum is less then 0 no solution exists if change_sum < 0: return 0 # if thhere are no coins left and sum is not 0 then no solution exists if n <= 0 and change_sum > 0: return 0 # count the solution inclusding coins[n-1] and excluding coins[n-1] count = (coin_change_memo(coins,n-1,change_sum) + coin_change_memo(coins,n,change_sum - coins[n-1])) #memo_dict[change_sum] = count return count def coin_change_bottom_up(coins,change_sum): coins_len = len(coins) T = [[0] * (coins_len) for i in range(change_sum + 1)] # Initialize the base case : getting sum 0 for i in range(coins_len): T[0][i] = 1 for i in range(1, change_sum + 1): for j in range(coins_len): # Solutions including coins[j] x = T[i - coins[j]][j] if i >= coins[j] else 0 # Solutions excluding coins[j] y = T[i][j-1] if j >= 1 else 0 # total count T[i][j] = x + y return T[change_sum][coins_len - 1] if __name__ == "__main__": coins = [1,2,3] print("Number of ways to make change: ", coin_change_recur(coins,len(coins),4)) print("Number of ways to make change: ", coin_change_memo(coins,len(coins),4)) print("Number of ways to make change: ", coin_change_bottom_up(coins,4))
def coin_change_recur(coins, n, change_sum): if change_sum == 0: return 1 if change_sum < 0: return 0 if n <= 0 and change_sum > 0: return 0 return coin_change_recur(coins, n - 1, change_sum) + coin_change_recur(coins, n, change_sum - coins[n - 1]) memo_dict = {} def coin_change_memo(coins, n, change_sum): if change_sum in memo_dict: return memo_dict[change_sum] if change_sum == 0: return 1 if change_sum < 0: return 0 if n <= 0 and change_sum > 0: return 0 count = coin_change_memo(coins, n - 1, change_sum) + coin_change_memo(coins, n, change_sum - coins[n - 1]) return count def coin_change_bottom_up(coins, change_sum): coins_len = len(coins) t = [[0] * coins_len for i in range(change_sum + 1)] for i in range(coins_len): T[0][i] = 1 for i in range(1, change_sum + 1): for j in range(coins_len): x = T[i - coins[j]][j] if i >= coins[j] else 0 y = T[i][j - 1] if j >= 1 else 0 T[i][j] = x + y return T[change_sum][coins_len - 1] if __name__ == '__main__': coins = [1, 2, 3] print('Number of ways to make change: ', coin_change_recur(coins, len(coins), 4)) print('Number of ways to make change: ', coin_change_memo(coins, len(coins), 4)) print('Number of ways to make change: ', coin_change_bottom_up(coins, 4))
def calc_fuel(mass: int): return max(mass // 3 - 2, 0) def calc_fuel_rec(mass: int): fuel = calc_fuel(mass) if fuel == 0: return fuel else: return fuel + calc_fuel_rec(fuel)
def calc_fuel(mass: int): return max(mass // 3 - 2, 0) def calc_fuel_rec(mass: int): fuel = calc_fuel(mass) if fuel == 0: return fuel else: return fuel + calc_fuel_rec(fuel)
MODEL_TYPE = { "PointRend" : 1, "MobileNetV3Large" : 2, "MobileNetV3Small" : 3 } TASK_TYPE = { "Object Detection" : 1, "Instance Segmentation (Map)" : 2, "Instance Segmentation (Blend)" : 3 } """ 0 : No Ml model to run 1 : Object Detection : PointRend 2 : Instance Detection (Map) : MobileV3Large 3 : Instance Detection (Blend) : MobileV3Large """
model_type = {'PointRend': 1, 'MobileNetV3Large': 2, 'MobileNetV3Small': 3} task_type = {'Object Detection': 1, 'Instance Segmentation (Map)': 2, 'Instance Segmentation (Blend)': 3} '\n0 : No Ml model to run \n1 : Object Detection : PointRend\n2 : Instance Detection (Map) : MobileV3Large \n3 : Instance Detection (Blend) : MobileV3Large \n'
class Cls: x = "a" d = {"a": "ab"} cl = Cls() cl.x = "b" d[cl.x]
class Cls: x = 'a' d = {'a': 'ab'} cl = cls() cl.x = 'b' d[cl.x]
#Copyright 2018 Infosys Ltd. #Use of this source code is governed by Apache 2.0 license that can be found in the LICENSE file or at #http://www.apache.org/licenses/LICENSE-2.0 . ####DATABASE QUERY STATUS CODES#### CON000 = 'CON000' # Successfull database connection CON001 = 'CON001' # Failed to connect to database EXE000 = 'EXE000' # Successful query execution EXE001 = 'EXE001' # Query Execution failure
con000 = 'CON000' con001 = 'CON001' exe000 = 'EXE000' exe001 = 'EXE001'
#a = int(input()) #b = int(input()) entrada = input() a, b = entrada.split(" ") a = int(a) b = int(b) if(a > b): if(a%b == 0): print ("Sao Multiplos") else: print("Nao sao Multiplos") else: if(b%a == 0): print("Sao Multiplos") else: print("Nao sao Multiplos")
entrada = input() (a, b) = entrada.split(' ') a = int(a) b = int(b) if a > b: if a % b == 0: print('Sao Multiplos') else: print('Nao sao Multiplos') elif b % a == 0: print('Sao Multiplos') else: print('Nao sao Multiplos')
class Solution: def orangesRotting(self, grid: List[List[int]]) -> int: d=0 while True: c=False old=[] for i in range(len(grid)): s=[] for j in range(len(grid[0])): s.append(grid[i][j]) old.append(s) for i in range(len(grid)): for j in range(len(grid[0])): if old[i][j]==2: f=self.change(grid,i,j) if f: c=True if c==False: break else: d=d+1 for i in range(len(grid)): for j in range(len(grid[0])): if grid[i][j]==1: return -1 return d def change(self,grid,i,j): r=False for ti,tj in zip([-1,0,0,1],[0,-1,1,0]): if ti+i>=0 and ti+i<len(grid) and tj+j>=0 and tj+j<len(grid[0]): if grid[ti+i][tj+j]==1: grid[ti+i][tj+j]=2 r=True return r
class Solution: def oranges_rotting(self, grid: List[List[int]]) -> int: d = 0 while True: c = False old = [] for i in range(len(grid)): s = [] for j in range(len(grid[0])): s.append(grid[i][j]) old.append(s) for i in range(len(grid)): for j in range(len(grid[0])): if old[i][j] == 2: f = self.change(grid, i, j) if f: c = True if c == False: break else: d = d + 1 for i in range(len(grid)): for j in range(len(grid[0])): if grid[i][j] == 1: return -1 return d def change(self, grid, i, j): r = False for (ti, tj) in zip([-1, 0, 0, 1], [0, -1, 1, 0]): if ti + i >= 0 and ti + i < len(grid) and (tj + j >= 0) and (tj + j < len(grid[0])): if grid[ti + i][tj + j] == 1: grid[ti + i][tj + j] = 2 r = True return r
class Solution: def binaryGap(self, n: int) -> int: a = str(bin(n)) a = a[2:] dis = [] c = 0 for i in range(len(a)): if a[i] == "1": dis.append(c) c = 0 c +=1 return max(dis)
class Solution: def binary_gap(self, n: int) -> int: a = str(bin(n)) a = a[2:] dis = [] c = 0 for i in range(len(a)): if a[i] == '1': dis.append(c) c = 0 c += 1 return max(dis)
class Suggestion: def __init__(self, obj=None): """ See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-response" """ self.text = obj.get('text', None) self.street_line = obj.get('street_line', None) self.city = obj.get('city', None) self.state = obj.get('state', None)
class Suggestion: def __init__(self, obj=None): """ See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-response" """ self.text = obj.get('text', None) self.street_line = obj.get('street_line', None) self.city = obj.get('city', None) self.state = obj.get('state', None)
# coding: utf-8 # In[1]: #num01_SwethaMJ.py sum_ = 0 for i in range(1,1000): if i%3==0 or i%5==0: sum_ += i print(sum_)
sum_ = 0 for i in range(1, 1000): if i % 3 == 0 or i % 5 == 0: sum_ += i print(sum_)
# M6 #2 str = 'inet addr:127.0.0.1 Mask:255.0.0.0' index = str.find(':') if index > 0: # clip off the front str1 = str[index+1:] i = str1.find(' ') addr = str1[:i].rstrip() # addr is the inet address print('Address: ', addr)
str = 'inet addr:127.0.0.1 Mask:255.0.0.0' index = str.find(':') if index > 0: str1 = str[index + 1:] i = str1.find(' ') addr = str1[:i].rstrip() print('Address: ', addr)
# (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls]
class Singleton(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls]
class Solution: def romanToInt(self, s: str) -> int: dic = {"I":1,"V":5,"X":10,"L":50,"C":100,"D":500,"M":1000} res = 0 while s: letter = s[0] if len(s)==1: res+=dic[letter] return res if dic[letter]>=dic[s[1]]: res+=dic[letter] s = s[1:] elif dic[letter]<dic[s[1]]: res +=dic[s[1]]-dic[letter] s=s[2:] return res
class Solution: def roman_to_int(self, s: str) -> int: dic = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} res = 0 while s: letter = s[0] if len(s) == 1: res += dic[letter] return res if dic[letter] >= dic[s[1]]: res += dic[letter] s = s[1:] elif dic[letter] < dic[s[1]]: res += dic[s[1]] - dic[letter] s = s[2:] return res
class blank (object): def __init__ (self): object.__init__ (self) deployment_settings = blank () # Web2py Settings deployment_settings.web2py = blank () deployment_settings.web2py.port = 8000 # Database settings deployment_settings.database = blank () deployment_settings.database.db_type = "sqlite" deployment_settings.database.host = "localhost" deployment_settings.database.port = None # use default deployment_settings.database.database = "healthscapes" deployment_settings.database.username = "hs" deployment_settings.database.password = "hs" deployment_settings.database.pool_size = 30 # MongoDB Settings deployment_settings.mongodb = blank () deployment_settings.mongodb.host = None deployment_settings.mongodb.port = 27017 deployment_settings.mongodb.db = 'mongo_db' deployment_settings.mongodb.username = 'mongo' deployment_settings.mongodb.password = 'mongo' # PostGIS Settings deployment_settings.postgis = blank () deployment_settings.postgis.host = None deployment_settings.postgis.port = 5432 deployment_settings.postgis.database = "geodata" deployment_settings.postgis.username = "postgis" deployment_settings.postgis.password = "postgis" deployment_settings.postgis.pool_size = 10 deployment_settings.geoserver_sources = [] # Upload Geoserver Settings deployment_settings.geoserver = blank () deployment_settings.geoserver.host = 'http://localhost' deployment_settings.geoserver.port = 8888 deployment_settings.geoserver.username = "admin" deployment_settings.geoserver.password = "geoserver" deployment_settings.geoserver.workspace = 'hsd' deployment_settings.geoserver.pgis_store = 'test' # NPR Settings deployment_settings.npr = blank () deployment_settings.npr.key = 'MDA2OTc4ODY2MDEyOTc0NTMyMjFmZGNjZg001' deployment_settings.data = blank () deployment_settings.data.base_table = 'datatypes' # Development Mode deployment_settings.dev_mode = blank () deployment_settings.dev_mode.enabled = False deployment_settings.dev_mode.firstname = 'First' deployment_settings.dev_mode.lastname = 'Last' deployment_settings.dev_mode.email = 'fake@gmail.com' # ExtJS Settings deployment_settings.extjs = blank () deployment_settings.extjs.location = 'http://skapes.org/media/js/ext'
class Blank(object): def __init__(self): object.__init__(self) deployment_settings = blank() deployment_settings.web2py = blank() deployment_settings.web2py.port = 8000 deployment_settings.database = blank() deployment_settings.database.db_type = 'sqlite' deployment_settings.database.host = 'localhost' deployment_settings.database.port = None deployment_settings.database.database = 'healthscapes' deployment_settings.database.username = 'hs' deployment_settings.database.password = 'hs' deployment_settings.database.pool_size = 30 deployment_settings.mongodb = blank() deployment_settings.mongodb.host = None deployment_settings.mongodb.port = 27017 deployment_settings.mongodb.db = 'mongo_db' deployment_settings.mongodb.username = 'mongo' deployment_settings.mongodb.password = 'mongo' deployment_settings.postgis = blank() deployment_settings.postgis.host = None deployment_settings.postgis.port = 5432 deployment_settings.postgis.database = 'geodata' deployment_settings.postgis.username = 'postgis' deployment_settings.postgis.password = 'postgis' deployment_settings.postgis.pool_size = 10 deployment_settings.geoserver_sources = [] deployment_settings.geoserver = blank() deployment_settings.geoserver.host = 'http://localhost' deployment_settings.geoserver.port = 8888 deployment_settings.geoserver.username = 'admin' deployment_settings.geoserver.password = 'geoserver' deployment_settings.geoserver.workspace = 'hsd' deployment_settings.geoserver.pgis_store = 'test' deployment_settings.npr = blank() deployment_settings.npr.key = 'MDA2OTc4ODY2MDEyOTc0NTMyMjFmZGNjZg001' deployment_settings.data = blank() deployment_settings.data.base_table = 'datatypes' deployment_settings.dev_mode = blank() deployment_settings.dev_mode.enabled = False deployment_settings.dev_mode.firstname = 'First' deployment_settings.dev_mode.lastname = 'Last' deployment_settings.dev_mode.email = 'fake@gmail.com' deployment_settings.extjs = blank() deployment_settings.extjs.location = 'http://skapes.org/media/js/ext'
def insertion_sort(to_sort): i=0 while i <= len(to_sort)-1: hole = i; item = to_sort[i] while hole > 0 and to_sort[hole-1] > item: to_sort[hole] = to_sort[hole-1] hole-=1 to_sort[hole] = item i+=1 return to_sort
def insertion_sort(to_sort): i = 0 while i <= len(to_sort) - 1: hole = i item = to_sort[i] while hole > 0 and to_sort[hole - 1] > item: to_sort[hole] = to_sort[hole - 1] hole -= 1 to_sort[hole] = item i += 1 return to_sort
load_modules = { 'hw_USBtin': {'port':'auto', 'speed':500}, # IO hardware module # Module for sniff and replay 'mod_stat': {"bus":'mod_stat','debug':2},'mod_stat~2': {"bus":'mod_stat'}, 'mod_firewall': {}, 'mod_fuzz1':{'debug':2}, 'gen_replay': {'debug': 1}# Stats } # Now let's describe the logic of this test actions = [ {'hw_USBtin': {'action': 'read','pipe': 1}}, # Read to PIPE 1 {'mod_stat': {'pipe': 1}}, # Write generated packets (pings) {'mod_firewall': {'white_bus': ["mod_stat"]}}, {'gen_replay': {}}, {'mod_stat~2': {'pipe': 1}}, {'hw_USBtin': {'action': 'write','pipe': 1}}, ]
load_modules = {'hw_USBtin': {'port': 'auto', 'speed': 500}, 'mod_stat': {'bus': 'mod_stat', 'debug': 2}, 'mod_stat~2': {'bus': 'mod_stat'}, 'mod_firewall': {}, 'mod_fuzz1': {'debug': 2}, 'gen_replay': {'debug': 1}} actions = [{'hw_USBtin': {'action': 'read', 'pipe': 1}}, {'mod_stat': {'pipe': 1}}, {'mod_firewall': {'white_bus': ['mod_stat']}}, {'gen_replay': {}}, {'mod_stat~2': {'pipe': 1}}, {'hw_USBtin': {'action': 'write', 'pipe': 1}}]
def dragon_lives_for(sequence): dragon_size = 50 sheep = 0 squeezed_for = 0 days = 0 while True: sheep += sequence.pop(0) if dragon_size <= sheep: sheep -= dragon_size dragon_size += 1 squeezed_for = 0 else: sheep = 0 dragon_size -= 1 squeezed_for += 1 if squeezed_for >= 5: return days days += 1 def test_dragon_lives_for(): assert dragon_lives_for([50, 52, 52, 49, 50, 47, 45, 43, 50, 55]) == 7 if __name__ == '__main__': with open('input/01') as f: l = [int(v) for v in f.read().split(', ')] print(dragon_lives_for(l))
def dragon_lives_for(sequence): dragon_size = 50 sheep = 0 squeezed_for = 0 days = 0 while True: sheep += sequence.pop(0) if dragon_size <= sheep: sheep -= dragon_size dragon_size += 1 squeezed_for = 0 else: sheep = 0 dragon_size -= 1 squeezed_for += 1 if squeezed_for >= 5: return days days += 1 def test_dragon_lives_for(): assert dragon_lives_for([50, 52, 52, 49, 50, 47, 45, 43, 50, 55]) == 7 if __name__ == '__main__': with open('input/01') as f: l = [int(v) for v in f.read().split(', ')] print(dragon_lives_for(l))
# -*- coding: utf-8 -*- # ----------------------------------------------------------- # Copyright (C) 2009 StatPro Italia s.r.l. # # StatPro Italia # Via G. B. Vico 4 # I-20123 Milano # ITALY # # phone: +39 02 96875 1 # fax: +39 02 96875 605 # # email: info@riskmap.net # # This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the license for more details. # ----------------------------------------------------------- # # Author: Enrico Sirola <enrico.sirola@statpro.com> # $Id$ """drmaa constants""" # drmaa_get_attribute() ATTR_BUFFER = 1024 # drmaa_get_contact() CONTACT_BUFFER = 1024 # drmaa_get_DRM_system() DRM_SYSTEM_BUFFER = 1024 # drmaa_get_DRM_system() DRMAA_IMPLEMENTATION_BUFFER = 1024 # Agreed buffer length constants # these are recommended minimum values ERROR_STRING_BUFFER = 1024 JOBNAME_BUFFER = 1024 SIGNAL_BUFFER = 32 # Agreed constants TIMEOUT_WAIT_FOREVER = -1 TIMEOUT_NO_WAIT = 0 JOB_IDS_SESSION_ANY = "DRMAA_JOB_IDS_SESSION_ANY" JOB_IDS_SESSION_ALL = "DRMAA_JOB_IDS_SESSION_ALL" SUBMISSION_STATE_ACTIVE = "drmaa_active" SUBMISSION_STATE_HOLD = "drmaa_hold" # Agreed placeholder names PLACEHOLDER_INCR = "$drmaa_incr_ph$" PLACEHOLDER_HD = "$drmaa_hd_ph$" PLACEHOLDER_WD = "$drmaa_wd_ph$" # Agreed names of job template attributes REMOTE_COMMAND = "drmaa_remote_command" JS_STATE = "drmaa_js_state" WD = "drmaa_wd" JOB_CATEGORY = "drmaa_job_category" NATIVE_SPECIFICATION = "drmaa_native_specification" BLOCK_EMAIL = "drmaa_block_email" START_TIME = "drmaa_start_time" JOB_NAME = "drmaa_job_name" INPUT_PATH = "drmaa_input_path" OUTPUT_PATH = "drmaa_output_path" ERROR_PATH = "drmaa_error_path" JOIN_FILES = "drmaa_join_files" TRANSFER_FILES = "drmaa_transfer_files" DEADLINE_TIME = "drmaa_deadline_time" WCT_HLIMIT = "drmaa_wct_hlimit" WCT_SLIMIT = "drmaa_wct_slimit" DURATION_HLIMIT = "drmaa_duration_hlimit" DURATION_SLIMIT = "drmaa_duration_slimit" # names of job template vector attributes V_ARGV = "drmaa_v_argv" V_ENV = "drmaa_v_env" V_EMAIL = "drmaa_v_email" NO_MORE_ELEMENTS = 25 def job_state(code): return _JOB_PS[code] class JobState(object): UNDETERMINED = 'undetermined' QUEUED_ACTIVE = 'queued_active' SYSTEM_ON_HOLD = 'system_on_hold' USER_ON_HOLD = 'user_on_hold' USER_SYSTEM_ON_HOLD = 'user_system_on_hold' RUNNING = 'running' SYSTEM_SUSPENDED = 'system_suspended' USER_SUSPENDED = 'user_suspended' USER_SYSTEM_SUSPENDED = 'user_system_suspended' DONE = 'done' FAILED = 'failed' # Job control action class JobControlAction(object): SUSPEND = 'suspend' RESUME = 'resume' HOLD = 'hold' RELEASE = 'release' TERMINATE = 'terminate' _JOB_CONTROL = [ JobControlAction.SUSPEND, JobControlAction.RESUME, JobControlAction.HOLD, JobControlAction.RELEASE, JobControlAction.TERMINATE ] def string_to_control_action(operation): return _JOB_CONTROL.index(operation) def control_action_to_string(code): return _JOB_CONTROL[code] def status_to_string(status): return _JOB_PS[status] _JOB_PS = { 0x00: JobState.UNDETERMINED, 0x10: JobState.QUEUED_ACTIVE, 0x11: JobState.SYSTEM_ON_HOLD, 0x12: JobState.USER_ON_HOLD, 0x13: JobState.USER_SYSTEM_ON_HOLD, 0x20: JobState.RUNNING, 0x21: JobState.SYSTEM_SUSPENDED, 0x22: JobState.USER_SUSPENDED, 0x23: JobState.USER_SYSTEM_SUSPENDED, 0x30: JobState.DONE, 0x40: JobState.FAILED, } # State at submission time class JobSubmissionState(object): HOLD_STATE = SUBMISSION_STATE_HOLD ACTIVE_STATE = SUBMISSION_STATE_ACTIVE _SUBMISSION_STATE = [ JobSubmissionState.HOLD_STATE, JobSubmissionState.ACTIVE_STATE ] def submission_state(code): return _SUBMISSION_STATE[code]
"""drmaa constants""" attr_buffer = 1024 contact_buffer = 1024 drm_system_buffer = 1024 drmaa_implementation_buffer = 1024 error_string_buffer = 1024 jobname_buffer = 1024 signal_buffer = 32 timeout_wait_forever = -1 timeout_no_wait = 0 job_ids_session_any = 'DRMAA_JOB_IDS_SESSION_ANY' job_ids_session_all = 'DRMAA_JOB_IDS_SESSION_ALL' submission_state_active = 'drmaa_active' submission_state_hold = 'drmaa_hold' placeholder_incr = '$drmaa_incr_ph$' placeholder_hd = '$drmaa_hd_ph$' placeholder_wd = '$drmaa_wd_ph$' remote_command = 'drmaa_remote_command' js_state = 'drmaa_js_state' wd = 'drmaa_wd' job_category = 'drmaa_job_category' native_specification = 'drmaa_native_specification' block_email = 'drmaa_block_email' start_time = 'drmaa_start_time' job_name = 'drmaa_job_name' input_path = 'drmaa_input_path' output_path = 'drmaa_output_path' error_path = 'drmaa_error_path' join_files = 'drmaa_join_files' transfer_files = 'drmaa_transfer_files' deadline_time = 'drmaa_deadline_time' wct_hlimit = 'drmaa_wct_hlimit' wct_slimit = 'drmaa_wct_slimit' duration_hlimit = 'drmaa_duration_hlimit' duration_slimit = 'drmaa_duration_slimit' v_argv = 'drmaa_v_argv' v_env = 'drmaa_v_env' v_email = 'drmaa_v_email' no_more_elements = 25 def job_state(code): return _JOB_PS[code] class Jobstate(object): undetermined = 'undetermined' queued_active = 'queued_active' system_on_hold = 'system_on_hold' user_on_hold = 'user_on_hold' user_system_on_hold = 'user_system_on_hold' running = 'running' system_suspended = 'system_suspended' user_suspended = 'user_suspended' user_system_suspended = 'user_system_suspended' done = 'done' failed = 'failed' class Jobcontrolaction(object): suspend = 'suspend' resume = 'resume' hold = 'hold' release = 'release' terminate = 'terminate' _job_control = [JobControlAction.SUSPEND, JobControlAction.RESUME, JobControlAction.HOLD, JobControlAction.RELEASE, JobControlAction.TERMINATE] def string_to_control_action(operation): return _JOB_CONTROL.index(operation) def control_action_to_string(code): return _JOB_CONTROL[code] def status_to_string(status): return _JOB_PS[status] _job_ps = {0: JobState.UNDETERMINED, 16: JobState.QUEUED_ACTIVE, 17: JobState.SYSTEM_ON_HOLD, 18: JobState.USER_ON_HOLD, 19: JobState.USER_SYSTEM_ON_HOLD, 32: JobState.RUNNING, 33: JobState.SYSTEM_SUSPENDED, 34: JobState.USER_SUSPENDED, 35: JobState.USER_SYSTEM_SUSPENDED, 48: JobState.DONE, 64: JobState.FAILED} class Jobsubmissionstate(object): hold_state = SUBMISSION_STATE_HOLD active_state = SUBMISSION_STATE_ACTIVE _submission_state = [JobSubmissionState.HOLD_STATE, JobSubmissionState.ACTIVE_STATE] def submission_state(code): return _SUBMISSION_STATE[code]
load("//tools/bzl:maven_jar.bzl", "maven_jar") def external_plugin_deps(): AUTO_VALUE_VERSION = "1.7.4" maven_jar( name = "auto-value", artifact = "com.google.auto.value:auto-value:" + AUTO_VALUE_VERSION, sha1 = "6b126cb218af768339e4d6e95a9b0ae41f74e73d", ) maven_jar( name = "auto-value-annotations", artifact = "com.google.auto.value:auto-value-annotations:" + AUTO_VALUE_VERSION, sha1 = "eff48ed53995db2dadf0456426cc1f8700136f86", )
load('//tools/bzl:maven_jar.bzl', 'maven_jar') def external_plugin_deps(): auto_value_version = '1.7.4' maven_jar(name='auto-value', artifact='com.google.auto.value:auto-value:' + AUTO_VALUE_VERSION, sha1='6b126cb218af768339e4d6e95a9b0ae41f74e73d') maven_jar(name='auto-value-annotations', artifact='com.google.auto.value:auto-value-annotations:' + AUTO_VALUE_VERSION, sha1='eff48ed53995db2dadf0456426cc1f8700136f86')
# -*- coding: utf-8 -*- class AzureShellCache: __inst = None __cache = {} @staticmethod def Instance(): if AzureShellCache.__inst == None: AzureShellCache() return AzureShellCache.__inst def __init__(self): if AzureShellCache.__inst != None: raise Exception("This must not be called!!") AzureShellCache.__inst = self def set(self, k, v): self.__cache[k] = v def get(self, k): return self.__cache.get(k)
class Azureshellcache: __inst = None __cache = {} @staticmethod def instance(): if AzureShellCache.__inst == None: azure_shell_cache() return AzureShellCache.__inst def __init__(self): if AzureShellCache.__inst != None: raise exception('This must not be called!!') AzureShellCache.__inst = self def set(self, k, v): self.__cache[k] = v def get(self, k): return self.__cache.get(k)
solutions = [] maxAllowed = 10**1000 value = 1 base = 1 while value * value <= maxAllowed: while value < base * 10: solutions.append(value) value += base base = value solutions.append(value) while True: num = int(input()) if num == 0: break # Binary search # start is inclusive, end is not start = 0 end = len(solutions) while start + 1 < end: middle = (start + end) // 2 # Too high if solutions[middle] * solutions[middle] > num: end = middle else: start = middle; print(solutions[start])
solutions = [] max_allowed = 10 ** 1000 value = 1 base = 1 while value * value <= maxAllowed: while value < base * 10: solutions.append(value) value += base base = value solutions.append(value) while True: num = int(input()) if num == 0: break start = 0 end = len(solutions) while start + 1 < end: middle = (start + end) // 2 if solutions[middle] * solutions[middle] > num: end = middle else: start = middle print(solutions[start])
# template_parsetab.py # This file is automatically generated. Do not edit. # pylint: disable=W,C,R _tabversion = '3.10' _lr_method = 'LALR' _lr_signature = 'template_validateALL ARROW ARROWPARENS ARROW_PRE ASSIGN ASSIGNBAND ASSIGNBOR ASSIGNBXOR ASSIGNDIVIDE ASSIGNLLSHIFT ASSIGNLSHIFT ASSIGNMINUS ASSIGNMOD ASSIGNPLUS ASSIGNRRSHIFT ASSIGNRSHIFT ASSIGNTIMES AWAIT BACKSLASH BAND BITINV BNEGATE BOR BREAK BXOR BYTE CASE CATCH CHAR CLASS CLOSECOM COLON COMMA COMMENT COND_DOT CONST CONTINUE DEC DEFAULT DELETE DIVIDE DO DOT DOUBLE ELSE EMPTYLINE EQUAL EQUAL_STRICT EXPONENT EXPORT EXTENDS FINALLY FOR FROM FUNCTION GET GLOBAL GTHAN GTHANEQ ID IF IMPORT IN INC INFERRED INSTANCEOF LAND LBRACKET LET LLSHIFT LOR LPAREN LSBRACKET LSHIFT LTHAN LTHANEQ MINUS MLSTRLIT MOD NATIVE NEW NOT NOTEQUAL NOTEQUAL_STRICT NUMBER OF OPENCOM PLUS QEST RBRACKET REGEXPR RETURN RPAREN RRSHIFT RSBRACKET RSHIFT SEMI SET SHORT SIGNED SLASHR STATIC STRINGLIT SWITCH TEMPLATE TEMPLATE_STR TGTHAN THROW TIMES TLTHAN TRIPLEDOT TRY TYPED TYPEOF VAR VARIABLE VAR_TYPE_PREC WHILE WITH YIELD newlinelthan : LTHAN\n | TLTHAN\n gthan : GTHAN\n | TGTHAN\n id : ID\n | GET\n | SET\n | STATIC\n | CATCH\n | GLOBAL\n | AWAIT\n left_id : id id_opt : id\n |\n id_var_type : id \n id_var_decl : id \n var_type : var_type id_var_type\n | id_var_type\n | SHORT\n | DOUBLE\n | CHAR\n | BYTE\n | INFERRED\n | var_type template_ref\n \n templatedeflist : var_type\n | var_type ASSIGN var_type\n | templatedeflist COMMA var_type\n | templatedeflist COMMA var_type ASSIGN var_type\n template : lthan templatedeflist gthan\n typeof_opt : TYPEOF\n |\n \n simple_templatedeflist : typeof_opt var_type\n | simple_templatedeflist COMMA typeof_opt var_type\n template_ref : lthan simple_templatedeflist gthan\n template_ref_validate : lthan simple_templatedeflist gthan\n template_validate : template\n | template_ref_validate\n ' _lr_action_items = {'LTHAN':([0,9,11,12,13,14,15,16,18,19,20,21,22,23,24,25,28,29,33,34,36,37,39,42,43,44,],[5,5,-18,-19,-20,-21,-22,-23,-15,-5,-6,-7,-8,-9,-10,-11,-3,-4,-17,-24,5,5,5,5,-34,5,]),'TLTHAN':([0,9,11,12,13,14,15,16,18,19,20,21,22,23,24,25,28,29,33,34,36,37,39,42,43,44,],[6,6,-18,-19,-20,-21,-22,-23,-15,-5,-6,-7,-8,-9,-10,-11,-3,-4,-17,-24,6,6,6,6,-34,6,]),'$end':([1,2,3,26,28,29,30,],[0,-36,-37,-29,-3,-4,-35,]),'SHORT':([4,5,6,10,17,27,31,32,35,38,41,],[12,-1,-2,12,-30,12,-31,12,-31,12,12,]),'DOUBLE':([4,5,6,10,17,27,31,32,35,38,41,],[13,-1,-2,13,-30,13,-31,13,-31,13,13,]),'CHAR':([4,5,6,10,17,27,31,32,35,38,41,],[14,-1,-2,14,-30,14,-31,14,-31,14,14,]),'BYTE':([4,5,6,10,17,27,31,32,35,38,41,],[15,-1,-2,15,-30,15,-31,15,-31,15,15,]),'INFERRED':([4,5,6,10,17,27,31,32,35,38,41,],[16,-1,-2,16,-30,16,-31,16,-31,16,16,]),'TYPEOF':([4,5,6,31,35,],[17,-1,-2,17,17,]),'ID':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[19,-1,-2,19,19,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,19,-3,-4,-31,19,-17,-24,-31,19,19,19,19,19,19,-34,19,]),'GET':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[20,-1,-2,20,20,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,20,-3,-4,-31,20,-17,-24,-31,20,20,20,20,20,20,-34,20,]),'SET':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[21,-1,-2,21,21,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,21,-3,-4,-31,21,-17,-24,-31,21,21,21,21,21,21,-34,21,]),'STATIC':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[22,-1,-2,22,22,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,22,-3,-4,-31,22,-17,-24,-31,22,22,22,22,22,22,-34,22,]),'CATCH':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[23,-1,-2,23,23,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,23,-3,-4,-31,23,-17,-24,-31,23,23,23,23,23,23,-34,23,]),'GLOBAL':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[24,-1,-2,24,24,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,24,-3,-4,-31,24,-17,-24,-31,24,24,24,24,24,24,-34,24,]),'AWAIT':([4,5,6,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,29,31,32,33,34,35,36,37,38,39,41,42,43,44,],[25,-1,-2,25,25,-18,-19,-20,-21,-22,-23,-30,-15,-5,-6,-7,-8,-9,-10,-11,25,-3,-4,-31,25,-17,-24,-31,25,25,25,25,25,25,-34,25,]),'COMMA':([7,8,9,11,12,13,14,15,16,18,19,20,21,22,23,24,25,28,29,33,34,36,37,39,40,42,43,44,],[27,31,-25,-18,-19,-20,-21,-22,-23,-15,-5,-6,-7,-8,-9,-10,-11,-3,-4,-17,-24,-32,-27,-26,31,-33,-34,-28,]),'GTHAN':([7,8,9,11,12,13,14,15,16,18,19,20,21,22,23,24,25,28,29,33,34,36,37,39,40,42,43,44,],[28,28,-25,-18,-19,-20,-21,-22,-23,-15,-5,-6,-7,-8,-9,-10,-11,-3,-4,-17,-24,-32,-27,-26,28,-33,-34,-28,]),'TGTHAN':([7,8,9,11,12,13,14,15,16,18,19,20,21,22,23,24,25,28,29,33,34,36,37,39,40,42,43,44,],[29,29,-25,-18,-19,-20,-21,-22,-23,-15,-5,-6,-7,-8,-9,-10,-11,-3,-4,-17,-24,-32,-27,-26,29,-33,-34,-28,]),'ASSIGN':([9,11,12,13,14,15,16,18,19,20,21,22,23,24,25,28,29,33,34,37,43,],[32,-18,-19,-20,-21,-22,-23,-15,-5,-6,-7,-8,-9,-10,-11,-3,-4,-17,-24,41,-34,]),} _lr_action = {} for _k, _v in _lr_action_items.items(): for _x,_y in zip(_v[0],_v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items _lr_goto_items = {'template_validate':([0,],[1,]),'template':([0,],[2,]),'template_ref_validate':([0,],[3,]),'lthan':([0,9,36,37,39,42,44,],[4,35,35,35,35,35,35,]),'templatedeflist':([4,],[7,]),'simple_templatedeflist':([4,35,],[8,40,]),'var_type':([4,10,27,32,38,41,],[9,36,37,39,42,44,]),'typeof_opt':([4,31,35,],[10,38,10,]),'id_var_type':([4,9,10,27,32,36,37,38,39,41,42,44,],[11,33,11,11,11,33,33,11,33,11,33,33,]),'id':([4,9,10,27,32,36,37,38,39,41,42,44,],[18,18,18,18,18,18,18,18,18,18,18,18,]),'gthan':([7,8,40,],[26,30,43,]),'template_ref':([9,36,37,39,42,44,],[34,34,34,34,34,34,]),} _lr_goto = {} for _k, _v in _lr_goto_items.items(): for _x, _y in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items _lr_productions = [ ("S' -> template_validate","S'",1,None,None,None), ('lthan -> LTHAN','lthan',1,'p_lthan','js_parse_template.py',12), ('lthan -> TLTHAN','lthan',1,'p_lthan','js_parse_template.py',13), ('gthan -> GTHAN','gthan',1,'p_gthan','js_parse_template.py',18), ('gthan -> TGTHAN','gthan',1,'p_gthan','js_parse_template.py',19), ('id -> ID','id',1,'p_id','js_parse_template.py',24), ('id -> GET','id',1,'p_id','js_parse_template.py',25), ('id -> SET','id',1,'p_id','js_parse_template.py',26), ('id -> STATIC','id',1,'p_id','js_parse_template.py',27), ('id -> CATCH','id',1,'p_id','js_parse_template.py',28), ('id -> GLOBAL','id',1,'p_id','js_parse_template.py',29), ('id -> AWAIT','id',1,'p_id','js_parse_template.py',30), ('left_id -> id','left_id',1,'p_left_id','js_parse_template.py',35), ('id_opt -> id','id_opt',1,'p_id_opt','js_parse_template.py',39), ('id_opt -> <empty>','id_opt',0,'p_id_opt','js_parse_template.py',40), ('id_var_type -> id','id_var_type',1,'p_id_var_type','js_parse_template.py',46), ('id_var_decl -> id','id_var_decl',1,'p_id_var_decl','js_parse_template.py',51), ('var_type -> var_type id_var_type','var_type',2,'p_var_type','js_parse_template.py',56), ('var_type -> id_var_type','var_type',1,'p_var_type','js_parse_template.py',57), ('var_type -> SHORT','var_type',1,'p_var_type','js_parse_template.py',58), ('var_type -> DOUBLE','var_type',1,'p_var_type','js_parse_template.py',59), ('var_type -> CHAR','var_type',1,'p_var_type','js_parse_template.py',60), ('var_type -> BYTE','var_type',1,'p_var_type','js_parse_template.py',61), ('var_type -> INFERRED','var_type',1,'p_var_type','js_parse_template.py',62), ('var_type -> var_type template_ref','var_type',2,'p_var_type','js_parse_template.py',63), ('templatedeflist -> var_type','templatedeflist',1,'p_templatedeflist','js_parse_template.py',69), ('templatedeflist -> var_type ASSIGN var_type','templatedeflist',3,'p_templatedeflist','js_parse_template.py',70), ('templatedeflist -> templatedeflist COMMA var_type','templatedeflist',3,'p_templatedeflist','js_parse_template.py',71), ('templatedeflist -> templatedeflist COMMA var_type ASSIGN var_type','templatedeflist',5,'p_templatedeflist','js_parse_template.py',72), ('template -> lthan templatedeflist gthan','template',3,'p_template','js_parse_template.py',77), ('typeof_opt -> TYPEOF','typeof_opt',1,'p_typeof_opt','js_parse_template.py',82), ('typeof_opt -> <empty>','typeof_opt',0,'p_typeof_opt','js_parse_template.py',83), ('simple_templatedeflist -> typeof_opt var_type','simple_templatedeflist',2,'p_simple_templatedeflist','js_parse_template.py',93), ('simple_templatedeflist -> simple_templatedeflist COMMA typeof_opt var_type','simple_templatedeflist',4,'p_simple_templatedeflist','js_parse_template.py',94), ('template_ref -> lthan simple_templatedeflist gthan','template_ref',3,'p_template_ref','js_parse_template.py',99), ('template_ref_validate -> lthan simple_templatedeflist gthan','template_ref_validate',3,'p_template_ref_validate','js_parse_template.py',104), ('template_validate -> template','template_validate',1,'p_template_validate','js_parse_template.py',109), ('template_validate -> template_ref_validate','template_validate',1,'p_template_validate','js_parse_template.py',110), ]
_tabversion = '3.10' _lr_method = 'LALR' _lr_signature = 'template_validateALL ARROW ARROWPARENS ARROW_PRE ASSIGN ASSIGNBAND ASSIGNBOR ASSIGNBXOR ASSIGNDIVIDE ASSIGNLLSHIFT ASSIGNLSHIFT ASSIGNMINUS ASSIGNMOD ASSIGNPLUS ASSIGNRRSHIFT ASSIGNRSHIFT ASSIGNTIMES AWAIT BACKSLASH BAND BITINV BNEGATE BOR BREAK BXOR BYTE CASE CATCH CHAR CLASS CLOSECOM COLON COMMA COMMENT COND_DOT CONST CONTINUE DEC DEFAULT DELETE DIVIDE DO DOT DOUBLE ELSE EMPTYLINE EQUAL EQUAL_STRICT EXPONENT EXPORT EXTENDS FINALLY FOR FROM FUNCTION GET GLOBAL GTHAN GTHANEQ ID IF IMPORT IN INC INFERRED INSTANCEOF LAND LBRACKET LET LLSHIFT LOR LPAREN LSBRACKET LSHIFT LTHAN LTHANEQ MINUS MLSTRLIT MOD NATIVE NEW NOT NOTEQUAL NOTEQUAL_STRICT NUMBER OF OPENCOM PLUS QEST RBRACKET REGEXPR RETURN RPAREN RRSHIFT RSBRACKET RSHIFT SEMI SET SHORT SIGNED SLASHR STATIC STRINGLIT SWITCH TEMPLATE TEMPLATE_STR TGTHAN THROW TIMES TLTHAN TRIPLEDOT TRY TYPED TYPEOF VAR VARIABLE VAR_TYPE_PREC WHILE WITH YIELD newlinelthan : LTHAN\n | TLTHAN\n gthan : GTHAN\n | TGTHAN\n id : ID\n | GET\n | SET\n | STATIC\n | CATCH\n | GLOBAL\n | AWAIT\n left_id : id id_opt : id\n |\n id_var_type : id \n id_var_decl : id \n var_type : var_type id_var_type\n | id_var_type\n | SHORT\n | DOUBLE\n | CHAR\n | BYTE\n | INFERRED\n | var_type template_ref\n \n templatedeflist : var_type\n | var_type ASSIGN var_type\n | templatedeflist COMMA var_type\n | templatedeflist COMMA var_type ASSIGN var_type\n template : lthan templatedeflist gthan\n typeof_opt : TYPEOF\n |\n \n simple_templatedeflist : typeof_opt var_type\n | simple_templatedeflist COMMA typeof_opt var_type\n template_ref : lthan simple_templatedeflist gthan\n template_ref_validate : lthan simple_templatedeflist gthan\n template_validate : template\n | template_ref_validate\n ' _lr_action_items = {'LTHAN': ([0, 9, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 33, 34, 36, 37, 39, 42, 43, 44], [5, 5, -18, -19, -20, -21, -22, -23, -15, -5, -6, -7, -8, -9, -10, -11, -3, -4, -17, -24, 5, 5, 5, 5, -34, 5]), 'TLTHAN': ([0, 9, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 33, 34, 36, 37, 39, 42, 43, 44], [6, 6, -18, -19, -20, -21, -22, -23, -15, -5, -6, -7, -8, -9, -10, -11, -3, -4, -17, -24, 6, 6, 6, 6, -34, 6]), '$end': ([1, 2, 3, 26, 28, 29, 30], [0, -36, -37, -29, -3, -4, -35]), 'SHORT': ([4, 5, 6, 10, 17, 27, 31, 32, 35, 38, 41], [12, -1, -2, 12, -30, 12, -31, 12, -31, 12, 12]), 'DOUBLE': ([4, 5, 6, 10, 17, 27, 31, 32, 35, 38, 41], [13, -1, -2, 13, -30, 13, -31, 13, -31, 13, 13]), 'CHAR': ([4, 5, 6, 10, 17, 27, 31, 32, 35, 38, 41], [14, -1, -2, 14, -30, 14, -31, 14, -31, 14, 14]), 'BYTE': ([4, 5, 6, 10, 17, 27, 31, 32, 35, 38, 41], [15, -1, -2, 15, -30, 15, -31, 15, -31, 15, 15]), 'INFERRED': ([4, 5, 6, 10, 17, 27, 31, 32, 35, 38, 41], [16, -1, -2, 16, -30, 16, -31, 16, -31, 16, 16]), 'TYPEOF': ([4, 5, 6, 31, 35], [17, -1, -2, 17, 17]), 'ID': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [19, -1, -2, 19, 19, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 19, -3, -4, -31, 19, -17, -24, -31, 19, 19, 19, 19, 19, 19, -34, 19]), 'GET': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [20, -1, -2, 20, 20, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 20, -3, -4, -31, 20, -17, -24, -31, 20, 20, 20, 20, 20, 20, -34, 20]), 'SET': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [21, -1, -2, 21, 21, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 21, -3, -4, -31, 21, -17, -24, -31, 21, 21, 21, 21, 21, 21, -34, 21]), 'STATIC': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [22, -1, -2, 22, 22, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 22, -3, -4, -31, 22, -17, -24, -31, 22, 22, 22, 22, 22, 22, -34, 22]), 'CATCH': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [23, -1, -2, 23, 23, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 23, -3, -4, -31, 23, -17, -24, -31, 23, 23, 23, 23, 23, 23, -34, 23]), 'GLOBAL': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [24, -1, -2, 24, 24, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 24, -3, -4, -31, 24, -17, -24, -31, 24, 24, 24, 24, 24, 24, -34, 24]), 'AWAIT': ([4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 42, 43, 44], [25, -1, -2, 25, 25, -18, -19, -20, -21, -22, -23, -30, -15, -5, -6, -7, -8, -9, -10, -11, 25, -3, -4, -31, 25, -17, -24, -31, 25, 25, 25, 25, 25, 25, -34, 25]), 'COMMA': ([7, 8, 9, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 33, 34, 36, 37, 39, 40, 42, 43, 44], [27, 31, -25, -18, -19, -20, -21, -22, -23, -15, -5, -6, -7, -8, -9, -10, -11, -3, -4, -17, -24, -32, -27, -26, 31, -33, -34, -28]), 'GTHAN': ([7, 8, 9, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 33, 34, 36, 37, 39, 40, 42, 43, 44], [28, 28, -25, -18, -19, -20, -21, -22, -23, -15, -5, -6, -7, -8, -9, -10, -11, -3, -4, -17, -24, -32, -27, -26, 28, -33, -34, -28]), 'TGTHAN': ([7, 8, 9, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 33, 34, 36, 37, 39, 40, 42, 43, 44], [29, 29, -25, -18, -19, -20, -21, -22, -23, -15, -5, -6, -7, -8, -9, -10, -11, -3, -4, -17, -24, -32, -27, -26, 29, -33, -34, -28]), 'ASSIGN': ([9, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 28, 29, 33, 34, 37, 43], [32, -18, -19, -20, -21, -22, -23, -15, -5, -6, -7, -8, -9, -10, -11, -3, -4, -17, -24, 41, -34])} _lr_action = {} for (_k, _v) in _lr_action_items.items(): for (_x, _y) in zip(_v[0], _v[1]): if not _x in _lr_action: _lr_action[_x] = {} _lr_action[_x][_k] = _y del _lr_action_items _lr_goto_items = {'template_validate': ([0], [1]), 'template': ([0], [2]), 'template_ref_validate': ([0], [3]), 'lthan': ([0, 9, 36, 37, 39, 42, 44], [4, 35, 35, 35, 35, 35, 35]), 'templatedeflist': ([4], [7]), 'simple_templatedeflist': ([4, 35], [8, 40]), 'var_type': ([4, 10, 27, 32, 38, 41], [9, 36, 37, 39, 42, 44]), 'typeof_opt': ([4, 31, 35], [10, 38, 10]), 'id_var_type': ([4, 9, 10, 27, 32, 36, 37, 38, 39, 41, 42, 44], [11, 33, 11, 11, 11, 33, 33, 11, 33, 11, 33, 33]), 'id': ([4, 9, 10, 27, 32, 36, 37, 38, 39, 41, 42, 44], [18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18]), 'gthan': ([7, 8, 40], [26, 30, 43]), 'template_ref': ([9, 36, 37, 39, 42, 44], [34, 34, 34, 34, 34, 34])} _lr_goto = {} for (_k, _v) in _lr_goto_items.items(): for (_x, _y) in zip(_v[0], _v[1]): if not _x in _lr_goto: _lr_goto[_x] = {} _lr_goto[_x][_k] = _y del _lr_goto_items _lr_productions = [("S' -> template_validate", "S'", 1, None, None, None), ('lthan -> LTHAN', 'lthan', 1, 'p_lthan', 'js_parse_template.py', 12), ('lthan -> TLTHAN', 'lthan', 1, 'p_lthan', 'js_parse_template.py', 13), ('gthan -> GTHAN', 'gthan', 1, 'p_gthan', 'js_parse_template.py', 18), ('gthan -> TGTHAN', 'gthan', 1, 'p_gthan', 'js_parse_template.py', 19), ('id -> ID', 'id', 1, 'p_id', 'js_parse_template.py', 24), ('id -> GET', 'id', 1, 'p_id', 'js_parse_template.py', 25), ('id -> SET', 'id', 1, 'p_id', 'js_parse_template.py', 26), ('id -> STATIC', 'id', 1, 'p_id', 'js_parse_template.py', 27), ('id -> CATCH', 'id', 1, 'p_id', 'js_parse_template.py', 28), ('id -> GLOBAL', 'id', 1, 'p_id', 'js_parse_template.py', 29), ('id -> AWAIT', 'id', 1, 'p_id', 'js_parse_template.py', 30), ('left_id -> id', 'left_id', 1, 'p_left_id', 'js_parse_template.py', 35), ('id_opt -> id', 'id_opt', 1, 'p_id_opt', 'js_parse_template.py', 39), ('id_opt -> <empty>', 'id_opt', 0, 'p_id_opt', 'js_parse_template.py', 40), ('id_var_type -> id', 'id_var_type', 1, 'p_id_var_type', 'js_parse_template.py', 46), ('id_var_decl -> id', 'id_var_decl', 1, 'p_id_var_decl', 'js_parse_template.py', 51), ('var_type -> var_type id_var_type', 'var_type', 2, 'p_var_type', 'js_parse_template.py', 56), ('var_type -> id_var_type', 'var_type', 1, 'p_var_type', 'js_parse_template.py', 57), ('var_type -> SHORT', 'var_type', 1, 'p_var_type', 'js_parse_template.py', 58), ('var_type -> DOUBLE', 'var_type', 1, 'p_var_type', 'js_parse_template.py', 59), ('var_type -> CHAR', 'var_type', 1, 'p_var_type', 'js_parse_template.py', 60), ('var_type -> BYTE', 'var_type', 1, 'p_var_type', 'js_parse_template.py', 61), ('var_type -> INFERRED', 'var_type', 1, 'p_var_type', 'js_parse_template.py', 62), ('var_type -> var_type template_ref', 'var_type', 2, 'p_var_type', 'js_parse_template.py', 63), ('templatedeflist -> var_type', 'templatedeflist', 1, 'p_templatedeflist', 'js_parse_template.py', 69), ('templatedeflist -> var_type ASSIGN var_type', 'templatedeflist', 3, 'p_templatedeflist', 'js_parse_template.py', 70), ('templatedeflist -> templatedeflist COMMA var_type', 'templatedeflist', 3, 'p_templatedeflist', 'js_parse_template.py', 71), ('templatedeflist -> templatedeflist COMMA var_type ASSIGN var_type', 'templatedeflist', 5, 'p_templatedeflist', 'js_parse_template.py', 72), ('template -> lthan templatedeflist gthan', 'template', 3, 'p_template', 'js_parse_template.py', 77), ('typeof_opt -> TYPEOF', 'typeof_opt', 1, 'p_typeof_opt', 'js_parse_template.py', 82), ('typeof_opt -> <empty>', 'typeof_opt', 0, 'p_typeof_opt', 'js_parse_template.py', 83), ('simple_templatedeflist -> typeof_opt var_type', 'simple_templatedeflist', 2, 'p_simple_templatedeflist', 'js_parse_template.py', 93), ('simple_templatedeflist -> simple_templatedeflist COMMA typeof_opt var_type', 'simple_templatedeflist', 4, 'p_simple_templatedeflist', 'js_parse_template.py', 94), ('template_ref -> lthan simple_templatedeflist gthan', 'template_ref', 3, 'p_template_ref', 'js_parse_template.py', 99), ('template_ref_validate -> lthan simple_templatedeflist gthan', 'template_ref_validate', 3, 'p_template_ref_validate', 'js_parse_template.py', 104), ('template_validate -> template', 'template_validate', 1, 'p_template_validate', 'js_parse_template.py', 109), ('template_validate -> template_ref_validate', 'template_validate', 1, 'p_template_validate', 'js_parse_template.py', 110)]
def f(): print('f executed from module 1') if __name__ == '__main__': print('We are in module 1')
def f(): print('f executed from module 1') if __name__ == '__main__': print('We are in module 1')
class ParserError(Exception): """ Base parser exception class. Throws when any error occurs. """ pass
class Parsererror(Exception): """ Base parser exception class. Throws when any error occurs. """ pass
n=int(input("Enter a number : ")) r=int(input("Enter range of table : ")) print("Multiplication Table of",n,"is") for i in range(0,r): i=i+1 print(n,"X",i,"=",n*i) print("Loop completed")
n = int(input('Enter a number : ')) r = int(input('Enter range of table : ')) print('Multiplication Table of', n, 'is') for i in range(0, r): i = i + 1 print(n, 'X', i, '=', n * i) print('Loop completed')
PRACTICE = False N_DAYS = 80 with open("test.txt" if PRACTICE else "input.txt", "r") as f: content = f.read().strip() state = list(map(int, content.split(","))) def next_day(state): new_state = [] num_new = 0 for fish in state: if fish == 0: new_state.append(6) num_new += 1 else: new_state.append(fish - 1) return new_state + [8] * num_new for days in range(N_DAYS): state = next_day(state) # print("After {:2} days: ".format(days + 1) + ",".join(map(str, state))) print(len(state))
practice = False n_days = 80 with open('test.txt' if PRACTICE else 'input.txt', 'r') as f: content = f.read().strip() state = list(map(int, content.split(','))) def next_day(state): new_state = [] num_new = 0 for fish in state: if fish == 0: new_state.append(6) num_new += 1 else: new_state.append(fish - 1) return new_state + [8] * num_new for days in range(N_DAYS): state = next_day(state) print(len(state))
# This program says hello print("Hello World!") # Ask the user to input their name and assign it to the name variable print("What is your name? ") myName = input() # Print out greet followed by name print("It is good to meet you, " + myName) # Print out the length of the name print("The length of your name " + str(len(myName))) # Ask for your age and show how old you will be next year print("What is your age?") myAge = input() print("You will be " + str(int(myAge)+1) + "in a year. ")
print('Hello World!') print('What is your name? ') my_name = input() print('It is good to meet you, ' + myName) print('The length of your name ' + str(len(myName))) print('What is your age?') my_age = input() print('You will be ' + str(int(myAge) + 1) + 'in a year. ')
N = int(input()) x, y = 0, 0 for _ in range(N): T, S = input().split() T = int(T) x += min(int(12 * T / 1000), len(S)) y += max(len(S) - int(12 * T / 1000), 0) print(x, y)
n = int(input()) (x, y) = (0, 0) for _ in range(N): (t, s) = input().split() t = int(T) x += min(int(12 * T / 1000), len(S)) y += max(len(S) - int(12 * T / 1000), 0) print(x, y)
# -*- coding: utf-8 -*- # tomolab # Michele Scipioni # Harvard University, Martinos Center for Biomedical Imaging # University of Pisa LIGHT_BLUE = "rgb(200,228,246)" BLUE = "rgb(47,128,246)" LIGHT_RED = "rgb(246,228,200)" RED = "rgb(246,128,47)" LIGHT_GRAY = "rgb(246,246,246)" GRAY = "rgb(200,200,200)" GREEN = "rgb(0,100,0)"
light_blue = 'rgb(200,228,246)' blue = 'rgb(47,128,246)' light_red = 'rgb(246,228,200)' red = 'rgb(246,128,47)' light_gray = 'rgb(246,246,246)' gray = 'rgb(200,200,200)' green = 'rgb(0,100,0)'
__all__ = [ 'manager', \ 'node', \ 'feature', \ 'python_utils' ]
__all__ = ['manager', 'node', 'feature', 'python_utils']
TEST_LAT=-12 TEST_LONG=60 TEST_LOCATION_HIERARCHY_FOR_GEO_CODE=['madagascar'] class DummyLocationTree(object): def get_location_hierarchy_for_geocode(self, lat, long ): return TEST_LOCATION_HIERARCHY_FOR_GEO_CODE def get_centroid(self, location_name, level): if location_name=="jalgaon" and level==2: return None return TEST_LONG, TEST_LAT def get_location_hierarchy(self,lowest_level_location_name): if lowest_level_location_name=='pune': return ['pune','mh','india']
test_lat = -12 test_long = 60 test_location_hierarchy_for_geo_code = ['madagascar'] class Dummylocationtree(object): def get_location_hierarchy_for_geocode(self, lat, long): return TEST_LOCATION_HIERARCHY_FOR_GEO_CODE def get_centroid(self, location_name, level): if location_name == 'jalgaon' and level == 2: return None return (TEST_LONG, TEST_LAT) def get_location_hierarchy(self, lowest_level_location_name): if lowest_level_location_name == 'pune': return ['pune', 'mh', 'india']
# # PySNMP MIB module HUAWEI-PGI-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-PGI-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 19:35:58 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint") hwDatacomm, = mibBuilder.importSymbols("HUAWEI-MIB", "hwDatacomm") ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup") Counter64, Gauge32, Integer32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, iso, ModuleIdentity, MibIdentifier, Bits, Unsigned32, Counter32, IpAddress, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Gauge32", "Integer32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "iso", "ModuleIdentity", "MibIdentifier", "Bits", "Unsigned32", "Counter32", "IpAddress", "NotificationType") DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention") hwPortGroupIsolation = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144)) if mibBuilder.loadTexts: hwPortGroupIsolation.setLastUpdated('200701010000Z') if mibBuilder.loadTexts: hwPortGroupIsolation.setOrganization('Huawei Technologies Co. Ltd.') hwPortGroupIsolationMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1)) hwPortGroupIsolationConfigTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1), ) if mibBuilder.loadTexts: hwPortGroupIsolationConfigTable.setStatus('current') hwPortGroupIsolationConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1), ).setIndexNames((0, "HUAWEI-PGI-MIB", "hwPortGroupIsolationIndex")) if mibBuilder.loadTexts: hwPortGroupIsolationConfigEntry.setStatus('current') hwPortGroupIsolationIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024))) if mibBuilder.loadTexts: hwPortGroupIsolationIndex.setStatus('current') hwPortGroupIsolationIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate") if mibBuilder.loadTexts: hwPortGroupIsolationIfName.setStatus('current') hwPortGroupIsolationGroupID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readcreate") if mibBuilder.loadTexts: hwPortGroupIsolationGroupID.setStatus('current') hwPortGroupIsolationConfigRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 51), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: hwPortGroupIsolationConfigRowStatus.setStatus('current') hwPortGroupIsolationConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3)) hwPortGroupIsolationCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 1)) hwPortGroupIsolationCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 1, 1)).setObjects(("HUAWEI-PGI-MIB", "hwPortGroupIsolationObjectGroup")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): hwPortGroupIsolationCompliance = hwPortGroupIsolationCompliance.setStatus('current') hwPortGroupIsolationGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 3)) hwPortGroupIsolationObjectGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 3, 1)).setObjects(("HUAWEI-PGI-MIB", "hwPortGroupIsolationIfName"), ("HUAWEI-PGI-MIB", "hwPortGroupIsolationGroupID"), ("HUAWEI-PGI-MIB", "hwPortGroupIsolationConfigRowStatus")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): hwPortGroupIsolationObjectGroup = hwPortGroupIsolationObjectGroup.setStatus('current') mibBuilder.exportSymbols("HUAWEI-PGI-MIB", PYSNMP_MODULE_ID=hwPortGroupIsolation, hwPortGroupIsolation=hwPortGroupIsolation, hwPortGroupIsolationIfName=hwPortGroupIsolationIfName, hwPortGroupIsolationCompliance=hwPortGroupIsolationCompliance, hwPortGroupIsolationConformance=hwPortGroupIsolationConformance, hwPortGroupIsolationConfigTable=hwPortGroupIsolationConfigTable, hwPortGroupIsolationIndex=hwPortGroupIsolationIndex, hwPortGroupIsolationGroups=hwPortGroupIsolationGroups, hwPortGroupIsolationConfigEntry=hwPortGroupIsolationConfigEntry, hwPortGroupIsolationMibObjects=hwPortGroupIsolationMibObjects, hwPortGroupIsolationObjectGroup=hwPortGroupIsolationObjectGroup, hwPortGroupIsolationGroupID=hwPortGroupIsolationGroupID, hwPortGroupIsolationCompliances=hwPortGroupIsolationCompliances, hwPortGroupIsolationConfigRowStatus=hwPortGroupIsolationConfigRowStatus)
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier') (named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues') (single_value_constraint, constraints_intersection, value_size_constraint, constraints_union, value_range_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint', 'ConstraintsIntersection', 'ValueSizeConstraint', 'ConstraintsUnion', 'ValueRangeConstraint') (hw_datacomm,) = mibBuilder.importSymbols('HUAWEI-MIB', 'hwDatacomm') (module_compliance, object_group, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'ObjectGroup', 'NotificationGroup') (counter64, gauge32, integer32, time_ticks, mib_scalar, mib_table, mib_table_row, mib_table_column, object_identity, iso, module_identity, mib_identifier, bits, unsigned32, counter32, ip_address, notification_type) = mibBuilder.importSymbols('SNMPv2-SMI', 'Counter64', 'Gauge32', 'Integer32', 'TimeTicks', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'ObjectIdentity', 'iso', 'ModuleIdentity', 'MibIdentifier', 'Bits', 'Unsigned32', 'Counter32', 'IpAddress', 'NotificationType') (display_string, row_status, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'RowStatus', 'TextualConvention') hw_port_group_isolation = module_identity((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144)) if mibBuilder.loadTexts: hwPortGroupIsolation.setLastUpdated('200701010000Z') if mibBuilder.loadTexts: hwPortGroupIsolation.setOrganization('Huawei Technologies Co. Ltd.') hw_port_group_isolation_mib_objects = mib_identifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1)) hw_port_group_isolation_config_table = mib_table((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1)) if mibBuilder.loadTexts: hwPortGroupIsolationConfigTable.setStatus('current') hw_port_group_isolation_config_entry = mib_table_row((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1)).setIndexNames((0, 'HUAWEI-PGI-MIB', 'hwPortGroupIsolationIndex')) if mibBuilder.loadTexts: hwPortGroupIsolationConfigEntry.setStatus('current') hw_port_group_isolation_index = mib_table_column((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 1), integer32().subtype(subtypeSpec=value_range_constraint(1, 1024))) if mibBuilder.loadTexts: hwPortGroupIsolationIndex.setStatus('current') hw_port_group_isolation_if_name = mib_table_column((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 11), octet_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readcreate') if mibBuilder.loadTexts: hwPortGroupIsolationIfName.setStatus('current') hw_port_group_isolation_group_id = mib_table_column((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 12), integer32().subtype(subtypeSpec=value_range_constraint(1, 255))).setMaxAccess('readcreate') if mibBuilder.loadTexts: hwPortGroupIsolationGroupID.setStatus('current') hw_port_group_isolation_config_row_status = mib_table_column((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 1, 1, 1, 51), row_status()).setMaxAccess('readcreate') if mibBuilder.loadTexts: hwPortGroupIsolationConfigRowStatus.setStatus('current') hw_port_group_isolation_conformance = mib_identifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3)) hw_port_group_isolation_compliances = mib_identifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 1)) hw_port_group_isolation_compliance = module_compliance((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 1, 1)).setObjects(('HUAWEI-PGI-MIB', 'hwPortGroupIsolationObjectGroup')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): hw_port_group_isolation_compliance = hwPortGroupIsolationCompliance.setStatus('current') hw_port_group_isolation_groups = mib_identifier((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 3)) hw_port_group_isolation_object_group = object_group((1, 3, 6, 1, 4, 1, 2011, 5, 25, 144, 3, 3, 1)).setObjects(('HUAWEI-PGI-MIB', 'hwPortGroupIsolationIfName'), ('HUAWEI-PGI-MIB', 'hwPortGroupIsolationGroupID'), ('HUAWEI-PGI-MIB', 'hwPortGroupIsolationConfigRowStatus')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): hw_port_group_isolation_object_group = hwPortGroupIsolationObjectGroup.setStatus('current') mibBuilder.exportSymbols('HUAWEI-PGI-MIB', PYSNMP_MODULE_ID=hwPortGroupIsolation, hwPortGroupIsolation=hwPortGroupIsolation, hwPortGroupIsolationIfName=hwPortGroupIsolationIfName, hwPortGroupIsolationCompliance=hwPortGroupIsolationCompliance, hwPortGroupIsolationConformance=hwPortGroupIsolationConformance, hwPortGroupIsolationConfigTable=hwPortGroupIsolationConfigTable, hwPortGroupIsolationIndex=hwPortGroupIsolationIndex, hwPortGroupIsolationGroups=hwPortGroupIsolationGroups, hwPortGroupIsolationConfigEntry=hwPortGroupIsolationConfigEntry, hwPortGroupIsolationMibObjects=hwPortGroupIsolationMibObjects, hwPortGroupIsolationObjectGroup=hwPortGroupIsolationObjectGroup, hwPortGroupIsolationGroupID=hwPortGroupIsolationGroupID, hwPortGroupIsolationCompliances=hwPortGroupIsolationCompliances, hwPortGroupIsolationConfigRowStatus=hwPortGroupIsolationConfigRowStatus)
class Solution: def frequencySort(self, s: str) -> str: freq = {} for ch in s: if(ch in freq): freq[ch] += 1 else: freq[ch] = 1 out = "" for k,v in sorted(freq.items(), key=lambda x: x[1], reverse=True): out += (k * v) return out
class Solution: def frequency_sort(self, s: str) -> str: freq = {} for ch in s: if ch in freq: freq[ch] += 1 else: freq[ch] = 1 out = '' for (k, v) in sorted(freq.items(), key=lambda x: x[1], reverse=True): out += k * v return out
psys_game_rain = 0 psys_game_snow = 1 psys_game_blood = 2 psys_game_blood_2 = 3 psys_game_hoof_dust = 4 psys_game_hoof_dust_mud = 5 psys_game_water_splash_1 = 6 psys_game_water_splash_2 = 7 psys_game_water_splash_3 = 8 psys_torch_fire = 9 psys_fire_glow_1 = 10 psys_fire_glow_fixed = 11 psys_torch_smoke = 12 psys_flue_smoke_short = 13 psys_flue_smoke_tall = 14 psys_war_smoke_tall = 15 psys_ladder_dust_6m = 16 psys_ladder_dust_8m = 17 psys_ladder_dust_10m = 18 psys_ladder_dust_12m = 19 psys_ladder_dust_14m = 20 psys_ladder_straw_6m = 21 psys_ladder_straw_8m = 22 psys_ladder_straw_10m = 23 psys_ladder_straw_12m = 24 psys_ladder_straw_14m = 25 psys_torch_fire_sparks = 26 psys_fire_sparks_1 = 27 psys_pistol_smoke = 28 psys_brazier_fire_1 = 29 psys_cooking_fire_1 = 30 psys_cooking_smoke = 31 psys_food_steam = 32 psys_candle_light = 33 psys_candle_light_small = 34 psys_lamp_fire = 35 psys_dummy_smoke = 36 psys_dummy_smoke_big = 37 psys_gourd_smoke = 38 psys_gourd_piece_1 = 39 psys_gourd_piece_2 = 40 psys_fire_fly_1 = 41 psys_bug_fly_1 = 42 psys_moon_beam_1 = 43 psys_moon_beam_paricle_1 = 44 psys_night_smoke_1 = 45 psys_fireplace_fire_small = 46 psys_fireplace_fire_big = 47 psys_village_fire_big = 48 psys_village_fire_smoke_big = 49 psys_map_village_fire = 50 psys_map_village_fire_smoke = 51 psys_map_village_looted_smoke = 52 psys_dungeon_water_drops = 53 psys_wedding_rose = 54 psys_sea_foam_a = 55 psys_fall_leafs_a = 56 psys_desert_storm = 57 psys_blizzard = 58 psys_rain = 59 psys_oil = 60 psys_ship_shrapnel = 61 psys_lanse = 62 psys_lanse_straw = 63 psys_dummy_straw = 64 psys_dummy_straw_big = 65 psys_lanse_blood = 66 psys_blood_decapitation = 67
psys_game_rain = 0 psys_game_snow = 1 psys_game_blood = 2 psys_game_blood_2 = 3 psys_game_hoof_dust = 4 psys_game_hoof_dust_mud = 5 psys_game_water_splash_1 = 6 psys_game_water_splash_2 = 7 psys_game_water_splash_3 = 8 psys_torch_fire = 9 psys_fire_glow_1 = 10 psys_fire_glow_fixed = 11 psys_torch_smoke = 12 psys_flue_smoke_short = 13 psys_flue_smoke_tall = 14 psys_war_smoke_tall = 15 psys_ladder_dust_6m = 16 psys_ladder_dust_8m = 17 psys_ladder_dust_10m = 18 psys_ladder_dust_12m = 19 psys_ladder_dust_14m = 20 psys_ladder_straw_6m = 21 psys_ladder_straw_8m = 22 psys_ladder_straw_10m = 23 psys_ladder_straw_12m = 24 psys_ladder_straw_14m = 25 psys_torch_fire_sparks = 26 psys_fire_sparks_1 = 27 psys_pistol_smoke = 28 psys_brazier_fire_1 = 29 psys_cooking_fire_1 = 30 psys_cooking_smoke = 31 psys_food_steam = 32 psys_candle_light = 33 psys_candle_light_small = 34 psys_lamp_fire = 35 psys_dummy_smoke = 36 psys_dummy_smoke_big = 37 psys_gourd_smoke = 38 psys_gourd_piece_1 = 39 psys_gourd_piece_2 = 40 psys_fire_fly_1 = 41 psys_bug_fly_1 = 42 psys_moon_beam_1 = 43 psys_moon_beam_paricle_1 = 44 psys_night_smoke_1 = 45 psys_fireplace_fire_small = 46 psys_fireplace_fire_big = 47 psys_village_fire_big = 48 psys_village_fire_smoke_big = 49 psys_map_village_fire = 50 psys_map_village_fire_smoke = 51 psys_map_village_looted_smoke = 52 psys_dungeon_water_drops = 53 psys_wedding_rose = 54 psys_sea_foam_a = 55 psys_fall_leafs_a = 56 psys_desert_storm = 57 psys_blizzard = 58 psys_rain = 59 psys_oil = 60 psys_ship_shrapnel = 61 psys_lanse = 62 psys_lanse_straw = 63 psys_dummy_straw = 64 psys_dummy_straw_big = 65 psys_lanse_blood = 66 psys_blood_decapitation = 67
# config:utf-8 """ production settings file. """
""" production settings file. """
def isEven(num): num1 = num / 2 num2 = num // 2 if num1 == num2: return True else: return False # pi = (4/1) - (4/3) + (4/5) - (4/7) + (4/9) - (4/11) + (4/13) - (4/15) pi = 0.0 for index, i in enumerate(range(1, 100), start=1): thing = (4/((2 * index) - 1)) if isEven(index): pi -= thing else: pi += thing print(pi)
def is_even(num): num1 = num / 2 num2 = num // 2 if num1 == num2: return True else: return False pi = 0.0 for (index, i) in enumerate(range(1, 100), start=1): thing = 4 / (2 * index - 1) if is_even(index): pi -= thing else: pi += thing print(pi)