content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
#!/usr/bin/env python
DEF_TASKDB_CONF = {'timeout': 10.0, # seconds
'task_checkout_delay': 1.0, # seconds
'task_checkout_num_tries': 10}
class TASK_STATES(object):
QUEUED_NO_DEP = 'QUEUED_NO_DEP'
RUNNING = 'RUNNING'
FAILED = 'FAILED'
SUCCEEDED = 'SUCCEEDED'
CHECKPOINTED = 'CHECKPOINTED'
KILLED = 'KILLED'
DELETED = 'DELETED'
LIST_OF_TASK_STATES = [TASK_STATES.QUEUED_NO_DEP,
TASK_STATES.RUNNING,
TASK_STATES.FAILED,
TASK_STATES.SUCCEEDED,
TASK_STATES.CHECKPOINTED,
TASK_STATES.DELETED,
TASK_STATES.KILLED]
class TASK_LOG_ACTIONS(object):
ADDED = 'ADDED'
RAN = 'RAN'
RAN_FROM_CHECKPOINT = 'RAN_FROM_CHECKPOINT'
DELETED = 'DELETED'
RESET = 'RESET'
FAILED = 'FAILED'
SUCCEEDED = 'SUCCEEDED'
CHECKPOINTED = 'CHECKPOINTED'
KILLED = 'KILLED'
UPDATED = 'UPDATED'
CLEANED = 'CLEANED'
VALID_LOG_CHECKIN_ACTIONS = [TASK_LOG_ACTIONS.FAILED,
TASK_LOG_ACTIONS.SUCCEEDED,
TASK_LOG_ACTIONS.CHECKPOINTED,
TASK_LOG_ACTIONS.KILLED]
class TASKDB_STATES(object):
RUNNING = 'RUNNING'
PAUSED = 'PAUSED'
| def_taskdb_conf = {'timeout': 10.0, 'task_checkout_delay': 1.0, 'task_checkout_num_tries': 10}
class Task_States(object):
queued_no_dep = 'QUEUED_NO_DEP'
running = 'RUNNING'
failed = 'FAILED'
succeeded = 'SUCCEEDED'
checkpointed = 'CHECKPOINTED'
killed = 'KILLED'
deleted = 'DELETED'
list_of_task_states = [TASK_STATES.QUEUED_NO_DEP, TASK_STATES.RUNNING, TASK_STATES.FAILED, TASK_STATES.SUCCEEDED, TASK_STATES.CHECKPOINTED, TASK_STATES.DELETED, TASK_STATES.KILLED]
class Task_Log_Actions(object):
added = 'ADDED'
ran = 'RAN'
ran_from_checkpoint = 'RAN_FROM_CHECKPOINT'
deleted = 'DELETED'
reset = 'RESET'
failed = 'FAILED'
succeeded = 'SUCCEEDED'
checkpointed = 'CHECKPOINTED'
killed = 'KILLED'
updated = 'UPDATED'
cleaned = 'CLEANED'
valid_log_checkin_actions = [TASK_LOG_ACTIONS.FAILED, TASK_LOG_ACTIONS.SUCCEEDED, TASK_LOG_ACTIONS.CHECKPOINTED, TASK_LOG_ACTIONS.KILLED]
class Taskdb_States(object):
running = 'RUNNING'
paused = 'PAUSED' |
#!/usr/bin/env python3
#######################################################################################
# #
# Program purpose: Finds the first appearance of the substring 'not' and 'poor' #
# from a given string, if 'not' follows the 'poor', replace #
# the whole 'not'...'poor' substring with 'good'. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : October 11, 2019 #
# #
#######################################################################################
def get_user_string(mess: str):
is_valid = False
data = ''
while is_valid is False:
try:
data = input(mess)
if len(data) == 0:
raise ValueError('Please provide a string')
is_valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
return data
def process_string(main_data: str):
val = main_data.find('not')
new_data = ''
if val != -1:
temp = main_data.find('poor')
if temp != -1:
new_data = main_data[:val] + 'good' + main_data[temp + len('poor'):]
else:
temp = main_data.find('poor')
if temp != -1:
new_data = main_data[:temp] + 'good' + main_data[temp + len('poor'):]
if len(new_data) == 0:
return main_data
return new_data
if __name__ == "__main__":
main_str = get_user_string(mess='Enter some string: ')
proc_data = process_string(main_data=main_str)
print(f'Processed data: {proc_data}') | def get_user_string(mess: str):
is_valid = False
data = ''
while is_valid is False:
try:
data = input(mess)
if len(data) == 0:
raise value_error('Please provide a string')
is_valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
return data
def process_string(main_data: str):
val = main_data.find('not')
new_data = ''
if val != -1:
temp = main_data.find('poor')
if temp != -1:
new_data = main_data[:val] + 'good' + main_data[temp + len('poor'):]
else:
temp = main_data.find('poor')
if temp != -1:
new_data = main_data[:temp] + 'good' + main_data[temp + len('poor'):]
if len(new_data) == 0:
return main_data
return new_data
if __name__ == '__main__':
main_str = get_user_string(mess='Enter some string: ')
proc_data = process_string(main_data=main_str)
print(f'Processed data: {proc_data}') |
symbol1 = input()
symbol2 = input()
def return_characters(symbol1, symbol2):
symbol1 = ord(symbol1)
symbol2 = ord(symbol2)
result = []
for i in range(symbol1 + 1, symbol2):
char = chr(i)
result.append(char)
result = " ".join(result)
return result
print(return_characters(symbol1, symbol2))
| symbol1 = input()
symbol2 = input()
def return_characters(symbol1, symbol2):
symbol1 = ord(symbol1)
symbol2 = ord(symbol2)
result = []
for i in range(symbol1 + 1, symbol2):
char = chr(i)
result.append(char)
result = ' '.join(result)
return result
print(return_characters(symbol1, symbol2)) |
class Messages:
help = "How can I help you?"
hello = "Hello!"
welcome = "Welcome {name}! I'm VK-Reminder-Bot."
done = "I have set the reminder!"
updated = "I have updated the reminder!"
missed = "I didn't get that!"
get_title = "Please enter Reminder Title:"
get_time = "When should I remind you?"
time_retry = "Please enter a valid time:"
bad_time = "Can't set reminders in the past, Reminder discarded."
no_reminders = "You don't have any reminders!"
| class Messages:
help = 'How can I help you?'
hello = 'Hello!'
welcome = "Welcome {name}! I'm VK-Reminder-Bot."
done = 'I have set the reminder!'
updated = 'I have updated the reminder!'
missed = "I didn't get that!"
get_title = 'Please enter Reminder Title:'
get_time = 'When should I remind you?'
time_retry = 'Please enter a valid time:'
bad_time = "Can't set reminders in the past, Reminder discarded."
no_reminders = "You don't have any reminders!" |
class Solution:
def kthPalindrome(self, queries: List[int], intLength: int) -> List[int]:
start = pow(10, (intLength + 1) // 2 - 1)
end = pow(10, (intLength + 1) // 2)
mul = pow(10, intLength // 2)
def reverse(num: int) -> int:
res = 0
while num:
res = res * 10 + num % 10
num //= 10
return res
def getKthPalindrome(q: int) -> int:
prefix = start + q - 1
return prefix * mul + reverse(prefix // 10 if intLength & 1 else prefix)
return [-1 if start + q > end else getKthPalindrome(q)
for q in queries]
| class Solution:
def kth_palindrome(self, queries: List[int], intLength: int) -> List[int]:
start = pow(10, (intLength + 1) // 2 - 1)
end = pow(10, (intLength + 1) // 2)
mul = pow(10, intLength // 2)
def reverse(num: int) -> int:
res = 0
while num:
res = res * 10 + num % 10
num //= 10
return res
def get_kth_palindrome(q: int) -> int:
prefix = start + q - 1
return prefix * mul + reverse(prefix // 10 if intLength & 1 else prefix)
return [-1 if start + q > end else get_kth_palindrome(q) for q in queries] |
def checkMagazine(magazine, note):
if len(magazine) < len(note):
print("No")
# Program would not stop if not return
return None
note_dict = {}
for word in note:
if word not in note_dict:
note_dict[word] = 1
else:
note_dict[word] += 1
for word in magazine:
if word in note_dict:
note_dict[word] = max(0, note_dict[word]-1)
print(["No", "Yes"][int(sum(note_dict.values())==0)]) | def check_magazine(magazine, note):
if len(magazine) < len(note):
print('No')
return None
note_dict = {}
for word in note:
if word not in note_dict:
note_dict[word] = 1
else:
note_dict[word] += 1
for word in magazine:
if word in note_dict:
note_dict[word] = max(0, note_dict[word] - 1)
print(['No', 'Yes'][int(sum(note_dict.values()) == 0)]) |
#!/usr/bin/env python
# definitions of packets that go from the App to Artoo or App to Solo and vice versa.
# All packets are of the form (in little endian)
# 32-bit type identifier
# 32-bit length
# n bytes value
# https://docs.google.com/a/3drobotics.com/document/d/1rA1zs3T7X1n9ip9YMGZEcLCW6Mx1RR1bNlh9gF0i8nM/edit#heading=h.tcfcw63p9sfk
# packet type definitions
# Solo-App messages
# NOTE: Make sure this stays in sync with the app's definitions! Those are in iSolo/networking/SoloPacket.swift
SOLO_MESSAGE_HEADER_LENGTH = 8
# Sends Solo's current shot to the app
SOLO_MESSAGE_GET_CURRENT_SHOT = 0
SOLO_MESSAGE_SET_CURRENT_SHOT = 1
# send a location
SOLO_MESSAGE_LOCATION = 2
# record a position (for cable cam)
SOLO_RECORD_POSITION = 3
SOLO_CABLE_CAM_OPTIONS = 4
SOLO_MESSAGE_GET_BUTTON_SETTING = 5
SOLO_MESSAGE_SET_BUTTON_SETTING = 6
SOLO_PAUSE = 7
SOLO_FOLLOW_OPTIONS = 19
SOLO_FOLLOW_OPTIONS_V2 = 119
SOLO_SHOT_OPTIONS = 20
SOLO_SHOT_ERROR = 21
SOLO_PANO_OPTIONS = 22
SOLO_ZIPLINE_OPTIONS = 23
SOLO_REWIND_OPTIONS = 24
SOLO_PANO_STATE = 25
SOLO_HOME_LOCATION = 26
SOLO_POWER_STATE = 27
SOLO_ZIPLINE_LOCK = 28
SOLO_SPLINE_RECORD = 50
SOLO_SPLINE_PLAY = 51
SOLO_SPLINE_POINT = 52
SOLO_SPLINE_SEEK = 53
SOLO_SPLINE_PLAYBACK_STATUS = 54
SOLO_SPLINE_PATH_SETTINGS = 55
SOLO_SPLINE_DURATIONS = 56
SOLO_SPLINE_ATTACH = 57
# Artoo-App messages start at 100
# Shot manager to app messages start at 1000
SOLO_MESSAGE_SHOTMANAGER_ERROR = 1000
# recorded waypoint contents
SOLO_CABLE_CAM_WAYPOINT = 1001
# IG shots.
## IG Inspect - app to shotmanager
SOLO_INSPECT_START = 10001
SOLO_INSPECT_SET_WAYPOINT = 10002
SOLO_INSPECT_MOVE_GIMBAL = 10003
SOLO_INSPECT_MOVE_VEHICLE = 10004
## IG Scan
SOLO_SCAN_START = 10101
## IG Survey
SOLO_SURVEY_START = 10201
# Geo Fence
GEOFENCE_SET_DATA = 3000
GEOFENCE_SET_ACK = 3001
GEOFENCE_UPDATE_POLY = 3002
GEOFENCE_CLEAR = 3003
GEOFENCE_ACTIVATED = 3004
# Gopro control messages
GOPRO_SET_ENABLED = 5000
GOPRO_SET_REQUEST = 5001
GOPRO_RECORD = 5003
GOPRO_V1_STATE = 5005
GOPRO_V2_STATE = 5006
GOPRO_REQUEST_STATE = 5007
GOPRO_SET_EXTENDED_REQUEST = 5009
GOPRO_PHOTO = 5020 # Added to Open Solo for solex app photo logging
# enums for packet types
# failure to enter a shot due to poor ekf
SHOT_ERROR_BAD_EKF = 0
# can't enter shot if we're not armed
SHOT_ERROR_UNARMED = 1
#can't enter shot if we're RTL
SHOT_ERROR_RTL = 2
# status error codes for spline point message
SPLINE_ERROR_NONE = 0
SPLINE_ERROR_MODE = -1
SPLINE_ERROR_DUPLICATE = -2 | solo_message_header_length = 8
solo_message_get_current_shot = 0
solo_message_set_current_shot = 1
solo_message_location = 2
solo_record_position = 3
solo_cable_cam_options = 4
solo_message_get_button_setting = 5
solo_message_set_button_setting = 6
solo_pause = 7
solo_follow_options = 19
solo_follow_options_v2 = 119
solo_shot_options = 20
solo_shot_error = 21
solo_pano_options = 22
solo_zipline_options = 23
solo_rewind_options = 24
solo_pano_state = 25
solo_home_location = 26
solo_power_state = 27
solo_zipline_lock = 28
solo_spline_record = 50
solo_spline_play = 51
solo_spline_point = 52
solo_spline_seek = 53
solo_spline_playback_status = 54
solo_spline_path_settings = 55
solo_spline_durations = 56
solo_spline_attach = 57
solo_message_shotmanager_error = 1000
solo_cable_cam_waypoint = 1001
solo_inspect_start = 10001
solo_inspect_set_waypoint = 10002
solo_inspect_move_gimbal = 10003
solo_inspect_move_vehicle = 10004
solo_scan_start = 10101
solo_survey_start = 10201
geofence_set_data = 3000
geofence_set_ack = 3001
geofence_update_poly = 3002
geofence_clear = 3003
geofence_activated = 3004
gopro_set_enabled = 5000
gopro_set_request = 5001
gopro_record = 5003
gopro_v1_state = 5005
gopro_v2_state = 5006
gopro_request_state = 5007
gopro_set_extended_request = 5009
gopro_photo = 5020
shot_error_bad_ekf = 0
shot_error_unarmed = 1
shot_error_rtl = 2
spline_error_none = 0
spline_error_mode = -1
spline_error_duplicate = -2 |
tempo_em_segundo = int(input())
horas = tempo_em_segundo//3600
tempo_em_segundo -= horas*3600
minutos = tempo_em_segundo//60
segundos = tempo_em_segundo - minutos*60
print(f"{horas}:{minutos}:{segundos}") | tempo_em_segundo = int(input())
horas = tempo_em_segundo // 3600
tempo_em_segundo -= horas * 3600
minutos = tempo_em_segundo // 60
segundos = tempo_em_segundo - minutos * 60
print(f'{horas}:{minutos}:{segundos}') |
#!/usr/bin/env python
def rev(stack):
return stack[::-1]
def cut(stack, n):
return stack[n:] + stack[:n]
def incr(stack, n):
size = len(stack)
new_stack = [-1] * size
i = 0
for a in range(size):
new_stack[i % size] = stack[a]
i += n
return new_stack
def solve(inp, size):
steps = [line.split(' ') for line in inp.strip().splitlines()]
stack = list(range(size))
for techn in steps:
if techn[1] == 'into':
stack = rev(stack)
if techn[0] == 'cut':
stack = cut(stack, int(techn[1]))
if techn[1] == 'with':
stack = incr(stack, int(techn[-1]))
return stack
# with open('test.txt', 'r') as f:
# inp = f.read()
# print(solve(inp, 10))
with open('input.txt', 'r') as f:
inp = f.read()
print(solve(inp, 10007).index(2019))
| def rev(stack):
return stack[::-1]
def cut(stack, n):
return stack[n:] + stack[:n]
def incr(stack, n):
size = len(stack)
new_stack = [-1] * size
i = 0
for a in range(size):
new_stack[i % size] = stack[a]
i += n
return new_stack
def solve(inp, size):
steps = [line.split(' ') for line in inp.strip().splitlines()]
stack = list(range(size))
for techn in steps:
if techn[1] == 'into':
stack = rev(stack)
if techn[0] == 'cut':
stack = cut(stack, int(techn[1]))
if techn[1] == 'with':
stack = incr(stack, int(techn[-1]))
return stack
with open('input.txt', 'r') as f:
inp = f.read()
print(solve(inp, 10007).index(2019)) |
class ParseError(Exception):
pass
class UnsupportedFile(Exception):
pass
class MultipleParentsGFF(UnsupportedFile):
pass
| class Parseerror(Exception):
pass
class Unsupportedfile(Exception):
pass
class Multipleparentsgff(UnsupportedFile):
pass |
mariadb = dict(
ip_address = 'localhost',
port = 3307,
user = 'root',
password = 'password',
db = 'cego',
users_table = 'users'
)
test = dict(
query = 'SELECT id, firstName, lastName, email FROM users',
filename = 'Test.txt'
) | mariadb = dict(ip_address='localhost', port=3307, user='root', password='password', db='cego', users_table='users')
test = dict(query='SELECT id, firstName, lastName, email FROM users', filename='Test.txt') |
class Graph:
def __init__ (self, adj = None):
''' Creates new graph from adjacency list. '''
if adj is None:
adj = []
self.adj = adj
def GetEdges (self):
''' Returns list of the graph's edges. '''
edges = []
for vertex in self.adj:
for edge in self.adj [vertex]:
if {edge, vertex} not in edges:
edges.append ({vertex, edge})
return edges
def AddEdge (self, edge):
''' Adds edge to adj. dict if not present. '''
edge = set (edge)
(vertexOne, vertexTwo) = tuple (edge)
if vertexOne in self.adj:
self.adj [vertexOne].append (vertexTwo)
else:
self.adj [vertexOne] = [vertexTwo]
def GetVertices (self):
''' Returns list of the graph's vertices. '''
return list (self.adj.keys ())
def AddVertex (self, vertex):
''' Adds vertex to adjacency dict as key. '''
if vertex not in self.adj:
self.adj [vertex] = []
| class Graph:
def __init__(self, adj=None):
""" Creates new graph from adjacency list. """
if adj is None:
adj = []
self.adj = adj
def get_edges(self):
""" Returns list of the graph's edges. """
edges = []
for vertex in self.adj:
for edge in self.adj[vertex]:
if {edge, vertex} not in edges:
edges.append({vertex, edge})
return edges
def add_edge(self, edge):
""" Adds edge to adj. dict if not present. """
edge = set(edge)
(vertex_one, vertex_two) = tuple(edge)
if vertexOne in self.adj:
self.adj[vertexOne].append(vertexTwo)
else:
self.adj[vertexOne] = [vertexTwo]
def get_vertices(self):
""" Returns list of the graph's vertices. """
return list(self.adj.keys())
def add_vertex(self, vertex):
""" Adds vertex to adjacency dict as key. """
if vertex not in self.adj:
self.adj[vertex] = [] |
def philosophy(statement):
def thought():
return statement
return thought
question = philosophy('To B, or not to B. It depends where the bomb is.')
print(question())
| def philosophy(statement):
def thought():
return statement
return thought
question = philosophy('To B, or not to B. It depends where the bomb is.')
print(question()) |
pass
# import os
# from unittest.mock import MagicMock, patch
# import pytest
# from JumpscaleZrobot.test.utils import ZrobotBaseTest, mock_decorator
# from node_port_manager import NODE_CLIENT, NodePortManager
# from zerorobot.template.state import StateCheckError
# import itertools
# class TestNodePortManagerTemplate(ZrobotBaseTest):
# @classmethod
# def setUpClass(cls):
# super().preTest(os.path.dirname(__file__), NodePortManager)
# def setUp(self):
# patch('jumpscale.j.clients', MagicMock()).start()
# def tearDown(self):
# patch.stopall()
# def test_reserve(self):
# node_sal = MagicMock()
# def freeports(nrports=1):
# import itertools
# i = 0
# def f():
# while True:
# yield i
# i += 1
# iter = f()
# return list(itertools.islice(iter, nrports))
# node_sal.freeports = freeports
# # get_node = patch('jumpscale.j.clients.zos.get', MagicMock(return_value=node_sal)).start()
# mgr = NodePortManager(name="name")
| pass |
test = { 'name': 'q41',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> # Oops, your name is assigned to the wrong data type!;\n'
'>>> type(year_population_crossed_6_billion) == int or type(year_population_crossed_6_billion) == np.int32\n'
'True',
'hidden': False,
'locked': False},
{'code': '>>> year_population_crossed_6_billion == 1999\nTrue', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| test = {'name': 'q41', 'points': 1, 'suites': [{'cases': [{'code': '>>> # Oops, your name is assigned to the wrong data type!;\n>>> type(year_population_crossed_6_billion) == int or type(year_population_crossed_6_billion) == np.int32\nTrue', 'hidden': False, 'locked': False}, {'code': '>>> year_population_crossed_6_billion == 1999\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]} |
class UrlConstructor:
def __init__(self, key='', base_url='https://androzoo.uni.lu/api/download?apikey={0}&sha256={01}'):
self.base_url = base_url
self.key = key
def construct(self, apk):
return self.base_url.format(self.key, apk.sha256)
| class Urlconstructor:
def __init__(self, key='', base_url='https://androzoo.uni.lu/api/download?apikey={0}&sha256={01}'):
self.base_url = base_url
self.key = key
def construct(self, apk):
return self.base_url.format(self.key, apk.sha256) |
class Node:
def __init__(self,data=None,next=None,position = 0):
self.data = data
self.next = next
self.position = position
class LinkedList:
def __init__(self) -> None:
self.head = None # Initialising the head as None
def insetElement(self,data):
newNode = Node(data) # Creates a new node
if self.head:
current = self.head
while current.next:
current = current.next
current.next = newNode
else: self.head = newNode
def size(self):
count = 0
current = self.head
while current != None:
count += 1
current = current.next
return count
def addLast(self,data):
new_node = Node(data)
# if our nexted list is empty we create a new node
if self.head is None:
self.head = new_node
return
# if our nexted list is not empty we traverse and insert at last
last = self.head
while (last.next):
last = last.next
last.next = new_node
def addFirst(self,data):
# Create a new node with the data
newNode = Node(data)
# Swap our head as new node and rest of the element as next
newNode.next,self.head = self.head,newNode
def getFirst(self):
if self.size() == 0: return 'No element in list'
# As we know the first data is head so we just returning the head of our nexted list
return self.head.data
def getLast(self):
if self.size() == 0: return 'No element in list'
return self.display()[-1]
def fetch(self,index):
current = self.head
count = 0
if self.size() == 0: return 'No element in list'
# traversing the node and if our count matches the index then we return the data
while current:
if count == index:
return current.data
count += 1
current = current.next
return 'List Index outbound'
'''
# Method 1 using while loop
def fropple(self):
current = self.head
while current and current.next:
if current.data != current.next.data:
current.data,current.next.data = current.next.data,current.data
current = current.next.next
return current.data
'''
''' Method 2
def swapNodes(self):
cur = self.head
while cur and cur.next:
cur.data, cur.next.data = cur.next.data, cur.data
cur = cur.next.next
return head
'''
def swapElement(self):
current = self.head
if self.size() == 0: return 'No element in the list'
for i in range(self.size()):
if i % 2 == 0:
current.data,current.next.data = current.next.data,current.data
return current.data
def appendLinkedList(self,newList):
current = self.head
# if our head is null so we assign the head as new list
if current is None: current = newList
# dummy head
last = self.head
while last.next != None: last = last.next
# adding the new list at last using addLast method
last.next = self.addLast(Node(newList))
'''
def mergeAlternate(self, q):
p_curr = self.head
q_curr = q.head
# While there are available positions in p;
while p_curr != None and q_curr != None:
# Save next pointers
p_next = p_curr.next
q_next = q_curr.next
# make q_curr as next of p_curr
q_curr.next = p_next # change next pointer of q_curr
p_curr.next = q_curr # change next pointer of p_curr
# update current pointers for next iteration
p_curr = p_next
q_curr = q_next
q.head = q_curr
'''
def reverse(self):
prev = None
current = self.head
while current is not None:
nextElement = current.next
current.next = prev
prev = current
current = nextElement
self.head = prev
def sortList(self):
swap = 0
current = self.head
if current != None:
while(1):
swap = 0
tmp = current
while(tmp.next != None):
if tmp.data > tmp.next.data:
# swap them
swap += 1
p = tmp.data
tmp.data = tmp.next.data
tmp.next.data = p
tmp = tmp.next
else:
tmp = tmp.next
if swap == 0:
break
else:
continue
return current
return current
def index(self,item):
current = self.head
while current != None:
if current.data == item:
return current.position
current = current.next
# return
def InsertNth(self, data, position):
start = self.head
if position == 0:
return Node(data, self.head)
while position > 1:
self.head = self.head.next
position -= 1
self.head.next = Node(data, self.head.next)
return start
def insertElements(self,newData):
current = self.head
# if the data not in linked list add at first
if newData != current.data: self.addFirst(newData)
while current != None:
if current.data == newData:
self.InsertNth(newData,self.index(current.data)+1)
current = current.next
# return self.sortList()
# Method to display the list
def display(self):
if self.size() == 0: return 'No element in list'
output = []
current = self.head
while(current):
output.append(current.data)
# print(current.data)
current = current.next
return output | class Node:
def __init__(self, data=None, next=None, position=0):
self.data = data
self.next = next
self.position = position
class Linkedlist:
def __init__(self) -> None:
self.head = None
def inset_element(self, data):
new_node = node(data)
if self.head:
current = self.head
while current.next:
current = current.next
current.next = newNode
else:
self.head = newNode
def size(self):
count = 0
current = self.head
while current != None:
count += 1
current = current.next
return count
def add_last(self, data):
new_node = node(data)
if self.head is None:
self.head = new_node
return
last = self.head
while last.next:
last = last.next
last.next = new_node
def add_first(self, data):
new_node = node(data)
(newNode.next, self.head) = (self.head, newNode)
def get_first(self):
if self.size() == 0:
return 'No element in list'
return self.head.data
def get_last(self):
if self.size() == 0:
return 'No element in list'
return self.display()[-1]
def fetch(self, index):
current = self.head
count = 0
if self.size() == 0:
return 'No element in list'
while current:
if count == index:
return current.data
count += 1
current = current.next
return 'List Index outbound'
'\n # Method 1 using while loop\n def fropple(self):\n current = self.head\n while current and current.next:\n if current.data != current.next.data:\n current.data,current.next.data = current.next.data,current.data\n current = current.next.next\n return current.data\n '
' Method 2 \n def swapNodes(self):\n cur = self.head\n while cur and cur.next:\n cur.data, cur.next.data = cur.next.data, cur.data\n cur = cur.next.next\n \n return head\n '
def swap_element(self):
current = self.head
if self.size() == 0:
return 'No element in the list'
for i in range(self.size()):
if i % 2 == 0:
(current.data, current.next.data) = (current.next.data, current.data)
return current.data
def append_linked_list(self, newList):
current = self.head
if current is None:
current = newList
last = self.head
while last.next != None:
last = last.next
last.next = self.addLast(node(newList))
'\n def mergeAlternate(self, q):\n p_curr = self.head\n q_curr = q.head\n \n # While there are available positions in p;\n while p_curr != None and q_curr != None:\n \n # Save next pointers\n p_next = p_curr.next\n q_next = q_curr.next\n \n # make q_curr as next of p_curr\n q_curr.next = p_next # change next pointer of q_curr\n p_curr.next = q_curr # change next pointer of p_curr\n \n # update current pointers for next iteration\n p_curr = p_next\n q_curr = q_next\n q.head = q_curr\n '
def reverse(self):
prev = None
current = self.head
while current is not None:
next_element = current.next
current.next = prev
prev = current
current = nextElement
self.head = prev
def sort_list(self):
swap = 0
current = self.head
if current != None:
while 1:
swap = 0
tmp = current
while tmp.next != None:
if tmp.data > tmp.next.data:
swap += 1
p = tmp.data
tmp.data = tmp.next.data
tmp.next.data = p
tmp = tmp.next
else:
tmp = tmp.next
if swap == 0:
break
else:
continue
return current
return current
def index(self, item):
current = self.head
while current != None:
if current.data == item:
return current.position
current = current.next
def insert_nth(self, data, position):
start = self.head
if position == 0:
return node(data, self.head)
while position > 1:
self.head = self.head.next
position -= 1
self.head.next = node(data, self.head.next)
return start
def insert_elements(self, newData):
current = self.head
if newData != current.data:
self.addFirst(newData)
while current != None:
if current.data == newData:
self.InsertNth(newData, self.index(current.data) + 1)
current = current.next
def display(self):
if self.size() == 0:
return 'No element in list'
output = []
current = self.head
while current:
output.append(current.data)
current = current.next
return output |
def solution(A):
count = []
len_a = len(A)
for i in range(len_a):
sub_count = 0
for j in range(len_a):
if i != j and A[i] % A[j] != 0:
sub_count += 1
count.append(sub_count)
return count
print(solution([3, 1, 2, 3, 6]))
| def solution(A):
count = []
len_a = len(A)
for i in range(len_a):
sub_count = 0
for j in range(len_a):
if i != j and A[i] % A[j] != 0:
sub_count += 1
count.append(sub_count)
return count
print(solution([3, 1, 2, 3, 6])) |
# def isPrime(number):
# counter = 2
# isPrime = True
#
# while counter < number:
# if number % counter == 0:
# isPrime = False
# break
#
# counter = counter + 1
#
# return isPrime
# function isPrime
def isPrime(number):
counter = 2
while counter < number:
if number % counter == 0:
return False
counter = counter + 1
return True
# main Code
print("Give me a number?")
inputNum = int(input())
if isPrime(inputNum):
print("It's a prime")
else:
print("It's not a prime")
| def is_prime(number):
counter = 2
while counter < number:
if number % counter == 0:
return False
counter = counter + 1
return True
print('Give me a number?')
input_num = int(input())
if is_prime(inputNum):
print("It's a prime")
else:
print("It's not a prime") |
i = 0
num = int(input("Enter your number:- "))
while i <= num:
if num > 0:
print("it is positive")
elif num < 0:
print("it is negative")
else :
print("zero")
i = i + 1
| i = 0
num = int(input('Enter your number:- '))
while i <= num:
if num > 0:
print('it is positive')
elif num < 0:
print('it is negative')
else:
print('zero')
i = i + 1 |
DATA = {
"B01003_001E": "Total Population",
"B01002_001E": "Median Age",
"B11005_001E": "Total Households Age",
"B11005_002E": "Total Households With Under 18",
# household income
"B19013_001E": "Median Household Income",
"B19001_001E": "Total Households Income",
"B19001_002E": "Household Income $0 - $10,000",
"B19001_003E": "Household Income $10,000 - $14,999",
"B19001_004E": "Household Income $15,000 - $19,999",
"B19001_005E": "Household Income $20,000 - $24,999",
"B19001_006E": "Household Income $25,000 - $29,999",
"B19001_007E": "Household Income $30,000 - $34,999",
"B19001_008E": "Household Income $35,000 - $39,999",
"B19001_009E": "Household Income $40,000 - $44,999",
"B19001_010E": "Household Income $45,000 - $49,999",
"B19001_011E": "Household Income $50,000 - $59,999",
"B19001_012E": "Household Income $60,000 - $74,999",
"B19001_013E": "Household Income $75,000 - $99,999",
"B19001_014E": "Household Income $100,000 - $124,999",
"B19001_015E": "Household Income $125,000 - $149,999",
"B19001_016E": "Household Income $150,000 - $199,999",
"B19001_017E": "Household Income $200,000+",
# population by age
"B01001_001E": "Total",
"B01001_002E": "Male",
"B01001_003E": "Male - Under 5 years",
"B01001_004E": "Male - 5 to 9 years",
"B01001_005E": "Male - 10 to 14 years",
"B01001_006E": "Male - 15 to 17 years",
"B01001_007E": "Male - 18 and 19 years",
"B01001_008E": "Male - 20 years",
"B01001_009E": "Male - 21 years",
"B01001_010E": "Male - 22 to 24 years",
"B01001_011E": "Male - 25 to 29 years",
"B01001_012E": "Male - 30 to 34 years",
"B01001_013E": "Male - 35 to 39 years",
"B01001_014E": "Male - 40 to 44 years",
"B01001_015E": "Male - 45 to 49 years",
"B01001_016E": "Male - 50 to 54 years",
"B01001_017E": "Male - 55 to 59 years",
"B01001_018E": "Male - 60 and 61 years",
"B01001_019E": "Male - 62 to 64 years",
"B01001_020E": "Male - 65 and 66 years",
"B01001_021E": "Male - 67 to 69 years",
"B01001_022E": "Male - 70 to 74 years",
"B01001_023E": "Male - 75 to 79 years",
"B01001_024E": "Male - 80 to 84 years",
"B01001_025E": "Male - 85+ years",
"B01001_026E": "Female",
"B01001_027E": "Female - Under 5 years",
"B01001_028E": "Female - 5 to 9 years",
"B01001_029E": "Female - 10 to 14 years",
"B01001_030E": "Female - 15 to 17 years",
"B01001_031E": "Female - 18 and 19 years",
"B01001_032E": "Female - 20 years",
"B01001_033E": "Female - 21 years",
"B01001_034E": "Female - 22 to 24 years",
"B01001_035E": "Female - 25 to 29 years",
"B01001_036E": "Female - 30 to 34 years",
"B01001_037E": "Female - 35 to 39 years",
"B01001_038E": "Female - 40 to 44 years",
"B01001_039E": "Female - 45 to 49 years",
"B01001_040E": "Female - 50 to 54 years",
"B01001_041E": "Female - 55 to 59 years",
"B01001_042E": "Female - 60 and 61 years",
"B01001_043E": "Female - 62 to 64 years",
"B01001_044E": "Female - 65 and 66 years",
"B01001_045E": "Female - 67 to 69 years",
"B01001_046E": "Female - 70 to 74 years",
"B01001_047E": "Female - 75 to 79 years",
"B01001_048E": "Female - 80 to 84 years",
"B01001_049E": "Female - 85+ years",
# ethnicity distribution
"B04003_001E": "Ethnicity Total",
"B04003_002E": "Afghan",
"B04003_003E": "Albanian",
"B04003_004E": "Alsatian",
"B04003_005E": "American",
"B04003_006E": "Arab",
"B04003_007E": "Arab - Egyptian",
"B04003_008E": "Arab - Iraqi",
"B04003_009E": "Arab - Jordanian",
"B04003_010E": "Arab - Lebanese",
"B04003_011E": "Arab - Moroccan",
"B04003_012E": "Arab - Palestinian",
"B04003_013E": "Arab - Syrian",
"B04003_014E": "Arab - Arab",
"B04003_015E": "Arab - Other Arab",
"B04003_016E": "Armenian",
"B04003_017E": "Assyrian/Chaldean/Syriac",
"B04003_018E": "Australian",
"B04003_019E": "Austrian",
"B04003_020E": "Basque",
"B04003_021E": "Belgian",
"B04003_022E": "Brazilian",
"B04003_023E": "British",
"B04003_024E": "Bulgarian",
"B04003_025E": "Cajun",
"B04003_026E": "Canadian",
"B04003_027E": "Carpatho Rusyn",
"B04003_028E": "Celtic",
"B04003_029E": "Croatian",
"B04003_030E": "Cypriot",
"B04003_031E": "Czech",
"B04003_032E": "Czechoslovakian",
"B04003_033E": "Danish",
"B04003_034E": "Dutch",
"B04003_035E": "Eastern European",
"B04003_036E": "English",
"B04003_037E": "Estonian",
"B04003_038E": "European",
"B04003_039E": "Finnish",
"B04003_040E": "French (except Basque)",
"B04003_041E": "French Canadian",
"B04003_042E": "German",
"B04003_043E": "German Russian",
"B04003_044E": "Greek",
"B04003_045E": "Guyanese",
"B04003_046E": "Hungarian",
"B04003_047E": "Icelander",
"B04003_048E": "Iranian",
"B04003_049E": "Irish",
"B04003_050E": "Israeli",
"B04003_051E": "Italian",
"B04003_052E": "Latvian",
"B04003_053E": "Lithuanian",
"B04003_054E": "Luxemburger",
"B04003_055E": "Macedonian",
"B04003_056E": "Maltese",
"B04003_057E": "New Zealander",
"B04003_058E": "Northern European",
"B04003_059E": "Norwegian",
"B04003_060E": "Pennsylvania German",
"B04003_061E": "Polish",
"B04003_062E": "Portuguese",
"B04003_063E": "Romanian",
"B04003_064E": "Russian",
"B04003_065E": "Scandinavian",
"B04003_066E": "Scotch-Irish",
"B04003_067E": "Scottish",
"B04003_068E": "Serbian",
"B04003_069E": "Slavic",
"B04003_070E": "Slovak",
"B04003_071E": "Slovene",
"B04003_072E": "Soviet Union",
"B04003_073E": "Subsaharan African",
"B04003_074E": "Subsaharan African - Cape Verdean",
"B04003_075E": "Subsaharan African - Ethiopian",
"B04003_076E": "Subsaharan African - Ghanaian",
"B04003_077E": "Subsaharan African - Kenyan",
"B04003_078E": "Subsaharan African - Liberian",
"B04003_079E": "Subsaharan African - Nigerian",
"B04003_080E": "Subsaharan African - Senegalese",
"B04003_081E": "Subsaharan African - Sierra Leonean",
"B04003_082E": "Subsaharan African - Somalian",
"B04003_083E": "Subsaharan African - South African",
"B04003_084E": "Subsaharan African - Sudanese",
"B04003_085E": "Subsaharan African - Ugandan",
"B04003_086E": "Subsaharan African - Zimbabwean",
"B04003_087E": "Subsaharan African - African",
"B04003_088E": "Subsaharan African - Other Subsaharan African",
"B04003_089E": "Swedish",
"B04003_090E": "Swiss",
"B04003_091E": "Turkish",
"B04003_092E": "Ukrainian",
"B04003_093E": "Welsh",
"B04003_094E": "West Indian",
"B04003_095E": "West Indian - Bahamian",
"B04003_096E": "West Indian - Barbadian",
"B04003_097E": "West Indian - Belizean",
"B04003_098E": "West Indian - Bermudan",
"B04003_099E": "West Indian - British West Indian",
"B04003_100E": "West Indian - Dutch West Indian",
"B04003_101E": "West Indian - Haitian",
"B04003_102E": "West Indian - Jamaican",
"B04003_103E": "West Indian - Trinidadian and Tobagonian",
"B04003_104E": "West Indian - U.S. Virgin Islander",
"B04003_105E": "West Indian - West Indian",
"B04003_106E": "West Indian - Other West Indian",
"B04003_107E": "Yugoslavian",
"B04003_108E": "Other groups",
#
# new as of 2012-12-03
#
# rent
"B25058_001E": "Median contract rent",
"B25064_001E": "Median gross rent",
# owning
"B25077_001E": "Median value (dollars)",
# own v rent
#"B25003_001E": "Total",
"B25003_002E": "Owner occupied",
"B25003_003E": "Renter occupied",
#
# new as of 2012-12-05
#
# means of transportation to work
"B08301_001E": "Total",
"B08301_002E": "Car, truck, or van", ####
"B08301_003E": "Drove alone",
"B08301_004E": "Carpooled",
"B08301_005E": "In 2-person carpool",
"B08301_006E": "In 3-person carpool",
"B08301_007E": "In 4-person carpool",
"B08301_008E": "In 5- or 6-person carpool",
"B08301_009E": "In 7-or-more-person carpool",
"B08301_010E": "Public transportation (excluding taxicab)", ####
"B08301_011E": "Bus or trolley bus",
"B08301_012E": "Streetcar or trolley car (carro publico in Puerto Rico)",
"B08301_013E": "Subway or elevated",
"B08301_014E": "Railroad",
"B08301_015E": "Ferryboat",
"B08301_016E": "Taxicab", ####
"B08301_017E": "Motorcycle", ####
"B08301_018E": "Bicycle", ####
"B08301_019E": "Walked", ####
"B08301_020E": "Other means", ####
"B08301_021E": "Worked at home", ####
#
# new as of 2012-12-11
#
"B25035_001E": "Median year structure built",
}
| data = {'B01003_001E': 'Total Population', 'B01002_001E': 'Median Age', 'B11005_001E': 'Total Households Age', 'B11005_002E': 'Total Households With Under 18', 'B19013_001E': 'Median Household Income', 'B19001_001E': 'Total Households Income', 'B19001_002E': 'Household Income $0 - $10,000', 'B19001_003E': 'Household Income $10,000 - $14,999', 'B19001_004E': 'Household Income $15,000 - $19,999', 'B19001_005E': 'Household Income $20,000 - $24,999', 'B19001_006E': 'Household Income $25,000 - $29,999', 'B19001_007E': 'Household Income $30,000 - $34,999', 'B19001_008E': 'Household Income $35,000 - $39,999', 'B19001_009E': 'Household Income $40,000 - $44,999', 'B19001_010E': 'Household Income $45,000 - $49,999', 'B19001_011E': 'Household Income $50,000 - $59,999', 'B19001_012E': 'Household Income $60,000 - $74,999', 'B19001_013E': 'Household Income $75,000 - $99,999', 'B19001_014E': 'Household Income $100,000 - $124,999', 'B19001_015E': 'Household Income $125,000 - $149,999', 'B19001_016E': 'Household Income $150,000 - $199,999', 'B19001_017E': 'Household Income $200,000+', 'B01001_001E': 'Total', 'B01001_002E': 'Male', 'B01001_003E': 'Male - Under 5 years', 'B01001_004E': 'Male - 5 to 9 years', 'B01001_005E': 'Male - 10 to 14 years', 'B01001_006E': 'Male - 15 to 17 years', 'B01001_007E': 'Male - 18 and 19 years', 'B01001_008E': 'Male - 20 years', 'B01001_009E': 'Male - 21 years', 'B01001_010E': 'Male - 22 to 24 years', 'B01001_011E': 'Male - 25 to 29 years', 'B01001_012E': 'Male - 30 to 34 years', 'B01001_013E': 'Male - 35 to 39 years', 'B01001_014E': 'Male - 40 to 44 years', 'B01001_015E': 'Male - 45 to 49 years', 'B01001_016E': 'Male - 50 to 54 years', 'B01001_017E': 'Male - 55 to 59 years', 'B01001_018E': 'Male - 60 and 61 years', 'B01001_019E': 'Male - 62 to 64 years', 'B01001_020E': 'Male - 65 and 66 years', 'B01001_021E': 'Male - 67 to 69 years', 'B01001_022E': 'Male - 70 to 74 years', 'B01001_023E': 'Male - 75 to 79 years', 'B01001_024E': 'Male - 80 to 84 
years', 'B01001_025E': 'Male - 85+ years', 'B01001_026E': 'Female', 'B01001_027E': 'Female - Under 5 years', 'B01001_028E': 'Female - 5 to 9 years', 'B01001_029E': 'Female - 10 to 14 years', 'B01001_030E': 'Female - 15 to 17 years', 'B01001_031E': 'Female - 18 and 19 years', 'B01001_032E': 'Female - 20 years', 'B01001_033E': 'Female - 21 years', 'B01001_034E': 'Female - 22 to 24 years', 'B01001_035E': 'Female - 25 to 29 years', 'B01001_036E': 'Female - 30 to 34 years', 'B01001_037E': 'Female - 35 to 39 years', 'B01001_038E': 'Female - 40 to 44 years', 'B01001_039E': 'Female - 45 to 49 years', 'B01001_040E': 'Female - 50 to 54 years', 'B01001_041E': 'Female - 55 to 59 years', 'B01001_042E': 'Female - 60 and 61 years', 'B01001_043E': 'Female - 62 to 64 years', 'B01001_044E': 'Female - 65 and 66 years', 'B01001_045E': 'Female - 67 to 69 years', 'B01001_046E': 'Female - 70 to 74 years', 'B01001_047E': 'Female - 75 to 79 years', 'B01001_048E': 'Female - 80 to 84 years', 'B01001_049E': 'Female - 85+ years', 'B04003_001E': 'Ethnicity Total', 'B04003_002E': 'Afghan', 'B04003_003E': 'Albanian', 'B04003_004E': 'Alsatian', 'B04003_005E': 'American', 'B04003_006E': 'Arab', 'B04003_007E': 'Arab - Egyptian', 'B04003_008E': 'Arab - Iraqi', 'B04003_009E': 'Arab - Jordanian', 'B04003_010E': 'Arab - Lebanese', 'B04003_011E': 'Arab - Moroccan', 'B04003_012E': 'Arab - Palestinian', 'B04003_013E': 'Arab - Syrian', 'B04003_014E': 'Arab - Arab', 'B04003_015E': 'Arab - Other Arab', 'B04003_016E': 'Armenian', 'B04003_017E': 'Assyrian/Chaldean/Syriac', 'B04003_018E': 'Australian', 'B04003_019E': 'Austrian', 'B04003_020E': 'Basque', 'B04003_021E': 'Belgian', 'B04003_022E': 'Brazilian', 'B04003_023E': 'British', 'B04003_024E': 'Bulgarian', 'B04003_025E': 'Cajun', 'B04003_026E': 'Canadian', 'B04003_027E': 'Carpatho Rusyn', 'B04003_028E': 'Celtic', 'B04003_029E': 'Croatian', 'B04003_030E': 'Cypriot', 'B04003_031E': 'Czech', 'B04003_032E': 'Czechoslovakian', 'B04003_033E': 'Danish', 
'B04003_034E': 'Dutch', 'B04003_035E': 'Eastern European', 'B04003_036E': 'English', 'B04003_037E': 'Estonian', 'B04003_038E': 'European', 'B04003_039E': 'Finnish', 'B04003_040E': 'French (except Basque)', 'B04003_041E': 'French Canadian', 'B04003_042E': 'German', 'B04003_043E': 'German Russian', 'B04003_044E': 'Greek', 'B04003_045E': 'Guyanese', 'B04003_046E': 'Hungarian', 'B04003_047E': 'Icelander', 'B04003_048E': 'Iranian', 'B04003_049E': 'Irish', 'B04003_050E': 'Israeli', 'B04003_051E': 'Italian', 'B04003_052E': 'Latvian', 'B04003_053E': 'Lithuanian', 'B04003_054E': 'Luxemburger', 'B04003_055E': 'Macedonian', 'B04003_056E': 'Maltese', 'B04003_057E': 'New Zealander', 'B04003_058E': 'Northern European', 'B04003_059E': 'Norwegian', 'B04003_060E': 'Pennsylvania German', 'B04003_061E': 'Polish', 'B04003_062E': 'Portuguese', 'B04003_063E': 'Romanian', 'B04003_064E': 'Russian', 'B04003_065E': 'Scandinavian', 'B04003_066E': 'Scotch-Irish', 'B04003_067E': 'Scottish', 'B04003_068E': 'Serbian', 'B04003_069E': 'Slavic', 'B04003_070E': 'Slovak', 'B04003_071E': 'Slovene', 'B04003_072E': 'Soviet Union', 'B04003_073E': 'Subsaharan African', 'B04003_074E': 'Subsaharan African - Cape Verdean', 'B04003_075E': 'Subsaharan African - Ethiopian', 'B04003_076E': 'Subsaharan African - Ghanaian', 'B04003_077E': 'Subsaharan African - Kenyan', 'B04003_078E': 'Subsaharan African - Liberian', 'B04003_079E': 'Subsaharan African - Nigerian', 'B04003_080E': 'Subsaharan African - Senegalese', 'B04003_081E': 'Subsaharan African - Sierra Leonean', 'B04003_082E': 'Subsaharan African - Somalian', 'B04003_083E': 'Subsaharan African - South African', 'B04003_084E': 'Subsaharan African - Sudanese', 'B04003_085E': 'Subsaharan African - Ugandan', 'B04003_086E': 'Subsaharan African - Zimbabwean', 'B04003_087E': 'Subsaharan African - African', 'B04003_088E': 'Subsaharan African - Other Subsaharan African', 'B04003_089E': 'Swedish', 'B04003_090E': 'Swiss', 'B04003_091E': 'Turkish', 'B04003_092E': 
'Ukrainian', 'B04003_093E': 'Welsh', 'B04003_094E': 'West Indian', 'B04003_095E': 'West Indian - Bahamian', 'B04003_096E': 'West Indian - Barbadian', 'B04003_097E': 'West Indian - Belizean', 'B04003_098E': 'West Indian - Bermudan', 'B04003_099E': 'West Indian - British West Indian', 'B04003_100E': 'West Indian - Dutch West Indian', 'B04003_101E': 'West Indian - Haitian', 'B04003_102E': 'West Indian - Jamaican', 'B04003_103E': 'West Indian - Trinidadian and Tobagonian', 'B04003_104E': 'West Indian - U.S. Virgin Islander', 'B04003_105E': 'West Indian - West Indian', 'B04003_106E': 'West Indian - Other West Indian', 'B04003_107E': 'Yugoslavian', 'B04003_108E': 'Other groups', 'B25058_001E': 'Median contract rent', 'B25064_001E': 'Median gross rent', 'B25077_001E': 'Median value (dollars)', 'B25003_002E': 'Owner occupied', 'B25003_003E': 'Renter occupied', 'B08301_001E': 'Total', 'B08301_002E': 'Car, truck, or van', 'B08301_003E': 'Drove alone', 'B08301_004E': 'Carpooled', 'B08301_005E': 'In 2-person carpool', 'B08301_006E': 'In 3-person carpool', 'B08301_007E': 'In 4-person carpool', 'B08301_008E': 'In 5- or 6-person carpool', 'B08301_009E': 'In 7-or-more-person carpool', 'B08301_010E': 'Public transportation (excluding taxicab)', 'B08301_011E': 'Bus or trolley bus', 'B08301_012E': 'Streetcar or trolley car (carro publico in Puerto Rico)', 'B08301_013E': 'Subway or elevated', 'B08301_014E': 'Railroad', 'B08301_015E': 'Ferryboat', 'B08301_016E': 'Taxicab', 'B08301_017E': 'Motorcycle', 'B08301_018E': 'Bicycle', 'B08301_019E': 'Walked', 'B08301_020E': 'Other means', 'B08301_021E': 'Worked at home', 'B25035_001E': 'Median year structure built'} |
'''Basic object to store the agents and auxiliary content in the agent system
graph. The object should be considered to be replaced with namedtuple at some
point, once the default field has matured
'''
class Node(object):
    '''Basic object to store agent and auxiliary content in the agent system.

    Parameters
    ----------
    name : str
        Name of node
    agent_content : Agent
        An Agent object
    aux_content : optional
        Auxiliary content, such as an immediate environment, to the Agent of
        the Node
    other_attributes : dict, optional
        Dictionary of additional attributes assigned to the Node. These can
        be part of operations on the graph during a simulation or they can be
        part of graph sampling, for example. Each key is the name of the
        attribute, the value is the value of the attribute.
    '''
    def __str__(self):
        return 'Node(name:%s)' %(self.name)
    def __contains__(self, item):
        # A node "contains" an id exactly when its agent carries that system id.
        if self.agent_content is None:
            return False
        else:
            return item == self.agent_content.agent_id_system
    def __init__(self, name, agent_content, aux_content=None,
                 other_attributes=None):
        self.name = name
        self.agent_content = agent_content
        self.aux_content = aux_content
        # Fixes relative to the previous revision:
        #  * the default was a shared mutable dict ({}); use None instead.
        #  * iterating a dict yields only its keys, so the old
        #    `for key, item in other_attributes:` unpacked each *key* —
        #    iterate .items() to get the documented (name, value) pairs.
        for key, item in (other_attributes or {}).items():
            setattr(self, key, item)
def node_maker(agents, envs=None, node_names=None, node_attributes=None):
'''Convenience function to place a collection of agents and environments in nodes
Parameters
----------
TBD
Returns
-------
TBD
'''
n_nodes = len(agents)
if not envs is None:
if len(envs) != n_nodes:
raise ValueError('Environment container not of same size as agent container')
envs_iter = envs
else:
envs_iter = [None] * n_nodes
if not node_names is None:
if len(node_names) != n_nodes:
raise ValueError('Node names container no of same size as agent container')
node_names_iter = node_names
else:
node_names_iter = ['ID {}'.format(k) for k in range(n_nodes)]
if not node_attributes is None:
if len(node_attributes) != n_nodes:
raise ValueError('Node attributes container not of same size as agent container')
node_attributes_iter = node_attributes
else:
node_attributes_iter = [{}] * n_nodes
ret = []
for agent, env, name, attributes in zip(agents, envs_iter, node_names_iter, node_attributes_iter):
ret.append(Node(name, agent, env, attributes))
return ret | """Basic object to store the agents and auxiliary content in the agent system
graph. The object should be considered to be replaced with namedtuple at some
point, once the default field has matured
"""
class Node(object):
"""Basic object to store agent and auxiliary content in the agent system.
Parameters
----------
name : str
Name of node
agent_content : Agent
An Agent object
aux_content : optional
Auxiliary content, such as an immediate environment, to the Agent of
the Node
other_attributes : dict, optional
Dictionary of additional attributes assigned to the Node. These can
be part of operations on the graph during a simulation or they can be
part of graph sampling, for example. Each key is the name of the
attribute, the value is the value of the attribute.
"""
def __str__(self):
return 'Node(name:%s)' % self.name
def __contains__(self, item):
if self.agent_content is None:
return False
else:
return item == self.agent_content.agent_id_system
def __init__(self, name, agent_content, aux_content=None, other_attributes={}):
self.name = name
self.agent_content = agent_content
self.aux_content = aux_content
for (key, item) in other_attributes:
setattr(self, key, item)
def node_maker(agents, envs=None, node_names=None, node_attributes=None):
"""Convenience function to place a collection of agents and environments in nodes
Parameters
----------
TBD
Returns
-------
TBD
"""
n_nodes = len(agents)
if not envs is None:
if len(envs) != n_nodes:
raise value_error('Environment container not of same size as agent container')
envs_iter = envs
else:
envs_iter = [None] * n_nodes
if not node_names is None:
if len(node_names) != n_nodes:
raise value_error('Node names container no of same size as agent container')
node_names_iter = node_names
else:
node_names_iter = ['ID {}'.format(k) for k in range(n_nodes)]
if not node_attributes is None:
if len(node_attributes) != n_nodes:
raise value_error('Node attributes container not of same size as agent container')
node_attributes_iter = node_attributes
else:
node_attributes_iter = [{}] * n_nodes
ret = []
for (agent, env, name, attributes) in zip(agents, envs_iter, node_names_iter, node_attributes_iter):
ret.append(node(name, agent, env, attributes))
return ret |
# Teaching-fee calculator: reads clock-in / clock-out hours from stdin and
# prints the fee. Prompts and messages are in Indonesian (masuk = in,
# keluar = out, lama = duration, biaya = fee).
masuk=int(input("Masukkan Jam Masuk = "))
keluar=int(input("Masukkan Jam Keluar ="))
# Duration in whole hours; assumes keluar >= masuk (no validation).
lama=keluar-masuk
# Flat fee covering the first hour, in Rupiah.
payment=12000
print("Lama Mengajar = ", lama, "jam")
if lama <=1:
    # Up to one hour: flat first-hour fee only.
    satu_jam_pertama=payment
    print("Biaya Mengajar= Rp", satu_jam_pertama)
elif lama <10:
    # 2-9 hours: first-hour fee plus Rp3000 per hour.
    # NOTE(review): (lama+1) looks like it may be intended as (lama-1)
    # (hours beyond the first) — confirm against the fee schedule.
    biaya_selanjutnya = (lama+1)*3000+payment
    print("Biaya Mengajar = Rp", biaya_selanjutnya)
elif lama >= 10:
    # 10 hours or more: capped flat rate.
    print("Biaya Mengajar = Rp", 1000000)
else:
    # Unreachable for integer lama: the branches above cover all values.
    print("nul")
| masuk = int(input('Masukkan Jam Masuk = '))
keluar = int(input('Masukkan Jam Keluar ='))
lama = keluar - masuk
payment = 12000
print('Lama Mengajar = ', lama, 'jam')
if lama <= 1:
satu_jam_pertama = payment
print('Biaya Mengajar= Rp', satu_jam_pertama)
elif lama < 10:
biaya_selanjutnya = (lama + 1) * 3000 + payment
print('Biaya Mengajar = Rp', biaya_selanjutnya)
elif lama >= 10:
print('Biaya Mengajar = Rp', 1000000)
else:
print('nul') |
# dataset settings
# MMDetection-style dataset config for a single-class ("cell phone") detector.
# Train/val annotations are COCO2017 instances exported to a WIDER FACE style
# list file; presumably parsed by the custom `PhoneDataset` — confirm loader.
dataset_type = 'PhoneDataset'
data_root = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/'
ann_files = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/annotations/instances_train2017_cell_phone_format_widerface.txt'
val_data_root = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/'
val_ann_files = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/annotations/instances_val2017_cell_phone_format_widerface.txt'
# Mean subtraction only (std = 1 leaves the pixel scale unchanged).
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
# Training augmentation: photometric jitter -> random expand -> IoU-constrained
# random crop -> fixed 320x320 resize (aspect ratio NOT preserved) -> flip.
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(320, 320), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Same augmentation chain but images are loaded as grayscale; the extra
# 'Stack' step presumably rebuilds a multi-channel image from the single
# gray channel (custom transform) — confirm its implementation.
gray_train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True, color_type='grayscale'),
    dict(type='Stack'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(320, 320), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Inference: single scale 320x320, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(320, 320),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Alternative two-branch (RGB + grayscale) training setup, currently disabled.
# rgb_dataset_train = dict(
#     type='RepeatDataset',
#     times=2,
#     dataset=dict(
#         type=dataset_type,
#         ann_file=ann_files,
#         img_prefix=data_root,
#         pipeline=train_pipeline
#     )
# )
# gray_dataset_train = dict(
#     type='RepeatDataset',
#     times=2,
#     dataset=dict(
#         type=dataset_type,
#         ann_file=ann_files,
#         img_prefix=data_root,
#         pipeline=gray_train_pipeline
#     )
# )
data = dict(
    samples_per_gpu=60,
    workers_per_gpu=4,
    # train=[rgb_dataset_train, gray_dataset_train],
    # Each epoch iterates the train annotations twice (RepeatDataset).
    train=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_files,
            img_prefix=data_root,
            pipeline=train_pipeline
        )
    ),
    # Validation and test share the same annotation file and pipeline.
    val=dict(
        type=dataset_type,
        ann_file=val_ann_files,
        img_prefix=val_data_root,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=val_ann_files,
        img_prefix=val_data_root,
        pipeline=test_pipeline))
| dataset_type = 'PhoneDataset'
data_root = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/'
ann_files = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/annotations/instances_train2017_cell_phone_format_widerface.txt'
val_data_root = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/'
val_ann_files = '/home/ubuntu/tienpv/datasets/PhoneDatasets/COCO2017/annotations/instances_val2017_cell_phone_format_widerface.txt'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict(type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict(type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
gray_train_pipeline = [dict(type='LoadImageFromFile', to_float32=True, color_type='grayscale'), dict(type='Stack'), dict(type='LoadAnnotations', with_bbox=True), dict(type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict(type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='RandomFlip', flip_ratio=0.5), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]
test_pipeline = [dict(type='LoadImageFromFile'), dict(type='MultiScaleFlipAug', img_scale=(320, 320), flip=False, transforms=[dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])])]
data = dict(samples_per_gpu=60, workers_per_gpu=4, train=dict(type='RepeatDataset', times=2, dataset=dict(type=dataset_type, ann_file=ann_files, img_prefix=data_root, pipeline=train_pipeline)), val=dict(type=dataset_type, ann_file=val_ann_files, img_prefix=val_data_root, pipeline=test_pipeline), test=dict(type=dataset_type, ann_file=val_ann_files, img_prefix=val_data_root, pipeline=test_pipeline)) |
def longestPeak(array):
    """Return the length of the longest "peak" in `array`: a strictly
    increasing run immediately followed by a strictly decreasing run
    (so a peak spans at least 3 elements). Returns 0 when none exists.
    O(n) time, O(1) space: every index is visited at most twice."""
    n = len(array)
    best = 0
    idx = 1
    while idx < n - 1:
        is_tip = array[idx - 1] < array[idx] and array[idx] > array[idx + 1]
        if not is_tip:
            idx += 1
            continue
        # Expand left across the strictly increasing slope.
        lo = idx - 1
        while lo > 0 and array[lo - 1] < array[lo]:
            lo -= 1
        # Expand right across the strictly decreasing slope.
        hi = idx + 1
        while hi + 1 < n and array[hi + 1] < array[hi]:
            hi += 1
        best = max(best, hi - lo + 1)
        # No new tip can occur inside the slope we just consumed.
        idx = hi + 1
    return best
| def longest_peak(array):
max_size = 0
i = 1
while i < len(array) - 1:
peak = array[i - 1] < array[i] > array[i + 1]
if not peak:
i += 1
continue
left = i - 1
right = i + 1
while left >= 0 and array[left] < array[left + 1]:
left -= 1
while right < len(array) and array[right] < array[right - 1]:
right += 1
max_size = max(max_size, right - left - 1)
i = right
return max_size |
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/s10-standard-deviation
# Difficulty: Easy
# Max Score: 30
# Language: Python
# ========================
# Solution
# ========================
# Read the element count N and the N space-separated integers.
N = int(input())
X = list(map(int, input().strip().split(' ')))
MEAN = sum(X)/N
# Population variance: mean of squared deviations from MEAN.
# Fix: the accumulator was named `sum`, shadowing the builtin used above —
# renamed to `variance` so the builtin stays available.
variance = 0
for i in range(N):
    variance += ((X[i]-MEAN)**2)/N
# Standard deviation rounded to one decimal, per the challenge spec.
print(round(variance**0.5, 1))
| n = int(input())
x = list(map(int, input().strip().split(' ')))
mean = sum(X) / N
sum = 0
for i in range(N):
sum += (X[i] - MEAN) ** 2 / N
print(round(sum ** 0.5, 1)) |
class Entity(object):
    """Description of a managed-object entity: its represented class name,
    optional parent entity, abstractness flag, and the lists of attributes
    and relationships attached to it."""
    def __init__(self, name, represented_class_name=None, parent_entity=None,
                 is_abstract=False, attributes=None, relationships=None):
        self.name = name
        # Fall back to the entity name when no explicit class name is given.
        self.represented_class_name = represented_class_name or name
        self.parent_entity = parent_entity
        self.is_abstract = is_abstract
        self.attributes = attributes if attributes else []
        self.relationships = relationships if relationships else []
    def __str__(self):
        return self.name
    def __repr__(self):
        return '<Entity %s>' % self.name
    def __eq__(self, other):
        # Entities are equal when every descriptive field matches.
        if not isinstance(other, Entity):
            return False
        mine = (self.name, self.represented_class_name, self.parent_entity,
                self.is_abstract, self.attributes, self.relationships)
        theirs = (other.name, other.represented_class_name, other.parent_entity,
                  other.is_abstract, other.attributes, other.relationships)
        return mine == theirs
    @property
    def super_class_name(self):
        # Root entities inherit directly from NSManagedObject.
        parent = self.parent_entity
        return parent.represented_class_name if parent else 'NSManagedObject'
    @property
    def to_many_relationships(self):
        return [rel for rel in self.relationships if rel.is_to_many]
    @property
    def to_one_relationships(self):
        return [rel for rel in self.relationships if rel.is_to_one]
| class Entity(object):
def __init__(self, name, represented_class_name=None, parent_entity=None, is_abstract=False, attributes=None, relationships=None):
self.name = name
self.represented_class_name = represented_class_name or name
self.parent_entity = parent_entity
self.is_abstract = is_abstract
self.attributes = attributes or []
self.relationships = relationships or []
def __str__(self):
return self.name
def __repr__(self):
return '<Entity {}>'.format(self.name)
def __eq__(self, other):
return isinstance(other, Entity) and other.name == self.name and (other.represented_class_name == self.represented_class_name) and (other.parent_entity == self.parent_entity) and (other.is_abstract == self.is_abstract) and (other.attributes == self.attributes) and (other.relationships == self.relationships)
@property
def super_class_name(self):
if self.parent_entity:
return self.parent_entity.represented_class_name
return 'NSManagedObject'
@property
def to_many_relationships(self):
return [relationship for relationship in self.relationships if relationship.is_to_many]
@property
def to_one_relationships(self):
return [relationship for relationship in self.relationships if relationship.is_to_one] |
class Solution:
    """LeetCode 399 "Evaluate Division": answer a/b queries given a set of
    known ratios, by DFS over a weighted ratio graph."""
    def answer(self, current, end, scalar):
        # Depth-first search from `current` toward `end`, multiplying edge
        # weights into `scalar`; returns -1 when no path exists.
        if current == end:
            return scalar
        self.visited.add(current)
        for neighbor, weight in self.graph.get(current, []):
            if neighbor in self.visited:
                continue
            found = self.answer(neighbor, end, scalar * weight)
            if found != -1:
                return found
        return -1
    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        # Build the adjacency list: for num/den = v, store edges in both
        # directions with reciprocal weights.
        self.graph = {}
        self.visited = set()
        for (num, den), value in zip(equations, values):
            self.graph.setdefault(num, []).append((den, 1 / value))
            self.graph.setdefault(den, []).append((num, value))
        results = []
        for num, den in queries:
            self.visited = set()
            if num not in self.graph or den not in self.graph:
                # Unknown variable: no ratio can be derived.
                results.append(-1)
            elif num == den:
                results.append(1)
            else:
                # answer() accumulates reciprocal weights, hence the 1/…;
                # an unreachable target yields 1/-1 == -1.0.
                results.append(1 / self.answer(num, den, 1))
        return results
| class Solution:
def answer(self, current, end, scalar):
if current == end:
return scalar
self.visited.add(current)
if current in self.graph:
for i in self.graph[current]:
if i[0] not in self.visited:
a = self.answer(i[0], end, scalar * i[1])
if a != -1:
return a
return -1
def calc_equation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
(self.graph, self.visited) = ({}, set())
for i in range(len(equations)):
if equations[i][0] not in self.graph:
self.graph[equations[i][0]] = []
if equations[i][1] not in self.graph:
self.graph[equations[i][1]] = []
self.graph[equations[i][0]].append((equations[i][1], 1 / values[i]))
self.graph[equations[i][1]].append((equations[i][0], values[i]))
v = []
for i in queries:
self.visited = set()
if i[0] not in self.graph or i[1] not in self.graph:
v.append(-1)
continue
v.append(1 / self.answer(i[0], i[1], 1) if i[0] != i[1] else 1)
return v |
def isIsosceles(x, y, z):
    """Return True when the side lengths x, y, z are all positive and at
    least two of them are equal; otherwise False. (Does not check the
    triangle inequality, matching the original contract.)"""
    if min(x, y, z) <= 0:
        return False
    return x == y or y == z or x == z
print(isIsosceles(-2, -2, 3))
print(isIsosceles(2, 3, 2))
def isIsosceles(x, y, z):
    """Second take on the check: positive sides plus at least one equal
    pair, detected by counting distinct lengths."""
    sides_positive = x > 0 and y > 0 and z > 0
    # Fewer than three distinct values means some pair is equal.
    has_equal_pair = len({x, y, z}) < 3
    return sides_positive and has_equal_pair
print(isIsosceles(-2, -2, 3))
print(isIsosceles(2, 3, 2))
| def is_isosceles(x, y, z):
if x <= 0 or y <= 0 or z <= 0:
return False
if x == y:
return True
if y == z:
return True
if x == z:
return True
else:
return False
print(is_isosceles(-2, -2, 3))
print(is_isosceles(2, 3, 2))
def is_isosceles(x, y, z):
if x <= 0 or y <= 0 or z <= 0:
return False
elif x == y or y == z or x == z:
return True
else:
return False
print(is_isosceles(-2, -2, 3))
print(is_isosceles(2, 3, 2)) |
class CmdResponse:
    """Outcome of a command invocation: a success flag, a content type,
    a payload dict (always carrying the status) and an optional raw
    content string."""
    __status: bool
    __type: str
    __data: dict
    __content: str
    def __init__(self, status: bool, contentType: str):
        self.__status = status
        self.__type = contentType
        # The payload starts out holding only the status flag.
        self.__data = dict(status=status)
        self.__content = None
    def setData(self, data: object):
        # Extra payload is stored under the fixed 'data' key.
        self.__data.update(data=data)
    def setContent(self, content: str):
        self.__content = content
    def getContent(self) -> str:
        return self.__content
    def getData(self) -> dict:
        return self.__data
    def getContentType(self) -> str:
        return self.__type
    def getStatus(self) -> bool:
        return self.__status
| class Cmdresponse:
__status: bool
__type: str
__data: dict
__content: str
def __init__(self, status: bool, contentType: str):
self.__status = status
self.__type = contentType
self.__data = {'status': status}
self.__content = None
def set_data(self, data: object):
self.__data['data'] = data
def set_content(self, content: str):
self.__content = content
def get_content(self) -> str:
return self.__content
def get_data(self) -> dict:
return self.__data
def get_content_type(self) -> str:
return self.__type
def get_status(self) -> bool:
return self.__status |
with open("pytest_results.xml", "w") as f:
f.write("<?xml version='1.0' encoding='utf-8'?>")
f.write("<test>")
f.write("<!-- No tests executed -->")
f.write("</test>")
| with open('pytest_results.xml', 'w') as f:
f.write("<?xml version='1.0' encoding='utf-8'?>")
f.write('<test>')
f.write('<!-- No tests executed -->')
f.write('</test>') |
def exec(path: str, data: bytes) -> None:
    """Write *data* verbatim to the file at *path* (binary mode, truncating).

    NOTE(review): the name shadows the builtin ``exec`` in this module;
    kept unchanged for interface compatibility with existing callers.
    """
    # Fix: the previous open/write/close leaked the handle if write()
    # raised; the context manager guarantees the file is closed.
    with open(path, 'wb') as fs:
        fs.write(data)
| def exec(path: str, data: bytes) -> None:
fs = open(path, 'wb')
fs.write(data)
fs.close() |
# model
# Reference ("golden") computation for a TRANSPOSE_CONV_EX test case:
# a 1x4x4x1 input, 1x3x3x1 kernel and stride 2 producing a 1x8x8x1 output.
batch = 1
in_chans = 1
out_chans = 1
in_rows = 4
in_cols = 4
out_rows = 8
out_cols = 8
ker_rows = 3
ker_cols = 3
stride = 2
# pad is 0 (left: 0 right: 1 top: 0 bottom: 1)
# Input and kernel are filled with ascending integers; output starts at 0.
# All tables are flattened row-major in NHWC order: ((n*H + h)*W + w)*C + c.
input_table = [x for x in range(batch * in_rows * in_cols * in_chans)]
kernel_table = [x for x in range(out_chans * ker_rows * ker_cols * in_chans)]
out_table = [0 for x in range(batch * out_rows * out_cols * out_chans)]
# Transposed convolution computed by scattering: each input element
# multiplies the whole kernel and accumulates into the output window
# anchored at (row*stride, col*stride); out-of-range taps are skipped.
for i in range(batch):
    for j in range(in_rows):
        for k in range(in_cols):
            for l in range(in_chans):
                out_row_origin = j * stride
                out_col_origin = k * stride
                input_value = input_table[((i * in_rows + j) * in_cols + k) * in_chans + l]
                for m in range(ker_rows):
                    for n in range(ker_cols):
                        for o in range(out_chans):
                            out_row = out_row_origin + m
                            out_col = out_col_origin + n
                            if (out_row < out_rows) and (out_col < out_cols) and (out_row >= 0) and (out_col >= 0):
                                kernel_value = kernel_table[((o * ker_rows + m) * ker_cols + n) * in_chans + l]
                                out_table[((i * out_rows + out_row) * out_cols + out_col) * out_chans + o] += (input_value * kernel_value)
# Model description below — Model, Input, Parameter, Int32Scalar, Output and
# Example are presumably provided by the NN test-generator harness that runs
# this file (they are not defined here); confirm against the harness.
model = Model()
i0 = Input("op_shape", "TENSOR_INT32", "{4}")
weights = Parameter("ker", "TENSOR_FLOAT32", "{1, 3, 3, 1}", kernel_table)
i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}" )
pad = Int32Scalar("pad_same", 1)
s_x = Int32Scalar("stride_x", 2)
s_y = Int32Scalar("stride_y", 2)
i2 = Output("op", "TENSOR_FLOAT32", "{1, 8, 8, 1}")
model = model.Operation("TRANSPOSE_CONV_EX", i0, weights, i1, pad, s_x, s_y).To(i2)
# Example 1. Input in operand 0,
input0 = {i0: # output shape
          [1, 8, 8, 1],
          i1: # input 0
          input_table}
output0 = {i2: # output 0
           out_table}
# Instantiate an example
Example((input0, output0))
| batch = 1
in_chans = 1
out_chans = 1
in_rows = 4
in_cols = 4
out_rows = 8
out_cols = 8
ker_rows = 3
ker_cols = 3
stride = 2
input_table = [x for x in range(batch * in_rows * in_cols * in_chans)]
kernel_table = [x for x in range(out_chans * ker_rows * ker_cols * in_chans)]
out_table = [0 for x in range(batch * out_rows * out_cols * out_chans)]
for i in range(batch):
for j in range(in_rows):
for k in range(in_cols):
for l in range(in_chans):
out_row_origin = j * stride
out_col_origin = k * stride
input_value = input_table[((i * in_rows + j) * in_cols + k) * in_chans + l]
for m in range(ker_rows):
for n in range(ker_cols):
for o in range(out_chans):
out_row = out_row_origin + m
out_col = out_col_origin + n
if out_row < out_rows and out_col < out_cols and (out_row >= 0) and (out_col >= 0):
kernel_value = kernel_table[((o * ker_rows + m) * ker_cols + n) * in_chans + l]
out_table[((i * out_rows + out_row) * out_cols + out_col) * out_chans + o] += input_value * kernel_value
model = model()
i0 = input('op_shape', 'TENSOR_INT32', '{4}')
weights = parameter('ker', 'TENSOR_FLOAT32', '{1, 3, 3, 1}', kernel_table)
i1 = input('in', 'TENSOR_FLOAT32', '{1, 4, 4, 1}')
pad = int32_scalar('pad_same', 1)
s_x = int32_scalar('stride_x', 2)
s_y = int32_scalar('stride_y', 2)
i2 = output('op', 'TENSOR_FLOAT32', '{1, 8, 8, 1}')
model = model.Operation('TRANSPOSE_CONV_EX', i0, weights, i1, pad, s_x, s_y).To(i2)
input0 = {i0: [1, 8, 8, 1], i1: input_table}
output0 = {i2: out_table}
example((input0, output0)) |
def main():
    """Read a 3x3 grid of ints; print 'Yes' if it can be written as a_i + b_j, else 'No'."""
    grid = [list(map(int, input().split())) for _ in range(3)]
    # c[i][j] = a[i] + b[j] holds iff every 2x2 sub-rectangle has equal
    # diagonal sums; the cyclic (row-1, row) pairs cover all needed checks.
    for row in range(3):
        diag = grid[row - 1][row - 1] + grid[row][row]
        anti = grid[row - 1][row] + grid[row][row - 1]
        if diag != anti:
            print('No')
            exit()
    print('Yes')
if __name__ == '__main__':
    main()
| def main():
css = [[*map(int, input().split())] for _ in range(3)]
for i in range(3):
if css[i - 1][i - 1] + css[i][i] != css[i - 1][i] + css[i][i - 1]:
print('No')
exit()
print('Yes')
if __name__ == '__main__':
main() |
'''
This is a math Module
Do Some thing
'''
def add(a=0, b=0):
    """Return the sum of a and b (both default to 0)."""
    return a + b
def minus(a=0, b=0):
    """Return a minus b (both default to 0)."""
    return a - b
def multy(a=1, b=1):
    """Return the product of a and b (both default to 1)."""
    return a * b
| """
This is a math Module
Do Some thing
"""
def add(a=0, b=0):
return a + b
def minus(a=0, b=0):
return a - b
def multy(a=1, b=1):
return a * b |
class MyClass:
    # Class attribute shared by all instances until shadowed.
    data = 3
a = MyClass()
b = MyClass()
# Assigning via the instance creates an instance attribute that shadows the
# class attribute for `a` only; `b` still resolves `data` on the class.
a.data = 5
print(a.data)  # 5 (instance attribute)
print(b.data)  # 3 (class attribute, unchanged)
| class Myclass:
data = 3
a = my_class()
b = my_class()
a.data = 5
print(a.data)
print(b.data) |
class Solution:
    def findLHS(self, nums) -> int:
        """Return the length of the longest harmonious subsequence of nums.

        A harmonious subsequence is one whose maximum and minimum differ by
        exactly 1. Counting occurrences lets each adjacent value pair
        (v, v + 1) be scored in O(1), giving O(n) overall instead of the
        original O(n log n) sort-and-scan (which also relied on a -1
        sentinel for the previous value).
        """
        from collections import Counter  # local import: keeps module-level deps unchanged
        counts = Counter(nums)
        best = 0
        for value, count in counts.items():
            # Only (value, value + 1) pairs can form a harmonious subsequence.
            if value + 1 in counts:
                best = max(best, count + counts[value + 1])
        return best
# Smoke test: in [1,1,1,1,2] the whole list is harmonious (min 1, max 2),
# so this prints 5.
slu = Solution()
print(slu.findLHS([1, 1, 1, 1, 2]))
| class Solution:
def find_lhs(self, nums) -> int:
nums.sort()
(pre_num, pre_length) = (-1, 0)
(cur_num, cur_length) = (-1, 0)
i = 0
max_length = 0
while i < len(nums):
if nums[i] == cur_num:
cur_length += 1
else:
if cur_num == pre_num + 1:
max_length = max(max_length, cur_length + pre_length)
pre_num = cur_num
pre_length = cur_length
cur_num = nums[i]
cur_length = 1
i += 1
if cur_num == pre_num + 1:
max_length = max(max_length, cur_length + pre_length)
return max_length
slu = solution()
print(slu.findLHS([1, 1, 1, 1, 2])) |
def validate_count(d):
    """Print how many entries have a letter count strictly inside their bounds.

    Each entry is [[lo, hi], letter, password]; an entry is counted when
    lo < password.count(letter) < hi. NOTE(review): the bounds are strict
    on both sides, matching the original expression — AoC 2020 day 2 is
    normally inclusive; confirm against the recorded answer (410).
    """
    valid = 0
    for entry in d:
        lo, hi = entry[0]
        occurrences = entry[2].count(entry[1])
        if lo < occurrences < hi:
            valid += 1
    print(valid)
def validate_position(d):
    """Print how many entries have the letter at exactly one of two 1-based positions.

    Each entry is [[p1, p2], letter, password]; an entry is counted when
    password[p1-1] == letter XOR password[p2-1] == letter.
    """
    valid = 0
    for entry in d:
        first, second = entry[0]
        at_first = entry[2][first - 1] == entry[1]
        at_second = entry[2][second - 1] == entry[1]
        if at_first != at_second:  # boolean != is XOR
            valid += 1
    print(valid)
if __name__ == "__main__":
    with open('2020/input/day02.txt') as f:
        # Each line looks like "1-3 a: abcde": split on spaces, parse the
        # "lo-hi" range into a two-int list, keep the policy letter, and
        # strip the trailing newline from the password field.
        database = [[[*map(int, (e := entry.split(' '))[0].split('-'))], e[1][0], e[2].replace('\n', '')] for entry in f.readlines()]
    validate_count(database) # 410
validate_position(database) # 694 | def validate_count(d):
print(len([0 for e in d if (c := e[2].count(e[1])) > e[0][0] and c < e[0][1]]))
def validate_position(d):
print(len([0 for e in d if (e[2][e[0][0] - 1] == e[1]) ^ (e[2][e[0][1] - 1] == e[1])]))
if __name__ == '__main__':
with open('2020/input/day02.txt') as f:
database = [[[*map(int, (e := entry.split(' '))[0].split('-'))], e[1][0], e[2].replace('\n', '')] for entry in f.readlines()]
validate_count(database)
validate_position(database) |
# Copyright 2017 Brocade Communications Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class pyfos_type():
    """Type descriptor used to validate and coerce attribute values.

    Holds one of the type_* constants and coerces raw values — scalars or
    lists of scalars — to the corresponding Python type on peek.
    """
    type_na = 0
    type_int = 1
    type_wwn = 2
    type_str = 3
    type_bool = 4
    type_ip_addr = 5
    type_ipv6_addr = 6
    type_zoning_name = 7
    type_domain_port = 8

    def __init__(self, pyfos_type):
        self.pyfos_type = pyfos_type

    def get_type(self):
        """Return the type_* constant this descriptor was built with."""
        return self.pyfos_type

    def vaildate_set(self, value):
        # NOTE(review): name keeps its historical misspelling ("vaildate")
        # because callers elsewhere depend on it; always accepts the value.
        return True

    def __validate_peek_help(self, cur_type, value):
        """Coerce value to cur_type; return (ok, coerced_value).

        None passes through unchanged. Unhandled types (e.g. type_ipv6_addr,
        which the original chain never matched) are rejected as (False, None).
        """
        if value is None:
            return True, None
        if cur_type == pyfos_type.type_na:
            return True, value
        if cur_type == pyfos_type.type_int:
            return True, int(value)
        # All string-backed types share one str() coercion. This collapses
        # the original copy-paste chain, which also contained a duplicated
        # (dead) elif branch for type_wwn.
        if cur_type in (pyfos_type.type_wwn,
                        pyfos_type.type_str,
                        pyfos_type.type_ip_addr,
                        pyfos_type.type_zoning_name,
                        pyfos_type.type_domain_port):
            return True, str(value)
        if cur_type == pyfos_type.type_bool:
            return True, bool(value)
        return False, None

    def validate_peek(self, value):
        """Validate/coerce value (scalar or list); return (ok, coerced).

        For lists, elements that fail coercion are reported and dropped;
        the call still returns (True, coerced_elements).
        """
        if isinstance(value, list):
            # Fixed: the original tested `not list` — the builtin, which is
            # always truthy — instead of the value itself.
            if not value:
                return True, value
            ret_list = []
            for cur_value in value:
                correct_type, cast_value = self.__validate_peek_help(
                    self.pyfos_type, cur_value)
                if correct_type is True:
                    ret_list.append(cast_value)
                else:
                    print("invalid type", value, cur_value, self.pyfos_type)
            return True, ret_list
        else:
            return self.__validate_peek_help(self.pyfos_type, value)
| class Pyfos_Type:
type_na = 0
type_int = 1
type_wwn = 2
type_str = 3
type_bool = 4
type_ip_addr = 5
type_ipv6_addr = 6
type_zoning_name = 7
type_domain_port = 8
def __init__(self, pyfos_type):
self.pyfos_type = pyfos_type
def get_type(self):
return self.pyfos_type
def vaildate_set(self, value):
return True
def __validate_peek_help(self, cur_type, value):
if value is None:
return (True, None)
elif cur_type == pyfos_type.type_int:
cur_value = int(value)
if isinstance(cur_value, int):
return (True, cur_value)
elif cur_type == pyfos_type.type_wwn:
cur_value = str(value)
if isinstance(cur_value, str):
return (True, cur_value)
elif cur_type == pyfos_type.type_wwn:
cur_value = str(value)
if isinstance(cur_value, str):
return (True, cur_value)
elif cur_type == pyfos_type.type_str:
cur_value = str(value)
if isinstance(cur_value, str):
return (True, cur_value)
elif cur_type == pyfos_type.type_bool:
cur_value = bool(value)
if isinstance(cur_value, bool):
return (True, cur_value)
elif cur_type == pyfos_type.type_ip_addr:
cur_value = str(value)
if isinstance(cur_value, str):
return (True, cur_value)
elif cur_type == pyfos_type.type_zoning_name:
cur_value = str(value)
if isinstance(cur_value, str):
return (True, cur_value)
elif cur_type == pyfos_type.type_domain_port:
cur_value = str(value)
if isinstance(cur_value, str):
return (True, cur_value)
if cur_type == pyfos_type.type_na:
return (True, value)
else:
return (False, None)
def validate_peek(self, value):
if isinstance(value, list):
if not list:
return (True, value)
ret_list = []
for cur_value in value:
(correct_type, cast_value) = self.__validate_peek_help(self.pyfos_type, cur_value)
if correct_type is True:
ret_list.append(cast_value)
else:
print('invalid type', value, cur_value, self.pyfos_type)
return (True, ret_list)
else:
return self.__validate_peek_help(self.pyfos_type, value) |
{
"includes": [
"../common.gypi"
],
"targets": [
{
"configurations": {
"Release": {
"defines": [
"NDEBUG"
]
}
},
"include_dirs": [
"apr-iconv/include"
],
"sources": [
"dependencies/apr-iconv/lib/iconv.c",
"dependencies/apr-iconv/lib/iconv_ces.c",
"dependencies/apr-iconv/lib/iconv_ces_euc.c",
"dependencies/apr-iconv/lib/iconv_ces_iso2022.c",
"dependencies/apr-iconv/lib/iconv_int.c",
"dependencies/apr-iconv/lib/iconv_module.c",
"dependencies/apr-iconv/lib/iconv_uc.c"
],
"target_name": "apr-iconv",
}
]
}
| {'includes': ['../common.gypi'], 'targets': [{'configurations': {'Release': {'defines': ['NDEBUG']}}, 'include_dirs': ['apr-iconv/include'], 'sources': ['dependencies/apr-iconv/lib/iconv.c', 'dependencies/apr-iconv/lib/iconv_ces.c', 'dependencies/apr-iconv/lib/iconv_ces_euc.c', 'dependencies/apr-iconv/lib/iconv_ces_iso2022.c', 'dependencies/apr-iconv/lib/iconv_int.c', 'dependencies/apr-iconv/lib/iconv_module.c', 'dependencies/apr-iconv/lib/iconv_uc.c'], 'target_name': 'apr-iconv'}]} |
def flatten_forest(forest):
    """Return all cells of the 2-D forest as one flat list, row by row."""
    return [cell for row in forest for cell in row]
def deflatten_forest(forest_1d, rows):
    """Rebuild a 2-D forest of `rows` rows from its flattened form.

    The column count is derived from the flat length. Fixed: the original
    iterated `range(cols)` instead of `range(rows)`, producing the wrong
    number of rows whenever the grid was not square.
    """
    cols = len(forest_1d) // rows
    forest_2d = []
    for i in range(rows):
        forest_2d.append(forest_1d[i * cols: (i + 1) * cols])
    return forest_2d
| def flatten_forest(forest):
flat_forest = []
for row in forest:
flat_forest += row
return flat_forest
def deflatten_forest(forest_1d, rows):
cols = len(forest_1d) // rows
forest_2d = []
for i in range(cols):
forest_slice = forest_1d[i * cols:(i + 1) * cols]
forest_2d.append(forest_slice)
return forest_2d |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Temporary variable which stores settings during the backup process
backup_perfdata_enabled = True

def performancedata_restore(pre_restore = True):
    """Toggle core performance-data processing around a restore.

    Called with pre_restore=True before restoring and False afterwards.
    The pre-call remembers (in the module global backup_perfdata_enabled)
    whether perfdata was enabled — None when the core is offline — so the
    post-call only re-enables it when it was on to begin with. Returns an
    empty list so it can serve directly as a pre/post_restore hook.
    """
    global backup_perfdata_enabled
    site = config.default_site()
    html.live.set_only_sites([site])
    if pre_restore:
        data = html.live.query("GET status\nColumns: process_performance_data")
        if data:
            backup_perfdata_enabled = data[0][0] == 1
        else:
            backup_perfdata_enabled = None # Core is offline
    # Nothing to do when perfdata was off (False) or state unknown (None).
    if not backup_perfdata_enabled:
        return []
    # Conditional expression replaces the fragile `cond and A or B` idiom.
    command = "DISABLE_PERFORMANCE_DATA" if pre_restore else "ENABLE_PERFORMANCE_DATA"
    html.live.command("[%d] %s" % (int(time.time()), command), site)
    html.live.set_only_sites()
    return []
# Register the available backup/restore domains. A classic (non-OMD)
# installation only covers configuration, personal settings and local auth
# data; an OMD site additionally covers historic data, logs and extensions.
if not defaults.omd_root:
    backup_domains.update({
        "noomd-config": {
            "group": _("Configuration"),
            "title": _("WATO Configuration"),
            "prefix": defaults.default_config_dir,
            "paths": [
                ("dir", "conf.d/wato"),
                ("dir", "multisite.d/wato"),
                ("file", "multisite.d/sites.mk"),
            ],
            "default": True,
        },
        "noomd-personalsettings": {
            "title": _("Personal User Settings and Custom Views"),
            "prefix": defaults.var_dir,
            "paths": [("dir", "web")],
            "default": True,
        },
        "noomd-authorization": {
            "group": _("Configuration"),
            "title": _("Local Authentication Data"),
            "prefix": os.path.dirname(defaults.htpasswd_file),
            "paths": [
                ("file", "htpasswd"),
                ("file", "auth.secret"),
                ("file", "auth.serials"),
            ],
            "cleanup": False,
            "default": True,
        },
    })
else:
    backup_domains.update({
        "check_mk": {
            "group": _("Configuration"),
            "title": _("Hosts, Services, Groups, Timeperiods, Business Intelligence and Monitoring Configuration"),
            "prefix": defaults.default_config_dir,
            "paths": [
                ("file", "liveproxyd.mk"),
                ("file", "main.mk"),
                ("file", "final.mk"),
                ("file", "local.mk"),
                ("file", "mkeventd.mk"),
                ("dir", "conf.d"),
                ("dir", "multisite.d"),
                ("dir", "mkeventd.d"),
                ("dir", "mknotifyd.d"),
            ],
            "default": True,
        },
        # Obsolete domain, kept for old backups; hidden from the backup screen.
        "authorization": {
            "deprecated": True,
            "group": _("Configuration"),
            "title": _("Local Authentication Data"),
            "prefix": os.path.dirname(defaults.htpasswd_file),
            "paths": [
                ("file", "htpasswd"),
                ("file", "auth.secret"),
                ("file", "auth.serials"),
            ],
            "cleanup": False,
            "default": True,
        },
        "authorization_v1": {
            "group": _("Configuration"),
            "title": _("Local Authentication Data"),
            "prefix": defaults.omd_root,
            "paths": [
                ("file", "etc/htpasswd"),
                ("file", "etc/auth.secret"),
                ("file", "etc/auth.serials"),
                ("file", "var/check_mk/web/*/serial.mk"),
            ],
            "cleanup": False,
            "default": True,
        },
        "personalsettings": {
            "title": _("Personal User Settings and Custom Views"),
            "prefix": defaults.var_dir,
            "paths": [("dir", "web")],
            "exclude": ["*/serial.mk"],
            "cleanup": False,
        },
        "autochecks": {
            "group": _("Configuration"),
            "title": _("Automatically Detected Services"),
            "prefix": defaults.autochecksdir,
            "paths": [("dir", "")],
        },
        "snmpwalks": {
            "title": _("Stored SNMP Walks"),
            "prefix": defaults.snmpwalks_dir,
            "paths": [("dir", "")],
        },
        "logwatch": {
            "group": _("Historic Data"),
            "title": _("Logwatch Data"),
            "prefix": defaults.var_dir,
            "paths": [("dir", "logwatch")],
        },
        "mkeventstatus": {
            "group": _("Configuration"),
            "title": _("Event Console Configuration"),
            "prefix": defaults.omd_root,
            "paths": [("dir", "etc/check_mk/mkeventd.d")],
            "default": True,
        },
        "mkeventhistory": {
            "group": _("Historic Data"),
            "title": _("Event Console Archive and Current State"),
            "prefix": defaults.omd_root,
            "paths": [
                ("dir", "var/mkeventd/history"),
                ("file", "var/mkeventd/status"),
                ("file", "var/mkeventd/messages"),
                ("dir", "var/mkeventd/messages-history"),
            ],
        },
        "corehistory": {
            "group": _("Historic Data"),
            "title": _("Monitoring History"),
            "prefix": defaults.omd_root,
            "paths": [
                ("dir", "var/nagios/archive"),
                ("file", "var/nagios/nagios.log"),
                ("dir", "var/icinga/archive"),
                ("file", "var/icinga/icinga.log"),
                ("dir", "var/check_mk/core/archive"),
                ("file", "var/check_mk/core/history"),
            ],
        },
        # Perfdata processing is paused around a restore via the hooks below.
        "performancedata": {
            "group": _("Historic Data"),
            "title": _("Performance Data"),
            "prefix": defaults.omd_root,
            "paths": [
                ("dir", "var/pnp4nagios/perfdata"),
                ("dir", "var/rrdcached"),
                ("dir", "var/check_mk/rrd"),
            ],
            "pre_restore": lambda: performancedata_restore(pre_restore = True),
            "post_restore": lambda: performancedata_restore(pre_restore = False),
            "checksum": False,
        },
        "applicationlogs": {
            "group": _("Historic Data"),
            "title": _("Application Logs"),
            "prefix": defaults.omd_root,
            "paths": [
                ("dir", "var/log"),
                ("file", "var/nagios/livestatus.log"),
                ("dir", "var/pnp4nagios/log"),
            ],
            "checksum": False,
        },
        "snmpmibs": {
            "group": _("Configuration"),
            "title": _("SNMP MIBs"),
            "prefix": defaults.omd_root,
            "paths": [("dir", "local/share/check_mk/mibs")],
        },
        "extensions": {
            "title": _("Extensions in <tt>~/local/</tt> and MKPs"),
            "prefix": defaults.omd_root,
            "paths": [
                ("dir", "var/check_mk/packages"),
                ("dir", "local"),
            ],
            "default": True,
        },
        "dokuwiki": {
            "title": _("Doku Wiki Pages and Settings"),
            "prefix": defaults.omd_root,
            "paths": [("dir", "var/dokuwiki")],
        },
        "nagvis": {
            "title": _("NagVis Maps, Configurations and User Files"),
            "prefix": defaults.omd_root,
            # Site-managed config files must not be restored over.
            "exclude": [
                "etc/nagvis/apache.conf",
                "etc/nagvis/conf.d/authorisation.ini.php",
                "etc/nagvis/conf.d/omd.ini.php",
                "etc/nagvis/conf.d/cookie_auth.ini.php",
                "etc/nagvis/conf.d/urls.ini.php",
            ],
            "paths": [
                ("dir", "local/share/nagvis"),
                ("dir", "etc/nagvis"),
                ("dir", "var/nagvis"),
            ],
        },
    })
| backup_perfdata_enabled = True
def performancedata_restore(pre_restore=True):
global backup_perfdata_enabled
site = config.default_site()
html.live.set_only_sites([site])
if pre_restore:
data = html.live.query('GET status\nColumns: process_performance_data')
if data:
backup_perfdata_enabled = data[0][0] == 1
else:
backup_perfdata_enabled = None
if not backup_perfdata_enabled:
return []
command = pre_restore and 'DISABLE_PERFORMANCE_DATA' or 'ENABLE_PERFORMANCE_DATA'
html.live.command('[%d] %s' % (int(time.time()), command), site)
html.live.set_only_sites()
return []
if not defaults.omd_root:
backup_domains.update({'noomd-config': {'group': _('Configuration'), 'title': _('WATO Configuration'), 'prefix': defaults.default_config_dir, 'paths': [('dir', 'conf.d/wato'), ('dir', 'multisite.d/wato'), ('file', 'multisite.d/sites.mk')], 'default': True}, 'noomd-personalsettings': {'title': _('Personal User Settings and Custom Views'), 'prefix': defaults.var_dir, 'paths': [('dir', 'web')], 'default': True}, 'noomd-authorization': {'group': _('Configuration'), 'title': _('Local Authentication Data'), 'prefix': os.path.dirname(defaults.htpasswd_file), 'paths': [('file', 'htpasswd'), ('file', 'auth.secret'), ('file', 'auth.serials')], 'cleanup': False, 'default': True}})
else:
backup_domains.update({'check_mk': {'group': _('Configuration'), 'title': _('Hosts, Services, Groups, Timeperiods, Business Intelligence and Monitoring Configuration'), 'prefix': defaults.default_config_dir, 'paths': [('file', 'liveproxyd.mk'), ('file', 'main.mk'), ('file', 'final.mk'), ('file', 'local.mk'), ('file', 'mkeventd.mk'), ('dir', 'conf.d'), ('dir', 'multisite.d'), ('dir', 'mkeventd.d'), ('dir', 'mknotifyd.d')], 'default': True}, 'authorization': {'deprecated': True, 'group': _('Configuration'), 'title': _('Local Authentication Data'), 'prefix': os.path.dirname(defaults.htpasswd_file), 'paths': [('file', 'htpasswd'), ('file', 'auth.secret'), ('file', 'auth.serials')], 'cleanup': False, 'default': True}, 'authorization_v1': {'group': _('Configuration'), 'title': _('Local Authentication Data'), 'prefix': defaults.omd_root, 'paths': [('file', 'etc/htpasswd'), ('file', 'etc/auth.secret'), ('file', 'etc/auth.serials'), ('file', 'var/check_mk/web/*/serial.mk')], 'cleanup': False, 'default': True}, 'personalsettings': {'title': _('Personal User Settings and Custom Views'), 'prefix': defaults.var_dir, 'paths': [('dir', 'web')], 'exclude': ['*/serial.mk'], 'cleanup': False}, 'autochecks': {'group': _('Configuration'), 'title': _('Automatically Detected Services'), 'prefix': defaults.autochecksdir, 'paths': [('dir', '')]}, 'snmpwalks': {'title': _('Stored SNMP Walks'), 'prefix': defaults.snmpwalks_dir, 'paths': [('dir', '')]}, 'logwatch': {'group': _('Historic Data'), 'title': _('Logwatch Data'), 'prefix': defaults.var_dir, 'paths': [('dir', 'logwatch')]}, 'mkeventstatus': {'group': _('Configuration'), 'title': _('Event Console Configuration'), 'prefix': defaults.omd_root, 'paths': [('dir', 'etc/check_mk/mkeventd.d')], 'default': True}, 'mkeventhistory': {'group': _('Historic Data'), 'title': _('Event Console Archive and Current State'), 'prefix': defaults.omd_root, 'paths': [('dir', 'var/mkeventd/history'), ('file', 'var/mkeventd/status'), ('file', 
'var/mkeventd/messages'), ('dir', 'var/mkeventd/messages-history')]}, 'corehistory': {'group': _('Historic Data'), 'title': _('Monitoring History'), 'prefix': defaults.omd_root, 'paths': [('dir', 'var/nagios/archive'), ('file', 'var/nagios/nagios.log'), ('dir', 'var/icinga/archive'), ('file', 'var/icinga/icinga.log'), ('dir', 'var/check_mk/core/archive'), ('file', 'var/check_mk/core/history')]}, 'performancedata': {'group': _('Historic Data'), 'title': _('Performance Data'), 'prefix': defaults.omd_root, 'paths': [('dir', 'var/pnp4nagios/perfdata'), ('dir', 'var/rrdcached'), ('dir', 'var/check_mk/rrd')], 'pre_restore': lambda : performancedata_restore(pre_restore=True), 'post_restore': lambda : performancedata_restore(pre_restore=False), 'checksum': False}, 'applicationlogs': {'group': _('Historic Data'), 'title': _('Application Logs'), 'prefix': defaults.omd_root, 'paths': [('dir', 'var/log'), ('file', 'var/nagios/livestatus.log'), ('dir', 'var/pnp4nagios/log')], 'checksum': False}, 'snmpmibs': {'group': _('Configuration'), 'title': _('SNMP MIBs'), 'prefix': defaults.omd_root, 'paths': [('dir', 'local/share/check_mk/mibs')]}, 'extensions': {'title': _('Extensions in <tt>~/local/</tt> and MKPs'), 'prefix': defaults.omd_root, 'paths': [('dir', 'var/check_mk/packages'), ('dir', 'local')], 'default': True}, 'dokuwiki': {'title': _('Doku Wiki Pages and Settings'), 'prefix': defaults.omd_root, 'paths': [('dir', 'var/dokuwiki')]}, 'nagvis': {'title': _('NagVis Maps, Configurations and User Files'), 'prefix': defaults.omd_root, 'exclude': ['etc/nagvis/apache.conf', 'etc/nagvis/conf.d/authorisation.ini.php', 'etc/nagvis/conf.d/omd.ini.php', 'etc/nagvis/conf.d/cookie_auth.ini.php', 'etc/nagvis/conf.d/urls.ini.php'], 'paths': [('dir', 'local/share/nagvis'), ('dir', 'etc/nagvis'), ('dir', 'var/nagvis')]}}) |
def create_mine_field(n, m, mines):
    """Build an n x m grid of zeros and mark each 1-based (x, y) mine with '*'."""
    field = [[0] * m for _ in range(n)]
    for x, y in mines:
        field[x - 1][y - 1] = '*'
    return field
def neighbours(i, j, m):
    """Count '*' cells among the up-to-8 neighbours of (i, j) in grid m."""
    count = 0
    for x in (i - 1, i, i + 1):
        if not 0 <= x < len(m):
            continue
        for y in (j - 1, j, j + 1):
            if (x, y) == (i, j) or not 0 <= y < len(m[x]):
                continue
            if m[x][y] == '*':
                count += 1
    return count
def check_field(mine_field, n, m):
    """Replace every non-mine cell of the n x m grid with its adjacent-mine count (in place)."""
    for row in range(n):
        for col in range(m):
            if mine_field[row][col] != '*':
                mine_field[row][col] = neighbours(i=row, j=col, m=mine_field)
# Read the grid size (n rows, m cols, k mines) and the k 1-based mine coordinates.
with open('input.txt') as file:
    lines = file.readlines()
n, m, k = list(map(int, lines[0].split()))
mines = []
for line in lines[1::]:
    mines.append(list(map(int, line.split())))
# Build the field, fill in neighbour counts, then write one row per line.
mine_field = create_mine_field(n, m, mines)
check_field(mine_field, n, m)
with open('output.txt', 'w') as file:
    rows = []
    for row in mine_field:
        line = f"{' '.join([str(item) for item in row])}\n"
        rows.append(line)
    file.writelines(rows)
| def create_mine_field(n, m, mines):
mine_field = [[0 for _ in range(m)] for _ in range(n)]
for mine in mines:
(x, y) = mine
mine_field[x - 1][y - 1] = '*'
return mine_field
def neighbours(i, j, m):
nearest = [m[x][y] for x in [i - 1, i, i + 1] for y in [j - 1, j, j + 1] if x in range(0, len(m)) and y in range(0, len(m[x])) and ((x, y) != (i, j))]
nearest_count = nearest.count('*')
return nearest_count
def check_field(mine_field, n, m):
for x in range(n):
for y in range(m):
if mine_field[x][y] == '*':
continue
else:
mine_field[x][y] = neighbours(i=x, j=y, m=mine_field)
with open('input.txt') as file:
lines = file.readlines()
(n, m, k) = list(map(int, lines[0].split()))
mines = []
for line in lines[1:]:
mines.append(list(map(int, line.split())))
mine_field = create_mine_field(n, m, mines)
check_field(mine_field, n, m)
with open('output.txt', 'w') as file:
rows = []
for row in mine_field:
line = f"{' '.join([str(item) for item in row])}\n"
rows.append(line)
file.writelines(rows) |
# Draw a 39-character-wide hollow box: a top border, five empty rows, a bottom border.
line = '-' * 39
blank = '|' + ' ' * 37 + '|'
print(line)
for _ in range(5):
    print(blank)
print(line)
| line = '-' * 39
blank = '|' + ' ' * 37 + '|'
print(line)
print(blank)
print(blank)
print(blank)
print(blank)
print(blank)
print(line) |
class Solution:
    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:
        """Return the median of two sorted arrays in O(log(min(n, m))).

        Binary-searches a partition index i of the shorter array a (and the
        matching j of b) so the combined left halves hold (n + m + 1) // 2
        elements and every left element <= every right element. Relies on
        the module-level maximum/minimum helpers defined below this class.
        """
        # Make a the shorter array so the search space is minimal.
        if len(a:=nums1) > len(b:=nums2):
            a, b = b, a
        n = len(a)
        m = len(b)
        median, i, j = 0, 0, 0
        min_index = 0
        max_index = n
        while (min_index <= max_index) :
            i = int((min_index + max_index) / 2)
            j = int(((n + m + 1) / 2) - i)
            # b's left half tops a's right half: i must move right.
            if (i < n and j > 0 and b[j - 1] > a[i]) :
                min_index = i + 1
            # a's left half tops b's right half: i must move left.
            elif (i > 0 and j < m and b[j] < a[i - 1]) :
                max_index = i - 1
            else :
                # Valid partition: the left-half maximum is the (left) median.
                if (i == 0) :
                    median = b[j - 1]
                elif (j == 0) :
                    median = a[i - 1]
                else :
                    median = maximum(a[i - 1], b[j - 1])
                break
        # Odd total length: the left-half maximum alone is the median.
        if ((n + m) % 2 == 1) :
            return median
        # Even total length: average with the smallest right-half element,
        # which comes from b when a is exhausted (and vice versa).
        if (i == n) :
            return ((median + b[j]) / 2.0)
        if (j == m) :
            return ((median + a[i]) / 2.0)
        return ((median + minimum(a[i], b[j])) / 2.0)
def maximum(a, b):
    """Return the larger of a and b (b on ties, matching the original)."""
    if a > b:
        return a
    return b
def minimum(a, b) :
return a if a < b else b | class Solution:
def find_median_sorted_arrays(self, nums1: List[int], nums2: List[int]) -> float:
if len((a := nums1)) > len((b := nums2)):
(a, b) = (b, a)
n = len(a)
m = len(b)
(median, i, j) = (0, 0, 0)
min_index = 0
max_index = n
while min_index <= max_index:
i = int((min_index + max_index) / 2)
j = int((n + m + 1) / 2 - i)
if i < n and j > 0 and (b[j - 1] > a[i]):
min_index = i + 1
elif i > 0 and j < m and (b[j] < a[i - 1]):
max_index = i - 1
else:
if i == 0:
median = b[j - 1]
elif j == 0:
median = a[i - 1]
else:
median = maximum(a[i - 1], b[j - 1])
break
if (n + m) % 2 == 1:
return median
if i == n:
return (median + b[j]) / 2.0
if j == m:
return (median + a[i]) / 2.0
return (median + minimum(a[i], b[j])) / 2.0
def maximum(a, b):
return a if a > b else b
def minimum(a, b):
return a if a < b else b |
'''
Python function to check whether a number is divisible by another number.
Accept two integers values form the user.
'''
def multiple(m, n):
    """Return True if m is evenly divisible by n, else False.

    The redundant `True if ... else False` wrapper is dropped: the
    comparison already yields a bool.
    """
    return m % n == 0
# Demo: 20 is divisible by 5 (True); 7 is not divisible by 2 (False).
print(multiple(20, 5))
print(multiple(7, 2))
| """
Python function to check whether a number is divisible by another number.
Accept two integers values form the user.
"""
def multiple(m, n):
    """Return True when ``m`` is divisible by ``n``, False otherwise."""
    return m % n == 0
# Demo: 20 is divisible by 5 (True); 7 is not divisible by 2 (False).
print(multiple(20, 5))
print(multiple(7, 2))
#
# PySNMP MIB module MISSION-CRITICAL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MISSION-CRITICAL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:12:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, TimeTicks, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, MibIdentifier, Bits, NotificationType, enterprises, Gauge32, Counter32, Unsigned32, IpAddress, Integer32, ModuleIdentity, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "TimeTicks", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "MibIdentifier", "Bits", "NotificationType", "enterprises", "Gauge32", "Counter32", "Unsigned32", "IpAddress", "Integer32", "ModuleIdentity", "ObjectIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
missionCritical = MibIdentifier((1, 3, 6, 1, 4, 1, 2349))
mcsCompanyInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 1))
mcsSoftware = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2))
eemProductInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2, 1))
omProductInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2, 2))
ownershipDetails = MibScalar((1, 3, 6, 1, 4, 1, 2349, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ownershipDetails.setStatus('mandatory')
if mibBuilder.loadTexts: ownershipDetails.setDescription('Details of the company providing this MIB')
contactDetails = MibScalar((1, 3, 6, 1, 4, 1, 2349, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: contactDetails.setStatus('mandatory')
if mibBuilder.loadTexts: contactDetails.setDescription('Contact responsible for maintaining this MIB')
eemService = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1))
version = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: version.setStatus('mandatory')
if mibBuilder.loadTexts: version.setDescription('The version of the EEM Agent running')
primaryServer = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: primaryServer.setStatus('mandatory')
if mibBuilder.loadTexts: primaryServer.setDescription('The Primary Server for this EEM Agent')
serviceState = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: serviceState.setStatus('mandatory')
if mibBuilder.loadTexts: serviceState.setDescription('State of the service. Running is 1, stopped is 2')
serviceUpTime = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: serviceUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: serviceUpTime.setDescription('No. of milliseconds since the service was started')
redTrapCount = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: redTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts: redTrapCount.setDescription('The number of red alert traps sent since the service was started')
orangeTrapCount = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: orangeTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts: orangeTrapCount.setDescription('The number of orange alert traps sent since the service was started')
amberTrapCount = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: amberTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts: amberTrapCount.setDescription('The number of yellow alert traps sent since the service was started')
blueTrapCount = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: blueTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts: blueTrapCount.setDescription('The number of blue alert traps sent since the service was started')
greenTrapCount = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: greenTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts: greenTrapCount.setDescription('The number of Green Alert Traps since the service was started')
eemLastTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2))
trapTime = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 1), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapTime.setStatus('deprecated')
if mibBuilder.loadTexts: trapTime.setDescription('Time of the last trap sent')
alertLevel = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("red", 1), ("orange", 2), ("yellow", 3), ("blue", 4), ("green", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: alertLevel.setStatus('mandatory')
if mibBuilder.loadTexts: alertLevel.setDescription('Alert level of the last trap sent. red=1, orange=2, yellow=3, blue=4, green=5')
logType = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 99))).clone(namedValues=NamedValues(("ntevent", 1), ("application", 2), ("snmp", 3), ("wbem", 4), ("activemonitoring", 5), ("performancemonitoring", 6), ("timedevent", 7), ("eem", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logType.setStatus('mandatory')
if mibBuilder.loadTexts: logType.setDescription('Log type generating the last trap sent. system=1,application=2,security=3 (fill in others here) EEM=99')
server = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: server.setStatus('mandatory')
if mibBuilder.loadTexts: server.setDescription('Server generating the last trap sent')
source = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: source.setStatus('mandatory')
if mibBuilder.loadTexts: source.setDescription('Source generating the last trap sent')
user = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user.setStatus('mandatory')
if mibBuilder.loadTexts: user.setDescription('User generating the last trap sent')
eventID = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: eventID.setStatus('mandatory')
if mibBuilder.loadTexts: eventID.setDescription('Event ID of the last trap sent')
description = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: description.setStatus('mandatory')
if mibBuilder.loadTexts: description.setDescription('Text description of the last trap sent')
genericTrapNumber = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: genericTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts: genericTrapNumber.setDescription('The generic trap number of the last trap sent')
specificTrapNumber = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: specificTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts: specificTrapNumber.setDescription('The user specific trap number of the last trap sent')
serviceGoingDown = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,2))
if mibBuilder.loadTexts: serviceGoingDown.setDescription('The SeNTry EEM Sender service is stopping.')
serviceComingUp = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,3))
if mibBuilder.loadTexts: serviceComingUp.setDescription('The SeNTry EEM Sender service is starting.')
gathererServiceGoingDown = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,4))
if mibBuilder.loadTexts: gathererServiceGoingDown.setDescription('The SeNTry EEM Gatherer service is stopping.')
gathererServiceComingUp = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,5))
if mibBuilder.loadTexts: gathererServiceComingUp.setDescription('The SeNTry EEM Gatherer service is starting.')
eemRedAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,100)).setObjects(("MISSION-CRITICAL-MIB", "alertLevel"), ("MISSION-CRITICAL-MIB", "logType"), ("MISSION-CRITICAL-MIB", "server"), ("MISSION-CRITICAL-MIB", "source"), ("MISSION-CRITICAL-MIB", "user"), ("MISSION-CRITICAL-MIB", "eventID"), ("MISSION-CRITICAL-MIB", "description"))
if mibBuilder.loadTexts: eemRedAlert.setDescription('A SeNTry EEM red alert has been generated.')
eemOrangeAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,200)).setObjects(("MISSION-CRITICAL-MIB", "alertLevel"), ("MISSION-CRITICAL-MIB", "logType"), ("MISSION-CRITICAL-MIB", "server"), ("MISSION-CRITICAL-MIB", "source"), ("MISSION-CRITICAL-MIB", "user"), ("MISSION-CRITICAL-MIB", "eventID"), ("MISSION-CRITICAL-MIB", "description"))
if mibBuilder.loadTexts: eemOrangeAlert.setDescription('A SeNTry EEM orange alert has been generated.')
eemYellowAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,300)).setObjects(("MISSION-CRITICAL-MIB", "alertLevel"), ("MISSION-CRITICAL-MIB", "logType"), ("MISSION-CRITICAL-MIB", "server"), ("MISSION-CRITICAL-MIB", "source"), ("MISSION-CRITICAL-MIB", "user"), ("MISSION-CRITICAL-MIB", "eventID"), ("MISSION-CRITICAL-MIB", "description"))
if mibBuilder.loadTexts: eemYellowAlert.setDescription('A SeNTry EEM yellow alert has been generated.')
eemBlueAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,400)).setObjects(("MISSION-CRITICAL-MIB", "alertLevel"), ("MISSION-CRITICAL-MIB", "logType"), ("MISSION-CRITICAL-MIB", "server"), ("MISSION-CRITICAL-MIB", "source"), ("MISSION-CRITICAL-MIB", "user"), ("MISSION-CRITICAL-MIB", "eventID"), ("MISSION-CRITICAL-MIB", "description"))
if mibBuilder.loadTexts: eemBlueAlert.setDescription('A SeNTry EEM blue alert has been generated.')
eemGreenAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0,500)).setObjects(("MISSION-CRITICAL-MIB", "alertLevel"), ("MISSION-CRITICAL-MIB", "logType"), ("MISSION-CRITICAL-MIB", "server"), ("MISSION-CRITICAL-MIB", "source"), ("MISSION-CRITICAL-MIB", "user"), ("MISSION-CRITICAL-MIB", "eventID"), ("MISSION-CRITICAL-MIB", "description"))
if mibBuilder.loadTexts: eemGreenAlert.setDescription('A SeNTry EEM green alert has been generated.')
omService = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2, 2, 1))
omLastTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2))
omTrapTime = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 1), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: omTrapTime.setStatus('deprecated')
if mibBuilder.loadTexts: omTrapTime.setDescription('Time of the last trap sent.')
omAlertLevel = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: omAlertLevel.setStatus('mandatory')
if mibBuilder.loadTexts: omAlertLevel.setDescription('Alert level of the last trap sent.')
omAlertLevelName = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omAlertLevelName.setStatus('mandatory')
if mibBuilder.loadTexts: omAlertLevelName.setDescription('A textual description of the alert level for the last trap sent.')
omServer = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omServer.setStatus('mandatory')
if mibBuilder.loadTexts: omServer.setDescription('Server generating the last trap sent.')
omSource = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omSource.setStatus('mandatory')
if mibBuilder.loadTexts: omSource.setDescription('Source generating the last trap sent.')
omOwner = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omOwner.setStatus('mandatory')
if mibBuilder.loadTexts: omOwner.setDescription('User generating the last trap sent.')
omDescription = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omDescription.setStatus('mandatory')
if mibBuilder.loadTexts: omDescription.setDescription('Text description of the last trap sent.')
omCustomField1 = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omCustomField1.setStatus('mandatory')
if mibBuilder.loadTexts: omCustomField1.setDescription('Custom Field 1 defined by user')
omCustomField2 = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omCustomField2.setStatus('mandatory')
if mibBuilder.loadTexts: omCustomField2.setDescription('Custom Field 2 defined by user')
omCustomField3 = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omCustomField3.setStatus('mandatory')
if mibBuilder.loadTexts: omCustomField3.setDescription('Custom Field 3 defined by user')
omCustomField4 = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omCustomField4.setStatus('mandatory')
if mibBuilder.loadTexts: omCustomField4.setDescription('Custom Field 4 defined by user')
omCustomField5 = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1024))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omCustomField5.setStatus('mandatory')
if mibBuilder.loadTexts: omCustomField5.setDescription('Custom Field 5 defined by user')
omAlertURL = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2048))).setMaxAccess("readonly")
if mibBuilder.loadTexts: omAlertURL.setStatus('mandatory')
if mibBuilder.loadTexts: omAlertURL.setDescription('URL used to view alert details')
omGenericTrapNumber = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: omGenericTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts: omGenericTrapNumber.setDescription('The generic trap number of the last trap sent.')
omSpecificTrapNumber = MibScalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: omSpecificTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts: omSpecificTrapNumber.setDescription('The user specific trap number of the last trap sent')
omBlueAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,10)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omBlueAlert.setDescription('A OnePoint Operations Manager Blue Alert has been generated.')
omGreenAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,20)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omGreenAlert.setDescription('A OnePoint Operations Manager Green Alert has been generated.')
omYellowAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,30)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omYellowAlert.setDescription('A OnePoint Operations Manager Yellow Alert has been generated.')
omOrangeAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,40)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omOrangeAlert.setDescription('A OnePoint Operations Manager Orange Alert has been generated.')
omRedCriticalErrorAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,50)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omRedCriticalErrorAlert.setDescription('A OnePoint Operations Manager Critical Error Alert has been generated.')
omRedSecurityBreachAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,60)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omRedSecurityBreachAlert.setDescription('A OnePoint Operations Manager Security Breach Alert has been generated.')
omRedServiceUnavailableAlert = NotificationType((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0,70)).setObjects(("MISSION-CRITICAL-MIB", "omAlertLevel"), ("MISSION-CRITICAL-MIB", "omAlertLevelName"), ("MISSION-CRITICAL-MIB", "omServer"), ("MISSION-CRITICAL-MIB", "omSource"), ("MISSION-CRITICAL-MIB", "omOwner"), ("MISSION-CRITICAL-MIB", "omDescription"), ("MISSION-CRITICAL-MIB", "omCustomField1"), ("MISSION-CRITICAL-MIB", "omCustomField2"), ("MISSION-CRITICAL-MIB", "omCustomField3"), ("MISSION-CRITICAL-MIB", "omCustomField4"), ("MISSION-CRITICAL-MIB", "omCustomField5"), ("MISSION-CRITICAL-MIB", "omAlertURL"))
if mibBuilder.loadTexts: omRedServiceUnavailableAlert.setDescription('A OnePoint Operations Manager Service Unavailable Alert has been generated.')
mibBuilder.exportSymbols("MISSION-CRITICAL-MIB", serviceUpTime=serviceUpTime, omYellowAlert=omYellowAlert, redTrapCount=redTrapCount, eemOrangeAlert=eemOrangeAlert, mcsCompanyInfo=mcsCompanyInfo, omCustomField4=omCustomField4, gathererServiceComingUp=gathererServiceComingUp, serviceState=serviceState, omCustomField2=omCustomField2, omDescription=omDescription, missionCritical=missionCritical, omService=omService, eventID=eventID, omAlertLevelName=omAlertLevelName, serviceGoingDown=serviceGoingDown, omProductInfo=omProductInfo, trapTime=trapTime, eemService=eemService, eemYellowAlert=eemYellowAlert, omRedCriticalErrorAlert=omRedCriticalErrorAlert, omRedSecurityBreachAlert=omRedSecurityBreachAlert, blueTrapCount=blueTrapCount, greenTrapCount=greenTrapCount, omServer=omServer, mcsSoftware=mcsSoftware, serviceComingUp=serviceComingUp, omCustomField1=omCustomField1, omGreenAlert=omGreenAlert, eemLastTrap=eemLastTrap, omCustomField5=omCustomField5, omAlertURL=omAlertURL, omOrangeAlert=omOrangeAlert, omTrapTime=omTrapTime, logType=logType, amberTrapCount=amberTrapCount, user=user, specificTrapNumber=specificTrapNumber, source=source, omBlueAlert=omBlueAlert, ownershipDetails=ownershipDetails, eemRedAlert=eemRedAlert, omSpecificTrapNumber=omSpecificTrapNumber, omOwner=omOwner, gathererServiceGoingDown=gathererServiceGoingDown, orangeTrapCount=orangeTrapCount, server=server, omLastTrap=omLastTrap, omAlertLevel=omAlertLevel, omCustomField3=omCustomField3, omGenericTrapNumber=omGenericTrapNumber, description=description, genericTrapNumber=genericTrapNumber, eemGreenAlert=eemGreenAlert, primaryServer=primaryServer, alertLevel=alertLevel, version=version, omSource=omSource, eemProductInfo=eemProductInfo, eemBlueAlert=eemBlueAlert, contactDetails=contactDetails, omRedServiceUnavailableAlert=omRedServiceUnavailableAlert)
| (integer, octet_string, object_identifier) = mibBuilder.importSymbols('ASN1', 'Integer', 'OctetString', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(single_value_constraint, constraints_union, value_size_constraint, constraints_intersection, value_range_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'SingleValueConstraint', 'ConstraintsUnion', 'ValueSizeConstraint', 'ConstraintsIntersection', 'ValueRangeConstraint')
(notification_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance')
(notification_type, time_ticks, iso, mib_scalar, mib_table, mib_table_row, mib_table_column, counter64, mib_identifier, bits, notification_type, enterprises, gauge32, counter32, unsigned32, ip_address, integer32, module_identity, object_identity) = mibBuilder.importSymbols('SNMPv2-SMI', 'NotificationType', 'TimeTicks', 'iso', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Counter64', 'MibIdentifier', 'Bits', 'NotificationType', 'enterprises', 'Gauge32', 'Counter32', 'Unsigned32', 'IpAddress', 'Integer32', 'ModuleIdentity', 'ObjectIdentity')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
mission_critical = mib_identifier((1, 3, 6, 1, 4, 1, 2349))
mcs_company_info = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 1))
mcs_software = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2))
eem_product_info = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2, 1))
om_product_info = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2, 2))
ownership_details = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 1, 1), display_string().subtype(subtypeSpec=value_size_constraint(1, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
ownershipDetails.setStatus('mandatory')
if mibBuilder.loadTexts:
ownershipDetails.setDescription('Details of the company providing this MIB')
contact_details = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 1, 2), display_string().subtype(subtypeSpec=value_size_constraint(1, 64))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
contactDetails.setStatus('mandatory')
if mibBuilder.loadTexts:
contactDetails.setDescription('Contact responsible for maintaining this MIB')
eem_service = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1))
version = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 1), display_string().subtype(subtypeSpec=value_size_constraint(1, 16))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
version.setStatus('mandatory')
if mibBuilder.loadTexts:
version.setDescription('The version of the EEM Agent running')
primary_server = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 2), display_string().subtype(subtypeSpec=value_size_constraint(1, 16))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
primaryServer.setStatus('mandatory')
if mibBuilder.loadTexts:
primaryServer.setDescription('The Primary Server for this EEM Agent')
service_state = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('up', 1), ('down', 2)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
serviceState.setStatus('mandatory')
if mibBuilder.loadTexts:
serviceState.setDescription('State of the service. Running is 1, stopped is 2')
service_up_time = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 4), time_ticks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
serviceUpTime.setStatus('mandatory')
if mibBuilder.loadTexts:
serviceUpTime.setDescription('No. of milliseconds since the service was started')
red_trap_count = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 5), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
redTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts:
redTrapCount.setDescription('The number of red alert traps sent since the service was started')
orange_trap_count = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 6), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
orangeTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts:
orangeTrapCount.setDescription('The number of orange alert traps sent since the service was started')
amber_trap_count = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 7), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
amberTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts:
amberTrapCount.setDescription('The number of yellow alert traps sent since the service was started')
blue_trap_count = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 8), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
blueTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts:
blueTrapCount.setDescription('The number of blue alert traps sent since the service was started')
green_trap_count = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 1, 9), counter32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
greenTrapCount.setStatus('deprecated')
if mibBuilder.loadTexts:
greenTrapCount.setDescription('The number of Green Alert Traps since the service was started')
eem_last_trap = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2))
trap_time = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 1), time_ticks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
trapTime.setStatus('deprecated')
if mibBuilder.loadTexts:
trapTime.setDescription('Time of the last trap sent')
alert_level = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 2), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5))).clone(namedValues=named_values(('red', 1), ('orange', 2), ('yellow', 3), ('blue', 4), ('green', 5)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
alertLevel.setStatus('mandatory')
if mibBuilder.loadTexts:
alertLevel.setDescription('Alert level of the last trap sent. red=1, orange=2, yellow=3, blue=4, green=5')
log_type = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7, 99))).clone(namedValues=named_values(('ntevent', 1), ('application', 2), ('snmp', 3), ('wbem', 4), ('activemonitoring', 5), ('performancemonitoring', 6), ('timedevent', 7), ('eem', 99)))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
logType.setStatus('mandatory')
if mibBuilder.loadTexts:
logType.setDescription('Log type generating the last trap sent. system=1,application=2,security=3 (fill in others here) EEM=99')
server = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 4), display_string().subtype(subtypeSpec=value_size_constraint(1, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
server.setStatus('mandatory')
if mibBuilder.loadTexts:
server.setDescription('Server generating the last trap sent')
source = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 5), display_string().subtype(subtypeSpec=value_size_constraint(1, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
source.setStatus('mandatory')
if mibBuilder.loadTexts:
source.setDescription('Source generating the last trap sent')
user = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 6), display_string().subtype(subtypeSpec=value_size_constraint(1, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
user.setStatus('mandatory')
if mibBuilder.loadTexts:
user.setDescription('User generating the last trap sent')
event_id = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 7), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
eventID.setStatus('mandatory')
if mibBuilder.loadTexts:
eventID.setDescription('Event ID of the last trap sent')
description = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 8), display_string().subtype(subtypeSpec=value_size_constraint(1, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
description.setStatus('mandatory')
if mibBuilder.loadTexts:
description.setDescription('Text description of the last trap sent')
generic_trap_number = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 9), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
genericTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts:
genericTrapNumber.setDescription('The generic trap number of the last trap sent')
specific_trap_number = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 1, 2, 10), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
specificTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts:
specificTrapNumber.setDescription('The user specific trap number of the last trap sent')
service_going_down = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 2))
if mibBuilder.loadTexts:
serviceGoingDown.setDescription('The SeNTry EEM Sender service is stopping.')
service_coming_up = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 3))
if mibBuilder.loadTexts:
serviceComingUp.setDescription('The SeNTry EEM Sender service is starting.')
gatherer_service_going_down = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 4))
if mibBuilder.loadTexts:
gathererServiceGoingDown.setDescription('The SeNTry EEM Gatherer service is stopping.')
gatherer_service_coming_up = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 5))
if mibBuilder.loadTexts:
gathererServiceComingUp.setDescription('The SeNTry EEM Gatherer service is starting.')
eem_red_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 100)).setObjects(('MISSION-CRITICAL-MIB', 'alertLevel'), ('MISSION-CRITICAL-MIB', 'logType'), ('MISSION-CRITICAL-MIB', 'server'), ('MISSION-CRITICAL-MIB', 'source'), ('MISSION-CRITICAL-MIB', 'user'), ('MISSION-CRITICAL-MIB', 'eventID'), ('MISSION-CRITICAL-MIB', 'description'))
if mibBuilder.loadTexts:
eemRedAlert.setDescription('A SeNTry EEM red alert has been generated.')
eem_orange_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 200)).setObjects(('MISSION-CRITICAL-MIB', 'alertLevel'), ('MISSION-CRITICAL-MIB', 'logType'), ('MISSION-CRITICAL-MIB', 'server'), ('MISSION-CRITICAL-MIB', 'source'), ('MISSION-CRITICAL-MIB', 'user'), ('MISSION-CRITICAL-MIB', 'eventID'), ('MISSION-CRITICAL-MIB', 'description'))
if mibBuilder.loadTexts:
eemOrangeAlert.setDescription('A SeNTry EEM orange alert has been generated.')
eem_yellow_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 300)).setObjects(('MISSION-CRITICAL-MIB', 'alertLevel'), ('MISSION-CRITICAL-MIB', 'logType'), ('MISSION-CRITICAL-MIB', 'server'), ('MISSION-CRITICAL-MIB', 'source'), ('MISSION-CRITICAL-MIB', 'user'), ('MISSION-CRITICAL-MIB', 'eventID'), ('MISSION-CRITICAL-MIB', 'description'))
if mibBuilder.loadTexts:
eemYellowAlert.setDescription('A SeNTry EEM yellow alert has been generated.')
eem_blue_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 400)).setObjects(('MISSION-CRITICAL-MIB', 'alertLevel'), ('MISSION-CRITICAL-MIB', 'logType'), ('MISSION-CRITICAL-MIB', 'server'), ('MISSION-CRITICAL-MIB', 'source'), ('MISSION-CRITICAL-MIB', 'user'), ('MISSION-CRITICAL-MIB', 'eventID'), ('MISSION-CRITICAL-MIB', 'description'))
if mibBuilder.loadTexts:
eemBlueAlert.setDescription('A SeNTry EEM blue alert has been generated.')
eem_green_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 1) + (0, 500)).setObjects(('MISSION-CRITICAL-MIB', 'alertLevel'), ('MISSION-CRITICAL-MIB', 'logType'), ('MISSION-CRITICAL-MIB', 'server'), ('MISSION-CRITICAL-MIB', 'source'), ('MISSION-CRITICAL-MIB', 'user'), ('MISSION-CRITICAL-MIB', 'eventID'), ('MISSION-CRITICAL-MIB', 'description'))
if mibBuilder.loadTexts:
eemGreenAlert.setDescription('A SeNTry EEM green alert has been generated.')
om_service = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2, 2, 1))
om_last_trap = mib_identifier((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2))
om_trap_time = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 1), time_ticks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omTrapTime.setStatus('deprecated')
if mibBuilder.loadTexts:
omTrapTime.setDescription('Time of the last trap sent.')
om_alert_level = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 2), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omAlertLevel.setStatus('mandatory')
if mibBuilder.loadTexts:
omAlertLevel.setDescription('Alert level of the last trap sent.')
om_alert_level_name = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 3), display_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omAlertLevelName.setStatus('mandatory')
if mibBuilder.loadTexts:
omAlertLevelName.setDescription('A textual description of the alert level for the last trap sent.')
om_server = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 4), display_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omServer.setStatus('mandatory')
if mibBuilder.loadTexts:
omServer.setDescription('Server generating the last trap sent.')
om_source = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 5), display_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omSource.setStatus('mandatory')
if mibBuilder.loadTexts:
omSource.setDescription('Source generating the last trap sent.')
om_owner = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 6), display_string().subtype(subtypeSpec=value_size_constraint(0, 255))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omOwner.setStatus('mandatory')
if mibBuilder.loadTexts:
omOwner.setDescription('User generating the last trap sent.')
om_description = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 7), display_string().subtype(subtypeSpec=value_size_constraint(0, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omDescription.setStatus('mandatory')
if mibBuilder.loadTexts:
omDescription.setDescription('Text description of the last trap sent.')
om_custom_field1 = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 8), display_string().subtype(subtypeSpec=value_size_constraint(0, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omCustomField1.setStatus('mandatory')
if mibBuilder.loadTexts:
omCustomField1.setDescription('Custom Field 1 defined by user')
om_custom_field2 = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 9), display_string().subtype(subtypeSpec=value_size_constraint(0, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omCustomField2.setStatus('mandatory')
if mibBuilder.loadTexts:
omCustomField2.setDescription('Custom Field 2 defined by user')
om_custom_field3 = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 10), display_string().subtype(subtypeSpec=value_size_constraint(0, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omCustomField3.setStatus('mandatory')
if mibBuilder.loadTexts:
omCustomField3.setDescription('Custom Field 3 defined by user')
om_custom_field4 = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 11), display_string().subtype(subtypeSpec=value_size_constraint(0, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omCustomField4.setStatus('mandatory')
if mibBuilder.loadTexts:
omCustomField4.setDescription('Custom Field 4 defined by user')
om_custom_field5 = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 12), display_string().subtype(subtypeSpec=value_size_constraint(0, 1024))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omCustomField5.setStatus('mandatory')
if mibBuilder.loadTexts:
omCustomField5.setDescription('Custom Field 5 defined by user')
om_alert_url = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 13), display_string().subtype(subtypeSpec=value_size_constraint(0, 2048))).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omAlertURL.setStatus('mandatory')
if mibBuilder.loadTexts:
omAlertURL.setDescription('URL used to view alert details')
om_generic_trap_number = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 14), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omGenericTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts:
omGenericTrapNumber.setDescription('The generic trap number of the last trap sent.')
om_specific_trap_number = mib_scalar((1, 3, 6, 1, 4, 1, 2349, 2, 2, 2, 15), integer32()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
omSpecificTrapNumber.setStatus('mandatory')
if mibBuilder.loadTexts:
omSpecificTrapNumber.setDescription('The user specific trap number of the last trap sent')
om_blue_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 10)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omBlueAlert.setDescription('A OnePoint Operations Manager Blue Alert has been generated.')
om_green_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 20)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omGreenAlert.setDescription('A OnePoint Operations Manager Green Alert has been generated.')
om_yellow_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 30)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omYellowAlert.setDescription('A OnePoint Operations Manager Yellow Alert has been generated.')
om_orange_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 40)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omOrangeAlert.setDescription('A OnePoint Operations Manager Orange Alert has been generated.')
om_red_critical_error_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 50)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omRedCriticalErrorAlert.setDescription('A OnePoint Operations Manager Critical Error Alert has been generated.')
om_red_security_breach_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 60)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omRedSecurityBreachAlert.setDescription('A OnePoint Operations Manager Security Breach Alert has been generated.')
om_red_service_unavailable_alert = notification_type((1, 3, 6, 1, 4, 1, 2349, 2, 2) + (0, 70)).setObjects(('MISSION-CRITICAL-MIB', 'omAlertLevel'), ('MISSION-CRITICAL-MIB', 'omAlertLevelName'), ('MISSION-CRITICAL-MIB', 'omServer'), ('MISSION-CRITICAL-MIB', 'omSource'), ('MISSION-CRITICAL-MIB', 'omOwner'), ('MISSION-CRITICAL-MIB', 'omDescription'), ('MISSION-CRITICAL-MIB', 'omCustomField1'), ('MISSION-CRITICAL-MIB', 'omCustomField2'), ('MISSION-CRITICAL-MIB', 'omCustomField3'), ('MISSION-CRITICAL-MIB', 'omCustomField4'), ('MISSION-CRITICAL-MIB', 'omCustomField5'), ('MISSION-CRITICAL-MIB', 'omAlertURL'))
if mibBuilder.loadTexts:
omRedServiceUnavailableAlert.setDescription('A OnePoint Operations Manager Service Unavailable Alert has been generated.')
mibBuilder.exportSymbols('MISSION-CRITICAL-MIB', serviceUpTime=serviceUpTime, omYellowAlert=omYellowAlert, redTrapCount=redTrapCount, eemOrangeAlert=eemOrangeAlert, mcsCompanyInfo=mcsCompanyInfo, omCustomField4=omCustomField4, gathererServiceComingUp=gathererServiceComingUp, serviceState=serviceState, omCustomField2=omCustomField2, omDescription=omDescription, missionCritical=missionCritical, omService=omService, eventID=eventID, omAlertLevelName=omAlertLevelName, serviceGoingDown=serviceGoingDown, omProductInfo=omProductInfo, trapTime=trapTime, eemService=eemService, eemYellowAlert=eemYellowAlert, omRedCriticalErrorAlert=omRedCriticalErrorAlert, omRedSecurityBreachAlert=omRedSecurityBreachAlert, blueTrapCount=blueTrapCount, greenTrapCount=greenTrapCount, omServer=omServer, mcsSoftware=mcsSoftware, serviceComingUp=serviceComingUp, omCustomField1=omCustomField1, omGreenAlert=omGreenAlert, eemLastTrap=eemLastTrap, omCustomField5=omCustomField5, omAlertURL=omAlertURL, omOrangeAlert=omOrangeAlert, omTrapTime=omTrapTime, logType=logType, amberTrapCount=amberTrapCount, user=user, specificTrapNumber=specificTrapNumber, source=source, omBlueAlert=omBlueAlert, ownershipDetails=ownershipDetails, eemRedAlert=eemRedAlert, omSpecificTrapNumber=omSpecificTrapNumber, omOwner=omOwner, gathererServiceGoingDown=gathererServiceGoingDown, orangeTrapCount=orangeTrapCount, server=server, omLastTrap=omLastTrap, omAlertLevel=omAlertLevel, omCustomField3=omCustomField3, omGenericTrapNumber=omGenericTrapNumber, description=description, genericTrapNumber=genericTrapNumber, eemGreenAlert=eemGreenAlert, primaryServer=primaryServer, alertLevel=alertLevel, version=version, omSource=omSource, eemProductInfo=eemProductInfo, eemBlueAlert=eemBlueAlert, contactDetails=contactDetails, omRedServiceUnavailableAlert=omRedServiceUnavailableAlert) |
# Repeated rebinding: only the final value (333) of `num` survives; the
# earlier assignments are dead stores.
num = 111
num = 222
num = 333333
num = 333
# NOTE(review): `num4` breaks the pattern above — possibly a typo'd
# sequence member; confirm intent before relying on it.
num4 = 44444
| num = 111
num = 222
num = 333333
num = 333
num4 = 44444 |
# Standard module metadata dunders (author/version/license bookkeeping).
__author__ = "hoongeun"
__version__ = "0.0.1"
__copyright__ = "Copyright (c) hoongeun"
__license__ = "Beer ware"
| __author__ = 'hoongeun'
__version__ = '0.0.1'
__copyright__ = 'Copyright (c) hoongeun'
__license__ = 'Beer ware' |
def test1(inp="4 10 4 1 8 4 9 14 5 1 14 15 0 15 3 5"):
    """Memory-bank redistribution cycle finder (Advent of Code 2017, day 6).

    Repeatedly takes the (first) largest bank and redistributes its blocks
    round-robin until a configuration repeats.

    Prints and returns a tuple:
        (steps until a repeat, step at which the repeated state first
         appeared, cycle length).

    inp: whitespace-separated bank sizes. Defaults to the value the original
         hard-coded (the first assignment in the original was a dead store),
         so a no-argument call behaves exactly as before.
    """
    banks = [int(tok) for tok in inp.split()]
    # Map each seen configuration to the step at which it was produced:
    # O(1) lookup instead of the original O(n) `current in hist` scan.
    seen = {tuple(banks): 0}
    step = 1
    while True:
        # Empty the largest bank (ties -> lowest index, as list.index does).
        blocks = max(banks)
        idx = banks.index(blocks)
        banks[idx] = 0
        # Hand out its blocks one at a time, starting just after it.
        while blocks > 0:
            idx = (idx + 1) % len(banks)
            banks[idx] += 1
            blocks -= 1
        state = tuple(banks)
        if state in seen:
            first = seen[state]
            print(step, first, step - first)
            return step, first, step - first
        seen[state] = step
        step += 1
| def test1():
inp = '0 2 7 0'
inp = '4 10 4 1 8 4 9 14 5 1 14 15 0 15 3 5'
nums = list(map(lambda x: int(x), inp.split()))
hist = [nums]
step = 1
current = nums[:]
while True:
m = max(current)
idx = current.index(m)
current[idx] = 0
idx += 1
while m > 0:
idx = 0 if idx >= len(current) else idx
current[idx] += 1
m -= 1
idx += 1
if current in hist:
print(step, hist.index(current), step - hist.index(current))
break
step += 1
hist.append(current[:]) |
# test floor-division and modulo operators
# NOTE(review): @micropython.viper compiles these to native code with
# machine-int arguments (MicroPython-specific; will not run under CPython).
@micropython.viper
def div(x:int, y:int) -> int:
    # Floor division: result rounds toward negative infinity (Python semantics).
    return x // y
@micropython.viper
def mod(x:int, y:int) -> int:
    # Modulo: result takes the sign of the divisor (Python semantics).
    return x % y
def dm(x, y):
    # Print both results for one (x, y) pair on a single line.
    print(div(x, y), mod(x, y))
# Exercise positive and negative dividends against divisors -7..7,
# skipping the undefined y == 0 case.
for x in (-6, 6):
    for y in range(-7, 8):
        if y == 0:
            continue
        dm(x, y)
| @micropython.viper
def div(x: int, y: int) -> int:
return x // y
@micropython.viper
def mod(x: int, y: int) -> int:
return x % y
def dm(x, y):
print(div(x, y), mod(x, y))
for x in (-6, 6):
for y in range(-7, 8):
if y == 0:
continue
dm(x, y) |
# -*- coding: utf-8 -*-
def func(precess_data, x):
    """Binary search: return the index of x in the sorted list precess_data,
    or -1 if x is not present.

    Fixes in this revision:
      * actually searches the list passed in — the original overwrote the
        argument with a hard-coded list(range(0, 100, 3)) and used a
        hard-coded high = 34;
      * narrows the window with guess +/- 1, so a missing value terminates
        with -1 instead of looping forever (the original's `low = guess`
        could leave the window unchanged);
      * drops the unreachable `else: break` (the while condition already
        excluded equality).
    """
    low = 0
    high = len(precess_data) - 1
    while low <= high:
        guess = (low + high) // 2
        if precess_data[guess] == x:
            return guess
        if precess_data[guess] < x:
            low = guess + 1
        else:
            high = guess - 1
    return -1
print(func(list(range(0, 100, 3)), 99))
| def func(precess_data, x):
precess_data = list(range(0, 100, 3))
low = 0
high = 34
guess = int((low + high) / 2)
while precess_data[guess] != x:
if precess_data[guess] < x:
low = guess
elif precess_data[guess] > x:
high = guess
else:
break
guess = (low + high) // 2
return guess
print(func(list(range(0, 100, 3)), 99)) |
'''
Created on 08.06.2014
@author: ionitadaniel19
'''
# Symbolic UI-element keys mapped to Selenium locator strings of the form
# "<strategy>=<value>" (name=, id=, css= locator strategies).
map_selenium_objects={
"SUSER":"name=login",
"SPWD":"name=password",
"SREMEMBER":"id=remember_me",
"SSUBMIT":"name=commit",
"SKEYWORD":"id=q1c",
# NOTE(review): "SSHOWANSER" looks like a typo for SSHOWANSWER, but the key
# may be referenced by callers — do not rename without checking usages.
"SSHOWANSER":"name=showanswer",
"SANSWER":"css=#answer > p"
} | """
Created on 08.06.2014
@author: ionitadaniel19
"""
map_selenium_objects = {'SUSER': 'name=login', 'SPWD': 'name=password', 'SREMEMBER': 'id=remember_me', 'SSUBMIT': 'name=commit', 'SKEYWORD': 'id=q1c', 'SSHOWANSER': 'name=showanswer', 'SANSWER': 'css=#answer > p'} |
# Read `linesize` records of "<name> <a> <b> <c>" and print them sorted by
# the three numeric fields descending, with alphabetical name as tiebreak.
linesize = int(input())
# NOTE(review): `table` is built but never used below — looks like leftover
# scaffolding; verify before removing.
table = [[0 for x in range(4)] for y in range(linesize)]
queue = []
for i in range(linesize):
    entry = input().split(' ')
    # print(entry, 'pushed')
    # Store the numeric fields first (presumably medal/score counts — TODO
    # confirm) so the sort keys below can index them positionally.
    country = (int(entry[1]),int(entry[2]),int(entry[3]),str(entry[0]))
    queue.append(country)
# Two stable sorts: alphabetical by name first, then by the numeric triple
# descending — ties on numbers therefore stay in alphabetical order.
out = sorted(queue, key = lambda x: x[3])
out = sorted(out, key = lambda x: (x[0], x[1], x[2]), reverse=True)
for elemt in out:
print("{0} {1} {2} {3}".format(elemt[3],elemt[0],elemt[1],elemt[2])) | linesize = int(input())
table = [[0 for x in range(4)] for y in range(linesize)]
queue = []
for i in range(linesize):
entry = input().split(' ')
country = (int(entry[1]), int(entry[2]), int(entry[3]), str(entry[0]))
queue.append(country)
out = sorted(queue, key=lambda x: x[3])
out = sorted(out, key=lambda x: (x[0], x[1], x[2]), reverse=True)
for elemt in out:
print('{0} {1} {2} {3}'.format(elemt[3], elemt[0], elemt[1], elemt[2])) |
# Simple console demo: greet the user by name, then print a multi-line
# string literal.
name = input('Enter your Name: ')
sen = "Hello "+ name +" ,How r u today??"
print(sen)
# Triple-quoted literal: the embedded newlines are part of the string and
# are printed exactly as written.
para = ''' hey , this is a
multiline comment.Lets see how
it works.'''
print(para)
| name = input('Enter your Name: ')
sen = 'Hello ' + name + ' ,How r u today??'
print(sen)
para = ' hey , this is a\n multiline comment.Lets see how\n it works.'
print(para) |
# Read two floats x < y, then repeatedly grow x by 70% per step, counting
# how many steps it takes for x to come within `exp` of (or exceed) y.
x, y = map(float, input().split())
exp = 0.0001  # tolerance on the remaining gap y - x
count = 1     # counts steps; starts at 1, so the initial state counts as one
while y - x > exp:
    x += x * 0.7
    count += 1
print(count) | (x, y) = map(float, input().split())
exp = 0.0001
count = 1
while y - x > exp:
x += x * 0.7
count += 1
print(count) |
#!/usr/bin/python
class helloworld:
    """Prints a greeting as a side effect of instantiation.

    The class carries no state; constructing it is the whole point.
    (Name kept lowercase to preserve the module's public API.)
    """

    def __init__(self):
        print("Hello World!")

# Demonstrate: constructing the class emits the greeting once.
helloworld()
| class Helloworld:
def __init__(self):
print('Hello World!')
helloworld() |
def init():
    """Return the static telemetry-pipeline configuration.

    Each top-level key is a job name mapped to:
      * outputKafkaTopic      — topic the job writes to,
      * inputPrefix           — storage prefix the job reads from,
      * dependentSinkSources  — downstream sinks consuming its output,
      * filters (optional)    — record filters applied to the input.

    A fresh, independently mutable structure is returned on every call.
    """
    def _sink(kind, prefix):
        # One downstream consumer of a job's output.
        return {"type": kind, "prefix": prefix}

    # Druid sinks shared by the ingest/raw/unique fan-out, in order.
    druid_telemetry = [
        _sink("druid", "telemetry-events"),
        _sink("druid", "telemetry-log-events"),
        _sink("druid", "telemetry-error-events"),
        _sink("druid", "telemetry-feedback-events"),
    ]

    def _druid_copies():
        # Fresh dict objects each time, so no entry aliases another.
        return [dict(s) for s in druid_telemetry]

    def _null_flags_filter():
        return [{"key": "flags", "operator": "Is Null", "value": ""}]

    return {
        "ingest": {
            "outputKafkaTopic": "telemetry.ingest",
            "inputPrefix": "ingest",
            "dependentSinkSources": [
                _sink("azure", "raw"),
                _sink("azure", "unique"),
                _sink("azure", "channel"),
                _sink("azure", "telemetry-denormalized/raw"),
            ] + _druid_copies(),
        },
        "raw": {
            "outputKafkaTopic": "telemetry.raw",
            "inputPrefix": "raw",
            "dependentSinkSources": [
                _sink("azure", "unique"),
                _sink("azure", "channel"),
                _sink("azure", "telemetry-denormalized/raw"),
            ] + _druid_copies(),
        },
        "unique": {
            "outputKafkaTopic": "telemetry.unique",
            "inputPrefix": "unique",
            "dependentSinkSources": [
                _sink("azure", "channel"),
                _sink("azure", "telemetry-denormalized/raw"),
            ] + _druid_copies(),
        },
        "telemetry-denorm": {
            "outputKafkaTopic": "telemetry.denorm",
            "inputPrefix": "telemetry-denormalized/raw",
            "dependentSinkSources": [
                _sink("druid", "telemetry-events"),
                _sink("druid", "telemetry-feedback-events"),
            ],
        },
        "summary-denorm": {
            "outputKafkaTopic": "telemetry.denorm",
            "inputPrefix": "telemetry-denormalized/summary",
            "dependentSinkSources": [_sink("druid", "summary-events")],
        },
        "failed": {
            "outputKafkaTopic": "telemetry.raw",
            "inputPrefix": "failed",
            "dependentSinkSources": [],
            "filters": _null_flags_filter(),
        },
        "batch-failed": {
            "outputKafkaTopic": "telemetry.ingest",
            "inputPrefix": "extractor-failed",
            "dependentSinkSources": [],
            "filters": _null_flags_filter(),
        },
        "wfs": {
            "outputKafkaTopic": "telemetry.derived",
            "inputPrefix": "derived/wfs",
            "dependentSinkSources": [
                _sink("azure", "channel"),
                _sink("azure", "telemetry-denormalized/summary"),
                _sink("druid", "summary-events"),
            ],
        },
    }
} | def init():
return {'ingest': {'outputKafkaTopic': 'telemetry.ingest', 'inputPrefix': 'ingest', 'dependentSinkSources': [{'type': 'azure', 'prefix': 'raw'}, {'type': 'azure', 'prefix': 'unique'}, {'type': 'azure', 'prefix': 'channel'}, {'type': 'azure', 'prefix': 'telemetry-denormalized/raw'}, {'type': 'druid', 'prefix': 'telemetry-events'}, {'type': 'druid', 'prefix': 'telemetry-log-events'}, {'type': 'druid', 'prefix': 'telemetry-error-events'}, {'type': 'druid', 'prefix': 'telemetry-feedback-events'}]}, 'raw': {'outputKafkaTopic': 'telemetry.raw', 'inputPrefix': 'raw', 'dependentSinkSources': [{'type': 'azure', 'prefix': 'unique'}, {'type': 'azure', 'prefix': 'channel'}, {'type': 'azure', 'prefix': 'telemetry-denormalized/raw'}, {'type': 'druid', 'prefix': 'telemetry-events'}, {'type': 'druid', 'prefix': 'telemetry-log-events'}, {'type': 'druid', 'prefix': 'telemetry-error-events'}, {'type': 'druid', 'prefix': 'telemetry-feedback-events'}]}, 'unique': {'outputKafkaTopic': 'telemetry.unique', 'inputPrefix': 'unique', 'dependentSinkSources': [{'type': 'azure', 'prefix': 'channel'}, {'type': 'azure', 'prefix': 'telemetry-denormalized/raw'}, {'type': 'druid', 'prefix': 'telemetry-events'}, {'type': 'druid', 'prefix': 'telemetry-log-events'}, {'type': 'druid', 'prefix': 'telemetry-error-events'}, {'type': 'druid', 'prefix': 'telemetry-feedback-events'}]}, 'telemetry-denorm': {'outputKafkaTopic': 'telemetry.denorm', 'inputPrefix': 'telemetry-denormalized/raw', 'dependentSinkSources': [{'type': 'druid', 'prefix': 'telemetry-events'}, {'type': 'druid', 'prefix': 'telemetry-feedback-events'}]}, 'summary-denorm': {'outputKafkaTopic': 'telemetry.denorm', 'inputPrefix': 'telemetry-denormalized/summary', 'dependentSinkSources': [{'type': 'druid', 'prefix': 'summary-events'}]}, 'failed': {'outputKafkaTopic': 'telemetry.raw', 'inputPrefix': 'failed', 'dependentSinkSources': [], 'filters': [{'key': 'flags', 'operator': 'Is Null', 'value': ''}]}, 'batch-failed': {'outputKafkaTopic': 
'telemetry.ingest', 'inputPrefix': 'extractor-failed', 'dependentSinkSources': [], 'filters': [{'key': 'flags', 'operator': 'Is Null', 'value': ''}]}, 'wfs': {'outputKafkaTopic': 'telemetry.derived', 'inputPrefix': 'derived/wfs', 'dependentSinkSources': [{'type': 'azure', 'prefix': 'channel'}, {'type': 'azure', 'prefix': 'telemetry-denormalized/summary'}, {'type': 'druid', 'prefix': 'summary-events'}]}} |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (pattern, replacement) pairs that re-join tokenizer-split contractions,
# e.g. "ca n 't stop" -> "ca n't stop".
RECOVER_ITEM = [
    ("n 't ", "n't "),
]
def recover_quotewords(text):
    """Return *text* with every RECOVER_ITEM pattern replaced, undoing the
    tokenizer's splitting of quote words/contractions."""
    for pattern, replacement in RECOVER_ITEM:
        text = text.replace(pattern, replacement)
    return text
| recover_item = [("n 't ", "n't ")]
def recover_quotewords(text):
for (before, after) in RECOVER_ITEM:
text = text.replace(before, after)
return text |
# Fixed roster of names; printed once when the module runs.
names = ["Christal", "Ray", "Ron"]
print(names)
| names = ['Christal', 'Ray', 'Ron']
print(names) |
def solution(numBottles, numExchange):
    """Bottle-exchange puzzle: starting with numBottles full bottles, trade
    numExchange empties for one full bottle until no trade is possible.

    Prints and returns the total number of bottles drunk. (The return value
    is new; the original only printed, which made the result unusable by
    callers — printing is kept for backward compatibility.)

    Raises ValueError for numExchange <= 1, where the original looped forever.
    """
    if numExchange <= 1:
        raise ValueError("numExchange must be at least 2")
    total = numBottles
    empties = numBottles
    while empties >= numExchange:
        refills = empties // numExchange
        # Keep the untradeable remainder, plus the freshly drunk refills
        # which become new empties.
        empties = empties % numExchange + refills
        total += refills
    print(total)
    return total
numBottles = int(input("numBottles = "))
numExchange = int(input("numExchange = "))
solution(numBottles,numExchange) | def solution(numBottles, numExchange):
finalsum = numBottles
empty_bottles = numBottles
num_bottles = 0
while emptyBottles >= numExchange:
num_bottles = emptyBottles // numExchange
empty_bottles -= emptyBottles // numExchange * numExchange
finalsum += numBottles
empty_bottles += numBottles
print(finalsum)
num_bottles = int(input('numBottles = '))
num_exchange = int(input('numExchange = '))
solution(numBottles, numExchange) |
def undistort_image(image, objectpoints, imagepoints):
    """Calibrate the camera from object/image point correspondences and
    return *image* with lens distortion removed."""
    frame_size = (image.shape[1], image.shape[0])
    # calibrateCamera returns (rms, matrix, dist_coeffs, rvecs, tvecs);
    # only the matrix and distortion coefficients are needed here.
    calibration = cv2.calibrateCamera(objectpoints, imagepoints, frame_size, None, None)
    _, camera_matrix, dist_coeffs, _, _ = calibration
    return cv2.undistort(image, camera_matrix, dist_coeffs, None, camera_matrix)
def get_shresholded_img(image, grad_thresh, s_thresh):
    """Build a binary lane mask combining an x-gradient (Sobel) threshold
    with an HLS S-channel threshold.

    image: RGB image.
    grad_thresh: (lo, hi) inclusive bounds on the scaled |dI/dx|.
    s_thresh: (lo, hi) inclusive bounds on the HLS saturation channel.
    Returns a binary mask (1 where either criterion holds, else 0).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Gradient in x accentuates near-vertical edges such as lane lines.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= grad_thresh[0]) & (scaled_sobel <= grad_thresh[1])] = 1
    # Saturation channel of HLS is robust to lighting changes on painted lines.
    # BUG FIX: the original converted an undefined global `img` here instead
    # of the `image` parameter, raising NameError at runtime.
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # OR the two criteria into a single mask.
    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    return combined_binary
def warp_image_to_birdseye_view(image, corners):
    """Perspective-warp *image* so the quadrilateral *corners* maps onto an
    axis-aligned rectangle ("bird's-eye" view).

    corners: four (x, y) points ordered to match the destination rectangle
        below (left-top, left-bottom, right-bottom, right-top).
    Returns (warped_image, Minv) where Minv maps bird's-eye coordinates back
    to the original perspective.
    """
    width, height = image.shape[1], image.shape[0]
    # Horizontal inset of the destination rectangle, tuned by inspection.
    inset = 150
    src = np.float32([corners[0], corners[1], corners[2], corners[3]])
    dst = np.float32([
        [inset, 0],
        [inset, height],
        [width - inset, height],
        [width - inset, 0],
    ])
    forward = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(image, forward, (width, height), flags=cv2.INTER_LINEAR)
    inverse = cv2.getPerspectiveTransform(dst, src)
    return warped, inverse
def find_lane_lines(warped_binary_image, testing=False):
    """Locate left/right lane-line pixels in a bird's-eye binary image with a
    sliding-window search and fit a quadratic x = f(y) to each line.

    warped_binary_image: 2-D array, nonzero where a pixel may belong to a lane.
    testing: when True, also return an RGB visualization with the search
        windows drawn and lane pixels colored (requires cv2 at call time).

    Returns (testing=False):
        left_fit_x, right_fit_x, plot_y, left_curve, right_curve, lane_deviation
    Returns (testing=True):
        left_fit_x, right_fit_x, plot_y, left_fit, right_fit,
        left_curve, right_curve, lane_deviation, output_image

    FIX: np.int (deprecated in NumPy 1.20, removed in 1.24) replaced with the
    builtin int; results are numerically unchanged.
    """
    if testing == True:
        # Stack the binary image into RGB so windows/pixels can be drawn on it.
        output_image = np.dstack((warped_binary_image, warped_binary_image, warped_binary_image)) * 255
    # Column histogram of the bottom half: the two peaks mark the lane bases.
    histogram = np.sum(warped_binary_image[int(warped_binary_image.shape[0] / 2):, :], axis=0)
    midpoint = int(histogram.shape[0] / 2)
    left_x_base = np.argmax(histogram[:midpoint])
    right_x_base = np.argmax(histogram[midpoint:]) + midpoint
    # Sliding-window parameters.
    number_of_windows = 9
    window_height = int(warped_binary_image.shape[0] / number_of_windows)
    # Coordinates of every candidate lane pixel.
    nonzero_pixels = warped_binary_image.nonzero()
    nonzero_y_pixels = np.array(nonzero_pixels[0])
    nonzero_x_pixels = np.array(nonzero_pixels[1])
    left_x_current = left_x_base
    right_x_current = right_x_base
    margin = 100  # window half-width in pixels
    minpix = 50   # minimum pixels found to re-center the next window
    left_lane_inds = []
    right_lane_inds = []
    for window in range(number_of_windows):
        # Window bounds, scanning from the bottom of the image upward.
        win_y_low = warped_binary_image.shape[0] - (window + 1) * window_height
        win_y_high = warped_binary_image.shape[0] - window * window_height
        win_x_left_low = left_x_current - margin
        win_x_left_high = left_x_current + margin
        win_x_right_low = right_x_current - margin
        win_x_right_high = right_x_current + margin
        if testing == True:
            cv2.rectangle(output_image, (win_x_left_low, win_y_low), (win_x_left_high, win_y_high), (0, 255, 0), 2)
            cv2.rectangle(output_image, (win_x_right_low, win_y_low), (win_x_right_high, win_y_high), (0, 255, 0), 2)
        # Candidate pixels falling inside each window.
        left_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_left_low) & (nonzero_x_pixels < win_x_left_high)).nonzero()[0]
        right_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_right_low) & (nonzero_x_pixels < win_x_right_high)).nonzero()[0]
        left_lane_inds.append(left_inds)
        right_lane_inds.append(right_inds)
        # Re-center the next window on the mean x of the pixels just found.
        if len(left_inds) > minpix:
            left_x_current = int(np.mean(nonzero_x_pixels[left_inds]))
        if len(right_inds) > minpix:
            right_x_current = int(np.mean(nonzero_x_pixels[right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Pixel positions belonging to each line.
    left_x = nonzero_x_pixels[left_lane_inds]
    left_y = nonzero_y_pixels[left_lane_inds]
    right_x = nonzero_x_pixels[right_lane_inds]
    right_y = nonzero_y_pixels[right_lane_inds]
    # Quadratic fit x = a*y**2 + b*y + c for each line, evaluated per row.
    left_fit = np.polyfit(left_y, left_x, 2)
    right_fit = np.polyfit(right_y, right_x, 2)
    plot_y = np.linspace(0, warped_binary_image.shape[0] - 1, warped_binary_image.shape[0])
    left_fit_x = left_fit[0] * plot_y ** 2 + left_fit[1] * plot_y + left_fit[2]
    right_fit_x = right_fit[0] * plot_y ** 2 + right_fit[1] * plot_y + right_fit[2]
    image_size = warped_binary_image.shape
    y_eval = np.max(plot_y)
    # Pixel -> meter conversions; assumes a ~720x1280 warp of a US lane
    # (~30 m visible, ~3.7 m lane width) — TODO confirm for other cameras.
    y_m_per_pix = 30 / 720
    x_m_per_pix = 3.7 / 700
    # Refit in world (meter) space for curvature estimation.
    left_fit_cr = np.polyfit(left_y * y_m_per_pix, left_x * x_m_per_pix, 2)
    right_fit_cr = np.polyfit(right_y * y_m_per_pix, right_x * x_m_per_pix, 2)
    # Radius of curvature at the bottom of the image.
    left_curve = ((1 + (2 * left_fit_cr[0] * y_eval * y_m_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])
    right_curve = ((1 + (2 * right_fit_cr[0] * y_eval * y_m_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])
    # Vehicle offset: midpoint of the two lane intercepts vs image center.
    scene_height = image_size[0] * y_m_per_pix
    scene_width = image_size[1] * x_m_per_pix
    left_intercept = left_fit_cr[0] * scene_height ** 2 + left_fit_cr[1] * scene_height + left_fit_cr[2]
    right_intercept = right_fit_cr[0] * scene_height ** 2 + right_fit_cr[1] * scene_height + right_fit_cr[2]
    center = (left_intercept + right_intercept) / 2.0
    lane_deviation = (center - scene_width / 2.0)
    if testing == True:
        # Color the classified pixels: left red, right blue.
        output_image[nonzero_y_pixels[left_lane_inds], nonzero_x_pixels[left_lane_inds]] = [255, 0, 0]
        output_image[nonzero_y_pixels[right_lane_inds], nonzero_x_pixels[right_lane_inds]] = [0, 0, 255]
        return left_fit_x, right_fit_x, plot_y, left_fit, right_fit, left_curve, right_curve, lane_deviation, output_image
    else:
        return left_fit_x, right_fit_x, plot_y, left_curve, right_curve, lane_deviation
def draw_lane_lines(warped_binary_image, undistorted_image, Minv):
    """Project the detected lane area back onto the road image.

    Fills the region between the fitted lane polynomials with green,
    un-warps it using the inverse perspective matrix *Minv*, blends it over
    the undistorted frame, and annotates curvature and lane deviation.
    """
    blank = np.zeros_like(warped_binary_image).astype(np.uint8)
    overlay = np.dstack((blank, blank, blank))
    left_fit_x, right_fit_x, ploty, left_radius, right_radius, lane_deviation = find_lane_lines(warped_binary_image)
    # Build a closed polygon: left boundary top-to-bottom, right bottom-to-top.
    left_boundary = np.array([np.transpose(np.vstack([left_fit_x, ploty]))])
    right_boundary = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, ploty])))])
    lane_polygon = np.hstack((left_boundary, right_boundary))
    cv2.fillPoly(overlay, np.int_([lane_polygon]), (0, 255, 0))
    # Back to camera perspective, then alpha-blend onto the original frame.
    frame_size = (undistorted_image.shape[1], undistorted_image.shape[0])
    unwarped = cv2.warpPerspective(overlay, Minv, frame_size)
    result = cv2.addWeighted(undistorted_image, 1, unwarped, 0.3, 0)
    font = cv2.FONT_HERSHEY_TRIPLEX
    curvature_text = "Curvature: Left = " + str(np.round(left_radius, 2)) + ", Right = " + str(np.round(right_radius, 2))
    cv2.putText(result, curvature_text, (30, 60), font, 1, (0,255,0), 2)
    deviation_text = "Lane deviation from center = {:.2f} m".format(lane_deviation)
    cv2.putText(result, deviation_text, (30, 90), font, 1, (0,255,0), 2)
    return result
def process_image(image):
    """Full lane-detection pipeline for one RGB frame.

    Relies on module-level calibration/threshold globals: objpoints,
    imgpoints, grad_thresh, s_thresh, corners.
    """
    undistorted = undistort_image(image, objpoints, imgpoints)
    thresholded = get_shresholded_img(undistorted, grad_thresh, s_thresh)
    warped, Minv = warp_image_to_birdseye_view(thresholded, corners)
    return draw_lane_lines(warped, undistorted, Minv)
| def undistort_image(image, objectpoints, imagepoints):
img_size = (image.shape[1], image.shape[0])
(ret, mtx, dist, rvecs, tvecs) = cv2.calibrateCamera(objectpoints, imagepoints, img_size, None, None)
dst = cv2.undistort(image, mtx, dist, None, mtx)
return dst
def get_shresholded_img(image, grad_thresh, s_thresh):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
abs_sobelx = np.absolute(sobelx)
scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= grad_thresh[0]) & (scaled_sobel <= grad_thresh[1])] = 1
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:, :, 2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
return combined_binary
def warp_image_to_birdseye_view(image, corners):
img_size = (image.shape[1], image.shape[0])
offset = 150
src = np.float32([corners[0], corners[1], corners[2], corners[3]])
dst = np.float32([[offset, 0], [offset, img_size[1]], [img_size[0] - offset, img_size[1]], [img_size[0] - offset, 0]])
perspective_transform = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(image, perspectiveTransform, img_size, flags=cv2.INTER_LINEAR)
minv = cv2.getPerspectiveTransform(dst, src)
return (warped, Minv)
def find_lane_lines(warped_binary_image, testing=False):
if testing == True:
output_image = np.dstack((warped_binary_image, warped_binary_image, warped_binary_image)) * 255
histogram = np.sum(warped_binary_image[int(warped_binary_image.shape[0] / 2):, :], axis=0)
midpoint = np.int(histogram.shape[0] / 2)
left_x_base = np.argmax(histogram[:midpoint])
right_x_base = np.argmax(histogram[midpoint:]) + midpoint
number_of_windows = 9
window_height = np.int(warped_binary_image.shape[0] / number_of_windows)
nonzero_pixels = warped_binary_image.nonzero()
nonzero_y_pixels = np.array(nonzero_pixels[0])
nonzero_x_pixels = np.array(nonzero_pixels[1])
left_x_current = left_x_base
right_x_current = right_x_base
margin = 100
minpix = 50
left_lane_inds = []
right_lane_inds = []
for window in range(number_of_windows):
win_y_low = warped_binary_image.shape[0] - (window + 1) * window_height
win_y_high = warped_binary_image.shape[0] - window * window_height
win_x_left_low = left_x_current - margin
win_x_left_high = left_x_current + margin
win_x_right_low = right_x_current - margin
win_x_right_high = right_x_current + margin
if testing == True:
cv2.rectangle(output_image, (win_x_left_low, win_y_low), (win_x_left_high, win_y_high), (0, 255, 0), 2)
cv2.rectangle(output_image, (win_x_right_low, win_y_low), (win_x_right_high, win_y_high), (0, 255, 0), 2)
left_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_left_low) & (nonzero_x_pixels < win_x_left_high)).nonzero()[0]
right_inds = ((nonzero_y_pixels >= win_y_low) & (nonzero_y_pixels < win_y_high) & (nonzero_x_pixels >= win_x_right_low) & (nonzero_x_pixels < win_x_right_high)).nonzero()[0]
left_lane_inds.append(left_inds)
right_lane_inds.append(right_inds)
if len(left_inds) > minpix:
left_x_current = np.int(np.mean(nonzero_x_pixels[left_inds]))
if len(right_inds) > minpix:
right_x_current = np.int(np.mean(nonzero_x_pixels[right_inds]))
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
left_x = nonzero_x_pixels[left_lane_inds]
left_y = nonzero_y_pixels[left_lane_inds]
right_x = nonzero_x_pixels[right_lane_inds]
right_y = nonzero_y_pixels[right_lane_inds]
left_fit = np.polyfit(left_y, left_x, 2)
right_fit = np.polyfit(right_y, right_x, 2)
plot_y = np.linspace(0, warped_binary_image.shape[0] - 1, warped_binary_image.shape[0])
left_fit_x = left_fit[0] * plot_y ** 2 + left_fit[1] * plot_y + left_fit[2]
right_fit_x = right_fit[0] * plot_y ** 2 + right_fit[1] * plot_y + right_fit[2]
image_size = warped_binary_image.shape
y_eval = np.max(plot_y)
y_m_per_pix = 30 / 720
x_m_per_pix = 3.7 / 700
left_fit_cr = np.polyfit(left_y * y_m_per_pix, left_x * x_m_per_pix, 2)
right_fit_cr = np.polyfit(right_y * y_m_per_pix, right_x * x_m_per_pix, 2)
left_curve = (1 + (2 * left_fit_cr[0] * y_eval * y_m_per_pix + left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])
right_curve = (1 + (2 * right_fit_cr[0] * y_eval * y_m_per_pix + right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])
scene_height = image_size[0] * y_m_per_pix
scene_width = image_size[1] * x_m_per_pix
left_intercept = left_fit_cr[0] * scene_height ** 2 + left_fit_cr[1] * scene_height + left_fit_cr[2]
right_intercept = right_fit_cr[0] * scene_height ** 2 + right_fit_cr[1] * scene_height + right_fit_cr[2]
center = (left_intercept + right_intercept) / 2.0
lane_deviation = center - scene_width / 2.0
if testing == True:
output_image[nonzero_y_pixels[left_lane_inds], nonzero_x_pixels[left_lane_inds]] = [255, 0, 0]
output_image[nonzero_y_pixels[right_lane_inds], nonzero_x_pixels[right_lane_inds]] = [0, 0, 255]
return (left_fit_x, right_fit_x, plot_y, left_fit, right_fit, left_curve, right_curve, lane_deviation, output_image)
else:
return (left_fit_x, right_fit_x, plot_y, left_curve, right_curve, lane_deviation)
def draw_lane_lines(warped_binary_image, undistorted_image, Minv):
warp_zero = np.zeros_like(warped_binary_image).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
(left_fit_x, right_fit_x, ploty, left_radius, right_radius, lane_deviation) = find_lane_lines(warped_binary_image)
pts_left = np.array([np.transpose(np.vstack([left_fit_x, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fit_x, ploty])))])
pts = np.hstack((pts_left, pts_right))
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
unwarp = cv2.warpPerspective(color_warp, Minv, (undistorted_image.shape[1], undistorted_image.shape[0]))
result = cv2.addWeighted(undistorted_image, 1, unwarp, 0.3, 0)
curvature_text = 'Curvature: Left = ' + str(np.round(left_radius, 2)) + ', Right = ' + str(np.round(right_radius, 2))
font = cv2.FONT_HERSHEY_TRIPLEX
cv2.putText(result, curvature_text, (30, 60), font, 1, (0, 255, 0), 2)
deviation_text = 'Lane deviation from center = {:.2f} m'.format(lane_deviation)
font = cv2.FONT_HERSHEY_TRIPLEX
cv2.putText(result, deviation_text, (30, 90), font, 1, (0, 255, 0), 2)
return result
def process_image(image):
undistorted = undistort_image(image, objpoints, imgpoints)
combined_binary = get_shresholded_img(undistorted, grad_thresh, s_thresh)
(binary_warped, minv) = warp_image_to_birdseye_view(combined_binary, corners)
lane_lines_img = draw_lane_lines(binary_warped, undistorted, Minv)
return lane_lines_img |
# Read numbers from stdin until "done"; report the largest and smallest seen.
largest = None
smallest = None
while True:
    number = input("Enter a number:")
    if number == "done":
        break
    try:
        value = int(number)
    except ValueError:
        # Non-numeric, non-"done" input is reported and skipped.
        print("Invalid input")
        continue
    # Idiom fix: compare to None with `is`, and fold the first-value and
    # new-extreme branches into single guarded assignments.
    if largest is None or value > largest:
        largest = value
    if smallest is None or value < smallest:
        smallest = value
print("Maximum is", largest)
print("Minimum is", smallest)
| largest = None
smallest = None
while True:
number = input('Enter a number:')
if number == 'done':
break
try:
number = int(number)
if largest == None:
largest = number
elif largest < number:
largest = number
if smallest == None:
smallest = number
elif smallest > number:
smallest = number
except ValueError:
print('Invalid input')
print('Maximum is', largest)
print('Minimum is', smallest) |
class UsdValue(float):
    """Float subclass used to tag a number as a USD-denominated value."""

    def __init__(self, v) -> None:
        # float is immutable: the value was already fixed by float.__new__;
        # this initializer only preserves the explicit-constructor style.
        super().__init__()
class UsdPrice(float):
def __init__(self, v) -> None:
super().__init__() | class Usdvalue(float):
def __init__(self, v) -> None:
super().__init__()
class Usdprice(float):
def __init__(self, v) -> None:
super().__init__() |
def filter(fname, data):
    """Return the elements of *data* for which fname(element) == True.

    Note: deliberately compares the result with ``== True`` (not mere
    truthiness), matching the original contract, and intentionally shadows
    the builtin name as part of this exercise's API.
    """
    return [item for item in data if fname(item) == True]
def map(fname, newdata):
    """Apply *fname* to each element of *newdata*; return results as a list.

    Intentionally shadows the builtin name as part of this exercise's API.
    """
    return [fname(element) for element in newdata]
def reduce(fname, incrementdata):
    """Fold *incrementdata* with *fname*, consuming the list IN PLACE.

    Repeatedly combines the first two elements and appends the result until
    a single element remains; that element is returned. The caller's list
    is mutated down to one element, mirroring the original behavior.
    Raises IndexError on an empty list (as the original did).
    """
    while len(incrementdata) >= 2:
        combined = fname(incrementdata[0], incrementdata[1])
        del incrementdata[0]
        del incrementdata[0]
        incrementdata.append(combined)
    return incrementdata[0]
| def filter(fname, data):
list = []
for i in range(len(data)):
f = fname(data[i])
if f == True:
list.append(data[i])
return list
def map(fname, newdata):
list = []
for i in range(len(newdata)):
f = fname(newdata[i])
list.append(f)
return list
def reduce(fname, incrementdata):
list = []
for i in range(len(incrementdata)):
if len(incrementdata) >= 2:
f = fname(incrementdata[0], incrementdata[1])
del incrementdata[0]
del incrementdata[0]
incrementdata.append(f)
return incrementdata[0] |
#
numbers = [str(x) for x in range(32)]
letters = [chr(x) for x in range(97, 123)]
crate = '''
sandbox crate
map {boot: @init}
/*initialize utility vars and register vars*/
service init {
writer = 0
alpha = 0
beta = 0
status = 0'''
for letter in letters:
crate += '\n ' + letter + ' = 0'
crate += '''
}
/*map operator service to exec jump table*/
map {
copy: @copy
add: @add
sub: @sub
not: @not
or: @or
and: @and
eq: @eq
ne: @ne
gt: @gt
lt: @lt
gte: @gte
lte: @lte
unary: @status_alpha
}
service copy { @status_zero alpha = beta @writer}
service add { @status_zero alpha = alpha + beta @writer}
service sub { @status_zero alpha = alpha - beta @writer}
service not { @status_zero alpha = !beta @writer}
service or { @status_zero alpha = alpha | beta @writer}
service and { @status_zero alpha = alpha & beta @writer}
service eq { @status_zero if (alpha == beta) {[true]} else {[false]}}
service ne { @status_zero if (alpha != beta) {[true]} else {[false]}}
service gt { @status_zero if (alpha > beta) {[true]} else {[false]}}
service lt { @status_zero if (alpha < beta) {[true]} else {[false]}}
service gte { @status_zero if (alpha >= beta) {[true]} else {[false]}}
service lte { @status_zero if (alpha <= beta) {[true]} else {[false]}}
service status_zero {
status = 0
}
service status_alpha {
status = 1
}
service status_beta {
status = 2
}
service writer {
jump (writer) {'''
for letter in letters:
crate += '{ ' + letter + ' = alpha } '
crate += '''}
}
map {jump: @jump}
service jump {
jump (z) {'''
for number in numbers:
crate += '{ [ jump' + number + '] } '
crate += '''}
}
map {printme : @printme}
service printme { ['''
for number in numbers:
crate += '''alias jump''' + number + ''' echo ''' + number + ''';'''
crate += '''jump]
}'''
for letter in letters:
crate += '''
map {''' + letter + ' : @' + letter + '''}
service ''' + letter + ''' { jump (status) {
{ alpha = ''' + letter + ''' @status_alpha}
{ beta = ''' + letter + ''' @status_beta}
{ writer = ''' + str(ord(letter) - 97) + ''' }
}
}'''
for number in numbers:
crate += '''
map {delete''' + number + ' : @delete' + number + '''}
service delete''' + number + ''' { jump (status) {
{ alpha = ''' + number + ''' @status_alpha}
{ beta = ''' + number + ''' @status_beta}
{ }
}
}'''
print(crate)
| numbers = [str(x) for x in range(32)]
letters = [chr(x) for x in range(97, 123)]
crate = '\nsandbox crate\n\nmap {boot: @init}\n/*initialize utility vars and register vars*/\nservice init {\n writer = 0\n alpha = 0\n beta = 0\n status = 0'
for letter in letters:
crate += '\n ' + letter + ' = 0'
crate += '\n}\n\n/*map operator service to exec jump table*/\n\nmap {\n copy: @copy\n add: @add\n sub: @sub\n not: @not\n or: @or\n and: @and\n eq: @eq\n ne: @ne\n gt: @gt\n lt: @lt\n gte: @gte\n lte: @lte\n unary: @status_alpha\n}\n\nservice copy { @status_zero alpha = beta @writer}\n\nservice add { @status_zero alpha = alpha + beta @writer}\n\nservice sub { @status_zero alpha = alpha - beta @writer}\n\nservice not { @status_zero alpha = !beta @writer}\n\nservice or { @status_zero alpha = alpha | beta @writer}\n\nservice and { @status_zero alpha = alpha & beta @writer}\n\nservice eq { @status_zero if (alpha == beta) {[true]} else {[false]}}\n\nservice ne { @status_zero if (alpha != beta) {[true]} else {[false]}}\n\nservice gt { @status_zero if (alpha > beta) {[true]} else {[false]}}\n\nservice lt { @status_zero if (alpha < beta) {[true]} else {[false]}}\n\nservice gte { @status_zero if (alpha >= beta) {[true]} else {[false]}}\n\nservice lte { @status_zero if (alpha <= beta) {[true]} else {[false]}}\n\nservice status_zero {\n status = 0\n}\n\nservice status_alpha {\n status = 1\n}\n\nservice status_beta {\n status = 2\n}\n\nservice writer {\n jump (writer) {'
for letter in letters:
crate += '{ ' + letter + ' = alpha } '
crate += '}\n}\n\nmap {jump: @jump}\nservice jump {\n jump (z) {'
for number in numbers:
crate += '{ [ jump' + number + '] } '
crate += '}\n}\n\n\nmap {printme : @printme}\nservice printme { ['
for number in numbers:
crate += 'alias jump' + number + ' echo ' + number + ';'
crate += 'jump]\n}'
for letter in letters:
crate += '\nmap {' + letter + ' : @' + letter + '}\nservice ' + letter + ' { jump (status) {\n { alpha = ' + letter + ' @status_alpha}\n { beta = ' + letter + ' @status_beta}\n { writer = ' + str(ord(letter) - 97) + ' }\n } \n}'
for number in numbers:
crate += '\nmap {delete' + number + ' : @delete' + number + '}\nservice delete' + number + ' { jump (status) {\n { alpha = ' + number + ' @status_alpha}\n { beta = ' + number + ' @status_beta}\n { }\n }\n}'
print(crate) |
# coding: utf-8
# Default settings for the haul image-extraction pipeline.
# Parser BeautifulSoup should use; see:
# http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser
DEFAULT_PARSER = 'lxml'
# Content-Type prefixes that are accepted for processing.
ALLOWED_CONTENT_TYPES = [
    'text/html',
    'image/',
]
# Dotted import paths of finder callables, run in order over a document
# (names suggest they pull URLs from img src, a href, and CSS backgrounds).
FINDER_PIPELINE = (
    'haul.finders.pipeline.html.img_src_finder',
    'haul.finders.pipeline.html.a_href_finder',
    'haul.finders.pipeline.css.background_image_finder',
)
# Dotted import paths of extender callables (names suggest they rewrite
# host-specific URLs to larger/original image variants).
EXTENDER_PIPELINE = (
    'haul.extenders.pipeline.google.blogspot_s1600_extender',
    'haul.extenders.pipeline.google.ggpht_s1600_extender',
    'haul.extenders.pipeline.google.googleusercontent_s1600_extender',
    'haul.extenders.pipeline.pinterest.original_image_extender',
    'haul.extenders.pipeline.wordpress.original_image_extender',
    'haul.extenders.pipeline.tumblr.media_1280_extender',
    'haul.extenders.pipeline.tumblr.avatar_128_extender',
)
# Whether relative URLs are joined against the page URL before use.
SHOULD_JOIN_URL = True
| default_parser = 'lxml'
allowed_content_types = ['text/html', 'image/']
finder_pipeline = ('haul.finders.pipeline.html.img_src_finder', 'haul.finders.pipeline.html.a_href_finder', 'haul.finders.pipeline.css.background_image_finder')
extender_pipeline = ('haul.extenders.pipeline.google.blogspot_s1600_extender', 'haul.extenders.pipeline.google.ggpht_s1600_extender', 'haul.extenders.pipeline.google.googleusercontent_s1600_extender', 'haul.extenders.pipeline.pinterest.original_image_extender', 'haul.extenders.pipeline.wordpress.original_image_extender', 'haul.extenders.pipeline.tumblr.media_1280_extender', 'haul.extenders.pipeline.tumblr.avatar_128_extender')
should_join_url = True |
# pythran export _brief_loop(float64[:,:], uint8[:,:],
# intp[:,:], int[:,:], int[:,:])
def _brief_loop(image, descriptors, keypoints, pos0, pos1):
for k in range(len(keypoints)):
kr, kc = keypoints[k]
for p in range(len(pos0)):
pr0, pc0 = pos0[p]
pr1, pc1 = pos1[p]
descriptors[k, p] = (image[kr + pr0, kc + pc0]
< image[kr + pr1, kc + pc1])
| def _brief_loop(image, descriptors, keypoints, pos0, pos1):
for k in range(len(keypoints)):
(kr, kc) = keypoints[k]
for p in range(len(pos0)):
(pr0, pc0) = pos0[p]
(pr1, pc1) = pos1[p]
descriptors[k, p] = image[kr + pr0, kc + pc0] < image[kr + pr1, kc + pc1] |
# Answer-key tables: questionnaire items 1-60 map to a factor letter; the
# items cycle through the four letters of each quadrant in steps of 4
# (presumably an MBTI-style answer sheet — confirm against the scorer).
# The nested comprehensions reproduce the original insertion order:
# all items for one letter, then the next letter.
factors = {
    1: {item: letter
        for letter, start in (("I", 1), ("S", 2), ("T", 3), ("P", 4))
        for item in range(start, 61, 4)},
    2: {item: letter
        for letter, start in (("E", 1), ("N", 2), ("F", 3), ("J", 4))
        for item in range(start, 61, 4)},
}
factors_names = ('E', 'I', 'S', 'N', 'F', 'T', 'P', 'J', 'report')
factors_group = (('E', 'I'), ('S', 'N'), ('F', 'T'), ('P', 'J')) | factors = {1: {1: 'I', 5: 'I', 9: 'I', 13: 'I', 17: 'I', 21: 'I', 25: 'I', 29: 'I', 33: 'I', 37: 'I', 41: 'I', 45: 'I', 49: 'I', 53: 'I', 57: 'I', 2: 'S', 6: 'S', 10: 'S', 14: 'S', 18: 'S', 22: 'S', 26: 'S', 30: 'S', 34: 'S', 38: 'S', 42: 'S', 46: 'S', 50: 'S', 54: 'S', 58: 'S', 3: 'T', 7: 'T', 11: 'T', 15: 'T', 19: 'T', 23: 'T', 27: 'T', 31: 'T', 35: 'T', 39: 'T', 43: 'T', 47: 'T', 51: 'T', 55: 'T', 59: 'T', 4: 'P', 8: 'P', 12: 'P', 16: 'P', 20: 'P', 24: 'P', 28: 'P', 32: 'P', 36: 'P', 40: 'P', 44: 'P', 48: 'P', 52: 'P', 56: 'P', 60: 'P'}, 2: {1: 'E', 5: 'E', 9: 'E', 13: 'E', 17: 'E', 21: 'E', 25: 'E', 29: 'E', 33: 'E', 37: 'E', 41: 'E', 45: 'E', 49: 'E', 53: 'E', 57: 'E', 2: 'N', 6: 'N', 10: 'N', 14: 'N', 18: 'N', 22: 'N', 26: 'N', 30: 'N', 34: 'N', 38: 'N', 42: 'N', 46: 'N', 50: 'N', 54: 'N', 58: 'N', 3: 'F', 7: 'F', 11: 'F', 15: 'F', 19: 'F', 23: 'F', 27: 'F', 31: 'F', 35: 'F', 39: 'F', 43: 'F', 47: 'F', 51: 'F', 55: 'F', 59: 'F', 4: 'J', 8: 'J', 12: 'J', 16: 'J', 20: 'J', 24: 'J', 28: 'J', 32: 'J', 36: 'J', 40: 'J', 44: 'J', 48: 'J', 52: 'J', 56: 'J', 60: 'J'}}
factors_names = ('E', 'I', 'S', 'N', 'F', 'T', 'P', 'J', 'report')
factors_group = (('E', 'I'), ('S', 'N'), ('F', 'T'), ('P', 'J')) |
wkidInfo = {
'4326':{'type':'gcs', 'path':'World/WGS 1984.prj'},
'102100':{'type':'pcs', 'path':r'World/WGS 1984 Web Mercator (auxiliary sphere).prj'},
'3857' : {'type':'pcs', 'path':r'World/WGS 1984 Web Mercator (auxiliary sphere).prj'}
} | wkid_info = {'4326': {'type': 'gcs', 'path': 'World/WGS 1984.prj'}, '102100': {'type': 'pcs', 'path': 'World/WGS 1984 Web Mercator (auxiliary sphere).prj'}, '3857': {'type': 'pcs', 'path': 'World/WGS 1984 Web Mercator (auxiliary sphere).prj'}} |
# Plugin entry point: the analysis host calls GDA_MAIN with a gda_obj that
# exposes the APK query API (GetPermission, log, ...).
def GDA_MAIN(gda_obj):
    """Build the APK permission report, log it, and save it to out.txt.

    Returns 0 to signal success to the host.
    """
    per = 'the apk permission:\n'
    per += gda_obj.GetPermission()
    gda_obj.log(per)
    # Bug fix: use a context manager so the file handle is closed even if
    # write() raises (the original open/write/close leaked on error).
    with open('out.txt', 'w') as tofile:
        tofile.write(per)
    return 0
| def gda_main(gda_obj):
per = 'the apk permission:\n'
per += gda_obj.GetPermission()
gda_obj.log(per)
tofile = open('out.txt', 'w')
tofile.write(per)
tofile.close()
return 0 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'control_bar',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'profile_browser_proxy',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'create_profile',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior',
'profile_browser_proxy',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'error_dialog',
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'import_supervised_user',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'profile_browser_proxy',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'profile_browser_proxy',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'supervised_user_create_confirm',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
'profile_browser_proxy',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'supervised_user_learn_more',
'dependencies': [
'profile_browser_proxy',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'user_manager_pages',
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
{
'target_name': 'user_manager_tutorial',
'dependencies': [
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior',
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util',
],
'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'],
},
],
}
| {'targets': [{'target_name': 'control_bar', 'dependencies': ['<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior', 'profile_browser_proxy'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'create_profile', 'dependencies': ['<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:web_ui_listener_behavior', 'profile_browser_proxy'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'error_dialog', 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'import_supervised_user', 'dependencies': ['<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior', 'profile_browser_proxy'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'profile_browser_proxy', 'dependencies': ['<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'supervised_user_create_confirm', 'dependencies': ['<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior', '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util', 'profile_browser_proxy'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'supervised_user_learn_more', 'dependencies': ['profile_browser_proxy'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'user_manager_pages', 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}, {'target_name': 'user_manager_tutorial', 'dependencies': ['<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:i18n_behavior', 
'<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:util'], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']}]} |
# Part 1 of the Python Review lab.
def hello_world():
    """Print the canonical greeting (removed the redundant trailing pass)."""
    print("hello world")
def greet_by_name(name):
    """Prompt for a name on stdin and echo it back.

    Bug fix: the original assigned the ``input`` function itself (missing
    call parentheses) and then executed a bare ``print`` statement that
    printed nothing. The *name* parameter is kept for interface
    compatibility but, as before, is overwritten by the typed value.
    """
    print("please enter your name")
    name = input()
    print(name)
def encode(x):
    """Encode *x* for the lab exercise.

    TODO(lab): not implemented yet — currently a no-op stub.
    """
    pass
def decode(coded_message):
pass | def hello_world():
print('hello world')
pass
def greet_by_name(name):
print('please enter your name')
name = input
print
pass
def encode(x):
pass
def decode(coded_message):
pass |
pizzas = ["triple carne", "extra queso", "suprema"]
friend_pizzas = ["triple carne", "extra queso", "suprema"]
pizzas.append("baggel")
friend_pizzas.append("hawaiana")
print("Mis pizzas favoritas son:")
for i in range(0,len(pizzas)):
print(pizzas[i])
print()
print("Las pizzas favoritas de mi amigo son:")
for i in range(0,len(friend_pizzas)):
print(friend_pizzas[i])
| pizzas = ['triple carne', 'extra queso', 'suprema']
friend_pizzas = ['triple carne', 'extra queso', 'suprema']
pizzas.append('baggel')
friend_pizzas.append('hawaiana')
print('Mis pizzas favoritas son:')
for i in range(0, len(pizzas)):
print(pizzas[i])
print()
print('Las pizzas favoritas de mi amigo son:')
for i in range(0, len(friend_pizzas)):
print(friend_pizzas[i]) |
# -*- coding: utf-8 -*-
# Template for the GitHub release archive URL; format with a version tag.
GITHUB_STRING = 'https://github.com/earaujoassis/watchman/archive/v{0}.zip'
# Package name.
NAME = "agents"
# Package version.
VERSION = "0.2.4"
| github_string = 'https://github.com/earaujoassis/watchman/archive/v{0}.zip'
name = 'agents'
version = '0.2.4' |
def first(arr, low, high):
    """Index of the first 1 in arr[low..high], or -1 if there is none.

    Assumes *arr* is a 0/1 sequence sorted so all 0s precede all 1s — the
    binary search below is only valid under that ordering.
    """
    # Iterative form of the original recursive binary search.
    while low <= high:
        mid = (low + high) // 2
        if arr[mid] == 1 and (mid == 0 or arr[mid - 1] == 0):
            return mid
        if arr[mid] == 0:
            low = mid + 1
        else:
            high = mid - 1
    return -1
def row_with_max_ones(mat):
    """Return the index of the row of *mat* containing the most 1s.

    Each row must be a 0/1 sequence sorted with all 0s before all 1s
    (required by first()'s binary search). Ties keep the earlier row;
    an all-zero matrix yields 0.
    """
    num_cols = len(mat[0])
    best_row = 0
    best_count = -1
    for row_idx, row in enumerate(mat):
        start = first(row, 0, num_cols - 1)
        if start != -1 and num_cols - start > best_count:
            best_count = num_cols - start
            best_row = row_idx
    return best_row
| def first(arr, low, high):
if high >= low:
mid = low + (high - low) // 2
if (mid == 0 or arr[mid - 1] == 0) and arr[mid] == 1:
return mid
elif arr[mid] == 0:
return first(arr, mid + 1, high)
else:
return first(arr, low, mid - 1)
return -1
def row_with_max_ones(mat):
r = len(mat)
c = len(mat[0])
max_row_index = 0
max_ = -1
for i in range(r):
index = first(mat[i], 0, c - 1)
if index != -1 and c - index > max_:
max_ = c - index
max_row_index = i
return max_row_index |
def get_path_components(path):
    """Split *path* into components, resolving '.' and '..' entries.

    Empty components and '.' are dropped; '..' pops the previous component.
    Raises ValueError if '..' would climb above the root.
    """
    stack = []
    for part in path.strip("/").split("/"):
        if not part or part == ".":
            continue
        if part == "..":
            if not stack:
                raise ValueError("URL tried to traverse above root")
            stack.pop()
        else:
            stack.append(part)
    return stack
| def get_path_components(path):
path = path.strip('/').split('/')
path = [c for c in path if c]
normalized = []
for comp in path:
if comp == '.':
continue
elif comp == '..':
if normalized:
normalized.pop()
else:
raise value_error('URL tried to traverse above root')
else:
normalized.append(comp)
return normalized |
# Learning Python string methods
# https://docs.python.org/3/library/stdtypes.html#string-methods
nama = "muhammad aris septanugroho"
print(nama)
print(nama.upper()) # all uppercase
print(nama.capitalize()) # capitalize only the first character
print(nama.title()) # capitalize each word
print(nama.split(" ")) # split into a list on spaces
| nama = 'muhammad aris septanugroho'
print(nama)
print(nama.upper())
print(nama.capitalize())
print(nama.title())
print(nama.split(' ')) |
def product_left_recursive(alist, result=None):
    """Recursively extend *result* with running prefix products of *alist*.

    Each step appends result[-1] * alist[0] and recurses on the tail;
    the final accumulated list is returned.

    Bug fix: calling without *result* previously crashed on result[-1]
    (default was None). A fresh [1] seed is now created per call — a new
    list each time, so there is no shared mutable default.
    """
    if result is None:
        result = [1]
    if not alist:
        return result
    result.append(result[-1] * alist[0])
    return product_left_recursive(alist[1:], result)
def product_left(alist):
    """Prefix products: out[i] == product of alist[:i] (so out[0] == 1)."""
    new_list = [1]
    for value in alist[:-1]:
        new_list.append(new_list[-1] * value)
    return new_list
def product_right(alist):
    """Suffix products: out[i] == product of alist[i+1:] (so out[-1] == 1).

    Bug fix: the original read alist[index - 1], which at index 0 wraps to
    the LAST element and multiplies the wrong factors; it also returned the
    products in reversed (right-to-left) order. The result is now aligned
    with the input indices.
    """
    reversed_products = [1]
    for index in range(len(alist) - 2, -1, -1):
        reversed_products.append(reversed_products[-1] * alist[index + 1])
    reversed_products.reverse()
    return reversed_products
def product_of_array_of_array(alist):
    """out[i] == product of every element of *alist* except alist[i].

    Combines the prefix and suffix product tables; fixed together with
    product_right so the two lists are index-aligned.
    """
    left_list = product_left(alist)
    right_list = product_right(alist)
    return [left_list[i] * right_list[i] for i in range(len(alist))]
def product_recursive(alist):
    """Product of all elements of *alist*; 1 for an empty list."""
    total = 1
    for value in alist:
        total *= value
    return total
def paa(alist):
    """Products of all elements except self, one product_recursive per slot.

    Reference (O(n^2)) implementation: for each index, multiply together
    the list with that element removed.
    """
    return [product_recursive(alist[:i] + alist[i + 1:])
            for i in range(len(alist))]
# Demo driver: exercise the prefix/suffix product helpers on a sample list.
alist = [1, 2, 3, 4, 5, 6]
rlist = alist[::-1]
print(alist)
print(product_left(alist))
# The recursive variant is seeded with [1] and the list's tail so its
# output lines up with product_left's.
print(product_left_recursive(alist[1:], [1]))
print(product_left_recursive(rlist[1:], [1]))
print(product_right(alist))
# NOTE(review): product_right_recursive is not defined anywhere visible,
# which is presumably why these calls are commented out.
#print(product_right_recursive(alist, [1]))
#print(product_of_array_of_array(alist))
#print(paa(alist))
| def product_left_recursive(alist, result=None):
if alist == []:
return result
g = result[-1] * alist[0]
result.append(g)
return product_left_recursive(alist[1:], result)
def product_left(alist):
new_list = [1]
for index in range(1, len(alist)):
value = new_list[-1] * alist[index - 1]
new_list.append(value)
return new_list
def product_right(alist):
new_list = [1]
for index in range(len(alist) - 2, -1, -1):
value = new_list[-1] * alist[index - 1]
new_list.append(value)
return new_list
def product_of_array_of_array(alist):
left_list = product_left(alist)
right_list = product_right(alist)
new_list = []
for (index, item) in enumerate(alist):
value = left_list[index] * right_list[index]
new_list.append(value)
return new_list
def product_recursive(alist):
if alist == []:
return 1
return alist[0] * product_recursive(alist[1:])
def paa(alist):
new_list = []
for (index, item) in enumerate(alist):
current_list = alist[:index] + alist[index + 1:]
value = product_recursive(current_list)
new_list.append(value)
return new_list
alist = [1, 2, 3, 4, 5, 6]
rlist = alist[::-1]
print(alist)
print(product_left(alist))
print(product_left_recursive(alist[1:], [1]))
print(product_left_recursive(rlist[1:], [1]))
print(product_right(alist)) |
fileName = ["nohup_2", "nohup_1", "nohup_4", "nohup"]
Fo = open("new nohup", "w")
for fil in fileName:
lineNum = 0
with open(fil) as F:
for line in F:
if lineNum % 10 == 0:
Fo.write(",\t".join(line.split()))
Fo.write("\n")
lineNum += 1
Fo.write("e\n") | file_name = ['nohup_2', 'nohup_1', 'nohup_4', 'nohup']
fo = open('new nohup', 'w')
for fil in fileName:
line_num = 0
with open(fil) as f:
for line in F:
if lineNum % 10 == 0:
Fo.write(',\t'.join(line.split()))
Fo.write('\n')
line_num += 1
Fo.write('e\n') |
# Python - 3.6.0
# Codewars-style checks: both `test` and the solution `last` are supplied
# by the kata runner, not defined in this file.
test.assert_equals(last([1, 2, 3, 4, 5]), 5)
test.assert_equals(last('abcde'), 'e')
test.assert_equals(last(1, 'b', 3, 'd', 5), 5)
| test.assert_equals(last([1, 2, 3, 4, 5]), 5)
test.assert_equals(last('abcde'), 'e')
test.assert_equals(last(1, 'b', 3, 'd', 5), 5) |
class Student:
    """Holds two marks and provides a small variadic addition helper."""

    def __init__(self, m1, m2):
        self.m1 = m1  # first mark
        self.m2 = m2  # second mark

    def sum(self, a=None, b=None, c=None):
        """Add up to three supplied values.

        Returns a+b+c, a+b, or just a depending on how many arguments were
        given (mirroring the original contract, including returning None
        when called with no arguments).
        """
        # Idiom fix: compare against None with `is not`, not `!=`.
        if a is not None and b is not None and c is not None:
            return a + b + c
        if a is not None and b is not None:
            return a + b
        return a
# Demo: create a Student with two marks.
s1 = Student(10,20)
print(s1.sum(2,4)) | class Student:
def __init__(self, m1, m2):
self.m1 = m1
self.m2 = m2
def sum(self, a=None, b=None, c=None):
addition = 0
if a != None and b != None and (c != None):
addition = a + b + c
elif a != None and b != None:
addition = a + b
else:
addition = a
return addition
s1 = student(10, 20)
print(s1.sum(2, 4)) |
# Interactive driver: read a base set A, then apply a sequence of in-place
# set mutations (update / intersection_update / ...) parsed from stdin.
a = int(input("Enter number of elements in set A "))  # declared size of A (not validated)
A = set(map(int, input("# Spaced Separated list of elements of A ").split()))
n = int(input("Number of sets "))  # number of operations to apply
for i in range(n):
    # Bug fix: `"...set" + i` raised TypeError (str + int); wrap i in str().
    p = input("Enter the operation and number of elements in set" + str(i)).split()
    s2 = set(map(int, input("Enter space separated list of elements for operation #" + p[1] + " ").split()))
    if p[0] == "intersection_update":
        A.intersection_update(s2)
    elif p[0] == "update":
        A.update(s2)
    elif p[0] == "symmetric_difference_update":
        A.symmetric_difference_update(s2)
    elif p[0] == "difference_update":
        A.difference_update(s2)
print(sum(A)) | a = int(input('Enter number of elements in set A '))
a = set(map(int, input('# Spaced Separated list of elements of A ').split()))
n = int(input('Number of sets '))
for i in range(n):
p = input('Enter the operation and number of elements in set' + i).split()
s2 = set(map(int, input('Enter space separated list of elements for operation #' + p[1] + ' ').split()))
if p[0] == 'intersection_update':
A.intersection_update(s2)
elif p[0] == 'update':
A.update(s2)
elif p[0] == 'symmetric_difference_update':
A.symmetric_difference_update(s2)
elif p[0] == 'difference_update':
A.difference_update(s2)
print(sum(A)) |
class Solution:
    def sqrt(self, x):
        """Integer square root: the largest n with n*n <= x (binary search).

        Bug fixes vs. the original:
        - `(high + low) / 2` produced floats under Python 3, so the search
          drifted onto non-integer midpoints and returned fractional values;
          floor division keeps everything integral.
        - the half-open `while high > low` loop never examined the final
          candidate (e.g. sqrt(1) returned 0); a closed-interval search
          over [low, high] is used instead.
        """
        low, high = 0, 65536  # 65536**2 > 2**31 - 1, so this covers 32-bit x
        best = 0              # best floor candidate found so far
        while low <= high:
            mid = (low + high) // 2
            sqr = mid * mid
            if sqr > x:
                high = mid - 1
            elif sqr == x:
                return mid
            else:
                best = mid
                low = mid + 1
        return best
| class Solution:
def sqrt(self, x):
low = 0
high = 65536
best = 0
while high > low:
mid = (high + low) / 2
sqr = mid ** 2
if sqr > x:
high = mid
elif sqr == x:
return mid
else:
best = mid
low = mid + 1
return best |
def palindrome(word, ind):
    """Report whether `word` is a palindrome, scanning outside-in from `ind`."""
    if word == word[::-1]:
        return f"{word} is a palindrome"
    # Not a palindrome, so a mismatched pair must exist at or after `ind`.
    mirror = len(word) - 1 - ind
    while word[ind] == word[mirror]:
        ind += 1
        mirror -= 1
    return f"{word} is not a palindrome"
# Smoke-test both branches: a palindrome and a non-palindrome.
print(palindrome("abcba", 0))
print(palindrome("peter", 0))
| def palindrome(word, ind):
if word == word[::-1]:
return f'{word} is a palindrome'
if word[ind] != word[len(word) - 1 - ind]:
return f'{word} is not a palindrome'
return palindrome(word, ind + 1)
print(palindrome('abcba', 0))
print(palindrome('peter', 0)) |
# NETWORK
LOCALHOST = "127.0.0.1"
PI_ADDRESS = "192.168.0.1"  # presumably the Raspberry Pi controller on the LAN — confirm
PORT = 5000
# STATE — control-loop tuning constants.  Units are not stated in source;
# the linear values look like millimetres / encoder counts and the *_ANGLE
# values degrees — TODO verify against the motor driver.
MOVEMENT_MARGIN = 2
KICK_TIMEOUT = 1
LAST_POSITION = -1  # sentinel: no position recorded yet, presumably
PLAYER_LENGTH = 2
NOISE_THRESHOLD = 3
MIN_VELOCITY_THRESHOLD = 300
OPEN_PREP_RANGE = -30
BLOCK_PREP_RANGE = 100
OPEN_KICK_RANGE = -20
BLOCK_KICK_RANGE = 60
KICK_ANGLE = 55
PREP_ANGLE = -30
BLOCK_ANGLE = 0
OPEN_ANGLE = -90
SPEED_THRESHOLD = 3000
MIN_PLAYER_OFFSET = 40
MAX_PLAYER_OFFSET = 640
IDLE_RANGE = 600
RECOVERY_LINEAR = 80
RECOVERY_ANGLE = -57
# PHYSICAL DIMENSIONS — per-rod geometry; distances presumably share one
# unit consistent with TABLE's 1200 length — confirm.
GOAL_ROD = {"maxActuation":228, "playerSpacing":182, "rodX":1125, "numPlayers":3}
TWO_ROD = {"maxActuation":356, "playerSpacing":237, "rodX":975, "numPlayers":2}
FIVE_ROD = {"maxActuation":115, "playerSpacing":120, "rodX":675, "numPlayers":5}
THREE_ROD = {"maxActuation":181, "playerSpacing":207, "rodX":375, "numPlayers":3}
TABLE = {"robot_goalX":1200, "robot_goalY":350, "player_goalX":0, "player_goalY":350, "goalWidth":200, "width":685, "length":1200}
| localhost = '127.0.0.1'
pi_address = '192.168.0.1'
port = 5000
movement_margin = 2
kick_timeout = 1
last_position = -1
player_length = 2
noise_threshold = 3
min_velocity_threshold = 300
open_prep_range = -30
block_prep_range = 100
open_kick_range = -20
block_kick_range = 60
kick_angle = 55
prep_angle = -30
block_angle = 0
open_angle = -90
speed_threshold = 3000
min_player_offset = 40
max_player_offset = 640
idle_range = 600
recovery_linear = 80
recovery_angle = -57
goal_rod = {'maxActuation': 228, 'playerSpacing': 182, 'rodX': 1125, 'numPlayers': 3}
two_rod = {'maxActuation': 356, 'playerSpacing': 237, 'rodX': 975, 'numPlayers': 2}
five_rod = {'maxActuation': 115, 'playerSpacing': 120, 'rodX': 675, 'numPlayers': 5}
three_rod = {'maxActuation': 181, 'playerSpacing': 207, 'rodX': 375, 'numPlayers': 3}
table = {'robot_goalX': 1200, 'robot_goalY': 350, 'player_goalX': 0, 'player_goalY': 350, 'goalWidth': 200, 'width': 685, 'length': 1200} |
#! /usr/bin/env python3.6
# Demo: numeric conversion and type checking on a string literal.
# a = 'str'  # with a non-numeric value, float()/int() below would raise ValueError
a = '32'
print(f'float(a) = {float(a)}')
print(f'int(a) = {int(a)}')
if(isinstance(a, str)):
    print("Yes, it is string.")
else:
    print("No, it is not string.")
| a = '32'
print(f'float(a) = {float(a)}')
print(f'int(a) = {int(a)}')
if isinstance(a, str):
print('Yes, it is string.')
else:
print('No, it is not string.') |
class TreeNode:
    """A binary-tree node: a payload value plus optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
def is_valid_BST(node, min, max):
    """Check the BST invariant: every value in `node`'s subtree lies strictly
    inside the open interval (min, max); pass None for an unbounded side.

    (`min`/`max` shadow the builtins, but renaming them would break keyword
    callers, so the names are kept.)
    """
    if node is None:  # `is None` instead of `== None`: identity is the idiom
        return True
    if (min is not None and node.val <= min) or (max is not None and max <= node.val):
        return False
    # Descend, tightening the admissible interval on each side.
    return is_valid_BST(node.left, min, node.val) and is_valid_BST(node.right, node.val, max)
| class Treenode:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
def is_valid_bst(node, min, max):
if node == None:
return True
if min is not None and node.val <= min or (max is not None and max <= node.val):
return False
return is_valid_bst(node.left, min, node.val) and is_valid_bst(node.right, node.val, max) |
class lagrange(object):
    """Incrementally extrapolates a function's value at `eval_x` from a
    growing set of samples, via a Neville-style recurrence: each new point
    raises the order of the estimate by one.
    """

    def __init__(self, eval_x=0):
        self._eval_x = eval_x
        # Current tableau diagonal: (estimate, x of the oldest sample used),
        # lowest order first.
        self._extrapolations = []

    def add_point(self, x, y):
        """Fold sample (x, y) into the tableau; return the updated estimate."""
        updated = [(y, x)]
        for prev_val, x_old in self._extrapolations:
            num = (self._eval_x - x) * prev_val + (x_old - self._eval_x) * updated[-1][0]
            updated.append((num / (x_old - x), x_old))
        self._extrapolations = updated
        return self.estimate

    @property
    def estimate(self):
        """Highest-order extrapolated value currently available."""
        return self._extrapolations[-1][0]
if __name__ == "__main__":
    # Extrapolate toward x = 0 from samples approaching it; successive
    # estimates should converge as the sample points get closer to 0.
    interpolator = lagrange(eval_x = 0)
    print(interpolator.add_point(1,2))
    print(interpolator.add_point(0.5,3))
    print(interpolator.add_point(0.25,3.75))
    print(interpolator.add_point(0.125,4.25))
    print(interpolator.add_point(0.0625,4.5))
| class Lagrange(object):
def __init__(self, eval_x=0):
self._eval_x = eval_x
self._extrapolations = []
def add_point(self, x, y):
new_extraps = [(y, x)]
for (past_extrap, x_old) in self._extrapolations:
new_val = ((self._eval_x - x) * past_extrap + (x_old - self._eval_x) * new_extraps[-1][0]) / (x_old - x)
new_extraps.append((new_val, x_old))
self._extrapolations = new_extraps
return self.estimate
@property
def estimate(self):
return self._extrapolations[-1][0]
if __name__ == '__main__':
interpolator = lagrange(eval_x=0)
print(interpolator.add_point(1, 2))
print(interpolator.add_point(0.5, 3))
print(interpolator.add_point(0.25, 3.75))
print(interpolator.add_point(0.125, 4.25))
print(interpolator.add_point(0.0625, 4.5)) |
# -*- coding: utf-8 -*-
# Package metadata for the `webmap` Django app.
__version__ = '1.0.0'
# Pre-Django-3.2 mechanism pointing at the app's AppConfig class.
default_app_config = 'webmap.apps.WebmapConfig'
| __version__ = '1.0.0'
default_app_config = 'webmap.apps.WebmapConfig' |
#Get a string which is n (non-negative integer) copies of a given string
#
#function to display the string
def dispfunc(iteration, text=None):
    """Print `text` repeated `iteration` times.

    `text` defaults to the module-level global `entry` (preserving the
    original parameter-less behaviour); passing it explicitly makes the
    function reusable and testable.
    """
    if text is None:
        text = entry  # original behaviour: read the globally-entered string
    # str * n replaces the original quadratic += concatenation loop.
    print(text * iteration)
#
# Interactive driver: read the string and repetition count, then display it.
entry = str(input("\nenter a string : "))
displaynumber = int(input("how many times must it be displayed? : "))
dispfunc(displaynumber)
# experimental
feedback = str(input("\nwould you try it for the stringlength? : "))
# Bug fix: `feedback == "yes" or "Yes" or ...` was always truthy because
# each bare string is its own truthy operand; test membership instead.
if feedback in ("yes", "Yes", "YES", "yeah"):
    dispfunc(len(entry))
#program ends here | def dispfunc(iteration):
output = str('')
for i in range(iteration):
output = output + entry
print(output)
entry = str(input('\nenter a string : '))
displaynumber = int(input('how many times must it be displayed? : '))
dispfunc(displaynumber)
feedback = str(input('\nwould you try it for the stringlength? : '))
if feedback == 'yes' or 'Yes' or 'YES' or 'yeah':
dispfunc(len(entry)) |
# Read a distance and report how many moves cover it when each move spans
# at most 5 spaces: ceil(spaces / 5), or 0 for non-positive input.
spaces = int(input())
steps = 0
if spaces > 0:
    # The original if/elif ladder always subtracted min(spaces, 5) per pass,
    # which is exactly ceiling division by 5.
    steps = -(-spaces // 5)
print(str(steps))
| spaces = int(input())
steps = 0
while spaces > 0:
if spaces >= 5:
spaces -= 5
steps += 1
elif spaces >= 4:
spaces -= 4
steps += 1
elif spaces >= 3:
spaces -= 3
steps += 1
elif spaces >= 2:
spaces -= 2
steps += 1
elif spaces >= 1:
spaces -= 1
steps += 1
print(str(steps)) |
# Straightforward implementation of the Singleton Pattern
class Logger(object):
    """Classic singleton: __new__ hands every caller the same instance."""

    _instance = None  # the one shared instance, created lazily

    def __new__(cls):
        if cls._instance is None:
            print('Creating the object')
            # Delegate actual allocation to object.__new__ exactly once.
            cls._instance = super(Logger, cls).__new__(cls)
        return cls._instance
# Demo: both constructions yield the identical object.
log1 = Logger()
print(log1)
log2 = Logger()
print(log2)
print('Are they the same object?', log1 is log2)
| class Logger(object):
_instance = None
def __new__(cls):
if cls._instance is None:
print('Creating the object')
cls._instance = super(Logger, cls).__new__(cls)
return cls._instance
log1 = logger()
print(log1)
log2 = logger()
print(log2)
print('Are they the same object?', log1 is log2) |
load("@rules_pkg//:providers.bzl", "PackageFilesInfo", "PackageSymlinkInfo", "PackageFilegroupInfo")
# Map a file to its path inside the binary's runfiles tree:
# - external-repo files ("../repo/...") are re-rooted under runfiles_dir
#   (NOTE(review): replace() rewrites every ".." occurrence, not just the
#   leading one — confirm no path can legitimately contain ".." later);
# - main-repo files (empty owner workspace) get runfiles_dir/<workspace>/;
# - anything else is returned unchanged.
def _runfile_path(ctx, file, runfiles_dir):
    path = file.short_path
    if path.startswith(".."):
        return path.replace("..", runfiles_dir)
    if not file.owner.workspace_name:
        return "/".join([runfiles_dir, ctx.workspace_name, path])
    return path
# Expand a binary's runfiles into rules_pkg providers, so the binary can be
# packaged together with everything it needs at runtime.
def _runfiles_impl(ctx):
    default = ctx.attr.binary[DefaultInfo]
    executable = default.files_to_run.executable
    manifest = default.files_to_run.runfiles_manifest
    # The manifest sits next to the runfiles tree; stripping its basename
    # (and the trailing "/") yields the <target>.runfiles directory path.
    runfiles_dir = manifest.short_path.replace(manifest.basename, "")[:-1]
    files = depset(transitive = [default.files, default.default_runfiles.files])
    # Destination-path -> file map for the package; the executable keeps its
    # own short_path so it stays invokable next to its runfiles tree.
    fileMap = {
        executable.short_path: executable
    }
    for file in files.to_list():
        fileMap[_runfile_path(ctx, file, runfiles_dir)] = file
    files = depset([executable], transitive = [files])
    symlinks = []
    # Recreate the runfiles tree's root symlinks inside the package.
    for symlink in default.data_runfiles.root_symlinks.to_list():
        info = PackageSymlinkInfo(
            source = "/%s" % _runfile_path(ctx, symlink.target_file, runfiles_dir),
            destination = "/%s" % "/".join([runfiles_dir, symlink.path]),
            attributes = { "mode": "0777" }
        )
        symlinks.append([info, ctx.label])
    return [
        PackageFilegroupInfo(
            pkg_dirs = [],
            pkg_files = [
                [PackageFilesInfo(
                    dest_src_map = fileMap,
                    attributes = {},
                ), ctx.label]
            ],
            pkg_symlinks = symlinks,
        ),
        DefaultInfo(files = files),
    ]
# Public rule: given an executable `binary`, expose its expanded runfiles
# tree as rules_pkg package content (providers built by _runfiles_impl).
expand_runfiles = rule(
    implementation = _runfiles_impl,
    attrs = {
        "binary": attr.label()
    }
) | load('@rules_pkg//:providers.bzl', 'PackageFilesInfo', 'PackageSymlinkInfo', 'PackageFilegroupInfo')
def _runfile_path(ctx, file, runfiles_dir):
path = file.short_path
if path.startswith('..'):
return path.replace('..', runfiles_dir)
if not file.owner.workspace_name:
return '/'.join([runfiles_dir, ctx.workspace_name, path])
return path
def _runfiles_impl(ctx):
default = ctx.attr.binary[DefaultInfo]
executable = default.files_to_run.executable
manifest = default.files_to_run.runfiles_manifest
runfiles_dir = manifest.short_path.replace(manifest.basename, '')[:-1]
files = depset(transitive=[default.files, default.default_runfiles.files])
file_map = {executable.short_path: executable}
for file in files.to_list():
fileMap[_runfile_path(ctx, file, runfiles_dir)] = file
files = depset([executable], transitive=[files])
symlinks = []
for symlink in default.data_runfiles.root_symlinks.to_list():
info = package_symlink_info(source='/%s' % _runfile_path(ctx, symlink.target_file, runfiles_dir), destination='/%s' % '/'.join([runfiles_dir, symlink.path]), attributes={'mode': '0777'})
symlinks.append([info, ctx.label])
return [package_filegroup_info(pkg_dirs=[], pkg_files=[[package_files_info(dest_src_map=fileMap, attributes={}), ctx.label]], pkg_symlinks=symlinks), default_info(files=files)]
expand_runfiles = rule(implementation=_runfiles_impl, attrs={'binary': attr.label()}) |
# You can also nest for loops with
# while loops. Check it out!
for i in range(4):
print("For loop: " + str(i))
x = i
while x >= 0:
print(" While loop: " + str(x))
x = x - 1
| for i in range(4):
print('For loop: ' + str(i))
x = i
while x >= 0:
print(' While loop: ' + str(x))
x = x - 1 |
# a list of integers
student_score= [99, 88, 60]
# print the whole list
print(student_score)
# all integers in a range
print(list(range(1,10)))
# same range, taking every second integer
print(list(range(1,10,2)))
# string transformations: upper-case and title-case copies of x
x = "hello"
y = x.upper()
z = x.title()
print(x, y, z) | student_score = [99, 88, 60]
print(student_score)
print(list(range(1, 10)))
print(list(range(1, 10, 2)))
x = 'hello'
y = x.upper()
z = x.title()
print(x, y, z) |
def harmonic(a, b):
    """Return the harmonic mean of a and b: 2ab / (a + b).

    Raises ZeroDivisionError when a + b == 0 (e.g. a == b == 0).
    """
    return (2 * a * b) / (a + b)
# Read two space-separated integers and print their harmonic mean.
a, b = map(int, input().split())
print(harmonic(a, b))
| def harmonic(a, b):
return 2 * a * b / (a + b)
(a, b) = map(int, input().split())
print(harmonic(a, b)) |
# from recipes.decor.tests import test_cases as tcx
# pylint: disable-all
def test_expose_decor():
    # Exercise expose.show on a function with every parameter kind
    # (positional, default, *args, keyword-only, **kwargs).
    # NOTE(review): a second function with this exact name is defined just
    # below; at import time it silently replaces this one, so this test
    # never runs under pytest — one of the two should be renamed.
    @expose.show
    def foo(a, b=1, *args, c=2, **kws):
        pass
    foo(88, 12, 11, c=4, y=1)
def test_expose_decor():
    # Exercise expose.args with the same call shape as the previous test.
    # NOTE(review): duplicate name — this definition shadows the earlier
    # test_expose_decor; rename to e.g. test_expose_args so both run.
    @expose.args
    def foo(a, b=1, *args, c=2, **kws):
        pass
    foo(88, 12, 11, c=4, y=1)
# # print(i)
# # print(sig)
# # print(ba)
# ba.apply_defaults()
# # print(ba)
# print(f'{ba!s}'.replace('<BoundArguments ', fun.__qualname__).rstrip('>'))
# # print('*'*88)
# from IPython import embed
# embed(header="Embedded interpreter at 'test_expose.py':32")
| def test_expose_decor():
@expose.show
def foo(a, b=1, *args, c=2, **kws):
pass
foo(88, 12, 11, c=4, y=1)
def test_expose_decor():
@expose.args
def foo(a, b=1, *args, c=2, **kws):
pass
foo(88, 12, 11, c=4, y=1) |
def fill_bin_num(dataframe, feature, bin_feature, bin_size, stat_measure, min_bin=None, max_bin=None, default_val='No'):
    """Fill NaNs in `feature` using a per-bin statistic over `bin_feature` bins.

    Rows are bucketed into intervals of width `bin_size` spanning
    [min_bin, max_bin] (defaulting to the observed range of `bin_feature`);
    each missing `feature` value is replaced by its bin's mean or mode.

    Parameters
    ----------
    dataframe : pd.DataFrame  source frame (not modified; a copy is returned)
    feature : str             column whose NaNs are filled
    bin_feature : str         numeric column used for binning (e.g. Age)
    bin_size : number         bin width
    stat_measure : str        'mean' or 'mode'
    min_bin, max_bin : number optional overrides for the binning range
    default_val               fallback when a bin's mode is empty (all-NaN bin)

    Returns
    -------
    pd.DataFrame : copy of `dataframe` with NaNs in `feature` filled.

    Raises
    ------
    ValueError : for an unknown `stat_measure`.
    """
    if min_bin is None:
        min_bin = dataframe[bin_feature].min()
    if max_bin is None:
        max_bin = dataframe[bin_feature].max()
    new_dataframe = dataframe.copy()
    df_meancat = pd.DataFrame(columns=['interval', 'stat_measure'])
    bins = np.arange(min_bin, max_bin + bin_size, bin_size)
    for num_bin, subset in dataframe.groupby(pd.cut(dataframe[bin_feature], bins, include_lowest=True)):
        # Bug fix: the original compared strings with `is`, which only works
        # by accident of CPython string interning; use `==`.
        if stat_measure == 'mean':
            row = [num_bin, subset[feature].mean()]
        elif stat_measure == 'mode':
            mode_ar = subset[feature].mode().values
            # An all-NaN bin has an empty mode; fall back to default_val.
            row = [num_bin, mode_ar[0] if len(mode_ar) > 0 else default_val]
        else:
            # ValueError (subclass of Exception) stays catchable by existing
            # `except Exception` handlers.
            raise ValueError('Unknown statistical measure: ' + stat_measure)
        df_meancat.loc[len(df_meancat)] = row
    for index, row_df in dataframe[dataframe[feature].isna()].iterrows():
        for _, row_meancat in df_meancat.iterrows():
            if row_df[bin_feature] in row_meancat['interval']:
                new_dataframe.at[index, feature] = row_meancat['stat_measure']
    return new_dataframe
def make_dummy_cols(dataframe, column, prefix, drop_dummy):
    """One-hot encode `column` under `prefix`, dropping the `drop_dummy`
    level as the reference category and removing the original column."""
    encoded = pd.get_dummies(dataframe[column], prefix=prefix)
    encoded = encoded.drop(columns=prefix + '_' + drop_dummy)
    combined = pd.concat([dataframe, encoded], axis=1)
    return combined.drop(columns=column)
def cleaning(dataframe_raw):
dataframe = dataframe_raw.copy()
dataframe = dataframe.set_index('ID')
dataframe.loc[(dataframe['Age']<=13) & (dataframe['Education'].isna()), 'Education'] = 'Lower School/Kindergarten'
dataframe.loc[(dataframe['Age']==14) & (dataframe['Education'].isna()), 'Education'] = '8th Grade'
dataframe.loc[(dataframe['Age']<=17) & (dataframe['Education'].isna()), 'Education'] = '9 - 11th Grade'
dataframe.loc[(dataframe['Age']<=21) & (dataframe['Education'].isna()), 'Education'] = 'High School'
dataframe['Education'] = dataframe['Education'].fillna('Some College')
dataframe.loc[(dataframe['Age']<=20) & (dataframe['MaritalStatus'].isna()), 'MaritalStatus'] = 'NeverMarried'
dataframe.at[dataframe['MaritalStatus'].isna(), 'MaritalStatus'] = fill_bin_num(dataframe, 'MaritalStatus', 'Age', 5, 'mode',20)
dataframe = dataframe.drop(columns=['HHIncome'])
dataframe.loc[dataframe['HHIncomeMid'].isna(), 'HHIncomeMid'] = dataframe['HHIncomeMid'].mean()
dataframe.loc[dataframe['Poverty'].isna(), 'Poverty'] = dataframe['Poverty'].mean()
dataframe.loc[dataframe['HomeRooms'].isna(), 'HomeRooms'] = dataframe['HomeRooms'].mean()
dataframe.loc[dataframe['HomeOwn'].isna(), 'HomeOwn'] = dataframe['HomeOwn'].mode().values[0]
dataframe.loc[(dataframe['Work'].isna()) & (dataframe['Education'].isna()) & (dataframe['Age']<=20), 'Work'] = 'NotWorking'
dataframe.loc[dataframe['Work'].isna(), 'Work'] = dataframe['Work'].mode().values[0]
dataframe = fill_bin_num(dataframe, 'Weight', 'Age', 2, 'mean')
dataframe = dataframe.drop(columns=['HeadCirc'])
for index, row in dataframe.iterrows():
if np.isnan(row['Height']) and not np.isnan(row['Length']):
dataframe.at[index, 'Height'] = row['Length']
dataframe = fill_bin_num(dataframe, 'Height', 'Age', 2, 'mean')
dataframe = dataframe.drop(columns=['Length'])
for index, row in dataframe[dataframe['BMI'].isna()].iterrows():
dataframe.at[index, 'BMI'] = row['Weight'] / ((row['Height']/100)**2)
dataframe = dataframe.drop(columns='BMICatUnder20yrs')
dataframe = dataframe.drop(columns='BMI_WHO')
dataframe = fill_bin_num(dataframe, 'Pulse', 'Age', 10, 'mean')
dataframe.loc[(dataframe['Age']<10) & (dataframe['BPSysAve'].isna()), 'BPSysAve'] = 105
dataframe = fill_bin_num(dataframe, 'BPSysAve', 'Age', 5, 'mean', 10)
dataframe.loc[(dataframe['Age']<10) & (dataframe['BPDiaAve'].isna()), 'BPDiaAve'] = 60
dataframe = fill_bin_num(dataframe, 'BPDiaAve', 'Age', 5, 'mean', 10)
dataframe = dataframe.drop(columns='BPSys1')
dataframe = dataframe.drop(columns='BPDia1')
dataframe = dataframe.drop(columns='BPSys2')
dataframe = dataframe.drop(columns='BPDia2')
dataframe = dataframe.drop(columns='BPSys3')
dataframe = dataframe.drop(columns='BPDia3')
dataframe = dataframe.drop(columns=['Testosterone'])
dataframe.loc[(dataframe['Age']<10) & (dataframe['DirectChol'].isna()), 'DirectChol'] = 0
dataframe = fill_bin_num(dataframe, 'DirectChol', 'Age', 5, 'mean', 10)
dataframe.loc[(dataframe['Age']<10) & (dataframe['TotChol'].isna()), 'TotChol'] = 0
dataframe = fill_bin_num(dataframe, 'TotChol', 'Age', 5, 'mean', 10)
dataframe = dataframe.drop(columns=['UrineVol1'])
dataframe = dataframe.drop(columns=['UrineFlow1'])
dataframe = dataframe.drop(columns=['UrineVol2'])
dataframe = dataframe.drop(columns=['UrineFlow2'])
dataframe['Diabetes'] = dataframe['Diabetes'].fillna('No')
dataframe['DiabetesAge'] = dataframe['DiabetesAge'].fillna(0)
dataframe.loc[(dataframe['Age']<=12) & (dataframe['HealthGen'].isna()), 'HealthGen'] = 'Good'
dataframe = fill_bin_num(dataframe, 'HealthGen', 'Age', 5, 'mode', 10)
dataframe.loc[(dataframe['Age']<=12) & (dataframe['DaysMentHlthBad'].isna()), 'DaysMentHlthBad'] = 0
dataframe = fill_bin_num(dataframe, 'DaysMentHlthBad', 'Age', 5, 'mean', 10)
dataframe.loc[(dataframe['Age']<=15) & (dataframe['LittleInterest'].isna()), 'LittleInterest'] = 'None'
dataframe = fill_bin_num(dataframe, 'LittleInterest', 'Age', 5, 'mode', 15)
dataframe.loc[(dataframe['Age']<=12) & (dataframe['DaysMentHlthBad'].isna()), 'DaysMentHlthBad'] = 0
dataframe = fill_bin_num(dataframe, 'DaysMentHlthBad', 'Age', 5, 'mean', 10)
for index, row in dataframe.iterrows():
if np.isnan(row['nBabies']) and not np.isnan(row['nPregnancies']):
dataframe.at[index, 'nBabies'] = row['nPregnancies']
dataframe['nBabies'] = dataframe['nBabies'].fillna(0)
dataframe['nPregnancies'] = dataframe['nPregnancies'].fillna(0)
dataframe['Age1stBaby'] = dataframe['Age1stBaby'].fillna(0)
dataframe.loc[(dataframe['Age']==0) & (dataframe['SleepHrsNight'].isna()), 'SleepHrsNight'] = 14
dataframe.loc[(dataframe['Age']<=2) & (dataframe['SleepHrsNight'].isna()), 'SleepHrsNight'] = 12
dataframe.loc[(dataframe['Age']<=5) & (dataframe['SleepHrsNight'].isna()), 'SleepHrsNight'] = 10
dataframe.loc[(dataframe['Age']<=10) & (dataframe['SleepHrsNight'].isna()), 'SleepHrsNight'] = 9
dataframe.loc[(dataframe['Age']<=15) & (dataframe['SleepHrsNight'].isna()), 'SleepHrsNight'] = 8
dataframe['SleepHrsNight'] = dataframe['SleepHrsNight'].fillna(dataframe_raw['SleepHrsNight'].mean())
dataframe['SleepTrouble'] = dataframe['SleepTrouble'].fillna('No')
dataframe.loc[(dataframe['Age']<=4) & (dataframe['PhysActive'].isna()), 'PhysActive'] = 'No'
dataframe = fill_bin_num(dataframe, 'PhysActive', 'Age', 2, 'mode', 16)
dataframe['PhysActive'] = dataframe['PhysActive'].fillna('Yes') # Big assumption here. All kids between 4 and 16 are physically active
dataframe = dataframe.drop(columns=['PhysActiveDays'])
dataframe = dataframe.drop(columns=['TVHrsDay'])
dataframe = dataframe.drop(columns=['TVHrsDayChild'])
dataframe = dataframe.drop(columns=['CompHrsDay'])
dataframe = dataframe.drop(columns=['CompHrsDayChild'])
dataframe.loc[(dataframe['Age']<18) & (dataframe['Alcohol12PlusYr'].isna()), 'Alcohol12PlusYr'] = 'No'
dataframe = fill_bin_num(dataframe, 'Alcohol12PlusYr', 'Age', 5, 'mode', 18)
dataframe.loc[(dataframe['Age']<18) & (dataframe['AlcoholDay'].isna()), 'AlcoholDay'] = 0
dataframe = fill_bin_num(dataframe, 'AlcoholDay', 'Age', 5, 'mean', 18)
dataframe.loc[(dataframe['Age']<18) & (dataframe['AlcoholYear'].isna()), 'AlcoholYear'] = 0
dataframe = fill_bin_num(dataframe, 'AlcoholYear', 'Age', 5, 'mean', 18)
dataframe.loc[(dataframe['Age']<20) & (dataframe['SmokeNow'].isna()), 'SmokeNow'] = 'No'
dataframe = fill_bin_num(dataframe, 'SmokeNow', 'Age', 5, 'mode', 20)
dataframe['Smoke100'] = dataframe['Smoke100'].fillna('No')
dataframe['Smoke100n'] = dataframe['Smoke100n'].fillna('No')
dataframe.loc[(dataframe['SmokeNow']=='No') & (dataframe['SmokeAge'].isna()), 'SmokeAge'] = 0
dataframe = fill_bin_num(dataframe, 'SmokeAge', 'Age', 5, 'mean', 20)
dataframe.loc[(dataframe['Age']<18) & (dataframe['Marijuana'].isna()), 'Marijuana'] = 'No'
dataframe.loc[(dataframe['Marijuana'].isna()) & (dataframe['SmokeNow']=='No'), 'Marijuana'] = 'No'
dataframe = fill_bin_num(dataframe, 'Marijuana', 'Age', 5, 'mode', 20)
dataframe.loc[(dataframe['Marijuana']=='No') & (dataframe['AgeFirstMarij'].isna()), 'AgeFirstMarij'] = 0
dataframe = fill_bin_num(dataframe, 'AgeFirstMarij', 'Age', 5, 'mean', 20)
dataframe.loc[(dataframe['Marijuana']=='No') & (dataframe['RegularMarij'].isna()), 'RegularMarij'] = 'No'
dataframe = fill_bin_num(dataframe, 'RegularMarij', 'Age', 5, 'mode', 20)
dataframe.loc[(dataframe['RegularMarij']=='No') & (dataframe['AgeRegMarij'].isna()), 'AgeRegMarij'] = 0
dataframe = fill_bin_num(dataframe, 'AgeRegMarij', 'Age', 5, 'mean', 20)
dataframe.loc[(dataframe['Age']<18) & (dataframe['HardDrugs'].isna()), 'HardDrugs'] = 'No'
dataframe = fill_bin_num(dataframe, 'HardDrugs', 'Age', 5, 'mode', 18)
mode_sex_age = dataframe['SexAge'].mode()[0]
dataframe.loc[(dataframe['Age']<=mode_sex_age) & (dataframe['SexEver'].isna()), 'SexEver'] = 'No'
dataframe['SexEver'] = dataframe['SexEver'].fillna('Yes')
dataframe.loc[(dataframe['SexEver']=='No') & (dataframe['SexAge'].isna()), 'SexAge'] = 0
dataframe.loc[(dataframe['SexAge'].isna() & (dataframe['Age']<mode_sex_age)), 'SexAge'] = dataframe.loc[(dataframe['SexAge'].isna() & (dataframe['Age']<mode_sex_age)), 'Age']
dataframe['SexAge'] = dataframe['SexAge'].fillna(mode_sex_age)
dataframe.loc[(dataframe['SexEver']=='No') & (dataframe['SexNumPartnLife'].isna()), 'SexNumPartnLife'] = 0
dataframe = fill_bin_num(dataframe, 'SexNumPartnLife', 'Age', 5, 'mean')
dataframe['SexNumPartnLife'] = dataframe_raw.loc[(dataframe_raw['Age'] >= 60) & (dataframe_raw['Age'] <= 70), 'SexNumPartnLife'].mode()[0] # Missing values for the elderly. Assumed that lifetime sex partners do not increase after 60.
dataframe.loc[(dataframe['SexEver']=='No') & (dataframe['SexNumPartYear'].isna()), 'SexNumPartYear'] = 0
dataframe = fill_bin_num(dataframe, 'SexNumPartYear', 'Age', 10, 'mean')
dataframe['SexNumPartYear'] = dataframe['SexNumPartYear'].fillna(0)
dataframe = dataframe.drop(columns=['SameSex'])
dataframe = dataframe.drop(columns=['SexOrientation'])
dataframe['PregnantNow'] = dataframe['PregnantNow'].fillna('No')
# Making dummy variables
dataframe['male'] = 1*(dataframe['Gender'] == 'male')
dataframe = dataframe.drop(columns=['Gender'])
dataframe['white'] = np.where(dataframe['Race1'] == 'white',1,0)
dataframe = dataframe.drop(columns=['Race1'])
dataframe = make_dummy_cols(dataframe, 'Education', 'education', '8th Grade')
dataframe = make_dummy_cols(dataframe, 'MaritalStatus', 'maritalstatus', 'Separated')
dataframe = make_dummy_cols(dataframe, 'HomeOwn', 'homeown', 'Other')
dataframe = make_dummy_cols(dataframe, 'Work', 'work', 'Looking')
dataframe['Diabetes'] = np.where(dataframe['Diabetes'] == 'Yes',1,0)
dataframe = make_dummy_cols(dataframe, 'HealthGen', 'healthgen', 'Poor')
dataframe = make_dummy_cols(dataframe, 'LittleInterest', 'littleinterest', 'None')
dataframe = make_dummy_cols(dataframe, 'Depressed', 'depressed', 'None')
dataframe['SleepTrouble'] = np.where(dataframe['SleepTrouble'] == 'Yes',1,0)
dataframe['PhysActive'] = np.where(dataframe['PhysActive'] == 'Yes',1,0)
dataframe['Alcohol12PlusYr'] = np.where(dataframe['Alcohol12PlusYr'] == 'Yes',1,0)
dataframe['SmokeNow'] = np.where(dataframe['SmokeNow'] == 'Yes',1,0)
dataframe['Smoke100'] = np.where(dataframe['Smoke100'] == 'Yes',1,0)
dataframe['Smoke100n'] = np.where(dataframe['Smoke100n'] == 'Yes',1,0)
dataframe['Marijuana'] = np.where(dataframe['Marijuana'] == 'Yes',1,0)
dataframe['RegularMarij'] = np.where(dataframe['RegularMarij'] == 'Yes',1,0)
dataframe['HardDrugs'] = np.where(dataframe['HardDrugs'] == 'Yes',1,0)
dataframe['SexEver'] = np.where(dataframe['SexEver'] == 'Yes',1,0)
dataframe['PregnantNow'] = np.where(dataframe['PregnantNow'] == 'Yes',1,0)
return dataframe | def fill_bin_num(dataframe, feature, bin_feature, bin_size, stat_measure, min_bin=None, max_bin=None, default_val='No'):
if min_bin is None:
min_bin = dataframe[bin_feature].min()
if max_bin is None:
max_bin = dataframe[bin_feature].max()
new_dataframe = dataframe.copy()
df_meancat = pd.DataFrame(columns=['interval', 'stat_measure'])
for (num_bin, subset) in dataframe.groupby(pd.cut(dataframe[bin_feature], np.arange(min_bin, max_bin + bin_size, bin_size), include_lowest=True)):
if stat_measure is 'mean':
row = [num_bin, subset[feature].mean()]
elif stat_measure is 'mode':
mode_ar = subset[feature].mode().values
if len(mode_ar) > 0:
row = [num_bin, mode_ar[0]]
else:
row = [num_bin, default_val]
else:
raise exception('Unknown statistical measure: ' + stat_measure)
df_meancat.loc[len(df_meancat)] = row
for (index, row_df) in dataframe[dataframe[feature].isna()].iterrows():
for (_, row_meancat) in df_meancat.iterrows():
if row_df[bin_feature] in row_meancat['interval']:
new_dataframe.at[index, feature] = row_meancat['stat_measure']
return new_dataframe
def make_dummy_cols(dataframe, column, prefix, drop_dummy):
dummy = pd.get_dummies(dataframe[column], prefix=prefix)
dummy = dummy.drop(columns=prefix + '_' + drop_dummy)
dataframe = pd.concat([dataframe, dummy], axis=1)
dataframe = dataframe.drop(columns=column)
return dataframe
def cleaning(dataframe_raw):
    """Impute missing values and numerically encode a survey dataframe.

    Column names (Age, Education, BMI, SmokeNow, ...) match the NHANES
    dataset — presumably this targets NHANES; TODO confirm with the caller.
    Relies on the module-level helpers fill_bin_num / make_dummy_cols.

    Parameters
    ----------
    dataframe_raw : pd.DataFrame
        Raw survey data with an 'ID' column; not modified.

    Returns
    -------
    pd.DataFrame
        Indexed by 'ID', all NaNs imputed, categoricals dummy/binary encoded,
        and many sparsely-populated columns dropped.
    """
    dataframe = dataframe_raw.copy()
    dataframe = dataframe.set_index('ID')
    # --- Education: age-based defaults for minors, then a blanket fill. ---
    dataframe.loc[(dataframe['Age'] <= 13) & dataframe['Education'].isna(), 'Education'] = 'Lower School/Kindergarten'
    dataframe.loc[(dataframe['Age'] == 14) & dataframe['Education'].isna(), 'Education'] = '8th Grade'
    dataframe.loc[(dataframe['Age'] <= 17) & dataframe['Education'].isna(), 'Education'] = '9 - 11th Grade'
    dataframe.loc[(dataframe['Age'] <= 21) & dataframe['Education'].isna(), 'Education'] = 'High School'
    dataframe['Education'] = dataframe['Education'].fillna('Some College')
    # --- Marital status ---
    dataframe.loc[(dataframe['Age'] <= 20) & dataframe['MaritalStatus'].isna(), 'MaritalStatus'] = 'NeverMarried'
    # NOTE(review): `.at` with a boolean mask and a whole DataFrame on the
    # right-hand side looks wrong — probably meant
    # `.loc[mask, 'MaritalStatus'] = fill_bin_num(...)['MaritalStatus']`; confirm.
    dataframe.at[dataframe['MaritalStatus'].isna(), 'MaritalStatus'] = fill_bin_num(dataframe, 'MaritalStatus', 'Age', 5, 'mode', 20)
    # --- Income / household ---
    dataframe = dataframe.drop(columns=['HHIncome'])
    dataframe.loc[dataframe['HHIncomeMid'].isna(), 'HHIncomeMid'] = dataframe['HHIncomeMid'].mean()
    dataframe.loc[dataframe['Poverty'].isna(), 'Poverty'] = dataframe['Poverty'].mean()
    dataframe.loc[dataframe['HomeRooms'].isna(), 'HomeRooms'] = dataframe['HomeRooms'].mean()
    dataframe.loc[dataframe['HomeOwn'].isna(), 'HomeOwn'] = dataframe['HomeOwn'].mode().values[0]
    dataframe.loc[dataframe['Work'].isna() & dataframe['Education'].isna() & (dataframe['Age'] <= 20), 'Work'] = 'NotWorking'
    dataframe.loc[dataframe['Work'].isna(), 'Work'] = dataframe['Work'].mode().values[0]
    # --- Body measurements ---
    dataframe = fill_bin_num(dataframe, 'Weight', 'Age', 2, 'mean')
    dataframe = dataframe.drop(columns=['HeadCirc'])
    # Infants have 'Length' instead of 'Height': copy it over before filling.
    for (index, row) in dataframe.iterrows():
        if np.isnan(row['Height']) and (not np.isnan(row['Length'])):
            dataframe.at[index, 'Height'] = row['Length']
    dataframe = fill_bin_num(dataframe, 'Height', 'Age', 2, 'mean')
    dataframe = dataframe.drop(columns=['Length'])
    # Recompute BMI = kg / m^2 from the now-complete Weight/Height.
    for (index, row) in dataframe[dataframe['BMI'].isna()].iterrows():
        dataframe.at[index, 'BMI'] = row['Weight'] / (row['Height'] / 100) ** 2
    dataframe = dataframe.drop(columns='BMICatUnder20yrs')
    dataframe = dataframe.drop(columns='BMI_WHO')
    # --- Vitals: pulse and blood pressure (children get fixed defaults). ---
    dataframe = fill_bin_num(dataframe, 'Pulse', 'Age', 10, 'mean')
    dataframe.loc[(dataframe['Age'] < 10) & dataframe['BPSysAve'].isna(), 'BPSysAve'] = 105
    dataframe = fill_bin_num(dataframe, 'BPSysAve', 'Age', 5, 'mean', 10)
    dataframe.loc[(dataframe['Age'] < 10) & dataframe['BPDiaAve'].isna(), 'BPDiaAve'] = 60
    dataframe = fill_bin_num(dataframe, 'BPDiaAve', 'Age', 5, 'mean', 10)
    # Individual BP readings are redundant with the *Ave columns.
    dataframe = dataframe.drop(columns='BPSys1')
    dataframe = dataframe.drop(columns='BPDia1')
    dataframe = dataframe.drop(columns='BPSys2')
    dataframe = dataframe.drop(columns='BPDia2')
    dataframe = dataframe.drop(columns='BPSys3')
    dataframe = dataframe.drop(columns='BPDia3')
    dataframe = dataframe.drop(columns=['Testosterone'])
    # --- Cholesterol (children default to 0 rather than an imputed mean). ---
    dataframe.loc[(dataframe['Age'] < 10) & dataframe['DirectChol'].isna(), 'DirectChol'] = 0
    dataframe = fill_bin_num(dataframe, 'DirectChol', 'Age', 5, 'mean', 10)
    dataframe.loc[(dataframe['Age'] < 10) & dataframe['TotChol'].isna(), 'TotChol'] = 0
    dataframe = fill_bin_num(dataframe, 'TotChol', 'Age', 5, 'mean', 10)
    dataframe = dataframe.drop(columns=['UrineVol1'])
    dataframe = dataframe.drop(columns=['UrineFlow1'])
    dataframe = dataframe.drop(columns=['UrineVol2'])
    dataframe = dataframe.drop(columns=['UrineFlow2'])
    # --- Health status ---
    dataframe['Diabetes'] = dataframe['Diabetes'].fillna('No')
    dataframe['DiabetesAge'] = dataframe['DiabetesAge'].fillna(0)
    dataframe.loc[(dataframe['Age'] <= 12) & dataframe['HealthGen'].isna(), 'HealthGen'] = 'Good'
    dataframe = fill_bin_num(dataframe, 'HealthGen', 'Age', 5, 'mode', 10)
    dataframe.loc[(dataframe['Age'] <= 12) & dataframe['DaysMentHlthBad'].isna(), 'DaysMentHlthBad'] = 0
    dataframe = fill_bin_num(dataframe, 'DaysMentHlthBad', 'Age', 5, 'mean', 10)
    dataframe.loc[(dataframe['Age'] <= 15) & dataframe['LittleInterest'].isna(), 'LittleInterest'] = 'None'
    dataframe = fill_bin_num(dataframe, 'LittleInterest', 'Age', 5, 'mode', 15)
    # NOTE(review): the next two lines duplicate the DaysMentHlthBad handling
    # above, and 'Depressed' (dummy-encoded below) is never imputed —
    # presumably these were meant to target 'Depressed'; confirm.
    dataframe.loc[(dataframe['Age'] <= 12) & dataframe['DaysMentHlthBad'].isna(), 'DaysMentHlthBad'] = 0
    dataframe = fill_bin_num(dataframe, 'DaysMentHlthBad', 'Age', 5, 'mean', 10)
    # --- Pregnancy history: assume one baby per known pregnancy, else 0. ---
    for (index, row) in dataframe.iterrows():
        if np.isnan(row['nBabies']) and (not np.isnan(row['nPregnancies'])):
            dataframe.at[index, 'nBabies'] = row['nPregnancies']
    dataframe['nBabies'] = dataframe['nBabies'].fillna(0)
    dataframe['nPregnancies'] = dataframe['nPregnancies'].fillna(0)
    dataframe['Age1stBaby'] = dataframe['Age1stBaby'].fillna(0)
    # --- Sleep: age-banded defaults, then the raw-data mean for adults. ---
    dataframe.loc[(dataframe['Age'] == 0) & dataframe['SleepHrsNight'].isna(), 'SleepHrsNight'] = 14
    dataframe.loc[(dataframe['Age'] <= 2) & dataframe['SleepHrsNight'].isna(), 'SleepHrsNight'] = 12
    dataframe.loc[(dataframe['Age'] <= 5) & dataframe['SleepHrsNight'].isna(), 'SleepHrsNight'] = 10
    dataframe.loc[(dataframe['Age'] <= 10) & dataframe['SleepHrsNight'].isna(), 'SleepHrsNight'] = 9
    dataframe.loc[(dataframe['Age'] <= 15) & dataframe['SleepHrsNight'].isna(), 'SleepHrsNight'] = 8
    dataframe['SleepHrsNight'] = dataframe['SleepHrsNight'].fillna(dataframe_raw['SleepHrsNight'].mean())
    dataframe['SleepTrouble'] = dataframe['SleepTrouble'].fillna('No')
    # --- Physical activity ---
    dataframe.loc[(dataframe['Age'] <= 4) & dataframe['PhysActive'].isna(), 'PhysActive'] = 'No'
    dataframe = fill_bin_num(dataframe, 'PhysActive', 'Age', 2, 'mode', 16)
    dataframe['PhysActive'] = dataframe['PhysActive'].fillna('Yes')
    dataframe = dataframe.drop(columns=['PhysActiveDays'])
    dataframe = dataframe.drop(columns=['TVHrsDay'])
    dataframe = dataframe.drop(columns=['TVHrsDayChild'])
    dataframe = dataframe.drop(columns=['CompHrsDay'])
    dataframe = dataframe.drop(columns=['CompHrsDayChild'])
    # --- Alcohol (minors default to non-drinkers). ---
    dataframe.loc[(dataframe['Age'] < 18) & dataframe['Alcohol12PlusYr'].isna(), 'Alcohol12PlusYr'] = 'No'
    dataframe = fill_bin_num(dataframe, 'Alcohol12PlusYr', 'Age', 5, 'mode', 18)
    dataframe.loc[(dataframe['Age'] < 18) & dataframe['AlcoholDay'].isna(), 'AlcoholDay'] = 0
    dataframe = fill_bin_num(dataframe, 'AlcoholDay', 'Age', 5, 'mean', 18)
    dataframe.loc[(dataframe['Age'] < 18) & dataframe['AlcoholYear'].isna(), 'AlcoholYear'] = 0
    dataframe = fill_bin_num(dataframe, 'AlcoholYear', 'Age', 5, 'mean', 18)
    # --- Smoking ---
    dataframe.loc[(dataframe['Age'] < 20) & dataframe['SmokeNow'].isna(), 'SmokeNow'] = 'No'
    dataframe = fill_bin_num(dataframe, 'SmokeNow', 'Age', 5, 'mode', 20)
    dataframe['Smoke100'] = dataframe['Smoke100'].fillna('No')
    dataframe['Smoke100n'] = dataframe['Smoke100n'].fillna('No')
    dataframe.loc[(dataframe['SmokeNow'] == 'No') & dataframe['SmokeAge'].isna(), 'SmokeAge'] = 0
    dataframe = fill_bin_num(dataframe, 'SmokeAge', 'Age', 5, 'mean', 20)
    # --- Drugs: non-users get 0 / 'No'; the rest are binned by age. ---
    dataframe.loc[(dataframe['Age'] < 18) & dataframe['Marijuana'].isna(), 'Marijuana'] = 'No'
    dataframe.loc[dataframe['Marijuana'].isna() & (dataframe['SmokeNow'] == 'No'), 'Marijuana'] = 'No'
    dataframe = fill_bin_num(dataframe, 'Marijuana', 'Age', 5, 'mode', 20)
    dataframe.loc[(dataframe['Marijuana'] == 'No') & dataframe['AgeFirstMarij'].isna(), 'AgeFirstMarij'] = 0
    dataframe = fill_bin_num(dataframe, 'AgeFirstMarij', 'Age', 5, 'mean', 20)
    dataframe.loc[(dataframe['Marijuana'] == 'No') & dataframe['RegularMarij'].isna(), 'RegularMarij'] = 'No'
    dataframe = fill_bin_num(dataframe, 'RegularMarij', 'Age', 5, 'mode', 20)
    dataframe.loc[(dataframe['RegularMarij'] == 'No') & dataframe['AgeRegMarij'].isna(), 'AgeRegMarij'] = 0
    dataframe = fill_bin_num(dataframe, 'AgeRegMarij', 'Age', 5, 'mean', 20)
    dataframe.loc[(dataframe['Age'] < 18) & dataframe['HardDrugs'].isna(), 'HardDrugs'] = 'No'
    dataframe = fill_bin_num(dataframe, 'HardDrugs', 'Age', 5, 'mode', 18)
    # --- Sexual history: anchored on the modal age of first intercourse. ---
    mode_sex_age = dataframe['SexAge'].mode()[0]
    dataframe.loc[(dataframe['Age'] <= mode_sex_age) & dataframe['SexEver'].isna(), 'SexEver'] = 'No'
    dataframe['SexEver'] = dataframe['SexEver'].fillna('Yes')
    dataframe.loc[(dataframe['SexEver'] == 'No') & dataframe['SexAge'].isna(), 'SexAge'] = 0
    dataframe.loc[dataframe['SexAge'].isna() & (dataframe['Age'] < mode_sex_age), 'SexAge'] = dataframe.loc[dataframe['SexAge'].isna() & (dataframe['Age'] < mode_sex_age), 'Age']
    dataframe['SexAge'] = dataframe['SexAge'].fillna(mode_sex_age)
    dataframe.loc[(dataframe['SexEver'] == 'No') & dataframe['SexNumPartnLife'].isna(), 'SexNumPartnLife'] = 0
    dataframe = fill_bin_num(dataframe, 'SexNumPartnLife', 'Age', 5, 'mean')
    # NOTE(review): this assigns a single scalar to the WHOLE column,
    # clobbering the bin-based fill on the previous line — presumably
    # `.fillna(...)` was intended; confirm before relying on this column.
    dataframe['SexNumPartnLife'] = dataframe_raw.loc[(dataframe_raw['Age'] >= 60) & (dataframe_raw['Age'] <= 70), 'SexNumPartnLife'].mode()[0]
    dataframe.loc[(dataframe['SexEver'] == 'No') & dataframe['SexNumPartYear'].isna(), 'SexNumPartYear'] = 0
    dataframe = fill_bin_num(dataframe, 'SexNumPartYear', 'Age', 10, 'mean')
    dataframe['SexNumPartYear'] = dataframe['SexNumPartYear'].fillna(0)
    dataframe = dataframe.drop(columns=['SameSex'])
    dataframe = dataframe.drop(columns=['SexOrientation'])
    dataframe['PregnantNow'] = dataframe['PregnantNow'].fillna('No')
    # --- Encoding: binary indicators and one-hot dummies. ---
    dataframe['male'] = 1 * (dataframe['Gender'] == 'male')
    dataframe = dataframe.drop(columns=['Gender'])
    # NOTE(review): NHANES 'Race1' values are capitalized ('White'); a
    # lowercase comparison would yield all zeros — verify against the data.
    dataframe['white'] = np.where(dataframe['Race1'] == 'white', 1, 0)
    dataframe = dataframe.drop(columns=['Race1'])
    dataframe = make_dummy_cols(dataframe, 'Education', 'education', '8th Grade')
    dataframe = make_dummy_cols(dataframe, 'MaritalStatus', 'maritalstatus', 'Separated')
    dataframe = make_dummy_cols(dataframe, 'HomeOwn', 'homeown', 'Other')
    dataframe = make_dummy_cols(dataframe, 'Work', 'work', 'Looking')
    dataframe['Diabetes'] = np.where(dataframe['Diabetes'] == 'Yes', 1, 0)
    dataframe = make_dummy_cols(dataframe, 'HealthGen', 'healthgen', 'Poor')
    dataframe = make_dummy_cols(dataframe, 'LittleInterest', 'littleinterest', 'None')
    dataframe = make_dummy_cols(dataframe, 'Depressed', 'depressed', 'None')
    dataframe['SleepTrouble'] = np.where(dataframe['SleepTrouble'] == 'Yes', 1, 0)
    dataframe['PhysActive'] = np.where(dataframe['PhysActive'] == 'Yes', 1, 0)
    dataframe['Alcohol12PlusYr'] = np.where(dataframe['Alcohol12PlusYr'] == 'Yes', 1, 0)
    dataframe['SmokeNow'] = np.where(dataframe['SmokeNow'] == 'Yes', 1, 0)
    dataframe['Smoke100'] = np.where(dataframe['Smoke100'] == 'Yes', 1, 0)
    dataframe['Smoke100n'] = np.where(dataframe['Smoke100n'] == 'Yes', 1, 0)
    dataframe['Marijuana'] = np.where(dataframe['Marijuana'] == 'Yes', 1, 0)
    dataframe['RegularMarij'] = np.where(dataframe['RegularMarij'] == 'Yes', 1, 0)
    dataframe['HardDrugs'] = np.where(dataframe['HardDrugs'] == 'Yes', 1, 0)
    dataframe['SexEver'] = np.where(dataframe['SexEver'] == 'Yes', 1, 0)
    dataframe['PregnantNow'] = np.where(dataframe['PregnantNow'] == 'Yes', 1, 0)
    return dataframe
def main():
    """Prompt for an integer and print 1..num comma-separated, no trailing comma.

    Output for num=5 is "1,2,3,4,5"; for num<=1 it is just str(num),
    matching the original behavior (whose for/else printed num last).
    """
    num = int(input("introduce un numero:"))
    # The original used for/else — but with no `break` the `else` ALWAYS
    # runs, which is misleading. One join-based print is equivalent.
    print(",".join([str(x) for x in range(1, num)] + [str(num)]), end="")
def main():
    """Read an integer and print the numbers 1 through it, comma-separated."""
    limit = int(input('introduce un numero:'))
    pieces = [str(value) for value in range(1, limit)]
    pieces.append(str(limit))
    print(','.join(pieces), end='')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.