content
stringlengths
7
1.05M
fixed_cases
stringlengths
1
1.28M
def _flatten(x): result = [] for el in x: if hasattr(el, "__iter__") and not isinstance(el, basestring): result.extend(_flatten(el)) else: result.append(el) return result class Utils: @staticmethod def _deleteEmpty(str): return str != "" @staticmethod def _getInt(val): if val.replace('.','',1).isdigit(): val = str(int(float(val))) return val # from http://stackoverflow.com/questions/406121/flattening-a-shallow-list-in-python _flatten = staticmethod(_flatten) @staticmethod def _isInfinity(value): return value == "Infinity" or value == "-Infinity"
def _flatten(x): result = [] for el in x: if hasattr(el, '__iter__') and (not isinstance(el, basestring)): result.extend(_flatten(el)) else: result.append(el) return result class Utils: @staticmethod def _delete_empty(str): return str != '' @staticmethod def _get_int(val): if val.replace('.', '', 1).isdigit(): val = str(int(float(val))) return val _flatten = staticmethod(_flatten) @staticmethod def _is_infinity(value): return value == 'Infinity' or value == '-Infinity'
# Length of Last Word # https://www.interviewbit.com/problems/length-of-last-word/ # # Given a string s consists of upper/lower-case alphabets and empty space characters ' ', return the length of last word in the string. # # If the last word does not exist, return 0. # # Note: A word is defined as a character sequence consists of non-space characters only. # # Example: # # Given s = "Hello World", # # return 5 as length("World") = 5. # # Please make sure you try to solve this problem without using library functions. Make sure you only traverse the string once. # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Approach I # def lengthOfLastWord(self, A): # i, length = len(A) - 1, 0 # if i >= 0: # while i >= 0 and A[i].isspace(): # i -= 1 # # while i >= 0 and not A[i].isspace(): # length += 1 # i -= 1 # # return length class Solution: # @param A : string # @return an integer def lengthOfLastWord(self, A): words = A.split() return len(words[-1]) if len(words) > 0 else 0 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # if __name__ == "__main__": s = Solution() print(s.lengthOfLastWord("")) print(s.lengthOfLastWord("ana voli milovana"))
class Solution: def length_of_last_word(self, A): words = A.split() return len(words[-1]) if len(words) > 0 else 0 if __name__ == '__main__': s = solution() print(s.lengthOfLastWord('')) print(s.lengthOfLastWord('ana voli milovana'))
# # @lc app=leetcode id=845 lang=python3 # # [845] Longest Mountain in Array # # @lc code=start class Solution: def longestMountain(self, A) -> int: tend = [] for i in range(0, len(A) - 1): if A[i + 1] > A[i]: tend.append(1) elif A[i + 1] == A[i]: tend.append(0) else: tend.append(-1) peeks = [] i = 0 pre = 0 tmp = 0 has_up = 0 while i < len(tend): if tend[i] == 0: if tmp > 0 and pre == -1: peeks.append(tmp) pre = 0 tmp = 0 has_up = 0 elif tend[i] == 1: if pre != -1: tmp += 1 pre = 1 has_up = 1 else: if tmp > 0: peeks.append(tmp) pre = 1 has_up = 1 tmp = 1 else: if has_up: pre = -1 tmp += 1 i += 1 if tmp and pre == -1: peeks.append(tmp) return max(peeks) + 1 if peeks else 0 if __name__ == '__main__': a = Solution() b = a.longestMountain([0,1,2,3,4,5,4,3,2,1,0]) print(b) # @lc code=end
class Solution: def longest_mountain(self, A) -> int: tend = [] for i in range(0, len(A) - 1): if A[i + 1] > A[i]: tend.append(1) elif A[i + 1] == A[i]: tend.append(0) else: tend.append(-1) peeks = [] i = 0 pre = 0 tmp = 0 has_up = 0 while i < len(tend): if tend[i] == 0: if tmp > 0 and pre == -1: peeks.append(tmp) pre = 0 tmp = 0 has_up = 0 elif tend[i] == 1: if pre != -1: tmp += 1 pre = 1 has_up = 1 else: if tmp > 0: peeks.append(tmp) pre = 1 has_up = 1 tmp = 1 elif has_up: pre = -1 tmp += 1 i += 1 if tmp and pre == -1: peeks.append(tmp) return max(peeks) + 1 if peeks else 0 if __name__ == '__main__': a = solution() b = a.longestMountain([0, 1, 2, 3, 4, 5, 4, 3, 2, 1, 0]) print(b)
#replace blank space with / def replace(a): temp=a.split(" ") temp2='/' for i in range(0,len(temp)): temp2=temp2+temp[i]+'/' print(temp2) a=raw_input("enter string") replace(a)
def replace(a): temp = a.split(' ') temp2 = '/' for i in range(0, len(temp)): temp2 = temp2 + temp[i] + '/' print(temp2) a = raw_input('enter string') replace(a)
def main(request, response): response.headers.set("Access-Control-Allow-Origin", "*") response.headers.set("Access-Control-Max-Age", 0) response.headers.set('Access-Control-Allow-Headers', "x-test") if request.method == "OPTIONS": if not request.headers.get("User-Agent"): response.content = "FAIL: User-Agent header missing in preflight request." response.status = 400 else: if request.headers.get("User-Agent"): response.content = "PASS" else: response.content = "FAIL: User-Agent header missing in request" response.status = 400
def main(request, response): response.headers.set('Access-Control-Allow-Origin', '*') response.headers.set('Access-Control-Max-Age', 0) response.headers.set('Access-Control-Allow-Headers', 'x-test') if request.method == 'OPTIONS': if not request.headers.get('User-Agent'): response.content = 'FAIL: User-Agent header missing in preflight request.' response.status = 400 elif request.headers.get('User-Agent'): response.content = 'PASS' else: response.content = 'FAIL: User-Agent header missing in request' response.status = 400
class HealthCheck: _instance = None def __new__(cls, *args, **kwargs): if not HealthCheck._instance: HealthCheck._instance = super(HealthCheck, cls).__new__(cls, *args, **kwargs) return HealthCheck._instance def __init__(self): self._servers = [] def add_server(self): self._servers.append("Server 1") self._servers.append("Server 2") self._servers.append("Server 3") self._servers.append("Server 4") def change_server(self): self._servers.pop() self._servers.append("Server 5") hc1 = HealthCheck() hc2 = HealthCheck() print(hc1, hc2) hc1.add_server() print("Schedule heath check for servers (1)..") for i in range(4): print("Checking ", hc1._servers[i]) hc2.change_server() print("Schedule health check for servers (2)..") for i in range(4): print("Checking ", hc2._servers[i])
class Healthcheck: _instance = None def __new__(cls, *args, **kwargs): if not HealthCheck._instance: HealthCheck._instance = super(HealthCheck, cls).__new__(cls, *args, **kwargs) return HealthCheck._instance def __init__(self): self._servers = [] def add_server(self): self._servers.append('Server 1') self._servers.append('Server 2') self._servers.append('Server 3') self._servers.append('Server 4') def change_server(self): self._servers.pop() self._servers.append('Server 5') hc1 = health_check() hc2 = health_check() print(hc1, hc2) hc1.add_server() print('Schedule heath check for servers (1)..') for i in range(4): print('Checking ', hc1._servers[i]) hc2.change_server() print('Schedule health check for servers (2)..') for i in range(4): print('Checking ', hc2._servers[i])
n=int(input("Enter a Number - ")) for i in range (1,n+1): if (n%i==0): print (i)
n = int(input('Enter a Number - ')) for i in range(1, n + 1): if n % i == 0: print(i)
print("hello.") def test_hello(): print("\ntesting the words 'hello' and 'goodbye'\n") assert "hello" > "goodbye" def test_add(): assert 1==2-1
print('hello.') def test_hello(): print("\ntesting the words 'hello' and 'goodbye'\n") assert 'hello' > 'goodbye' def test_add(): assert 1 == 2 - 1
# suites SIMPLE = 'simple' ARGS = 'args' GENERATOR = 'generator' LAZY_GENERATOR = 'lazy_generator' FIXTURE = 'fixture' FIXTURE_ARGS = 'fixture_args' FIXTURE_GENERATOR = 'fixture_generator' FIXTURE_LAZY_GENERATOR = 'fixture_lazy_generator' FIXTURE_BUILDER = 'fixture_builder' FIXTURE_BUILDER_ARGS = 'fixture_builder_args' FIXTURE_BUILDER_GENERATOR = 'fixture_builder_generator' FIXTURE_BUILDER_LAZY_GENERATOR = 'fixture_builder_lazy_generator' # dataset used to compare different backends # sltbench supports: all # googlebench supports: SIMPLE && FIXTURE # nonius supports: SIMPLE COMPARABLE = [ SIMPLE, ] # all suites ALL = [ SIMPLE, ARGS, GENERATOR, LAZY_GENERATOR, FIXTURE, FIXTURE_ARGS, FIXTURE_GENERATOR, FIXTURE_LAZY_GENERATOR, FIXTURE_BUILDER, FIXTURE_BUILDER_ARGS, FIXTURE_BUILDER_GENERATOR, FIXTURE_BUILDER_LAZY_GENERATOR ] # available input ALL_INPUT = ['comparable', 'all'] + ALL def create(args): if args.dataset == 'comparable': return COMPARABLE if args.dataset == 'all': return ALL return [args.dataset]
simple = 'simple' args = 'args' generator = 'generator' lazy_generator = 'lazy_generator' fixture = 'fixture' fixture_args = 'fixture_args' fixture_generator = 'fixture_generator' fixture_lazy_generator = 'fixture_lazy_generator' fixture_builder = 'fixture_builder' fixture_builder_args = 'fixture_builder_args' fixture_builder_generator = 'fixture_builder_generator' fixture_builder_lazy_generator = 'fixture_builder_lazy_generator' comparable = [SIMPLE] all = [SIMPLE, ARGS, GENERATOR, LAZY_GENERATOR, FIXTURE, FIXTURE_ARGS, FIXTURE_GENERATOR, FIXTURE_LAZY_GENERATOR, FIXTURE_BUILDER, FIXTURE_BUILDER_ARGS, FIXTURE_BUILDER_GENERATOR, FIXTURE_BUILDER_LAZY_GENERATOR] all_input = ['comparable', 'all'] + ALL def create(args): if args.dataset == 'comparable': return COMPARABLE if args.dataset == 'all': return ALL return [args.dataset]
#User function Template for python3 class Solution: def subsetSums(self, arr, N): # code here def subset(arr,N,ind,sum,res): if ind==N: res.append(sum) return subset(arr,N,ind+1,sum+arr[ind],res) subset(arr,N,ind+1,sum,res) re=[] subset(arr,N,0,0,re) return re #{ # Driver Code Starts #Initial Template for Python 3 if __name__ == '__main__': T=int(input()) for i in range(T): N = int(input()) arr = [int(x) for x in input().split()] ob = Solution() ans = ob.subsetSums(arr, N) ans.sort() for x in ans: print(x,end=" ") print("") # } Driver Code Ends
class Solution: def subset_sums(self, arr, N): def subset(arr, N, ind, sum, res): if ind == N: res.append(sum) return subset(arr, N, ind + 1, sum + arr[ind], res) subset(arr, N, ind + 1, sum, res) re = [] subset(arr, N, 0, 0, re) return re if __name__ == '__main__': t = int(input()) for i in range(T): n = int(input()) arr = [int(x) for x in input().split()] ob = solution() ans = ob.subsetSums(arr, N) ans.sort() for x in ans: print(x, end=' ') print('')
# https://www.interviewbit.com/problems/merge-intervals/ # Definition for an interval. # class Interval: # def __init__(self, s=0, e=0): # self.start = s # self.end = e class Solution: # @param intervals, a list of Intervals # @param new_interval, a Interval # @return a list of Interval def insert(self, intervals, new_interval): #intervals.append(new_interval) #intervals.sort(key=lambda x: x.start) # sorting not needed if len(intervals) == 0: return [new_interval] # inserting new interval in log(n) time low = 0 high = len(intervals) if new_interval.start <= intervals[0].start: intervals.insert(0, new_interval) elif new_interval.start >= intervals[-1].start: intervals.append(new_interval) else: while low <= high: mid = (low + high) // 2 if intervals[mid].start <= new_interval.start and intervals[mid+1].start > new_interval.start: break elif intervals[mid].start > new_interval.start: high = mid - 1 else: low = mid + 1 intervals.insert(mid+1, new_interval) # merge in O(n) time merged = [intervals[0]] for i in range(1,len(intervals)): if intervals[i].start <= merged[-1].end: merged[-1].end = max(merged[-1].end, intervals[i].end) # merging action else: merged.append(intervals[i]) return merged
class Solution: def insert(self, intervals, new_interval): if len(intervals) == 0: return [new_interval] low = 0 high = len(intervals) if new_interval.start <= intervals[0].start: intervals.insert(0, new_interval) elif new_interval.start >= intervals[-1].start: intervals.append(new_interval) else: while low <= high: mid = (low + high) // 2 if intervals[mid].start <= new_interval.start and intervals[mid + 1].start > new_interval.start: break elif intervals[mid].start > new_interval.start: high = mid - 1 else: low = mid + 1 intervals.insert(mid + 1, new_interval) merged = [intervals[0]] for i in range(1, len(intervals)): if intervals[i].start <= merged[-1].end: merged[-1].end = max(merged[-1].end, intervals[i].end) else: merged.append(intervals[i]) return merged
RELEASE_HUMAN = "104" RELEASE_MOUSE = "104" ASSEMBLY_HUMAN = f"Homo_sapiens.GRCh38.{RELEASE_HUMAN}" ASSEMBLY_MOUSE = f"Mus_musculus.GRCm39.{RELEASE_MOUSE}" CELLTYPES = ["adventitial cell", "endothelial cell", "acinar cell", "pancreatic PP cell", "type B pancreatic cell"] CL_VERSION = "v2021-08-10"
release_human = '104' release_mouse = '104' assembly_human = f'Homo_sapiens.GRCh38.{RELEASE_HUMAN}' assembly_mouse = f'Mus_musculus.GRCm39.{RELEASE_MOUSE}' celltypes = ['adventitial cell', 'endothelial cell', 'acinar cell', 'pancreatic PP cell', 'type B pancreatic cell'] cl_version = 'v2021-08-10'
def subarray_sum_non_negative(lst, target_sum): ''' Simple 2-pointer-window. ''' window_idx_left = 0 window_idx_right = 1 current_sum = lst[0] while True: if current_sum == target_sum: return window_idx_left, window_idx_right - 1 if window_idx_right >= len(lst): break if current_sum < target_sum: current_sum += lst[window_idx_right] window_idx_right += 1 else: current_sum -= lst[window_idx_left] window_idx_left += 1 if window_idx_left == window_idx_right: assert (current_sum == 0) if window_idx_right < len(lst): current_sum += lst[window_idx_right] window_idx_right += 1 return -1, -1 def main(): lst = [5, 1, 3, 4, 2] sum = 4 i, j = subarray_sum_non_negative(lst, sum) print(f'{i}, {j}') if __name__ == "__main__": main()
def subarray_sum_non_negative(lst, target_sum): """ Simple 2-pointer-window. """ window_idx_left = 0 window_idx_right = 1 current_sum = lst[0] while True: if current_sum == target_sum: return (window_idx_left, window_idx_right - 1) if window_idx_right >= len(lst): break if current_sum < target_sum: current_sum += lst[window_idx_right] window_idx_right += 1 else: current_sum -= lst[window_idx_left] window_idx_left += 1 if window_idx_left == window_idx_right: assert current_sum == 0 if window_idx_right < len(lst): current_sum += lst[window_idx_right] window_idx_right += 1 return (-1, -1) def main(): lst = [5, 1, 3, 4, 2] sum = 4 (i, j) = subarray_sum_non_negative(lst, sum) print(f'{i}, {j}') if __name__ == '__main__': main()
### Mock Config ### env = { "name": "mock_env", "render": False, } agent = { "name": "mock_agent", "network": "mock_network", } optim = { "name": "mock_optim", "lr": 0.0001, } train = { "training": True, "load_path": None, "run_step": 100000, "print_period": 1000, "save_period": 10000, "eval_iteration": 10, "record": False, "record_period": None, # distributed setting "update_period": 32, "num_workers": 8, }
env = {'name': 'mock_env', 'render': False} agent = {'name': 'mock_agent', 'network': 'mock_network'} optim = {'name': 'mock_optim', 'lr': 0.0001} train = {'training': True, 'load_path': None, 'run_step': 100000, 'print_period': 1000, 'save_period': 10000, 'eval_iteration': 10, 'record': False, 'record_period': None, 'update_period': 32, 'num_workers': 8}
#!/usr/bin/python3 """ imports Flask instance for gunicorn configurations gunicorn --bind 127.0.0.1:8003 wsgi.wsgi_amazon.amazon.app """ amazon = __import__('app', globals(), locals(), ['*']) if __name__ == "__main__": """runs the main flask app""" amazon.app.run()
""" imports Flask instance for gunicorn configurations gunicorn --bind 127.0.0.1:8003 wsgi.wsgi_amazon.amazon.app """ amazon = __import__('app', globals(), locals(), ['*']) if __name__ == '__main__': 'runs the main flask app' amazon.app.run()
APIS = [{ 'field_name': 'SymbolDescription', 'field_price': 'AvgPrice', 'field_symbol': 'Symbol', 'name': 'SASE', 'root': 'http://www.sase.ba', 'params': { 'type': 19 }, 'request_type': "POST", 'status': 'FeedServices/HandlerChart.ashx', 'type': 'json' }, { 'field_name': 'Description', 'field_price': 'AvgPrice', 'field_symbol': 'Code', 'name': 'BL berza', 'root': 'https://www.blberza.com', 'params': { 'langId': 1 }, 'request_type': "GET", 'status': 'services/defaultTicker.ashx', 'type': 'json' }]
apis = [{'field_name': 'SymbolDescription', 'field_price': 'AvgPrice', 'field_symbol': 'Symbol', 'name': 'SASE', 'root': 'http://www.sase.ba', 'params': {'type': 19}, 'request_type': 'POST', 'status': 'FeedServices/HandlerChart.ashx', 'type': 'json'}, {'field_name': 'Description', 'field_price': 'AvgPrice', 'field_symbol': 'Code', 'name': 'BL berza', 'root': 'https://www.blberza.com', 'params': {'langId': 1}, 'request_type': 'GET', 'status': 'services/defaultTicker.ashx', 'type': 'json'}]
ah1 = input() ah2 = input() if len(ah1) < len(ah2): print("no") else: print("go")
ah1 = input() ah2 = input() if len(ah1) < len(ah2): print('no') else: print('go')
class Node: def __init__(self, value=None, next_=None): self.value = value self.next_ = next_ class Stack: def __init__(self, top=None): self.top = top def push(self, value): new_node = Node(value, self.top) self.top = new_node def pop(self): if not self.top: raise TypeError remove_node = self.top self.top = remove_node.next_ return remove_node def peek(self): if not self.top: raise TypeError return self.top def isEmpty(self): if not self.top: return True else: return False class Queue: def __init__(self, front=None, back=None): self.front = front self.back = back def enqueue(self, value): current_last = self.back current_last.next_ = Node(value) self.back = current_last.next_ def dequeue(self): if not self.front: raise TypeError remove_node = self.front self.front = remove_node.next_ return remove_node def peek(self): if not self.front: raise TypeError return self.front def isEmpty(self): if not self.front: return True else: return False
class Node: def __init__(self, value=None, next_=None): self.value = value self.next_ = next_ class Stack: def __init__(self, top=None): self.top = top def push(self, value): new_node = node(value, self.top) self.top = new_node def pop(self): if not self.top: raise TypeError remove_node = self.top self.top = remove_node.next_ return remove_node def peek(self): if not self.top: raise TypeError return self.top def is_empty(self): if not self.top: return True else: return False class Queue: def __init__(self, front=None, back=None): self.front = front self.back = back def enqueue(self, value): current_last = self.back current_last.next_ = node(value) self.back = current_last.next_ def dequeue(self): if not self.front: raise TypeError remove_node = self.front self.front = remove_node.next_ return remove_node def peek(self): if not self.front: raise TypeError return self.front def is_empty(self): if not self.front: return True else: return False
#: The AWS access key. Should look something like this:: #: #: AUTH = {'aws_access_key_id': 'XXXXXXXXXXXXXXXXX', #: 'aws_secret_access_key': 'aaaaaaaaaaaa\BBBBBBBBB\dsaddad'} #: AUTH = {} #: The default AWS region to use with the commands where REGION is supported. DEFAULT_REGION = 'eu-west-1' #: Default ssh user if the ``awsfab-ssh-user`` tag is not set EC2_INSTANCE_DEFAULT_SSHUSER = 'root' #: Directories to search for "<key_name>.pem". These paths are filtered through #: os.path.expanduser, so paths like ``~/.ssh/`` works. KEYPAIR_PATH = ['.', '~/.ssh/'] #: Extra SSH arguments. Used with ``ssh`` and ``rsync``. EXTRA_SSH_ARGS = '-o StrictHostKeyChecking=no' #: Configuration for ec2_launch_instance (see the docs) EC2_LAUNCH_CONFIGS = {} #: S3 bucket suffix. This is used for all tasks taking bucketname as parameter. #: The actual bucketname used become:: #: #: S3_BUCKET_PATTERN.format(bucketname=bucketname) #: #: This is typically used to add your domain name or company name to all bucket #: names, but avoid having to type the entire name for each task. Examples:: #: #: S3_BUCKET_PATTERN = '{bucketname}.example.com' #: S3_BUCKET_PATTERN = 'example.com.{bucketname}' #: #: The default, ``"{bucketname}"``, uses the bucket name as provided by the #: user without any changes. #: #: .. seealso:: #: :meth:`awsfabrictasks.s3.api.S3ConnectionWrapper.get_bucket_using_pattern`, #: :func:`awsfabrictasks.s3.api.settingsformat_bucketname` S3_BUCKET_PATTERN = '{bucketname}'
auth = {} default_region = 'eu-west-1' ec2_instance_default_sshuser = 'root' keypair_path = ['.', '~/.ssh/'] extra_ssh_args = '-o StrictHostKeyChecking=no' ec2_launch_configs = {} s3_bucket_pattern = '{bucketname}'
""" SwFTP is an FTP and SFTP interface for Openstack Swift See COPYING for license information. """ VERSION = '1.0.7' USER_AGENT = 'SwFTP v%s' % VERSION __title__ = 'swftp' __version__ = VERSION __author__ = 'SoftLayer Technologies, Inc.' __license__ = 'MIT' __copyright__ = 'Copyright 2014 SoftLayer Technologies, Inc.'
""" SwFTP is an FTP and SFTP interface for Openstack Swift See COPYING for license information. """ version = '1.0.7' user_agent = 'SwFTP v%s' % VERSION __title__ = 'swftp' __version__ = VERSION __author__ = 'SoftLayer Technologies, Inc.' __license__ = 'MIT' __copyright__ = 'Copyright 2014 SoftLayer Technologies, Inc.'
# This software and supporting documentation are distributed by # Institut Federatif de Recherche 49 # CEA/NeuroSpin, Batiment 145, # 91191 Gif-sur-Yvette cedex # France # # This software is governed by the CeCILL-B license under # French law and abiding by the rules of distribution of free software. # You can use, modify and/or redistribute the software under the # terms of the CeCILL-B license as circulated by CEA, CNRS # and INRIA at the following URL "http://www.cecill.info". # # As a counterpart to the access to the source code and rights to copy, # modify and redistribute granted by the license, users are provided only # with a limited warranty and the software's author, the holder of the # economic rights, and the successive licensors have only limited # liability. # # In this respect, the user's attention is drawn to the risks associated # with loading, using, modifying and/or developing or reproducing the # software by the user in light of its specific status of free software, # that may mean that it is complicated to manipulate, and that also # therefore means that it is reserved for developers and experienced # professionals having in-depth computer knowledge. Users are therefore # encouraged to load and test the software's suitability as regards their # requirements in conditions enabling the security of their systems and/or # data to be ensured and, more generally, to use and operate it in the # same conditions as regards security. # # The fact that you are presently reading this means that you have had # knowledge of the CeCILL-B license and that you accept its terms. typessub.update( { 'Moment<Void>' : { 'typecode' : 'Moment_VOID', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : 'new Moment<Void>', 'NumType' : 'PyArray_OBJECT', 'PyType' : 'Moment_VOID', 'sipClass' : 'Moment_VOID', 'typeinclude' : \ '#include <aims/moment/moment.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipMoment_VOID.h"\n' '#endif\n' '#ifndef PYAIMSALGO_MOMENT_VOID_DEFINED\n' '#define PYAIMSALGO_MOMENT_VOID_DEFINED\n' 'inline int pyaimsalgoMoment_VOID_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, sipClass_Moment_VOID, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoMoment_VOID_Check', }, 'Samplable<float,3>' : \ { 'typecode' : 'Samplable_FLOAT_3', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'Samplable_FLOAT_3', 'sipClass' : 'Samplable_FLOAT_3', 'typeinclude' : \ '#include <aims/resampling/samplable.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipSamplable_FLOAT_3.h"\n' '#endif\n' '#ifndef PYAIMSALGO_SAMPLABLE_FLOAT_3_DEFINED\n' '#define PYAIMSALGO_SAMPLABLE_FLOAT_3_DEFINED\n' 'inline int pyaimsalgoSamplable_FLOAT_3_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, Samplable_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoSamplable_FLOAT_3_Check', }, 'BucketMapSampler<float,3>' : \ { 'typecode' : 'BucketMapSampler_FLOAT_3', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'BucketMapSampler_FLOAT_3', 'sipClass' : 'BucketMapSampler_FLOAT_3', 'typeinclude' : \ '#include <aims/resampling/bucketmapsampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipBucketMapSampler_FLOAT_3.h"\n' '#endif\n' '#ifndef PYAIMSALGO_BUCKETMAPSAMPLER_FLOAT_3_DEFINED\n' '#define PYAIMSALGO_BUCKETMAPSAMPLER_FLOAT_3_DEFINED\n' 'inline int pyaimsalgoBucketMapSampler_FLOAT_3_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, BucketMapSampler_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoBucketMapSampler_FLOAT_3_Check', }, 'GeneralSampler<float,3>' : \ { 'typecode' : 'GeneralSampler_FLOAT_3', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'GeneralSampler_FLOAT_3', 'sipClass' : 'GeneralSampler_FLOAT_3', 'typeinclude' : \ '#include <aims/resampling/generalsampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipGeneralSampler_FLOAT_3.h"\n' '#endif\n' '#ifndef PYAIMSALGO_GENERALSAMPLER_FLOAT_3_DEFINED\n' '#define PYAIMSALGO_GENERALSAMPLER_FLOAT_3_DEFINED\n' 'inline int pyaimsalgoGeneralSampler_FLOAT_3_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, GeneralSampler_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoGeneralSampler_FLOAT_3_Check', }, 'Polynomial<float,3>' : \ { 'typecode' : 'Polynomial_FLOAT_3', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'Polynomial_FLOAT_3', 'sipClass' : 'Polynomial_FLOAT_3', 'typeinclude' : \ '#include <aims/resampling/polynomial.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipPolynomial_FLOAT_3.h"\n' '#endif\n' '#ifndef PYAIMSALGO_POLYNOMIAL_FLOAT_3_DEFINED\n' '#define PYAIMSALGO_POLYNOMIAL_FLOAT_3_DEFINED\n' 'inline int pyaimsalgoPolynomial_FLOAT_3_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, Polynomial_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoPolynomial_FLOAT_3_Check', }, 'Resampler<int16_t>' : \ { 'typecode' : 'Resampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'Resampler_S16', 'sipClass' : 'Resampler_S16', 'typeinclude' : \ '#include <aims/resampling/resampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_RESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_RESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, Resampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoResampler_S16_Check', }, 'SplineResampler<int16_t>' : \ { 'typecode' : 'SplineResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'SplineResampler_S16', 'sipClass' : 'SplineResampler_S16', 'typeinclude' : \ '#include <aims/resampling/splineresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipSplineResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_SPLINERESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_SPLINERESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoSplineResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, SplineResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoSplineResampler_S16_Check', }, 'MaskLinearResampler<int16_t>' : \ { 'typecode' : 'MaskLinearResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'MaskLinearResampler_S16', 'sipClass' : 'MaskLinearResampler_S16', 'typeinclude' : \ '#include <aims/resampling/masklinresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipMaskLinearResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_MASKLINEARRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_MASKLINEARRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoMaskLinearResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, MaskLinearResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoMaskLinearResampler_S16_Check', }, 'NearestNeighborResampler<int16_t>' : \ { 'typecode' : 'NearestNeighborResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'NearestNeighborResampler_S16', 'sipClass' : 'NearestNeighborResampler_S16', 'typeinclude' : \ '#include <aims/resampling/nearestneighborresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipNearestNeighborResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_NEARESTNEIGHBORRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_NEARESTNEIGHBORRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoNearestNeighborResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, NearestNeighborResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoNearestNeighborResampler_S16_Check', }, 'CubicResampler<int16_t>' : \ { 'typecode' : 'CubicResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'CubicResampler_S16', 'sipClass' : 'CubicResampler_S16', 'typeinclude' : \ '#include <aims/resampling/cubicresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipCubicResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_CUBICRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_CUBICRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoCubicResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, CubicResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoCubicResampler_S16_Check', }, 'QuinticResampler<int16_t>' : \ { 'typecode' : 'QuinticResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'QuinticResampler_S16', 'sipClass' : 'QuinticResampler_S16', 'typeinclude' : \ '#include <aims/resampling/quinticresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipQuinticResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_QUINTICRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_QUINTICRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoQuinticResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, QuinticResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoQuinticResampler_S16_Check', }, 'SixthOrderResampler<int16_t>' : \ { 'typecode' : 'SixthOrderResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'SixthOrderResampler_S16', 'sipClass' : 'SixthOrderResampler_S16', 'typeinclude' : \ '#include <aims/resampling/sixthorderresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipSixthOrderResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_SIXTHORDERRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_SIXTHORDERRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoSixthOrderResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, SixthOrderResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoSixthOrderResampler_S16_Check', }, 'SeventhOrderResampler<int16_t>' : \ { 'typecode' : 'SeventhOrderResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'SeventhOrderResampler_S16', 'sipClass' : 'SeventhOrderResampler_S16', 'typeinclude' : \ '#include <aims/resampling/seventhorderresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipSeventhOrderResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_SEVENTHORDERRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_SEVENTHORDERRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoSeventhOrderResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, SeventhOrderResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoSeventhOrderResampler_S16_Check', }, 'LinearResampler<int16_t>' : \ { 'typecode' : 'LinearResampler_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'LinearResampler_S16', 'sipClass' : 'LinearResampler_S16', 'typeinclude' : \ '#include <aims/resampling/linearresampler.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipLinearResampler_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_LINEARRESAMPLER_S16_DEFINED\n' '#define PYAIMSALGO_LINEARRESAMPLER_S16_DEFINED\n' 'inline int pyaimsalgoLinearResampler_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, LinearResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoLinearResampler_S16_Check', }, 'ResamplerFactory<int16_t>' : \ { 'typecode' : 'ResamplerFactory_S16', 'pyFromC' : '', 'CFromPy' : '', 'castFromSip' : '', 'deref' : '*', 'pyderef' : '*', 'address' : '&', 'pyaddress' : '&', 'defScalar' : '', 'new' : '', 'NumType' : '', 'PyType' : 'ResamplerFactory_S16', 'sipClass' : 'ResamplerFactory_S16', 'typeinclude' : \ '#include <aims/resampling/resamplerfactory.h>', 'sipinclude' : '#if SIP_VERSION < 0x040700\n' '#include "sipaimsalgosipResamplerFactory_S16.h"\n' '#endif\n' '#ifndef PYAIMSALGO_RESAMPLERFACTORY_S16_DEFINED\n' '#define PYAIMSALGO_RESAMPLERFACTORY_S16_DEFINED\n' 'inline int pyaimsalgoResamplerFactory_S16_Check( PyObject* o )\n' '{ return sipCanConvertToInstance( o, ResamplerFactory_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n' '#endif', 'module' : 'aimsalgo', 'testPyType' : 'pyaimsalgoResamplerFactory_S16_Check', }, 'aims::FfdTransformation': classInAimsNamespace( 'aims/registration/ffd.h', 'FfdTransformation'), 'aims::SplineFfd': classInAimsNamespace( 'aims/registration/ffd.h', 'SplineFfd'), 'aims::TrilinearFfd': classInAimsNamespace( 'aims/registration/ffd.h', 'TrilinearFfd'), 'aims::GeometricProperties': classInAimsNamespace( 'aims/mesh/geometric.h', 'GeometricProperties'), } ) completeTypesSub( typessub )
typessub.update({'Moment<Void>': {'typecode': 'Moment_VOID', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': 'new Moment<Void>', 'NumType': 'PyArray_OBJECT', 'PyType': 'Moment_VOID', 'sipClass': 'Moment_VOID', 'typeinclude': '#include <aims/moment/moment.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipMoment_VOID.h"\n#endif\n#ifndef PYAIMSALGO_MOMENT_VOID_DEFINED\n#define PYAIMSALGO_MOMENT_VOID_DEFINED\ninline int pyaimsalgoMoment_VOID_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, sipClass_Moment_VOID, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoMoment_VOID_Check'}, 'Samplable<float,3>': {'typecode': 'Samplable_FLOAT_3', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'Samplable_FLOAT_3', 'sipClass': 'Samplable_FLOAT_3', 'typeinclude': '#include <aims/resampling/samplable.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipSamplable_FLOAT_3.h"\n#endif\n#ifndef PYAIMSALGO_SAMPLABLE_FLOAT_3_DEFINED\n#define PYAIMSALGO_SAMPLABLE_FLOAT_3_DEFINED\ninline int pyaimsalgoSamplable_FLOAT_3_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, Samplable_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoSamplable_FLOAT_3_Check'}, 'BucketMapSampler<float,3>': {'typecode': 'BucketMapSampler_FLOAT_3', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'BucketMapSampler_FLOAT_3', 'sipClass': 'BucketMapSampler_FLOAT_3', 'typeinclude': '#include <aims/resampling/bucketmapsampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipBucketMapSampler_FLOAT_3.h"\n#endif\n#ifndef PYAIMSALGO_BUCKETMAPSAMPLER_FLOAT_3_DEFINED\n#define PYAIMSALGO_BUCKETMAPSAMPLER_FLOAT_3_DEFINED\ninline int pyaimsalgoBucketMapSampler_FLOAT_3_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, BucketMapSampler_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoBucketMapSampler_FLOAT_3_Check'}, 'GeneralSampler<float,3>': {'typecode': 'GeneralSampler_FLOAT_3', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'GeneralSampler_FLOAT_3', 'sipClass': 'GeneralSampler_FLOAT_3', 'typeinclude': '#include <aims/resampling/generalsampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipGeneralSampler_FLOAT_3.h"\n#endif\n#ifndef PYAIMSALGO_GENERALSAMPLER_FLOAT_3_DEFINED\n#define PYAIMSALGO_GENERALSAMPLER_FLOAT_3_DEFINED\ninline int pyaimsalgoGeneralSampler_FLOAT_3_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, GeneralSampler_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoGeneralSampler_FLOAT_3_Check'}, 'Polynomial<float,3>': {'typecode': 'Polynomial_FLOAT_3', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'Polynomial_FLOAT_3', 'sipClass': 'Polynomial_FLOAT_3', 'typeinclude': '#include <aims/resampling/polynomial.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipPolynomial_FLOAT_3.h"\n#endif\n#ifndef PYAIMSALGO_POLYNOMIAL_FLOAT_3_DEFINED\n#define PYAIMSALGO_POLYNOMIAL_FLOAT_3_DEFINED\ninline int pyaimsalgoPolynomial_FLOAT_3_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, Polynomial_FLOAT_3, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoPolynomial_FLOAT_3_Check'}, 'Resampler<int16_t>': {'typecode': 'Resampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'Resampler_S16', 'sipClass': 'Resampler_S16', 'typeinclude': '#include <aims/resampling/resampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_RESAMPLER_S16_DEFINED\n#define PYAIMSALGO_RESAMPLER_S16_DEFINED\ninline int pyaimsalgoResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, Resampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoResampler_S16_Check'}, 'SplineResampler<int16_t>': {'typecode': 'SplineResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'SplineResampler_S16', 'sipClass': 'SplineResampler_S16', 'typeinclude': '#include <aims/resampling/splineresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipSplineResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_SPLINERESAMPLER_S16_DEFINED\n#define PYAIMSALGO_SPLINERESAMPLER_S16_DEFINED\ninline int pyaimsalgoSplineResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, SplineResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoSplineResampler_S16_Check'}, 'MaskLinearResampler<int16_t>': {'typecode': 'MaskLinearResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'MaskLinearResampler_S16', 'sipClass': 'MaskLinearResampler_S16', 'typeinclude': '#include <aims/resampling/masklinresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipMaskLinearResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_MASKLINEARRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_MASKLINEARRESAMPLER_S16_DEFINED\ninline int pyaimsalgoMaskLinearResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, MaskLinearResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoMaskLinearResampler_S16_Check'}, 'NearestNeighborResampler<int16_t>': {'typecode': 'NearestNeighborResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'NearestNeighborResampler_S16', 'sipClass': 'NearestNeighborResampler_S16', 'typeinclude': '#include <aims/resampling/nearestneighborresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipNearestNeighborResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_NEARESTNEIGHBORRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_NEARESTNEIGHBORRESAMPLER_S16_DEFINED\ninline int pyaimsalgoNearestNeighborResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, NearestNeighborResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoNearestNeighborResampler_S16_Check'}, 'CubicResampler<int16_t>': {'typecode': 'CubicResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'CubicResampler_S16', 'sipClass': 'CubicResampler_S16', 'typeinclude': '#include <aims/resampling/cubicresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipCubicResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_CUBICRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_CUBICRESAMPLER_S16_DEFINED\ninline int pyaimsalgoCubicResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, CubicResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoCubicResampler_S16_Check'}, 'QuinticResampler<int16_t>': {'typecode': 'QuinticResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'QuinticResampler_S16', 'sipClass': 'QuinticResampler_S16', 'typeinclude': '#include <aims/resampling/quinticresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipQuinticResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_QUINTICRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_QUINTICRESAMPLER_S16_DEFINED\ninline int pyaimsalgoQuinticResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, QuinticResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoQuinticResampler_S16_Check'}, 'SixthOrderResampler<int16_t>': {'typecode': 'SixthOrderResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'SixthOrderResampler_S16', 'sipClass': 'SixthOrderResampler_S16', 'typeinclude': '#include <aims/resampling/sixthorderresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipSixthOrderResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_SIXTHORDERRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_SIXTHORDERRESAMPLER_S16_DEFINED\ninline int pyaimsalgoSixthOrderResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, SixthOrderResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoSixthOrderResampler_S16_Check'}, 'SeventhOrderResampler<int16_t>': {'typecode': 'SeventhOrderResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'SeventhOrderResampler_S16', 'sipClass': 'SeventhOrderResampler_S16', 'typeinclude': '#include <aims/resampling/seventhorderresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipSeventhOrderResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_SEVENTHORDERRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_SEVENTHORDERRESAMPLER_S16_DEFINED\ninline int pyaimsalgoSeventhOrderResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, SeventhOrderResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoSeventhOrderResampler_S16_Check'}, 'LinearResampler<int16_t>': {'typecode': 'LinearResampler_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'LinearResampler_S16', 'sipClass': 'LinearResampler_S16', 'typeinclude': '#include <aims/resampling/linearresampler.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipLinearResampler_S16.h"\n#endif\n#ifndef PYAIMSALGO_LINEARRESAMPLER_S16_DEFINED\n#define PYAIMSALGO_LINEARRESAMPLER_S16_DEFINED\ninline int pyaimsalgoLinearResampler_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, LinearResampler_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoLinearResampler_S16_Check'}, 'ResamplerFactory<int16_t>': {'typecode': 'ResamplerFactory_S16', 'pyFromC': '', 'CFromPy': '', 'castFromSip': '', 'deref': '*', 'pyderef': '*', 'address': '&', 'pyaddress': '&', 'defScalar': '', 'new': '', 'NumType': '', 'PyType': 'ResamplerFactory_S16', 'sipClass': 'ResamplerFactory_S16', 'typeinclude': '#include <aims/resampling/resamplerfactory.h>', 'sipinclude': '#if SIP_VERSION < 0x040700\n#include "sipaimsalgosipResamplerFactory_S16.h"\n#endif\n#ifndef PYAIMSALGO_RESAMPLERFACTORY_S16_DEFINED\n#define PYAIMSALGO_RESAMPLERFACTORY_S16_DEFINED\ninline int pyaimsalgoResamplerFactory_S16_Check( PyObject* o )\n{ return sipCanConvertToInstance( o, ResamplerFactory_S16, SIP_NOT_NONE | SIP_NO_CONVERTORS ); }\n#endif', 'module': 'aimsalgo', 'testPyType': 'pyaimsalgoResamplerFactory_S16_Check'}, 'aims::FfdTransformation': class_in_aims_namespace('aims/registration/ffd.h', 'FfdTransformation'), 'aims::SplineFfd': class_in_aims_namespace('aims/registration/ffd.h', 'SplineFfd'), 'aims::TrilinearFfd': class_in_aims_namespace('aims/registration/ffd.h', 'TrilinearFfd'), 'aims::GeometricProperties': class_in_aims_namespace('aims/mesh/geometric.h', 'GeometricProperties')}) complete_types_sub(typessub)
cups = [True, False, False] steps = list(input()) for c in list(steps): if c == 'A': _ = cups[0] cups[0] = cups[1] cups[1] = _ elif c == 'B': _ = cups[1] cups[1] = cups[2] cups[2] = _ else: _ = cups[0] cups[0] = cups[2] cups[2] = _ print(cups.index(True)+1)
cups = [True, False, False] steps = list(input()) for c in list(steps): if c == 'A': _ = cups[0] cups[0] = cups[1] cups[1] = _ elif c == 'B': _ = cups[1] cups[1] = cups[2] cups[2] = _ else: _ = cups[0] cups[0] = cups[2] cups[2] = _ print(cups.index(True) + 1)
PASSWORD = "Lq#QHMnpyk6Y+.]" def check(selenium_obj, host): current_host = f"http://na2.{host}/" selenium_obj.get(current_host) selenium_obj.add_cookie({'name': 'token', 'value': PASSWORD, 'path': '/'})
password = 'Lq#QHMnpyk6Y+.]' def check(selenium_obj, host): current_host = f'http://na2.{host}/' selenium_obj.get(current_host) selenium_obj.add_cookie({'name': 'token', 'value': PASSWORD, 'path': '/'})
LEFT_PADDING = ' ' # using spaces instead of tabs ('/t') creates more consistent results #DIGRAPH_START = 'digraph G { \n' DIGRAPH_END = ' }' # todo: fix padding for sub-graphs class Graph_Dot_Render: def __init__(self, graph, sub_graphs, graph_name='G', graph_type='digraph'): self.graph_name = graph_name self.graph_type = graph_type self.dot_code = "" self.extra_dot_code = "" self.label = "" self.node_params = {} self.concentrate = None self.size = None self.rank_dir = None self.rank_sep = None self.ranks = {} self.graph = graph self.sub_graphs = sub_graphs pass #self.graph_dot = graph_dot # helpers def join_params(self,params): return ' '.join([f'{key}="{value}"' for key, value in params.items()]) def parse_into_params(self, source, skip_names): params = "" for param_name,param_value in source.items(): if param_name in skip_names : continue if param_value: params += f'{param_name}="{param_value}" ' return params def edge_link(self, edge_node): # add support for record shape linking in edges if ':' in edge_node: items = edge_node.split(':') # todo: refactor how this is done return f'"{items[0]}":"{items[1]}"' else: return f'"{edge_node}"' # render main def render(self): self.dot_code = f'{self.graph_type} {self.graph_name} {{' ( self.add_rand_dir() .add_rank_sep() .add_size() .add_label() .add_node_params() .add_concentrate() .add_sub_graphs() ) self.add_line().add_comment ('###### Nodes #######') for node in self.graph.nodes(): key = node.get('key') label = node.get('value') or key params = self.parse_into_params(node, ['key']) if params: self.add_line(f'"{key}" [{params}]') else: self.add_line(f'"{key}" ["label"="{label}"]') self.add_line().add_comment('###### Edges #######') for edge in self.graph.edges(): from_key = self.edge_link(edge.get('from')) to_key = self.edge_link(edge.get('to')) params = self.parse_into_params(edge, ['from','to']) self.add_line(f' {from_key} -> {to_key} [{params}]') (self.add_ranks() .add_extra_dot_code()) self.dot_code += DIGRAPH_END return self.dot_code def add_sub_graphs(self): for sub_graph in self.sub_graphs: self.add_line().add_line(sub_graph.render.render()) return self # render methods def add_concentrate(self): if self.concentrate: self.add_line('concentrate=true') return self def add_extra_dot_code(self): if self.extra_dot_code: self.dot_code += self.extra_dot_code return self def add_label(self): if self.label: self.add_line(f'label="{self.label}";') \ .add_line('labelloc = "t"') # default to put label at the top return self def add_line(self, value=''): # todo: refactor all add_*** methods into separate 'build' class self.dot_code += f'{LEFT_PADDING}{value} \n' return self def add_size(self): if self.size: self.add_line(f'size = "{self.size},{self.size}"') return self def add_rand_dir(self): if self.rank_dir: self.add_line(f'rankdir={self.rank_dir};') return self def add_rank_sep(self): if self.rank_sep: self.add_line(f'ranksep={self.rank_sep};') return self def add_comment(self, value): return self.add_line(f'#{value} \n') def add_node_params(self): if self.node_params: self.add_line(f'node [{self.join_params(self.node_params)}]') return self def add_ranks(self): for rank, node_ids in self.ranks.items(): node_list = ', '.join(['"%s"' % node_id for node_id in node_ids]) self.add_line(f'{{ rank={rank}; {node_list} }}') return self
left_padding = ' ' digraph_end = ' }' class Graph_Dot_Render: def __init__(self, graph, sub_graphs, graph_name='G', graph_type='digraph'): self.graph_name = graph_name self.graph_type = graph_type self.dot_code = '' self.extra_dot_code = '' self.label = '' self.node_params = {} self.concentrate = None self.size = None self.rank_dir = None self.rank_sep = None self.ranks = {} self.graph = graph self.sub_graphs = sub_graphs pass def join_params(self, params): return ' '.join([f'{key}="{value}"' for (key, value) in params.items()]) def parse_into_params(self, source, skip_names): params = '' for (param_name, param_value) in source.items(): if param_name in skip_names: continue if param_value: params += f'{param_name}="{param_value}" ' return params def edge_link(self, edge_node): if ':' in edge_node: items = edge_node.split(':') return f'"{items[0]}":"{items[1]}"' else: return f'"{edge_node}"' def render(self): self.dot_code = f'{self.graph_type} {self.graph_name} {{' self.add_rand_dir().add_rank_sep().add_size().add_label().add_node_params().add_concentrate().add_sub_graphs() self.add_line().add_comment('###### Nodes #######') for node in self.graph.nodes(): key = node.get('key') label = node.get('value') or key params = self.parse_into_params(node, ['key']) if params: self.add_line(f'"{key}" [{params}]') else: self.add_line(f'"{key}" ["label"="{label}"]') self.add_line().add_comment('###### Edges #######') for edge in self.graph.edges(): from_key = self.edge_link(edge.get('from')) to_key = self.edge_link(edge.get('to')) params = self.parse_into_params(edge, ['from', 'to']) self.add_line(f' {from_key} -> {to_key} [{params}]') self.add_ranks().add_extra_dot_code() self.dot_code += DIGRAPH_END return self.dot_code def add_sub_graphs(self): for sub_graph in self.sub_graphs: self.add_line().add_line(sub_graph.render.render()) return self def add_concentrate(self): if self.concentrate: self.add_line('concentrate=true') return self def add_extra_dot_code(self): if self.extra_dot_code: self.dot_code += self.extra_dot_code return self def add_label(self): if self.label: self.add_line(f'label="{self.label}";').add_line('labelloc = "t"') return self def add_line(self, value=''): self.dot_code += f'{LEFT_PADDING}{value} \n' return self def add_size(self): if self.size: self.add_line(f'size = "{self.size},{self.size}"') return self def add_rand_dir(self): if self.rank_dir: self.add_line(f'rankdir={self.rank_dir};') return self def add_rank_sep(self): if self.rank_sep: self.add_line(f'ranksep={self.rank_sep};') return self def add_comment(self, value): return self.add_line(f'#{value} \n') def add_node_params(self): if self.node_params: self.add_line(f'node [{self.join_params(self.node_params)}]') return self def add_ranks(self): for (rank, node_ids) in self.ranks.items(): node_list = ', '.join(['"%s"' % node_id for node_id in node_ids]) self.add_line(f'{{ rank={rank}; {node_list} }}') return self
def to_pandas_table(self, ): self.lock.aquire() try: asks_tbl = pd.DataFrame(data=self._asks, index=range(len(self._asks))) asks_tbl = pd.DataFrame(data=self._bids, index=range(len(self._bids))) finally: self.lock.release()
def to_pandas_table(self): self.lock.aquire() try: asks_tbl = pd.DataFrame(data=self._asks, index=range(len(self._asks))) asks_tbl = pd.DataFrame(data=self._bids, index=range(len(self._bids))) finally: self.lock.release()
"""Constants for the Govee LED strips integration.""" DOMAIN = "dw_spectrum" CONF_DISABLE_ATTRIBUTE_UPDATES = "disable_attribute_updates" CONF_OFFLINE_IS_OFF = "offline_is_off" CONF_USE_ASSUMED_STATE = "use_assumed_state"
"""Constants for the Govee LED strips integration.""" domain = 'dw_spectrum' conf_disable_attribute_updates = 'disable_attribute_updates' conf_offline_is_off = 'offline_is_off' conf_use_assumed_state = 'use_assumed_state'
print('hello, world !') print('---------------') for v in range(0, 5+1): if v % 2 == 0: print('hello, world !') else: print(v, " is odd")
print('hello, world !') print('---------------') for v in range(0, 5 + 1): if v % 2 == 0: print('hello, world !') else: print(v, ' is odd')
def barplot(x_data, y_data, error_data, x_label="", y_label="", title=""): _, ax = plt.subplots() # Draw bars, position them in the center of the tick mark on the x-axis ax.bar(x_data, y_data, color = '#539caf', align = 'center') # Draw error bars to show standard deviation, set ls to 'none' # to remove line between points ax.errorbar(x_data, y_data, yerr = error_data, color = '#297083', ls = 'none', lw = 2, capthick = 2) ax.set_ylabel(y_label) ax.set_xlabel(x_label) ax.set_title(title) def stackedbarplot(x_data, y_data_list, colors, y_data_names="", x_label="", y_label="", title=""): _, ax = plt.subplots() # Draw bars, one category at a time for i in range(0, len(y_data_list)): if i == 0: ax.bar(x_data, y_data_list[i], color = colors[i], align = 'center', label = y_data_names[i]) else: # For each category after the first, the bottom of the # bar will be the top of the last category ax.bar(x_data, y_data_list[i], color = colors[i], bottom = y_data_list[i - 1], align = 'center', label = y_data_names[i]) ax.set_ylabel(y_label) ax.set_xlabel(x_label) ax.set_title(title) ax.legend(loc = 'upper right') def groupedbarplot(x_data, y_data_list, colors, y_data_names="", x_label="", y_label="", title=""): _, ax = plt.subplots() # Total width for all bars at one x location total_width = 0.8 # Width of each individual bar ind_width = total_width / len(y_data_list) # This centers each cluster of bars about the x tick mark alteration = np.arange(-(total_width/2), total_width/2, ind_width) # Draw bars, one category at a time for i in range(0, len(y_data_list)): # Move the bar to the right on the x-axis so it doesn't # overlap with previously drawn ones ax.bar(x_data + alteration[i], y_data_list[i], color = colors[i], label = y_data_names[i], width = ind_width) ax.set_ylabel(y_label) ax.set_xlabel(x_label) ax.set_title(title) ax.legend(loc = 'upper right')
def barplot(x_data, y_data, error_data, x_label='', y_label='', title=''): (_, ax) = plt.subplots() ax.bar(x_data, y_data, color='#539caf', align='center') ax.errorbar(x_data, y_data, yerr=error_data, color='#297083', ls='none', lw=2, capthick=2) ax.set_ylabel(y_label) ax.set_xlabel(x_label) ax.set_title(title) def stackedbarplot(x_data, y_data_list, colors, y_data_names='', x_label='', y_label='', title=''): (_, ax) = plt.subplots() for i in range(0, len(y_data_list)): if i == 0: ax.bar(x_data, y_data_list[i], color=colors[i], align='center', label=y_data_names[i]) else: ax.bar(x_data, y_data_list[i], color=colors[i], bottom=y_data_list[i - 1], align='center', label=y_data_names[i]) ax.set_ylabel(y_label) ax.set_xlabel(x_label) ax.set_title(title) ax.legend(loc='upper right') def groupedbarplot(x_data, y_data_list, colors, y_data_names='', x_label='', y_label='', title=''): (_, ax) = plt.subplots() total_width = 0.8 ind_width = total_width / len(y_data_list) alteration = np.arange(-(total_width / 2), total_width / 2, ind_width) for i in range(0, len(y_data_list)): ax.bar(x_data + alteration[i], y_data_list[i], color=colors[i], label=y_data_names[i], width=ind_width) ax.set_ylabel(y_label) ax.set_xlabel(x_label) ax.set_title(title) ax.legend(loc='upper right')
# Definition for an interval. class Interval: def __init__(self, s=0, e=0): self.start = s self.end = e class Solution: def merge(self, intervals): """ :type intervals: List[Interval] :rtype: List[Interval] """ if not intervals: return [] intervals_sorted = sorted(intervals, key=lambda x: x.start) results = [intervals_sorted[0]] for interval in intervals_sorted[1:]: if interval.start <= results[-1].end: results[-1].end = max(results[-1].end, interval.end) else: results.append(interval) return results
class Interval: def __init__(self, s=0, e=0): self.start = s self.end = e class Solution: def merge(self, intervals): """ :type intervals: List[Interval] :rtype: List[Interval] """ if not intervals: return [] intervals_sorted = sorted(intervals, key=lambda x: x.start) results = [intervals_sorted[0]] for interval in intervals_sorted[1:]: if interval.start <= results[-1].end: results[-1].end = max(results[-1].end, interval.end) else: results.append(interval) return results
class ListNode: def __init__(self, x): self.val = x self.next = None def __eq__(self, other): if isinstance(other, ListNode): m = self n = other while m and n: if m.val != n.val: return False m = m.next n = n.next if m or n: return False return True return False def __str__(self): n = self s = '' while n: s += '->' s += str(n.val) n = n.next return s def makeListNode(list_int): a = None cur = None for i in list_int: tmp = ListNode(int(i)) tmp.next = None if not a: a = tmp cur = a else: cur.next = tmp cur = cur.next return a
class Listnode: def __init__(self, x): self.val = x self.next = None def __eq__(self, other): if isinstance(other, ListNode): m = self n = other while m and n: if m.val != n.val: return False m = m.next n = n.next if m or n: return False return True return False def __str__(self): n = self s = '' while n: s += '->' s += str(n.val) n = n.next return s def make_list_node(list_int): a = None cur = None for i in list_int: tmp = list_node(int(i)) tmp.next = None if not a: a = tmp cur = a else: cur.next = tmp cur = cur.next return a
del_items(0x80121AA0) SetType(0x80121AA0, "struct THEME_LOC themeLoc[50]") del_items(0x801221E8) SetType(0x801221E8, "int OldBlock[4]") del_items(0x801221F8) SetType(0x801221F8, "unsigned char L5dungeon[80][80]") del_items(0x80121E88) SetType(0x80121E88, "struct ShadowStruct SPATS[37]") del_items(0x80121F8C) SetType(0x80121F8C, "unsigned char BSTYPES[206]") del_items(0x8012205C) SetType(0x8012205C, "unsigned char L5BTYPES[206]") del_items(0x8012212C) SetType(0x8012212C, "unsigned char STAIRSUP[34]") del_items(0x80122150) SetType(0x80122150, "unsigned char L5STAIRSUP[34]") del_items(0x80122174) SetType(0x80122174, "unsigned char STAIRSDOWN[26]") del_items(0x80122190) SetType(0x80122190, "unsigned char LAMPS[10]") del_items(0x8012219C) SetType(0x8012219C, "unsigned char PWATERIN[74]") del_items(0x80121A90) SetType(0x80121A90, "unsigned char L5ConvTbl[16]") del_items(0x8012A428) SetType(0x8012A428, "struct ROOMNODE RoomList[81]") del_items(0x8012AA7C) SetType(0x8012AA7C, "unsigned char predungeon[40][40]") del_items(0x80128BB8) SetType(0x80128BB8, "int Dir_Xadd[5]") del_items(0x80128BCC) SetType(0x80128BCC, "int Dir_Yadd[5]") del_items(0x80128BE0) SetType(0x80128BE0, "struct ShadowStruct SPATSL2[2]") del_items(0x80128BF0) SetType(0x80128BF0, "unsigned char BTYPESL2[161]") del_items(0x80128C94) SetType(0x80128C94, "unsigned char BSTYPESL2[161]") del_items(0x80128D38) SetType(0x80128D38, "unsigned char VARCH1[18]") del_items(0x80128D4C) SetType(0x80128D4C, "unsigned char VARCH2[18]") del_items(0x80128D60) SetType(0x80128D60, "unsigned char VARCH3[18]") del_items(0x80128D74) SetType(0x80128D74, "unsigned char VARCH4[18]") del_items(0x80128D88) SetType(0x80128D88, "unsigned char VARCH5[18]") del_items(0x80128D9C) SetType(0x80128D9C, "unsigned char VARCH6[18]") del_items(0x80128DB0) SetType(0x80128DB0, "unsigned char VARCH7[18]") del_items(0x80128DC4) SetType(0x80128DC4, "unsigned char VARCH8[18]") del_items(0x80128DD8) SetType(0x80128DD8, "unsigned char VARCH9[18]") del_items(0x80128DEC) SetType(0x80128DEC, "unsigned char VARCH10[18]") del_items(0x80128E00) SetType(0x80128E00, "unsigned char VARCH11[18]") del_items(0x80128E14) SetType(0x80128E14, "unsigned char VARCH12[18]") del_items(0x80128E28) SetType(0x80128E28, "unsigned char VARCH13[18]") del_items(0x80128E3C) SetType(0x80128E3C, "unsigned char VARCH14[18]") del_items(0x80128E50) SetType(0x80128E50, "unsigned char VARCH15[18]") del_items(0x80128E64) SetType(0x80128E64, "unsigned char VARCH16[18]") del_items(0x80128E78) SetType(0x80128E78, "unsigned char VARCH17[14]") del_items(0x80128E88) SetType(0x80128E88, "unsigned char VARCH18[14]") del_items(0x80128E98) SetType(0x80128E98, "unsigned char VARCH19[14]") del_items(0x80128EA8) SetType(0x80128EA8, "unsigned char VARCH20[14]") del_items(0x80128EB8) SetType(0x80128EB8, "unsigned char VARCH21[14]") del_items(0x80128EC8) SetType(0x80128EC8, "unsigned char VARCH22[14]") del_items(0x80128ED8) SetType(0x80128ED8, "unsigned char VARCH23[14]") del_items(0x80128EE8) SetType(0x80128EE8, "unsigned char VARCH24[14]") del_items(0x80128EF8) SetType(0x80128EF8, "unsigned char VARCH25[18]") del_items(0x80128F0C) SetType(0x80128F0C, "unsigned char VARCH26[18]") del_items(0x80128F20) SetType(0x80128F20, "unsigned char VARCH27[18]") del_items(0x80128F34) SetType(0x80128F34, "unsigned char VARCH28[18]") del_items(0x80128F48) SetType(0x80128F48, "unsigned char VARCH29[18]") del_items(0x80128F5C) SetType(0x80128F5C, "unsigned char VARCH30[18]") del_items(0x80128F70) SetType(0x80128F70, "unsigned char VARCH31[18]") del_items(0x80128F84) SetType(0x80128F84, "unsigned char VARCH32[18]") del_items(0x80128F98) SetType(0x80128F98, "unsigned char VARCH33[18]") del_items(0x80128FAC) SetType(0x80128FAC, "unsigned char VARCH34[18]") del_items(0x80128FC0) SetType(0x80128FC0, "unsigned char VARCH35[18]") del_items(0x80128FD4) SetType(0x80128FD4, "unsigned char VARCH36[18]") del_items(0x80128FE8) SetType(0x80128FE8, "unsigned char VARCH37[18]") del_items(0x80128FFC) SetType(0x80128FFC, "unsigned char VARCH38[18]") del_items(0x80129010) SetType(0x80129010, "unsigned char VARCH39[18]") del_items(0x80129024) SetType(0x80129024, "unsigned char VARCH40[18]") del_items(0x80129038) SetType(0x80129038, "unsigned char HARCH1[14]") del_items(0x80129048) SetType(0x80129048, "unsigned char HARCH2[14]") del_items(0x80129058) SetType(0x80129058, "unsigned char HARCH3[14]") del_items(0x80129068) SetType(0x80129068, "unsigned char HARCH4[14]") del_items(0x80129078) SetType(0x80129078, "unsigned char HARCH5[14]") del_items(0x80129088) SetType(0x80129088, "unsigned char HARCH6[14]") del_items(0x80129098) SetType(0x80129098, "unsigned char HARCH7[14]") del_items(0x801290A8) SetType(0x801290A8, "unsigned char HARCH8[14]") del_items(0x801290B8) SetType(0x801290B8, "unsigned char HARCH9[14]") del_items(0x801290C8) SetType(0x801290C8, "unsigned char HARCH10[14]") del_items(0x801290D8) SetType(0x801290D8, "unsigned char HARCH11[14]") del_items(0x801290E8) SetType(0x801290E8, "unsigned char HARCH12[14]") del_items(0x801290F8) SetType(0x801290F8, "unsigned char HARCH13[14]") del_items(0x80129108) SetType(0x80129108, "unsigned char HARCH14[14]") del_items(0x80129118) SetType(0x80129118, "unsigned char HARCH15[14]") del_items(0x80129128) SetType(0x80129128, "unsigned char HARCH16[14]") del_items(0x80129138) SetType(0x80129138, "unsigned char HARCH17[14]") del_items(0x80129148) SetType(0x80129148, "unsigned char HARCH18[14]") del_items(0x80129158) SetType(0x80129158, "unsigned char HARCH19[14]") del_items(0x80129168) SetType(0x80129168, "unsigned char HARCH20[14]") del_items(0x80129178) SetType(0x80129178, "unsigned char HARCH21[14]") del_items(0x80129188) SetType(0x80129188, "unsigned char HARCH22[14]") del_items(0x80129198) SetType(0x80129198, "unsigned char HARCH23[14]") del_items(0x801291A8) SetType(0x801291A8, "unsigned char HARCH24[14]") del_items(0x801291B8) SetType(0x801291B8, "unsigned char HARCH25[14]") del_items(0x801291C8) SetType(0x801291C8, "unsigned char HARCH26[14]") del_items(0x801291D8) SetType(0x801291D8, "unsigned char HARCH27[14]") del_items(0x801291E8) SetType(0x801291E8, "unsigned char HARCH28[14]") del_items(0x801291F8) SetType(0x801291F8, "unsigned char HARCH29[14]") del_items(0x80129208) SetType(0x80129208, "unsigned char HARCH30[14]") del_items(0x80129218) SetType(0x80129218, "unsigned char HARCH31[14]") del_items(0x80129228) SetType(0x80129228, "unsigned char HARCH32[14]") del_items(0x80129238) SetType(0x80129238, "unsigned char HARCH33[14]") del_items(0x80129248) SetType(0x80129248, "unsigned char HARCH34[14]") del_items(0x80129258) SetType(0x80129258, "unsigned char HARCH35[14]") del_items(0x80129268) SetType(0x80129268, "unsigned char HARCH36[14]") del_items(0x80129278) SetType(0x80129278, "unsigned char HARCH37[14]") del_items(0x80129288) SetType(0x80129288, "unsigned char HARCH38[14]") del_items(0x80129298) SetType(0x80129298, "unsigned char HARCH39[14]") del_items(0x801292A8) SetType(0x801292A8, "unsigned char HARCH40[14]") del_items(0x801292B8) SetType(0x801292B8, "unsigned char USTAIRS[34]") del_items(0x801292DC) SetType(0x801292DC, "unsigned char DSTAIRS[34]") del_items(0x80129300) SetType(0x80129300, "unsigned char WARPSTAIRS[34]") del_items(0x80129324) SetType(0x80129324, "unsigned char CRUSHCOL[20]") del_items(0x80129338) SetType(0x80129338, "unsigned char BIG1[10]") del_items(0x80129344) SetType(0x80129344, "unsigned char BIG2[10]") del_items(0x80129350) SetType(0x80129350, "unsigned char BIG5[10]") del_items(0x8012935C) SetType(0x8012935C, "unsigned char BIG8[10]") del_items(0x80129368) SetType(0x80129368, "unsigned char BIG9[10]") del_items(0x80129374) SetType(0x80129374, "unsigned char BIG10[10]") del_items(0x80129380) SetType(0x80129380, "unsigned char PANCREAS1[32]") del_items(0x801293A0) SetType(0x801293A0, "unsigned char PANCREAS2[32]") del_items(0x801293C0) SetType(0x801293C0, "unsigned char CTRDOOR1[20]") del_items(0x801293D4) SetType(0x801293D4, "unsigned char CTRDOOR2[20]") del_items(0x801293E8) SetType(0x801293E8, "unsigned char CTRDOOR3[20]") del_items(0x801293FC) SetType(0x801293FC, "unsigned char CTRDOOR4[20]") del_items(0x80129410) SetType(0x80129410, "unsigned char CTRDOOR5[20]") del_items(0x80129424) SetType(0x80129424, "unsigned char CTRDOOR6[20]") del_items(0x80129438) SetType(0x80129438, "unsigned char CTRDOOR7[20]") del_items(0x8012944C) SetType(0x8012944C, "unsigned char CTRDOOR8[20]") del_items(0x80129460) SetType(0x80129460, "int Patterns[10][100]") del_items(0x80130470) SetType(0x80130470, "unsigned char lockout[40][40]") del_items(0x801301D0) SetType(0x801301D0, "unsigned char L3ConvTbl[16]") del_items(0x801301E0) SetType(0x801301E0, "unsigned char L3UP[20]") del_items(0x801301F4) SetType(0x801301F4, "unsigned char L3DOWN[20]") del_items(0x80130208) SetType(0x80130208, "unsigned char L3HOLDWARP[20]") del_items(0x8013021C) SetType(0x8013021C, "unsigned char L3TITE1[34]") del_items(0x80130240) SetType(0x80130240, "unsigned char L3TITE2[34]") del_items(0x80130264) SetType(0x80130264, "unsigned char L3TITE3[34]") del_items(0x80130288) SetType(0x80130288, "unsigned char L3TITE6[42]") del_items(0x801302B4) SetType(0x801302B4, "unsigned char L3TITE7[42]") del_items(0x801302E0) SetType(0x801302E0, "unsigned char L3TITE8[20]") del_items(0x801302F4) SetType(0x801302F4, "unsigned char L3TITE9[20]") del_items(0x80130308) SetType(0x80130308, "unsigned char L3TITE10[20]") del_items(0x8013031C) SetType(0x8013031C, "unsigned char L3TITE11[20]") del_items(0x80130330) SetType(0x80130330, "unsigned char L3ISLE1[14]") del_items(0x80130340) SetType(0x80130340, "unsigned char L3ISLE2[14]") del_items(0x80130350) SetType(0x80130350, "unsigned char L3ISLE3[14]") del_items(0x80130360) SetType(0x80130360, "unsigned char L3ISLE4[14]") del_items(0x80130370) SetType(0x80130370, "unsigned char L3ISLE5[10]") del_items(0x8013037C) SetType(0x8013037C, "unsigned char L3ANVIL[244]") del_items(0x8013528C) SetType(0x8013528C, "unsigned char dung[20][20]") del_items(0x8013541C) SetType(0x8013541C, "unsigned char hallok[20]") del_items(0x80135430) SetType(0x80135430, "unsigned char L4dungeon[80][80]") del_items(0x80136D30) SetType(0x80136D30, "unsigned char L4ConvTbl[16]") del_items(0x80136D40) SetType(0x80136D40, "unsigned char L4USTAIRS[42]") del_items(0x80136D6C) SetType(0x80136D6C, "unsigned char L4TWARP[42]") del_items(0x80136D98) SetType(0x80136D98, "unsigned char L4DSTAIRS[52]") del_items(0x80136DCC) SetType(0x80136DCC, "unsigned char L4PENTA[52]") del_items(0x80136E00) SetType(0x80136E00, "unsigned char L4PENTA2[52]") del_items(0x80136E34) SetType(0x80136E34, "unsigned char L4BTYPES[140]")
del_items(2148670112) set_type(2148670112, 'struct THEME_LOC themeLoc[50]') del_items(2148671976) set_type(2148671976, 'int OldBlock[4]') del_items(2148671992) set_type(2148671992, 'unsigned char L5dungeon[80][80]') del_items(2148671112) set_type(2148671112, 'struct ShadowStruct SPATS[37]') del_items(2148671372) set_type(2148671372, 'unsigned char BSTYPES[206]') del_items(2148671580) set_type(2148671580, 'unsigned char L5BTYPES[206]') del_items(2148671788) set_type(2148671788, 'unsigned char STAIRSUP[34]') del_items(2148671824) set_type(2148671824, 'unsigned char L5STAIRSUP[34]') del_items(2148671860) set_type(2148671860, 'unsigned char STAIRSDOWN[26]') del_items(2148671888) set_type(2148671888, 'unsigned char LAMPS[10]') del_items(2148671900) set_type(2148671900, 'unsigned char PWATERIN[74]') del_items(2148670096) set_type(2148670096, 'unsigned char L5ConvTbl[16]') del_items(2148705320) set_type(2148705320, 'struct ROOMNODE RoomList[81]') del_items(2148706940) set_type(2148706940, 'unsigned char predungeon[40][40]') del_items(2148699064) set_type(2148699064, 'int Dir_Xadd[5]') del_items(2148699084) set_type(2148699084, 'int Dir_Yadd[5]') del_items(2148699104) set_type(2148699104, 'struct ShadowStruct SPATSL2[2]') del_items(2148699120) set_type(2148699120, 'unsigned char BTYPESL2[161]') del_items(2148699284) set_type(2148699284, 'unsigned char BSTYPESL2[161]') del_items(2148699448) set_type(2148699448, 'unsigned char VARCH1[18]') del_items(2148699468) set_type(2148699468, 'unsigned char VARCH2[18]') del_items(2148699488) set_type(2148699488, 'unsigned char VARCH3[18]') del_items(2148699508) set_type(2148699508, 'unsigned char VARCH4[18]') del_items(2148699528) set_type(2148699528, 'unsigned char VARCH5[18]') del_items(2148699548) set_type(2148699548, 'unsigned char VARCH6[18]') del_items(2148699568) set_type(2148699568, 'unsigned char VARCH7[18]') del_items(2148699588) set_type(2148699588, 'unsigned char VARCH8[18]') del_items(2148699608) set_type(2148699608, 'unsigned char VARCH9[18]') del_items(2148699628) set_type(2148699628, 'unsigned char VARCH10[18]') del_items(2148699648) set_type(2148699648, 'unsigned char VARCH11[18]') del_items(2148699668) set_type(2148699668, 'unsigned char VARCH12[18]') del_items(2148699688) set_type(2148699688, 'unsigned char VARCH13[18]') del_items(2148699708) set_type(2148699708, 'unsigned char VARCH14[18]') del_items(2148699728) set_type(2148699728, 'unsigned char VARCH15[18]') del_items(2148699748) set_type(2148699748, 'unsigned char VARCH16[18]') del_items(2148699768) set_type(2148699768, 'unsigned char VARCH17[14]') del_items(2148699784) set_type(2148699784, 'unsigned char VARCH18[14]') del_items(2148699800) set_type(2148699800, 'unsigned char VARCH19[14]') del_items(2148699816) set_type(2148699816, 'unsigned char VARCH20[14]') del_items(2148699832) set_type(2148699832, 'unsigned char VARCH21[14]') del_items(2148699848) set_type(2148699848, 'unsigned char VARCH22[14]') del_items(2148699864) set_type(2148699864, 'unsigned char VARCH23[14]') del_items(2148699880) set_type(2148699880, 'unsigned char VARCH24[14]') del_items(2148699896) set_type(2148699896, 'unsigned char VARCH25[18]') del_items(2148699916) set_type(2148699916, 'unsigned char VARCH26[18]') del_items(2148699936) set_type(2148699936, 'unsigned char VARCH27[18]') del_items(2148699956) set_type(2148699956, 'unsigned char VARCH28[18]') del_items(2148699976) set_type(2148699976, 'unsigned char VARCH29[18]') del_items(2148699996) set_type(2148699996, 'unsigned char VARCH30[18]') del_items(2148700016) set_type(2148700016, 'unsigned char VARCH31[18]') del_items(2148700036) set_type(2148700036, 'unsigned char VARCH32[18]') del_items(2148700056) set_type(2148700056, 'unsigned char VARCH33[18]') del_items(2148700076) set_type(2148700076, 'unsigned char VARCH34[18]') del_items(2148700096) set_type(2148700096, 'unsigned char VARCH35[18]') del_items(2148700116) set_type(2148700116, 'unsigned char VARCH36[18]') del_items(2148700136) set_type(2148700136, 'unsigned char VARCH37[18]') del_items(2148700156) set_type(2148700156, 'unsigned char VARCH38[18]') del_items(2148700176) set_type(2148700176, 'unsigned char VARCH39[18]') del_items(2148700196) set_type(2148700196, 'unsigned char VARCH40[18]') del_items(2148700216) set_type(2148700216, 'unsigned char HARCH1[14]') del_items(2148700232) set_type(2148700232, 'unsigned char HARCH2[14]') del_items(2148700248) set_type(2148700248, 'unsigned char HARCH3[14]') del_items(2148700264) set_type(2148700264, 'unsigned char HARCH4[14]') del_items(2148700280) set_type(2148700280, 'unsigned char HARCH5[14]') del_items(2148700296) set_type(2148700296, 'unsigned char HARCH6[14]') del_items(2148700312) set_type(2148700312, 'unsigned char HARCH7[14]') del_items(2148700328) set_type(2148700328, 'unsigned char HARCH8[14]') del_items(2148700344) set_type(2148700344, 'unsigned char HARCH9[14]') del_items(2148700360) set_type(2148700360, 'unsigned char HARCH10[14]') del_items(2148700376) set_type(2148700376, 'unsigned char HARCH11[14]') del_items(2148700392) set_type(2148700392, 'unsigned char HARCH12[14]') del_items(2148700408) set_type(2148700408, 'unsigned char HARCH13[14]') del_items(2148700424) set_type(2148700424, 'unsigned char HARCH14[14]') del_items(2148700440) set_type(2148700440, 'unsigned char HARCH15[14]') del_items(2148700456) set_type(2148700456, 'unsigned char HARCH16[14]') del_items(2148700472) set_type(2148700472, 'unsigned char HARCH17[14]') del_items(2148700488) set_type(2148700488, 'unsigned char HARCH18[14]') del_items(2148700504) set_type(2148700504, 'unsigned char HARCH19[14]') del_items(2148700520) set_type(2148700520, 'unsigned char HARCH20[14]') del_items(2148700536) set_type(2148700536, 'unsigned char HARCH21[14]') del_items(2148700552) set_type(2148700552, 'unsigned char HARCH22[14]') del_items(2148700568) set_type(2148700568, 'unsigned char HARCH23[14]') del_items(2148700584) set_type(2148700584, 'unsigned char HARCH24[14]') del_items(2148700600) set_type(2148700600, 'unsigned char HARCH25[14]') del_items(2148700616) set_type(2148700616, 'unsigned char HARCH26[14]') del_items(2148700632) set_type(2148700632, 'unsigned char HARCH27[14]') del_items(2148700648) set_type(2148700648, 'unsigned char HARCH28[14]') del_items(2148700664) set_type(2148700664, 'unsigned char HARCH29[14]') del_items(2148700680) set_type(2148700680, 'unsigned char HARCH30[14]') del_items(2148700696) set_type(2148700696, 'unsigned char HARCH31[14]') del_items(2148700712) set_type(2148700712, 'unsigned char HARCH32[14]') del_items(2148700728) set_type(2148700728, 'unsigned char HARCH33[14]') del_items(2148700744) set_type(2148700744, 'unsigned char HARCH34[14]') del_items(2148700760) set_type(2148700760, 'unsigned char HARCH35[14]') del_items(2148700776) set_type(2148700776, 'unsigned char HARCH36[14]') del_items(2148700792) set_type(2148700792, 'unsigned char HARCH37[14]') del_items(2148700808) set_type(2148700808, 'unsigned char HARCH38[14]') del_items(2148700824) set_type(2148700824, 'unsigned char HARCH39[14]') del_items(2148700840) set_type(2148700840, 'unsigned char HARCH40[14]') del_items(2148700856) set_type(2148700856, 'unsigned char USTAIRS[34]') del_items(2148700892) set_type(2148700892, 'unsigned char DSTAIRS[34]') del_items(2148700928) set_type(2148700928, 'unsigned char WARPSTAIRS[34]') del_items(2148700964) set_type(2148700964, 'unsigned char CRUSHCOL[20]') del_items(2148700984) set_type(2148700984, 'unsigned char BIG1[10]') del_items(2148700996) set_type(2148700996, 'unsigned char BIG2[10]') del_items(2148701008) set_type(2148701008, 'unsigned char BIG5[10]') del_items(2148701020) set_type(2148701020, 'unsigned char BIG8[10]') del_items(2148701032) set_type(2148701032, 'unsigned char BIG9[10]') del_items(2148701044) set_type(2148701044, 'unsigned char BIG10[10]') del_items(2148701056) set_type(2148701056, 'unsigned char PANCREAS1[32]') del_items(2148701088) set_type(2148701088, 'unsigned char PANCREAS2[32]') del_items(2148701120) set_type(2148701120, 'unsigned char CTRDOOR1[20]') del_items(2148701140) set_type(2148701140, 'unsigned char CTRDOOR2[20]') del_items(2148701160) set_type(2148701160, 'unsigned char CTRDOOR3[20]') del_items(2148701180) set_type(2148701180, 'unsigned char CTRDOOR4[20]') del_items(2148701200) set_type(2148701200, 'unsigned char CTRDOOR5[20]') del_items(2148701220) set_type(2148701220, 'unsigned char CTRDOOR6[20]') del_items(2148701240) set_type(2148701240, 'unsigned char CTRDOOR7[20]') del_items(2148701260) set_type(2148701260, 'unsigned char CTRDOOR8[20]') del_items(2148701280) set_type(2148701280, 'int Patterns[10][100]') del_items(2148729968) set_type(2148729968, 'unsigned char lockout[40][40]') del_items(2148729296) set_type(2148729296, 'unsigned char L3ConvTbl[16]') del_items(2148729312) set_type(2148729312, 'unsigned char L3UP[20]') del_items(2148729332) set_type(2148729332, 'unsigned char L3DOWN[20]') del_items(2148729352) set_type(2148729352, 'unsigned char L3HOLDWARP[20]') del_items(2148729372) set_type(2148729372, 'unsigned char L3TITE1[34]') del_items(2148729408) set_type(2148729408, 'unsigned char L3TITE2[34]') del_items(2148729444) set_type(2148729444, 'unsigned char L3TITE3[34]') del_items(2148729480) set_type(2148729480, 'unsigned char L3TITE6[42]') del_items(2148729524) set_type(2148729524, 'unsigned char L3TITE7[42]') del_items(2148729568) set_type(2148729568, 'unsigned char L3TITE8[20]') del_items(2148729588) set_type(2148729588, 'unsigned char L3TITE9[20]') del_items(2148729608) set_type(2148729608, 'unsigned char L3TITE10[20]') del_items(2148729628) set_type(2148729628, 'unsigned char L3TITE11[20]') del_items(2148729648) set_type(2148729648, 'unsigned char L3ISLE1[14]') del_items(2148729664) set_type(2148729664, 'unsigned char L3ISLE2[14]') del_items(2148729680) set_type(2148729680, 'unsigned char L3ISLE3[14]') del_items(2148729696) set_type(2148729696, 'unsigned char L3ISLE4[14]') del_items(2148729712) set_type(2148729712, 'unsigned char L3ISLE5[10]') del_items(2148729724) set_type(2148729724, 'unsigned char L3ANVIL[244]') del_items(2148749964) set_type(2148749964, 'unsigned char dung[20][20]') del_items(2148750364) set_type(2148750364, 'unsigned char hallok[20]') del_items(2148750384) set_type(2148750384, 'unsigned char L4dungeon[80][80]') del_items(2148756784) set_type(2148756784, 'unsigned char L4ConvTbl[16]') del_items(2148756800) set_type(2148756800, 'unsigned char L4USTAIRS[42]') del_items(2148756844) set_type(2148756844, 'unsigned char L4TWARP[42]') del_items(2148756888) set_type(2148756888, 'unsigned char L4DSTAIRS[52]') del_items(2148756940) set_type(2148756940, 'unsigned char L4PENTA[52]') del_items(2148756992) set_type(2148756992, 'unsigned char L4PENTA2[52]') del_items(2148757044) set_type(2148757044, 'unsigned char L4BTYPES[140]')
# BST implementation - insertion, finding, traversals, deletion # Author - rudrajit1729 # Utility class represents individual nodes in BST class Node: def __init__(self, key = 0): self.left = None self.right = None self.value = key # Utility function to insert node in BST def insert(root, value): # Base Case if root is None: return Node(value) # Value smaller than root, goes into left subtree if root.value > value: if root.left is None: root.left = Node(value) else: root.left = insert(root.left, value) # value larger than or equal to root, goes into right subtree else: if root.right is None: root.right = Node(value) else: root.right = insert(root.right, value) return root # Utility functions for traversals def inorder(root): if root: inorder(root.left) print(root.value, end = " ") inorder(root.right) def preorder(root): if root: print(root.value, end = " ") preorder(root.left) preorder(root.right) def postorder(root): if root: postorder(root.left) postorder(root.right) print(root.value, end = " ") # Getting the minimum value node of the tree def minValNode(root): curr = root while curr.left: curr = curr.left return curr # Utility function for deleting a node def delete(root, key): # Base Case if root is None: return root # If key is smaller than root value it's in left subtree if key < root.value: root.left = delete(root.left, key) # If key is larger than root value it's in right subtree elif key > root.value: root.right = delete(root.right, key) # Key matches the root value else: # Node with one or no child if root.left is None: temp = root.right root = None return temp elif root.right is None: temp = root.left root = None return temp # Node with two children: Replace by inorder successor(smallest in right subtree) temp = minValNode(root.right) # Value of the inorder successor stored in node root.value = temp.value # Delete the inorder successor from the right subtree root.right = delete(root.right, temp.value) return root # Utility function for finding a key in BST def findNode(root, key): if root is None: return False if key < root.value: return findNode(root.left, key) elif key > root.value: return findNode(root.right, key) else: return True # Demonstration method def main(): r''' 49 / \ 46 79 / \ / \ 43 45 64 83 ''' root = None root = insert(root, 49) root = insert(root, 46) root = insert(root, 43) root = insert(root, 45) root = insert(root, 79) root = insert(root, 64) root = insert(root, 83) print("In order traversal") inorder(root) root = delete(root, 43) print("\nIn order traversal") inorder(root) root = delete(root, 46) print("\nIn order traversal") inorder(root) root = delete(root, 79) print("\nIn order traversal") inorder(root) print("\nFinding a node demo:\n") print("Finding 83 : ", findNode(root, 83)) print("\nFinding 43 : ", findNode(root, 43)) if __name__ == "__main__": main()
class Node: def __init__(self, key=0): self.left = None self.right = None self.value = key def insert(root, value): if root is None: return node(value) if root.value > value: if root.left is None: root.left = node(value) else: root.left = insert(root.left, value) elif root.right is None: root.right = node(value) else: root.right = insert(root.right, value) return root def inorder(root): if root: inorder(root.left) print(root.value, end=' ') inorder(root.right) def preorder(root): if root: print(root.value, end=' ') preorder(root.left) preorder(root.right) def postorder(root): if root: postorder(root.left) postorder(root.right) print(root.value, end=' ') def min_val_node(root): curr = root while curr.left: curr = curr.left return curr def delete(root, key): if root is None: return root if key < root.value: root.left = delete(root.left, key) elif key > root.value: root.right = delete(root.right, key) else: if root.left is None: temp = root.right root = None return temp elif root.right is None: temp = root.left root = None return temp temp = min_val_node(root.right) root.value = temp.value root.right = delete(root.right, temp.value) return root def find_node(root, key): if root is None: return False if key < root.value: return find_node(root.left, key) elif key > root.value: return find_node(root.right, key) else: return True def main(): """ 49 / \\ 46 79 / \\ / \\ 43 45 64 83 """ root = None root = insert(root, 49) root = insert(root, 46) root = insert(root, 43) root = insert(root, 45) root = insert(root, 79) root = insert(root, 64) root = insert(root, 83) print('In order traversal') inorder(root) root = delete(root, 43) print('\nIn order traversal') inorder(root) root = delete(root, 46) print('\nIn order traversal') inorder(root) root = delete(root, 79) print('\nIn order traversal') inorder(root) print('\nFinding a node demo:\n') print('Finding 83 : ', find_node(root, 83)) print('\nFinding 43 : ', find_node(root, 43)) if __name__ == '__main__': main()
def lengthOfLongestSubstring(s: str) -> int: arr = list(s) res = len(arr) res_l = res if res == 0: return res res = 0 for i in range(res_l): tmp = [arr[i]] if i > (res_l/2) and i< res: return res for j in range(1,res_l - i): if arr[i+j] in tmp: if len(tmp) > res: res = len(tmp) break else: tmp.append(arr[i+j]) if len(tmp) > res: res = len(tmp) return res def lengthOfLongestSubstring2(s: str) -> int: l = len(s) if l < 2: return l start = 0 res = 1 tmp = {s[0]: 0} for i in range(1, l): # if char was seen before and if start of substring is before the char's previous index, # update substring to start from next char if s[i] in tmp and start <= tmp[s[i]]: start = tmp[s[i]] + 1 tmp[s[i]] = i res = max(res, i - start + 1) return res # print(lengthOfLongestSubstring('dvcdfc')) print(lengthOfLongestSubstring2('dvcdfc'))
def length_of_longest_substring(s: str) -> int: arr = list(s) res = len(arr) res_l = res if res == 0: return res res = 0 for i in range(res_l): tmp = [arr[i]] if i > res_l / 2 and i < res: return res for j in range(1, res_l - i): if arr[i + j] in tmp: if len(tmp) > res: res = len(tmp) break else: tmp.append(arr[i + j]) if len(tmp) > res: res = len(tmp) return res def length_of_longest_substring2(s: str) -> int: l = len(s) if l < 2: return l start = 0 res = 1 tmp = {s[0]: 0} for i in range(1, l): if s[i] in tmp and start <= tmp[s[i]]: start = tmp[s[i]] + 1 tmp[s[i]] = i res = max(res, i - start + 1) return res print(length_of_longest_substring2('dvcdfc'))
''' This file contains exceptions that can be thrown by the model, which will in turn be caught by the endpoint resource and converted into an appropriate status code ''' class ConsistencyError(Exception): """ConsistencyError is raised when there is disagreement between the metadata and storage layer on the existence of a block in a given vault""" def __init__(self, project_id, vault_id, block_id, msg=None): """Creates a new ConsistencyError Exception :param project_id: The project ID under which the vault is housed :param vault_id: The vault containing the block :param block_id: The ID of the block in question :param msg: additional contextual information """ self.project_id = project_id self.vault_id = vault_id self.block_id = block_id extended_msg = "[{0}/{1}] Block id: {2} Consistency Error "\ .format(project_id, vault_id, block_id) if msg: extended_msg = extended_msg + '[{0}]'.format(msg) Exception.__init__(self, extended_msg)
""" This file contains exceptions that can be thrown by the model, which will in turn be caught by the endpoint resource and converted into an appropriate status code """ class Consistencyerror(Exception): """ConsistencyError is raised when there is disagreement between the metadata and storage layer on the existence of a block in a given vault""" def __init__(self, project_id, vault_id, block_id, msg=None): """Creates a new ConsistencyError Exception :param project_id: The project ID under which the vault is housed :param vault_id: The vault containing the block :param block_id: The ID of the block in question :param msg: additional contextual information """ self.project_id = project_id self.vault_id = vault_id self.block_id = block_id extended_msg = '[{0}/{1}] Block id: {2} Consistency Error '.format(project_id, vault_id, block_id) if msg: extended_msg = extended_msg + '[{0}]'.format(msg) Exception.__init__(self, extended_msg)
annovar_to_ncbi = {} annovar_to_ncbi['EnterAnnovarGene']='CorrectNCBIGene' annovar_to_ncbi['KIAA1804']='MAP3K21' annovar_to_ncbi['FLJ33360']='LINC02145' annovar_to_ncbi['FLJ33581']='LINC01721' annovar_to_ncbi['FLJ46066']='LINC01994' annovar_to_ncbi['FLJ26245']='LINC02167' annovar_to_ncbi['LINC00273']='' # Reported as a cloning artifact thus removed from HGNC; annovar_to_ncbi['FLJ43879']='LINC01940' annovar_to_ncbi['ZNF664-FAM101A']='' # Read-through. annovar_to_ncbi['FLJ41200']='LINC01235' annovar_to_ncbi['FLJ41278']='LINC02389' annovar_to_ncbi['FLJ45079']='LINC01973' annovar_to_ncbi['FLJ26850']='LOC400710' annovar_to_ncbi['FLJ16171']='LINC01951' annovar_to_ncbi['FLJ10038']='GABPB1-IT1' annovar_to_ncbi['FLJ35934']='LINC02076' annovar_to_ncbi['MGC72080']='OR7E31P' annovar_to_ncbi['FLJ22763']='C3orf85' annovar_to_ncbi['FLJ36777']='LINC02447' annovar_to_ncbi['RBM48']='MGC16142' annovar_to_ncbi['FLJ41941']='LINC01634' annovar_to_ncbi['RP11-87M18.2']='PRPF31' annovar_to_ncbi['FLJ22763']='C3orf85' annovar_to_ncbi['FLJ31662']='LINC01761' annovar_to_ncbi['DKFZp434J0226']='LOC93429' annovar_to_ncbi['FLJ37505']='LINC02393'
annovar_to_ncbi = {} annovar_to_ncbi['EnterAnnovarGene'] = 'CorrectNCBIGene' annovar_to_ncbi['KIAA1804'] = 'MAP3K21' annovar_to_ncbi['FLJ33360'] = 'LINC02145' annovar_to_ncbi['FLJ33581'] = 'LINC01721' annovar_to_ncbi['FLJ46066'] = 'LINC01994' annovar_to_ncbi['FLJ26245'] = 'LINC02167' annovar_to_ncbi['LINC00273'] = '' annovar_to_ncbi['FLJ43879'] = 'LINC01940' annovar_to_ncbi['ZNF664-FAM101A'] = '' annovar_to_ncbi['FLJ41200'] = 'LINC01235' annovar_to_ncbi['FLJ41278'] = 'LINC02389' annovar_to_ncbi['FLJ45079'] = 'LINC01973' annovar_to_ncbi['FLJ26850'] = 'LOC400710' annovar_to_ncbi['FLJ16171'] = 'LINC01951' annovar_to_ncbi['FLJ10038'] = 'GABPB1-IT1' annovar_to_ncbi['FLJ35934'] = 'LINC02076' annovar_to_ncbi['MGC72080'] = 'OR7E31P' annovar_to_ncbi['FLJ22763'] = 'C3orf85' annovar_to_ncbi['FLJ36777'] = 'LINC02447' annovar_to_ncbi['RBM48'] = 'MGC16142' annovar_to_ncbi['FLJ41941'] = 'LINC01634' annovar_to_ncbi['RP11-87M18.2'] = 'PRPF31' annovar_to_ncbi['FLJ22763'] = 'C3orf85' annovar_to_ncbi['FLJ31662'] = 'LINC01761' annovar_to_ncbi['DKFZp434J0226'] = 'LOC93429' annovar_to_ncbi['FLJ37505'] = 'LINC02393'
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def get_height(self, root): node = root height = 0 while node is not None: height += 1 node = node.left return height def countNodes(self, root): if root is None: return 0 h = self.get_height(root) node = root count = 0 while node is not None: h -= 1 count += 1 if self.get_height(node.right) == h: count += (2 ** h) - 1 node = node.right else: count += (2 ** (h - 1)) - 1 node = node.left return count
class Solution: def get_height(self, root): node = root height = 0 while node is not None: height += 1 node = node.left return height def count_nodes(self, root): if root is None: return 0 h = self.get_height(root) node = root count = 0 while node is not None: h -= 1 count += 1 if self.get_height(node.right) == h: count += 2 ** h - 1 node = node.right else: count += 2 ** (h - 1) - 1 node = node.left return count
# Bubble Sort def bubbleSort(list): endIndex = len(list) - 1 while True: shouldContinue = False for i in range(endIndex): if list[i] > list[i + 1]: temp = list[i + 1] list[i + 1] = list[i] list[i] = temp shouldContinue = True if not shouldContinue: break endIndex -= 1 print("Finished bubble sorting list of {} elements.".format(len(list))) # Selection Sort def selectionSort(list): for i in range(len(list) - 1): lowestNumberIndex = i for j in range(i + 1, len(list)): if list[j] < list[lowestNumberIndex]: lowestNumberIndex = j if lowestNumberIndex != i: temp = list[i] list[i] = list[lowestNumberIndex] list[lowestNumberIndex] = temp print("Finished selection sorting list of {} elements.".format(len(list))) # Insertion Sort def insertionSort(list): for i in range(len(list) - 1): sortedEndIndex = i indexToInsert = i + 1 elementToInsert = list[sortedEndIndex + 1] for j in range(sortedEndIndex, -1, -1): if not elementToInsert < list[j]: break list[j + 1] = list[j] indexToInsert -= 1 list[indexToInsert] = elementToInsert print("Finished insertion sorting list of {} elements.".format(len(list))) # Quick Sort def quickSort(list, startIndex = 0, endIndex = None): if endIndex == None: endIndex = len(list) - 1 if startIndex < endIndex: pivot = partition(list, startIndex, endIndex) quickSort(list, startIndex, pivot - 1) quickSort(list, pivot + 1, endIndex) if startIndex == 0 and endIndex == len(list) - 1: print("Finished quick sorting list of {} elements.".format(len(list))) def partition(list, startIndex, endIndex): pivot = list[endIndex] lowerDivider = startIndex for higherDivider in range(startIndex, endIndex): if list[higherDivider] <= pivot: list[lowerDivider], list[higherDivider] = list[higherDivider], list[lowerDivider] lowerDivider += 1 list[lowerDivider], list[endIndex] = list[endIndex], list[lowerDivider] return lowerDivider # Merge Sort def mergeSort(list, startIndex = 0, endIndex = None): if endIndex == None: endIndex = len(list) - 1 if startIndex < endIndex: middleIndex = (startIndex + endIndex) // 2 mergeSort(list, startIndex, middleIndex) mergeSort(list, middleIndex + 1, endIndex) merge(list, startIndex, middleIndex, endIndex) if startIndex == 0 and endIndex == len(list) - 1: print("Finished merge sorting list of {} elements.".format(len(list))) def merge(list, startIndex, middleIndex, endIndex): left = list[startIndex:middleIndex + 1] right = list[middleIndex + 1:endIndex + 1] topLeft, topRight = 0, 0 for k in range(startIndex, endIndex + 1): if topLeft >= len(left): list[k] = right[topRight] topRight += 1 elif topRight >= len(right): list[k] = left[topLeft] topLeft += 1 elif left[topLeft] < right[topRight]: list[k] = left[topLeft] topLeft += 1 else: list[k] = right[topRight] topRight += 1 # Shell Sort def shellSort(list): gap = 1 listLength = len(list) while gap < listLength / 3: gap = gap * 3 + 1 while gap > 0: for i in range(gap, listLength): elementToInsert = list[i] indexToInsert = i for j in range(i, 0, -gap): if not elementToInsert < list[j - gap]: break list[j] = list[j - gap] indexToInsert -= gap list[indexToInsert] = elementToInsert gap = (gap - 1) // 3 print(f"Finished shell sorting list of {len(list)} elements.") # Counting Sort def countingSort(list): elementRange = range(len(list)) countArray = [0 for _ in elementRange] for element in list: countArray[element] += 1 for i in range(1, len(list)): countArray[i] += countArray[i-1] outputArray = [0 for _ in elementRange] for i in range(len(list)-1, -1, -1): elementPosition = countArray[list[i]] - 1 outputArray[elementPosition] = list[i] countArray[list[i]] -= 1 list[:] = outputArray[:] print(f"Finished counting sorting list of {len(list)} elements.") # Bucket Sort def bucketSort(intList, bucketNumber = None, isFirstCall = True): buckets = [] listLength = len(intList) if bucketNumber == None or not isFirstCall: bucketNumber = listLength bucketSize = (max(intList) - min(intList)) / bucketNumber if bucketSize == 0: return [buckets.append([]) for _ in range(bucketNumber)] minValue = min(intList) for i in range(listLength): targetIndex = int((intList[i] - minValue) / bucketSize) if targetIndex == bucketNumber: buckets[targetIndex - 1].append(intList[i]) else: buckets[targetIndex].append(intList[i]) # print(f"Buckets: {buckets}. Bucket Number = {bucketNumber}, Bucket Size = {bucketSize:.2f}") for bucket in buckets: if len(bucket) > 1: bucketSort(bucket, isFirstCall = False) intList.clear() for bucket in buckets: intList += bucket if isFirstCall: print(f"Finished bucket sorting list of {listLength} elements. Bucket Number: {bucketNumber}, Bucket Size: {bucketSize:.2f}") # Radix Sort def radixSort(list): maxItem = max(list) exp = 1 while maxItem // exp > 0: radixCountingSort(list, exp) exp *= 10 print(f"Finished radix sorting list of {len(list)} elements.") def radixCountingSort(list, exp): symbolNumber = 10 elementRange = range(len(list)) countArray = [0 for _ in range(symbolNumber)] for element in list: countArray[(element // exp) % 10] += 1 for i in range(1, symbolNumber): countArray[i] += countArray[i-1] outputArray = [0 for _ in elementRange] for i in range(len(list)-1, -1, -1): elementPosition = countArray[(list[i] // exp) % 10] - 1 outputArray[elementPosition] = list[i] countArray[(list[i] // exp) % 10] -= 1 list[:] = outputArray[:]
def bubble_sort(list): end_index = len(list) - 1 while True: should_continue = False for i in range(endIndex): if list[i] > list[i + 1]: temp = list[i + 1] list[i + 1] = list[i] list[i] = temp should_continue = True if not shouldContinue: break end_index -= 1 print('Finished bubble sorting list of {} elements.'.format(len(list))) def selection_sort(list): for i in range(len(list) - 1): lowest_number_index = i for j in range(i + 1, len(list)): if list[j] < list[lowestNumberIndex]: lowest_number_index = j if lowestNumberIndex != i: temp = list[i] list[i] = list[lowestNumberIndex] list[lowestNumberIndex] = temp print('Finished selection sorting list of {} elements.'.format(len(list))) def insertion_sort(list): for i in range(len(list) - 1): sorted_end_index = i index_to_insert = i + 1 element_to_insert = list[sortedEndIndex + 1] for j in range(sortedEndIndex, -1, -1): if not elementToInsert < list[j]: break list[j + 1] = list[j] index_to_insert -= 1 list[indexToInsert] = elementToInsert print('Finished insertion sorting list of {} elements.'.format(len(list))) def quick_sort(list, startIndex=0, endIndex=None): if endIndex == None: end_index = len(list) - 1 if startIndex < endIndex: pivot = partition(list, startIndex, endIndex) quick_sort(list, startIndex, pivot - 1) quick_sort(list, pivot + 1, endIndex) if startIndex == 0 and endIndex == len(list) - 1: print('Finished quick sorting list of {} elements.'.format(len(list))) def partition(list, startIndex, endIndex): pivot = list[endIndex] lower_divider = startIndex for higher_divider in range(startIndex, endIndex): if list[higherDivider] <= pivot: (list[lowerDivider], list[higherDivider]) = (list[higherDivider], list[lowerDivider]) lower_divider += 1 (list[lowerDivider], list[endIndex]) = (list[endIndex], list[lowerDivider]) return lowerDivider def merge_sort(list, startIndex=0, endIndex=None): if endIndex == None: end_index = len(list) - 1 if startIndex < endIndex: middle_index = (startIndex + endIndex) // 2 merge_sort(list, startIndex, middleIndex) merge_sort(list, middleIndex + 1, endIndex) merge(list, startIndex, middleIndex, endIndex) if startIndex == 0 and endIndex == len(list) - 1: print('Finished merge sorting list of {} elements.'.format(len(list))) def merge(list, startIndex, middleIndex, endIndex): left = list[startIndex:middleIndex + 1] right = list[middleIndex + 1:endIndex + 1] (top_left, top_right) = (0, 0) for k in range(startIndex, endIndex + 1): if topLeft >= len(left): list[k] = right[topRight] top_right += 1 elif topRight >= len(right): list[k] = left[topLeft] top_left += 1 elif left[topLeft] < right[topRight]: list[k] = left[topLeft] top_left += 1 else: list[k] = right[topRight] top_right += 1 def shell_sort(list): gap = 1 list_length = len(list) while gap < listLength / 3: gap = gap * 3 + 1 while gap > 0: for i in range(gap, listLength): element_to_insert = list[i] index_to_insert = i for j in range(i, 0, -gap): if not elementToInsert < list[j - gap]: break list[j] = list[j - gap] index_to_insert -= gap list[indexToInsert] = elementToInsert gap = (gap - 1) // 3 print(f'Finished shell sorting list of {len(list)} elements.') def counting_sort(list): element_range = range(len(list)) count_array = [0 for _ in elementRange] for element in list: countArray[element] += 1 for i in range(1, len(list)): countArray[i] += countArray[i - 1] output_array = [0 for _ in elementRange] for i in range(len(list) - 1, -1, -1): element_position = countArray[list[i]] - 1 outputArray[elementPosition] = list[i] countArray[list[i]] -= 1 list[:] = outputArray[:] print(f'Finished counting sorting list of {len(list)} elements.') def bucket_sort(intList, bucketNumber=None, isFirstCall=True): buckets = [] list_length = len(intList) if bucketNumber == None or not isFirstCall: bucket_number = listLength bucket_size = (max(intList) - min(intList)) / bucketNumber if bucketSize == 0: return [buckets.append([]) for _ in range(bucketNumber)] min_value = min(intList) for i in range(listLength): target_index = int((intList[i] - minValue) / bucketSize) if targetIndex == bucketNumber: buckets[targetIndex - 1].append(intList[i]) else: buckets[targetIndex].append(intList[i]) for bucket in buckets: if len(bucket) > 1: bucket_sort(bucket, isFirstCall=False) intList.clear() for bucket in buckets: int_list += bucket if isFirstCall: print(f'Finished bucket sorting list of {listLength} elements. Bucket Number: {bucketNumber}, Bucket Size: {bucketSize:.2f}') def radix_sort(list): max_item = max(list) exp = 1 while maxItem // exp > 0: radix_counting_sort(list, exp) exp *= 10 print(f'Finished radix sorting list of {len(list)} elements.') def radix_counting_sort(list, exp): symbol_number = 10 element_range = range(len(list)) count_array = [0 for _ in range(symbolNumber)] for element in list: countArray[element // exp % 10] += 1 for i in range(1, symbolNumber): countArray[i] += countArray[i - 1] output_array = [0 for _ in elementRange] for i in range(len(list) - 1, -1, -1): element_position = countArray[list[i] // exp % 10] - 1 outputArray[elementPosition] = list[i] countArray[list[i] // exp % 10] -= 1 list[:] = outputArray[:]
expected_output = { "vrf": { "default": { "address_family": { "ipv4": { "instance": { "1": { "router_id": "10.4.1.1", "base_topology_mtid": { "0": { "router_lsa_max_metric": {False: {}}, "start_time": "00:01:58.313", "time_elapsed": "00:54:43.859", } }, }, "65109": { "router_id": "10.0.187.164", "base_topology_mtid": { "0": { "router_lsa_max_metric": { True: { "advertise_lsa_metric": 16711680, "condition": "on startup for 5 seconds", "state": "inactive", "unset_reason": "timer expired, Originated for 5 seconds", "unset_time": "00:02:03.314", "unset_time_elapsed": "00:54:38.858", } }, "start_time": "00:01:58.314", "time_elapsed": "00:54:43.858", } }, }, } } } } } }
expected_output = {'vrf': {'default': {'address_family': {'ipv4': {'instance': {'1': {'router_id': '10.4.1.1', 'base_topology_mtid': {'0': {'router_lsa_max_metric': {False: {}}, 'start_time': '00:01:58.313', 'time_elapsed': '00:54:43.859'}}}, '65109': {'router_id': '10.0.187.164', 'base_topology_mtid': {'0': {'router_lsa_max_metric': {True: {'advertise_lsa_metric': 16711680, 'condition': 'on startup for 5 seconds', 'state': 'inactive', 'unset_reason': 'timer expired, Originated for 5 seconds', 'unset_time': '00:02:03.314', 'unset_time_elapsed': '00:54:38.858'}}, 'start_time': '00:01:58.314', 'time_elapsed': '00:54:43.858'}}}}}}}}}
#!/usr/bin/python # -*- coding: utf-8 -*- ## What is a Decorator ? # A decorator is the name used for a software design pattern. Decorators dynamically alter the # functionality of a function, method, or class without having to directly use subclasses or change # the source code of the function being decorated. def decorator_one(func): print("decorator_one ----1111----") def wrapper(*arg, **kwds): print("decorator_one ----3333----") func(*arg, **kwds) print("decorator_one ----2222----") return wrapper def decorator_two(func): print("decorator_two ----AAAA----") def wrapper(*arg, **kwds): print("decorator_two ----CCCC----") func(*arg, **kwds) print("decorator_two ----BBBB----") return wrapper @decorator_two @decorator_one def foo(): print("this is a demo string.") if __name__ == '__main__': foo() # decorator_one ----1111---- # decorator_one ----2222---- # decorator_two ----AAAA---- # decorator_two ----BBBB---- # decorator_two ----CCCC---- # decorator_one ----3333---- # this is a demo string. # References # https://wiki.python.org/moin/PythonDecorators # http://en.wikipedia.org/wiki/Decorator_pattern # http://www.python.org/peps/pep-0318.html # http://stackoverflow.com/questions/739654/how-to-make-a-chain-of-function-decorators # http://stackoverflow.com/questions/308999/what-does-functools-wraps-do
def decorator_one(func): print('decorator_one ----1111----') def wrapper(*arg, **kwds): print('decorator_one ----3333----') func(*arg, **kwds) print('decorator_one ----2222----') return wrapper def decorator_two(func): print('decorator_two ----AAAA----') def wrapper(*arg, **kwds): print('decorator_two ----CCCC----') func(*arg, **kwds) print('decorator_two ----BBBB----') return wrapper @decorator_two @decorator_one def foo(): print('this is a demo string.') if __name__ == '__main__': foo()
"""172. Factorial Trailing Zeroes""" class Solution(object): def trailingZeroes(self, n): """ :type n: int :rtype: int """ ## all trailing 0 is from factors 5 * 2. ## But sometimes one number may have several 5 factors, ## for example, 25 have two 5 factors, 125 have three 5 factors. ## In the n! operation, factors 2 is always ample. ## So we just count how many 5 factors in all number from 1 to n. return 0 if n == 0 else (n/5) + self.trailingZeroes(n/5)
"""172. Factorial Trailing Zeroes""" class Solution(object): def trailing_zeroes(self, n): """ :type n: int :rtype: int """ return 0 if n == 0 else n / 5 + self.trailingZeroes(n / 5)
URL_PATH = 'http://127.0.0.1:8000/api/v1/' TEST_USER_1 = { 'email': 'spongebob@krusty.com', 'password': 'SquarePants', } TEST_USER_2 = { 'email': 'patric_star@krusty.com', 'password': 'Star22', } TEST_URL_1 = { 'entered_url': 'https://github.com', 'short_code': 'spongecc', } TEST_URL_2 = { 'entered_url': 'https://instagram.com/', 'short_code': 'patriccc', } TEST_VIEW = { 'ip': '200.0.0.1', }
url_path = 'http://127.0.0.1:8000/api/v1/' test_user_1 = {'email': 'spongebob@krusty.com', 'password': 'SquarePants'} test_user_2 = {'email': 'patric_star@krusty.com', 'password': 'Star22'} test_url_1 = {'entered_url': 'https://github.com', 'short_code': 'spongecc'} test_url_2 = {'entered_url': 'https://instagram.com/', 'short_code': 'patriccc'} test_view = {'ip': '200.0.0.1'}
ls = [8, 4, 12, 2, 12, 4, 9, 1, 3, 5, 13, 3] ls1 = [] ls2 = [] ls.sort(reverse = -1) print(ls) total = sum(ls) half = total/2 print("Half = ", half) if total % 2 == 0: for i in ls: if sum(ls1) < sum(ls2): ls1.append(i) else: ls2.append(i) if sum(ls1) != sum(ls2): print("The entered elements aren't right!!") else: print("List 1 is:", ls1) print("The sum of list 1 is:", sum(ls1)) print("List 2 is:", ls2) print("The sum of list 2 is:", sum(ls2))
ls = [8, 4, 12, 2, 12, 4, 9, 1, 3, 5, 13, 3] ls1 = [] ls2 = [] ls.sort(reverse=-1) print(ls) total = sum(ls) half = total / 2 print('Half = ', half) if total % 2 == 0: for i in ls: if sum(ls1) < sum(ls2): ls1.append(i) else: ls2.append(i) if sum(ls1) != sum(ls2): print("The entered elements aren't right!!") else: print('List 1 is:', ls1) print('The sum of list 1 is:', sum(ls1)) print('List 2 is:', ls2) print('The sum of list 2 is:', sum(ls2))
""" Partition problem is to determine whether a given set can be partitioned into two subsets such that the sum of elements in both subsets is same. Examples: arr[] = {1, 5, 11, 5} Output: true The array can be partitioned as {1, 5, 5} and {11} """ def partition_equal_subset_sum(arr): target, n = sum(arr), len(arr) if target & 1: return False target >>= 1 t = [[True if x == 0 else False for x in range(target+1)] for x in range(n + 1)] for i in range(1, n+1): for j in range(1, target+1): if arr[i-1] <= j: t[i][j] = t[i-1][j] or t[i-1][j-arr[i-1]] else: t[i][j] = t[i-1][j] return t[-1][-1] if __name__ == "__main__": arr = [1,5,11,5] print(partition_equal_subset_sum(arr))
""" Partition problem is to determine whether a given set can be partitioned into two subsets such that the sum of elements in both subsets is same. Examples: arr[] = {1, 5, 11, 5} Output: true The array can be partitioned as {1, 5, 5} and {11} """ def partition_equal_subset_sum(arr): (target, n) = (sum(arr), len(arr)) if target & 1: return False target >>= 1 t = [[True if x == 0 else False for x in range(target + 1)] for x in range(n + 1)] for i in range(1, n + 1): for j in range(1, target + 1): if arr[i - 1] <= j: t[i][j] = t[i - 1][j] or t[i - 1][j - arr[i - 1]] else: t[i][j] = t[i - 1][j] return t[-1][-1] if __name__ == '__main__': arr = [1, 5, 11, 5] print(partition_equal_subset_sum(arr))
def count_substring(string, sub_string): count = 0 for i in range(len(string)): if string[i:].startswith(sub_string): count += 1 return count if __name__ == '__main__': print("Enter a string: ", end = ' ') string = input().strip() print("Enter substring: ", end = ' ') sub_string = input().strip() count = count_substring(string, sub_string) print("Count: ",count)
def count_substring(string, sub_string): count = 0 for i in range(len(string)): if string[i:].startswith(sub_string): count += 1 return count if __name__ == '__main__': print('Enter a string: ', end=' ') string = input().strip() print('Enter substring: ', end=' ') sub_string = input().strip() count = count_substring(string, sub_string) print('Count: ', count)
#! /usr/bin/env python3 def applyRule(rule, s): a, b = rule[0], rule[1] if a == s[0]: return b + s[1:] return None def applyRules(rules, ss): ret = [applyRule(r, s) for r in rules for s in ss] return [i for i in ret if i is not None] def search(rules, m): """ search from 'a', extend it one step with all rules, until to specific length """ ret = ['a'] for i in range(m - 1): ret = applyRules(rules, ret) return len(ret) def main(): line = [int(i) for i in input().split()] m, n = line[0], line[1] rules = [] for i in range(n): line = input().split() line.reverse() rules.append(line) l = search(rules, m) print(l) if __name__ == "__main__": main()
def apply_rule(rule, s): (a, b) = (rule[0], rule[1]) if a == s[0]: return b + s[1:] return None def apply_rules(rules, ss): ret = [apply_rule(r, s) for r in rules for s in ss] return [i for i in ret if i is not None] def search(rules, m): """ search from 'a', extend it one step with all rules, until to specific length """ ret = ['a'] for i in range(m - 1): ret = apply_rules(rules, ret) return len(ret) def main(): line = [int(i) for i in input().split()] (m, n) = (line[0], line[1]) rules = [] for i in range(n): line = input().split() line.reverse() rules.append(line) l = search(rules, m) print(l) if __name__ == '__main__': main()
def f(): try: f() finally: g()
def f(): try: f() finally: g()
# there is no guarantee of find the secret word in 10 guesses # for example ['aaaaaa', 'bbbbbb', ..., 'zzzzzz'] # the strategy really depends on the distribution of chars class Solution(object): def findSecretWord(self, wordlist, master): """ :type wordlist: List[Str] :type master: Master :rtype: None """ while True: picked = self.most_connected_word(wordlist) n = master.guess(picked) if n == len(picked): return # the secret word must be among the words that also share n chars with picked wordlist = [w for w in wordlist if self.similarity(w, picked) == n] # for each word, count how many other words have 0 similarity def most_connected_word(self, wordlist): counter = {} for w1 in wordlist: counter[w1] = counter.get(w1, 0) for w2 in wordlist: if self.similarity(w1, w2) > 0: counter[w1] += 1 return max(wordlist, key=lambda w: counter[w]) def similarity(self, w1, w2): sim = 0 for i in range(min(len(w1), len(w2))): if w1[i] == w2[i]: sim += 1 return sim
class Solution(object): def find_secret_word(self, wordlist, master): """ :type wordlist: List[Str] :type master: Master :rtype: None """ while True: picked = self.most_connected_word(wordlist) n = master.guess(picked) if n == len(picked): return wordlist = [w for w in wordlist if self.similarity(w, picked) == n] def most_connected_word(self, wordlist): counter = {} for w1 in wordlist: counter[w1] = counter.get(w1, 0) for w2 in wordlist: if self.similarity(w1, w2) > 0: counter[w1] += 1 return max(wordlist, key=lambda w: counter[w]) def similarity(self, w1, w2): sim = 0 for i in range(min(len(w1), len(w2))): if w1[i] == w2[i]: sim += 1 return sim
#-*- coding: utf-8 -*- config = { "code": { "exit": { "0001": "NETWORK \ubb38\uc7ac\ub85c \uc778\ud55c \uac15\uc81c\uc885\ub8cc" } }, "db": { "redis_db_bta": 1, "redis_db_common": 0, "redis_host": "gAAAAABYuEUdvRKvSL8P8LANdYsHkNPsN1VBL1P-jBD7XAf6Sr_Pd30pU-3jKJ4G9ue8Ywh6J3HmaUUx2ur-CWzKGx_jG_lcrMNLqM4ohHz498XAdQI_0vM=", "redis_password": "gAAAAABYuEUdHONVc2t3M1HV2Zm4ZYnwIhaHJlwbPINVxVu1zPda5QxH7AJSCFf6ZUljg6NFH1FQiUg65hU-XUHVURl8_6tBKg==", "redis_port": "gAAAAABYuEUdMrgDPkKbscjEgstJBchHnQHXAerbw_f4ZHGfXIydZ4YRAYAjv4rubG7NeQ3TfGaB_SRtau-sNUPEHonfzjnb-A==" }, "default": { "mode": "prod" }, "log": { "encoding": "utf-8", "file_size": 128, "formatter": "[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s", "path": "/home/pi/bta/logs", "prefix": "%Y/%m/%d", "suffix": "%H%M%S" } }
config = {'code': {'exit': {'0001': 'NETWORK 문재로 인한 강제종료'}}, 'db': {'redis_db_bta': 1, 'redis_db_common': 0, 'redis_host': 'gAAAAABYuEUdvRKvSL8P8LANdYsHkNPsN1VBL1P-jBD7XAf6Sr_Pd30pU-3jKJ4G9ue8Ywh6J3HmaUUx2ur-CWzKGx_jG_lcrMNLqM4ohHz498XAdQI_0vM=', 'redis_password': 'gAAAAABYuEUdHONVc2t3M1HV2Zm4ZYnwIhaHJlwbPINVxVu1zPda5QxH7AJSCFf6ZUljg6NFH1FQiUg65hU-XUHVURl8_6tBKg==', 'redis_port': 'gAAAAABYuEUdMrgDPkKbscjEgstJBchHnQHXAerbw_f4ZHGfXIydZ4YRAYAjv4rubG7NeQ3TfGaB_SRtau-sNUPEHonfzjnb-A=='}, 'default': {'mode': 'prod'}, 'log': {'encoding': 'utf-8', 'file_size': 128, 'formatter': '[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s', 'path': '/home/pi/bta/logs', 'prefix': '%Y/%m/%d', 'suffix': '%H%M%S'}}
a = 0 b = 1 while a != b: senha = 2002 x = int(input()) if x == senha: print("Acesso Permitido") break else: print("Senha Invalida")
a = 0 b = 1 while a != b: senha = 2002 x = int(input()) if x == senha: print('Acesso Permitido') break else: print('Senha Invalida')
# # PySNMP MIB module Unisphere-Data-SLEP-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Unisphere-Data-SLEP-MIB # Produced by pysmi-0.3.4 at Wed May 1 15:32:48 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint") InterfaceIndexOrZero, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero", "InterfaceIndex") NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup") iso, TimeTicks, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Integer32, Bits, Counter64, IpAddress, MibIdentifier, Counter32, ModuleIdentity, Gauge32, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "TimeTicks", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Integer32", "Bits", "Counter64", "IpAddress", "MibIdentifier", "Counter32", "ModuleIdentity", "Gauge32", "ObjectIdentity") TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "RowStatus", "DisplayString") usDataMibs, = mibBuilder.importSymbols("Unisphere-Data-MIBs", "usDataMibs") UsdEnable, UsdNextIfIndex = mibBuilder.importSymbols("Unisphere-Data-TC", "UsdEnable", "UsdNextIfIndex") usdSlepMIBS = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15)) usdSlepMIBS.setRevisions(('2001-04-03 19:10', '2000-01-03 00:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: usdSlepMIBS.setRevisionsDescriptions(('Add usdSledDownWhenLooped attribute.', 'Initial version of this MIB module.',)) if mibBuilder.loadTexts: usdSlepMIBS.setLastUpdated('200104031910Z') if mibBuilder.loadTexts: usdSlepMIBS.setOrganization('Unisphere Networks, Inc.') if mibBuilder.loadTexts: usdSlepMIBS.setContactInfo(' Unisphere Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886 USA Tel: +1 978 589 5800 E-mail: mib@UnisphereNetworks.com') if mibBuilder.loadTexts: usdSlepMIBS.setDescription('The Serial Line Encapulation Protocol (SLEP) MIB for the Unisphere Networks Inc. enterprise.') usdSlepObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1)) usdSlepIfLayer = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1)) usdSlepNextIfIndex = MibScalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 1), UsdNextIfIndex()).setMaxAccess("readonly") if mibBuilder.loadTexts: usdSlepNextIfIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepNextIfIndex.setDescription('Coordinate ifIndex value allocation for entries in usdSlepIfTable. A GET of this object returns the next available ifIndex value to be used to create an entry in the associated interface table; or zero, if no valid ifIndex value is available. This object also returns a value of zero when it is the lexicographic successor of a varbind presented in an SNMP GETNEXT or GETBULK request, for which circumstance it is assumed that ifIndex allocation is unintended. Successive GETs will typically return different values, thus avoiding collisions among cooperating management clients seeking to create table entries simultaneously.') usdSlepIfTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2), ) if mibBuilder.loadTexts: usdSlepIfTable.setStatus('current') if mibBuilder.loadTexts: usdSlepIfTable.setDescription('The parameters for the SLEP service on this interface.') usdSlepIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1), ).setIndexNames((0, "Unisphere-Data-SLEP-MIB", "usdSlepIfIndex")) if mibBuilder.loadTexts: usdSlepIfEntry.setStatus('current') if mibBuilder.loadTexts: usdSlepIfEntry.setDescription('The Parameters for a particular SLEP interface. Creating/deleting entries in this table causes corresponding entries for be created/deleted in ifTable/ifXTable/usdIfTable') usdSlepIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 1), InterfaceIndex()) if mibBuilder.loadTexts: usdSlepIfIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepIfIndex.setDescription('The ifIndex value of the corresponding ifEntry.') usdSlepKeepAliveTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 6553)).clone(10)).setUnits('seconds').setMaxAccess("readcreate") if mibBuilder.loadTexts: usdSlepKeepAliveTimer.setStatus('current') if mibBuilder.loadTexts: usdSlepKeepAliveTimer.setDescription('The interface keep alive timer for this entry. The time in seconds that this entity will wait for sending a keep-alive-message to the remote SLEP entity, and the time in seconds that this entity will wait for the reception of a keep-alive-message from the remote SLEP entity.') usdSlepIfLowerIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 3), InterfaceIndexOrZero()).setMaxAccess("readcreate") if mibBuilder.loadTexts: usdSlepIfLowerIfIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepIfLowerIfIndex.setDescription('The ifIndex of an interface over which this SLEP interface is to be layered. A value of zero indicates no layering. An implementation may choose to require that a nonzero value be configured at entry creation.') usdSlepIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: usdSlepIfRowStatus.setStatus('current') if mibBuilder.loadTexts: usdSlepIfRowStatus.setDescription('Controls creation/deletion of entries in this table according to the RowStatus textual convention, constrained to support the following values only: createAndGo destroy To create an entry in this table, the following entry objects MUST be explicitly configured: usdSlepIfRowStatus usdSlepIfLowerIfIndex In addition, when creating an entry the following conditions must hold: A value for usdSlepIfIndex must have been determined previously, by reading usdSlepNextIfIndex. The interface identified by usdSlepIfLowerIfIndex must exist, and must be an interface type that permits layering of SLEP Interface above it. A corresponding entry in ifTable/ifXTable/usdIfTable is created/destroyed as a result of creating/destroying an entry in this table. ') usdSlepDownWhenLooped = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 5), UsdEnable().clone('disable')).setMaxAccess("readcreate") if mibBuilder.loadTexts: usdSlepDownWhenLooped.setStatus('current') if mibBuilder.loadTexts: usdSlepDownWhenLooped.setDescription('The down-when-looped control for this entry. This attribute determines if loop detection is enabled for the interface. If set to disable, loop detection is disabled for the interface.') usdSlepIfStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3), ) if mibBuilder.loadTexts: usdSlepIfStatisticsTable.setStatus('current') if mibBuilder.loadTexts: usdSlepIfStatisticsTable.setDescription('The statistics for the SLEP service on this interface.') usdSlepIfStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1), ).setIndexNames((0, "Unisphere-Data-SLEP-MIB", "usdSlepIfStatsIndex")) if mibBuilder.loadTexts: usdSlepIfStatisticsEntry.setStatus('current') if mibBuilder.loadTexts: usdSlepIfStatisticsEntry.setDescription('The statistics for a particular SLEP interface.') usdSlepIfStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 1), InterfaceIndex()) if mibBuilder.loadTexts: usdSlepIfStatsIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepIfStatsIndex.setDescription('The ifIndex value of that identifies this entry.') usdSlepKeepAliveFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 2), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: usdSlepKeepAliveFailures.setStatus('current') if mibBuilder.loadTexts: usdSlepKeepAliveFailures.setDescription('The number of link drops due to keep-alive failures.') usdSlepLinkStatusTooLongPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: usdSlepLinkStatusTooLongPackets.setStatus('current') if mibBuilder.loadTexts: usdSlepLinkStatusTooLongPackets.setDescription('The number of packets that were too long for this SLEP entity.') usdSlepLinkStatusBadFCSs = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: usdSlepLinkStatusBadFCSs.setStatus('current') if mibBuilder.loadTexts: usdSlepLinkStatusBadFCSs.setDescription("The number of FCS's errors for this SLEP entity.") usdSlepConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4)) usdSlepCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 1)) usdSlepGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 2)) usdSlepCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 1, 1)).setObjects(("Unisphere-Data-SLEP-MIB", "usdSlepGroup")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usdSlepCompliance = usdSlepCompliance.setStatus('obsolete') if mibBuilder.loadTexts: usdSlepCompliance.setDescription('Obsolete compliance statement for entities which implement the Unisphere SLEP MIB. This compliance statement became obsolete when the usdSlepDownWhenLooped object was added.') usdSlepCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 1, 2)).setObjects(("Unisphere-Data-SLEP-MIB", "usdSlepGroup2")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usdSlepCompliance2 = usdSlepCompliance2.setStatus('current') if mibBuilder.loadTexts: usdSlepCompliance2.setDescription('The compliance statement for entities which implement the Unisphere SLEP MIB.') usdSlepGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 2, 1)).setObjects(("Unisphere-Data-SLEP-MIB", "usdSlepNextIfIndex"), ("Unisphere-Data-SLEP-MIB", "usdSlepKeepAliveTimer"), ("Unisphere-Data-SLEP-MIB", "usdSlepIfLowerIfIndex"), ("Unisphere-Data-SLEP-MIB", "usdSlepIfRowStatus"), ("Unisphere-Data-SLEP-MIB", "usdSlepKeepAliveFailures"), ("Unisphere-Data-SLEP-MIB", "usdSlepLinkStatusTooLongPackets"), ("Unisphere-Data-SLEP-MIB", "usdSlepLinkStatusBadFCSs")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usdSlepGroup = usdSlepGroup.setStatus('obsolete') if mibBuilder.loadTexts: usdSlepGroup.setDescription('Obsolete collection of objects providing management of SLEP interfaces in a Unisphere product. This group became obsolete when the usdSlepDownWhenLooped object was added.') usdSlepGroup2 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 2, 2)).setObjects(("Unisphere-Data-SLEP-MIB", "usdSlepNextIfIndex"), ("Unisphere-Data-SLEP-MIB", "usdSlepKeepAliveTimer"), ("Unisphere-Data-SLEP-MIB", "usdSlepIfLowerIfIndex"), ("Unisphere-Data-SLEP-MIB", "usdSlepIfRowStatus"), ("Unisphere-Data-SLEP-MIB", "usdSlepDownWhenLooped"), ("Unisphere-Data-SLEP-MIB", "usdSlepKeepAliveFailures"), ("Unisphere-Data-SLEP-MIB", "usdSlepLinkStatusTooLongPackets"), ("Unisphere-Data-SLEP-MIB", "usdSlepLinkStatusBadFCSs")) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usdSlepGroup2 = usdSlepGroup2.setStatus('current') if mibBuilder.loadTexts: usdSlepGroup2.setDescription('A collection of objects providing management of SLEP interfaces in a Unisphere product.') mibBuilder.exportSymbols("Unisphere-Data-SLEP-MIB", usdSlepCompliances=usdSlepCompliances, usdSlepGroup=usdSlepGroup, usdSlepGroups=usdSlepGroups, usdSlepIfStatsIndex=usdSlepIfStatsIndex, usdSlepIfEntry=usdSlepIfEntry, usdSlepObjects=usdSlepObjects, usdSlepIfStatisticsEntry=usdSlepIfStatisticsEntry, usdSlepKeepAliveTimer=usdSlepKeepAliveTimer, usdSlepCompliance2=usdSlepCompliance2, usdSlepLinkStatusBadFCSs=usdSlepLinkStatusBadFCSs, usdSlepIfLowerIfIndex=usdSlepIfLowerIfIndex, usdSlepIfLayer=usdSlepIfLayer, usdSlepCompliance=usdSlepCompliance, usdSlepConformance=usdSlepConformance, usdSlepMIBS=usdSlepMIBS, usdSlepIfRowStatus=usdSlepIfRowStatus, usdSlepLinkStatusTooLongPackets=usdSlepLinkStatusTooLongPackets, usdSlepNextIfIndex=usdSlepNextIfIndex, PYSNMP_MODULE_ID=usdSlepMIBS, usdSlepIfIndex=usdSlepIfIndex, usdSlepKeepAliveFailures=usdSlepKeepAliveFailures, usdSlepGroup2=usdSlepGroup2, usdSlepDownWhenLooped=usdSlepDownWhenLooped, usdSlepIfStatisticsTable=usdSlepIfStatisticsTable, usdSlepIfTable=usdSlepIfTable)
(octet_string, object_identifier, integer) = mibBuilder.importSymbols('ASN1', 'OctetString', 'ObjectIdentifier', 'Integer') (named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues') (constraints_union, constraints_intersection, value_range_constraint, single_value_constraint, value_size_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsUnion', 'ConstraintsIntersection', 'ValueRangeConstraint', 'SingleValueConstraint', 'ValueSizeConstraint') (interface_index_or_zero, interface_index) = mibBuilder.importSymbols('IF-MIB', 'InterfaceIndexOrZero', 'InterfaceIndex') (notification_group, module_compliance, object_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance', 'ObjectGroup') (iso, time_ticks, notification_type, mib_scalar, mib_table, mib_table_row, mib_table_column, unsigned32, integer32, bits, counter64, ip_address, mib_identifier, counter32, module_identity, gauge32, object_identity) = mibBuilder.importSymbols('SNMPv2-SMI', 'iso', 'TimeTicks', 'NotificationType', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'Unsigned32', 'Integer32', 'Bits', 'Counter64', 'IpAddress', 'MibIdentifier', 'Counter32', 'ModuleIdentity', 'Gauge32', 'ObjectIdentity') (textual_convention, row_status, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'RowStatus', 'DisplayString') (us_data_mibs,) = mibBuilder.importSymbols('Unisphere-Data-MIBs', 'usDataMibs') (usd_enable, usd_next_if_index) = mibBuilder.importSymbols('Unisphere-Data-TC', 'UsdEnable', 'UsdNextIfIndex') usd_slep_mibs = module_identity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15)) usdSlepMIBS.setRevisions(('2001-04-03 19:10', '2000-01-03 00:00')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: usdSlepMIBS.setRevisionsDescriptions(('Add usdSledDownWhenLooped attribute.', 'Initial version of this MIB module.')) if mibBuilder.loadTexts: usdSlepMIBS.setLastUpdated('200104031910Z') if mibBuilder.loadTexts: usdSlepMIBS.setOrganization('Unisphere Networks, Inc.') if mibBuilder.loadTexts: usdSlepMIBS.setContactInfo(' Unisphere Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886 USA Tel: +1 978 589 5800 E-mail: mib@UnisphereNetworks.com') if mibBuilder.loadTexts: usdSlepMIBS.setDescription('The Serial Line Encapulation Protocol (SLEP) MIB for the Unisphere Networks Inc. enterprise.') usd_slep_objects = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1)) usd_slep_if_layer = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1)) usd_slep_next_if_index = mib_scalar((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 1), usd_next_if_index()).setMaxAccess('readonly') if mibBuilder.loadTexts: usdSlepNextIfIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepNextIfIndex.setDescription('Coordinate ifIndex value allocation for entries in usdSlepIfTable. A GET of this object returns the next available ifIndex value to be used to create an entry in the associated interface table; or zero, if no valid ifIndex value is available. This object also returns a value of zero when it is the lexicographic successor of a varbind presented in an SNMP GETNEXT or GETBULK request, for which circumstance it is assumed that ifIndex allocation is unintended. Successive GETs will typically return different values, thus avoiding collisions among cooperating management clients seeking to create table entries simultaneously.') usd_slep_if_table = mib_table((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2)) if mibBuilder.loadTexts: usdSlepIfTable.setStatus('current') if mibBuilder.loadTexts: usdSlepIfTable.setDescription('The parameters for the SLEP service on this interface.') usd_slep_if_entry = mib_table_row((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1)).setIndexNames((0, 'Unisphere-Data-SLEP-MIB', 'usdSlepIfIndex')) if mibBuilder.loadTexts: usdSlepIfEntry.setStatus('current') if mibBuilder.loadTexts: usdSlepIfEntry.setDescription('The Parameters for a particular SLEP interface. Creating/deleting entries in this table causes corresponding entries for be created/deleted in ifTable/ifXTable/usdIfTable') usd_slep_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 1), interface_index()) if mibBuilder.loadTexts: usdSlepIfIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepIfIndex.setDescription('The ifIndex value of the corresponding ifEntry.') usd_slep_keep_alive_timer = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 2), integer32().subtype(subtypeSpec=value_range_constraint(0, 6553)).clone(10)).setUnits('seconds').setMaxAccess('readcreate') if mibBuilder.loadTexts: usdSlepKeepAliveTimer.setStatus('current') if mibBuilder.loadTexts: usdSlepKeepAliveTimer.setDescription('The interface keep alive timer for this entry. The time in seconds that this entity will wait for sending a keep-alive-message to the remote SLEP entity, and the time in seconds that this entity will wait for the reception of a keep-alive-message from the remote SLEP entity.') usd_slep_if_lower_if_index = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 3), interface_index_or_zero()).setMaxAccess('readcreate') if mibBuilder.loadTexts: usdSlepIfLowerIfIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepIfLowerIfIndex.setDescription('The ifIndex of an interface over which this SLEP interface is to be layered. A value of zero indicates no layering. An implementation may choose to require that a nonzero value be configured at entry creation.') usd_slep_if_row_status = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 4), row_status()).setMaxAccess('readcreate') if mibBuilder.loadTexts: usdSlepIfRowStatus.setStatus('current') if mibBuilder.loadTexts: usdSlepIfRowStatus.setDescription('Controls creation/deletion of entries in this table according to the RowStatus textual convention, constrained to support the following values only: createAndGo destroy To create an entry in this table, the following entry objects MUST be explicitly configured: usdSlepIfRowStatus usdSlepIfLowerIfIndex In addition, when creating an entry the following conditions must hold: A value for usdSlepIfIndex must have been determined previously, by reading usdSlepNextIfIndex. The interface identified by usdSlepIfLowerIfIndex must exist, and must be an interface type that permits layering of SLEP Interface above it. A corresponding entry in ifTable/ifXTable/usdIfTable is created/destroyed as a result of creating/destroying an entry in this table. ') usd_slep_down_when_looped = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 2, 1, 5), usd_enable().clone('disable')).setMaxAccess('readcreate') if mibBuilder.loadTexts: usdSlepDownWhenLooped.setStatus('current') if mibBuilder.loadTexts: usdSlepDownWhenLooped.setDescription('The down-when-looped control for this entry. This attribute determines if loop detection is enabled for the interface. If set to disable, loop detection is disabled for the interface.') usd_slep_if_statistics_table = mib_table((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3)) if mibBuilder.loadTexts: usdSlepIfStatisticsTable.setStatus('current') if mibBuilder.loadTexts: usdSlepIfStatisticsTable.setDescription('The statistics for the SLEP service on this interface.') usd_slep_if_statistics_entry = mib_table_row((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1)).setIndexNames((0, 'Unisphere-Data-SLEP-MIB', 'usdSlepIfStatsIndex')) if mibBuilder.loadTexts: usdSlepIfStatisticsEntry.setStatus('current') if mibBuilder.loadTexts: usdSlepIfStatisticsEntry.setDescription('The statistics for a particular SLEP interface.') usd_slep_if_stats_index = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 1), interface_index()) if mibBuilder.loadTexts: usdSlepIfStatsIndex.setStatus('current') if mibBuilder.loadTexts: usdSlepIfStatsIndex.setDescription('The ifIndex value of that identifies this entry.') usd_slep_keep_alive_failures = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 2), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: usdSlepKeepAliveFailures.setStatus('current') if mibBuilder.loadTexts: usdSlepKeepAliveFailures.setDescription('The number of link drops due to keep-alive failures.') usd_slep_link_status_too_long_packets = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 3), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: usdSlepLinkStatusTooLongPackets.setStatus('current') if mibBuilder.loadTexts: usdSlepLinkStatusTooLongPackets.setDescription('The number of packets that were too long for this SLEP entity.') usd_slep_link_status_bad_fc_ss = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 1, 1, 3, 1, 4), counter32()).setMaxAccess('readonly') if mibBuilder.loadTexts: usdSlepLinkStatusBadFCSs.setStatus('current') if mibBuilder.loadTexts: usdSlepLinkStatusBadFCSs.setDescription("The number of FCS's errors for this SLEP entity.") usd_slep_conformance = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4)) usd_slep_compliances = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 1)) usd_slep_groups = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 2)) usd_slep_compliance = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 1, 1)).setObjects(('Unisphere-Data-SLEP-MIB', 'usdSlepGroup')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usd_slep_compliance = usdSlepCompliance.setStatus('obsolete') if mibBuilder.loadTexts: usdSlepCompliance.setDescription('Obsolete compliance statement for entities which implement the Unisphere SLEP MIB. This compliance statement became obsolete when the usdSlepDownWhenLooped object was added.') usd_slep_compliance2 = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 1, 2)).setObjects(('Unisphere-Data-SLEP-MIB', 'usdSlepGroup2')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usd_slep_compliance2 = usdSlepCompliance2.setStatus('current') if mibBuilder.loadTexts: usdSlepCompliance2.setDescription('The compliance statement for entities which implement the Unisphere SLEP MIB.') usd_slep_group = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 2, 1)).setObjects(('Unisphere-Data-SLEP-MIB', 'usdSlepNextIfIndex'), ('Unisphere-Data-SLEP-MIB', 'usdSlepKeepAliveTimer'), ('Unisphere-Data-SLEP-MIB', 'usdSlepIfLowerIfIndex'), ('Unisphere-Data-SLEP-MIB', 'usdSlepIfRowStatus'), ('Unisphere-Data-SLEP-MIB', 'usdSlepKeepAliveFailures'), ('Unisphere-Data-SLEP-MIB', 'usdSlepLinkStatusTooLongPackets'), ('Unisphere-Data-SLEP-MIB', 'usdSlepLinkStatusBadFCSs')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usd_slep_group = usdSlepGroup.setStatus('obsolete') if mibBuilder.loadTexts: usdSlepGroup.setDescription('Obsolete collection of objects providing management of SLEP interfaces in a Unisphere product. This group became obsolete when the usdSlepDownWhenLooped object was added.') usd_slep_group2 = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 15, 4, 2, 2)).setObjects(('Unisphere-Data-SLEP-MIB', 'usdSlepNextIfIndex'), ('Unisphere-Data-SLEP-MIB', 'usdSlepKeepAliveTimer'), ('Unisphere-Data-SLEP-MIB', 'usdSlepIfLowerIfIndex'), ('Unisphere-Data-SLEP-MIB', 'usdSlepIfRowStatus'), ('Unisphere-Data-SLEP-MIB', 'usdSlepDownWhenLooped'), ('Unisphere-Data-SLEP-MIB', 'usdSlepKeepAliveFailures'), ('Unisphere-Data-SLEP-MIB', 'usdSlepLinkStatusTooLongPackets'), ('Unisphere-Data-SLEP-MIB', 'usdSlepLinkStatusBadFCSs')) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): usd_slep_group2 = usdSlepGroup2.setStatus('current') if mibBuilder.loadTexts: usdSlepGroup2.setDescription('A collection of objects providing management of SLEP interfaces in a Unisphere product.') mibBuilder.exportSymbols('Unisphere-Data-SLEP-MIB', usdSlepCompliances=usdSlepCompliances, usdSlepGroup=usdSlepGroup, usdSlepGroups=usdSlepGroups, usdSlepIfStatsIndex=usdSlepIfStatsIndex, usdSlepIfEntry=usdSlepIfEntry, usdSlepObjects=usdSlepObjects, usdSlepIfStatisticsEntry=usdSlepIfStatisticsEntry, usdSlepKeepAliveTimer=usdSlepKeepAliveTimer, usdSlepCompliance2=usdSlepCompliance2, usdSlepLinkStatusBadFCSs=usdSlepLinkStatusBadFCSs, usdSlepIfLowerIfIndex=usdSlepIfLowerIfIndex, usdSlepIfLayer=usdSlepIfLayer, usdSlepCompliance=usdSlepCompliance, usdSlepConformance=usdSlepConformance, usdSlepMIBS=usdSlepMIBS, usdSlepIfRowStatus=usdSlepIfRowStatus, usdSlepLinkStatusTooLongPackets=usdSlepLinkStatusTooLongPackets, usdSlepNextIfIndex=usdSlepNextIfIndex, PYSNMP_MODULE_ID=usdSlepMIBS, usdSlepIfIndex=usdSlepIfIndex, usdSlepKeepAliveFailures=usdSlepKeepAliveFailures, usdSlepGroup2=usdSlepGroup2, usdSlepDownWhenLooped=usdSlepDownWhenLooped, usdSlepIfStatisticsTable=usdSlepIfStatisticsTable, usdSlepIfTable=usdSlepIfTable)
with open("5letterscrabble.txt", 'r') as f: allwords = f.read() allletters = list(allwords) print(len(allletters)) allletters.sort() alphabet = "abcdefghijklmnopqrstuvwxyz" counts = {} for each in list(alphabet): #print(allletters.count(each)) counts[each] = allletters.count(each) counts2 = dict(sorted(counts.items(), reverse=True , key= lambda X:X[1])) for key, val in counts2.items(): print(key, ':', val) # top scoring letter word from 5letterwords, 5lettertop10k & 5letterscrabble: arose ''' Also: arles, earls, lares, laser, lears, rales, reals, seral, aster, rates, resat, stare, tares, tears Most vowels: adieu 2nd starter words if first blank with remaining vowels: until, build, built, input, fluid, unity, guild more consonants: think, night, light, might, child, thing '''
with open('5letterscrabble.txt', 'r') as f: allwords = f.read() allletters = list(allwords) print(len(allletters)) allletters.sort() alphabet = 'abcdefghijklmnopqrstuvwxyz' counts = {} for each in list(alphabet): counts[each] = allletters.count(each) counts2 = dict(sorted(counts.items(), reverse=True, key=lambda X: X[1])) for (key, val) in counts2.items(): print(key, ':', val) '\nAlso: arles, earls, lares, laser, lears, rales, reals, seral, \naster, rates, resat, stare, tares, tears\nMost vowels: adieu\n\n2nd starter words if first blank with remaining vowels: until, build, built, input, fluid, unity, guild\nmore consonants: think, night, light, might, child, thing\n'
def sum_n_natural_numbers(n: int) -> int: """:returns 1 + 2 + 3 + 4 + ... + n""" result = 0 for i in range(n + 1): result = result + i return result # print(sum_n_natural_numbers(0)) # print(sum_n_natural_numbers(1)) # print(sum_n_natural_numbers(2)) # print(sum_n_natural_numbers(3)) # print(sum_n_natural_numbers(4)) # print(sum_n_natural_numbers(5)) # print(sum_n_natural_numbers(6)) n = int(input()) print(sum_n_natural_numbers(n))
def sum_n_natural_numbers(n: int) -> int: """:returns 1 + 2 + 3 + 4 + ... + n""" result = 0 for i in range(n + 1): result = result + i return result n = int(input()) print(sum_n_natural_numbers(n))
def get_server_url(http_method, server_root, username, password): if username and password: return '%(http_method)s://%(user)s:%(pass)s@%(server)s' % { 'http_method': http_method, 'user': username, 'pass': password, 'server': server_root, } else: return '%(http_method)s://%(server)s' % { 'http_method': http_method, 'server': server_root, } def get_dynamic_db_settings(server_root, username, password, dbname, use_https=False): """ Get dynamic database settings. Other apps can use this if they want to change settings """ http_method = 'https' if use_https else 'http' server_url = get_server_url(http_method, server_root, username, password) database = '%(server)s/%(database)s' % { 'server': server_url, 'database': dbname, } return { 'COUCH_SERVER': server_url, 'COUCH_DATABASE': database, } def _make_couchdb_tuple(row, couch_database_url): if isinstance(row, basestring): app_label = row return app_label, couch_database_url else: app_label, postfix = row return app_label, '%s__%s' % (couch_database_url, postfix) def make_couchdb_tuples(config, couch_database_url): """ Helper function to generate couchdb tuples for mapping app name to couch database URL. """ return [_make_couchdb_tuple(row, couch_database_url) for row in config] def get_extra_couchdbs(config, couch_database_url): """ Create a mapping from database prefix to database url :param config: list of database strings or tuples :param couch_database_url: main database url """ extra_dbs = {} for row in config: if isinstance(row, tuple): _, postfix = row extra_dbs[postfix] = '%s__%s' % (couch_database_url, postfix) return extra_dbs
def get_server_url(http_method, server_root, username, password): if username and password: return '%(http_method)s://%(user)s:%(pass)s@%(server)s' % {'http_method': http_method, 'user': username, 'pass': password, 'server': server_root} else: return '%(http_method)s://%(server)s' % {'http_method': http_method, 'server': server_root} def get_dynamic_db_settings(server_root, username, password, dbname, use_https=False): """ Get dynamic database settings. Other apps can use this if they want to change settings """ http_method = 'https' if use_https else 'http' server_url = get_server_url(http_method, server_root, username, password) database = '%(server)s/%(database)s' % {'server': server_url, 'database': dbname} return {'COUCH_SERVER': server_url, 'COUCH_DATABASE': database} def _make_couchdb_tuple(row, couch_database_url): if isinstance(row, basestring): app_label = row return (app_label, couch_database_url) else: (app_label, postfix) = row return (app_label, '%s__%s' % (couch_database_url, postfix)) def make_couchdb_tuples(config, couch_database_url): """ Helper function to generate couchdb tuples for mapping app name to couch database URL. """ return [_make_couchdb_tuple(row, couch_database_url) for row in config] def get_extra_couchdbs(config, couch_database_url): """ Create a mapping from database prefix to database url :param config: list of database strings or tuples :param couch_database_url: main database url """ extra_dbs = {} for row in config: if isinstance(row, tuple): (_, postfix) = row extra_dbs[postfix] = '%s__%s' % (couch_database_url, postfix) return extra_dbs
# # PySNMP MIB module L2L3-VPN-MCAST-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/L2L3-VPN-MCAST-MIB # Produced by pysmi-0.3.4 at Wed May 1 14:04:58 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion") InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType") jnxL2L3VpnMcastExperiment, = mibBuilder.importSymbols("JUNIPER-EXPERIMENT-MIB", "jnxL2L3VpnMcastExperiment") jnxMibs, = mibBuilder.importSymbols("JUNIPER-SMI", "jnxMibs") MplsLabel, = mibBuilder.importSymbols("MPLS-TC-STD-MIB", "MplsLabel") SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString") ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance") Integer32, IpAddress, Bits, TimeTicks, Gauge32, Counter32, Unsigned32, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, experimental, ObjectIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "IpAddress", "Bits", "TimeTicks", "Gauge32", "Counter32", "Unsigned32", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "experimental", "ObjectIdentity", "MibIdentifier") TimeInterval, TimeStamp, TextualConvention, RowPointer, DisplayString, RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TimeInterval", "TimeStamp", "TextualConvention", "RowPointer", "DisplayString", "RowStatus", "TruthValue") jnxL2L3VpnMcastMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1)) jnxL2L3VpnMcastMIB.setRevisions(('2012-11-05 12:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setRevisionsDescriptions(('Initial version of the draft.',)) if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setLastUpdated('201211051200Z') if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setOrganization('IETF Layer-3 Virtual Private Networks Working Group.') if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setContactInfo(' Comments and discussion to l3vpn@ietf.org Jeffrey (Zhaohui) Zhang Juniper Networks, Inc. 10 Technology Park Drive Westford, MA 01886 USA Email: zzhang@juniper.net ') if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setDescription('This MIB contains common managed object definitions for multicast in Layer 2 and Layer 3 VPNs, defined by [I-D.ietf-l2vpn-vpls-mcast] and RFC 6513/6514. Copyright (C) The Internet Society (2012).') class JnxL2L3VpnMcastProviderTunnelType(TextualConvention, Integer32): description = 'Types of provider tunnels used for multicast in a l2/l3vpn.' status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7)) namedValues = NamedValues(("unconfigured", 0), ("rsvp-p2mp", 1), ("ldp-p2mp", 2), ("pim-ssm", 3), ("pim-asm", 4), ("pim-bidir", 5), ("ingress-replication", 6), ("ldp-mp2mp", 7)) jnxL2L3VpnMcastObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1)) jnxL2L3VpnMcastPmsiStates = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1)) jnxL2L3VpnMcastPmsiTunnelAttributeTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1), ) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeTable.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeTable.setDescription('This table is for advertised/received PMSI attributes, to be referred to by I-PMSI or S-PMSI table entries') jnxL2L3VpnMcastPmsiTunnelAttributeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1), ).setIndexNames((0, "L2L3-VPN-MCAST-MIB", "jnxL2L3VpnMcastPmsiTunnelAttributeFlags"), (0, "L2L3-VPN-MCAST-MIB", "jnxL2L3VpnMcastPmsiTunnelAttributeType"), (0, "L2L3-VPN-MCAST-MIB", "jnxL2L3VpnMcastPmsiTunnelAttributeLabel"), (0, "L2L3-VPN-MCAST-MIB", "jnxL2L3VpnMcastPmsiTunnelAttributeId")) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeEntry.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeEntry.setDescription("An entry in this table corresponds to an PMSI attribute that is advertised/received on this router. For BGP-based signaling (for I-PMSI via auto-discovery procedure, or for S-PMSI via S-PMSI A-D routes), they are just as signaled by BGP (RFC 6514 section 5, 'PMSI Tunnel attribute'). For UDP-based S-PMSI signaling for PIM-MVPN, they're derived from S-PMSI Join Message (RFC 6513 section 7.4.2, 'UDP-based Protocol').. Note that BGP-based signaling may be used for PIM-MVPN as well.") jnxL2L3VpnMcastPmsiTunnelAttributeFlags = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeFlags.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeFlags.setDescription("For UDP-based S-PMSI signaling for PIM-MVPN, this is 0. For BGP-based I/S-PMSI signaling, per RFC 6514 section 5, 'PMSI Tunnel Attribute': The Flags field has the following format: 0 1 2 3 4 5 6 7 +-+-+-+-+-+-+-+-+ | reserved |L| +-+-+-+-+-+-+-+-+ This document defines the following flags: + Leaf Information Required (L)") jnxL2L3VpnMcastPmsiTunnelAttributeType = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 2), JnxL2L3VpnMcastProviderTunnelType()) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeType.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeType.setDescription("For BGP-based I/S-PMSI signaling for either PIM or BGP-MVPN, per RFC 6514 section 5, 'PMSI Tunnel Attribute': The Tunnel Type identifies the type of the tunneling technology used to establish the PMSI tunnel. The type determines the syntax and semantics of the Tunnel Identifier field. This document defines the following Tunnel Types: 0 - No tunnel information present 1 - RSVP-TE P2MP LSP 2 - mLDP P2MP LSP 3 - PIM-SSM Tree 4 - PIM-SM Tree 5 - PIM-Bidir Tree 6 - Ingress Replication 7 - mLDP MP2MP LSP For UDP-based S-PMSI signaling for PIM-MVPN, RFC 6513 does not specify if a PIM provider tunnel is SSM, SM or Bidir, and an agent can use either type 3, 4, or 5 based on its best knowledge.") jnxL2L3VpnMcastPmsiTunnelAttributeLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 3), MplsLabel()) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeLabel.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeLabel.setDescription("For BGP-based I/S-PMSI signaling, per RFC 6514 section 5, 'PMSI Tunnel Attribute': If the MPLS Label field is non-zero, then it contains an MPLS label encoded as 3 octets, where the high-order 20 bits contain the label value. Absence of MPLS Label is indicated by setting the MPLS Label field to zero. For UDP-based S-PMSI signaling for PIM-MVPN, this is not applicable for now, as RFC 6513 does not specify mpls encapsulation and tunnel aggregation with UDP-based signaling.") jnxL2L3VpnMcastPmsiTunnelAttributeId = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 37))) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeId.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeId.setDescription("For BGP-based signaling, as defined in RFC 6514 section 5, 'PMSI Tunnel Attribute'. For UDP-based S-PMSI signaling for PIM-MVPN, RFC 6513 only specifies the 'P-Group' address, and that is filled into the first four octets of this field.") jnxL2L3VpnMcastPmsiTunnelPointer = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 5), RowPointer()).setMaxAccess("readonly") if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelPointer.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelPointer.setDescription('If the tunnel exists in some MIB table, this is the row pointer to it.') jnxL2L3VpnMcastPmsiTunnelIf = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 6), RowPointer()).setMaxAccess("readonly") if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelIf.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelIf.setDescription('If the tunnel has a corresponding interface, this is the row pointer to the ifName table.') mibBuilder.exportSymbols("L2L3-VPN-MCAST-MIB", jnxL2L3VpnMcastMIB=jnxL2L3VpnMcastMIB, jnxL2L3VpnMcastPmsiStates=jnxL2L3VpnMcastPmsiStates, jnxL2L3VpnMcastPmsiTunnelAttributeLabel=jnxL2L3VpnMcastPmsiTunnelAttributeLabel, jnxL2L3VpnMcastPmsiTunnelAttributeId=jnxL2L3VpnMcastPmsiTunnelAttributeId, jnxL2L3VpnMcastObjects=jnxL2L3VpnMcastObjects, JnxL2L3VpnMcastProviderTunnelType=JnxL2L3VpnMcastProviderTunnelType, jnxL2L3VpnMcastPmsiTunnelPointer=jnxL2L3VpnMcastPmsiTunnelPointer, jnxL2L3VpnMcastPmsiTunnelIf=jnxL2L3VpnMcastPmsiTunnelIf, jnxL2L3VpnMcastPmsiTunnelAttributeTable=jnxL2L3VpnMcastPmsiTunnelAttributeTable, PYSNMP_MODULE_ID=jnxL2L3VpnMcastMIB, jnxL2L3VpnMcastPmsiTunnelAttributeFlags=jnxL2L3VpnMcastPmsiTunnelAttributeFlags, jnxL2L3VpnMcastPmsiTunnelAttributeType=jnxL2L3VpnMcastPmsiTunnelAttributeType, jnxL2L3VpnMcastPmsiTunnelAttributeEntry=jnxL2L3VpnMcastPmsiTunnelAttributeEntry)
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier') (named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues') (value_range_constraint, value_size_constraint, constraints_intersection, single_value_constraint, constraints_union) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueRangeConstraint', 'ValueSizeConstraint', 'ConstraintsIntersection', 'SingleValueConstraint', 'ConstraintsUnion') (inet_address, inet_address_type) = mibBuilder.importSymbols('INET-ADDRESS-MIB', 'InetAddress', 'InetAddressType') (jnx_l2_l3_vpn_mcast_experiment,) = mibBuilder.importSymbols('JUNIPER-EXPERIMENT-MIB', 'jnxL2L3VpnMcastExperiment') (jnx_mibs,) = mibBuilder.importSymbols('JUNIPER-SMI', 'jnxMibs') (mpls_label,) = mibBuilder.importSymbols('MPLS-TC-STD-MIB', 'MplsLabel') (snmp_admin_string,) = mibBuilder.importSymbols('SNMP-FRAMEWORK-MIB', 'SnmpAdminString') (object_group, notification_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'ObjectGroup', 'NotificationGroup', 'ModuleCompliance') (integer32, ip_address, bits, time_ticks, gauge32, counter32, unsigned32, iso, counter64, mib_scalar, mib_table, mib_table_row, mib_table_column, module_identity, notification_type, experimental, object_identity, mib_identifier) = mibBuilder.importSymbols('SNMPv2-SMI', 'Integer32', 'IpAddress', 'Bits', 'TimeTicks', 'Gauge32', 'Counter32', 'Unsigned32', 'iso', 'Counter64', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'ModuleIdentity', 'NotificationType', 'experimental', 'ObjectIdentity', 'MibIdentifier') (time_interval, time_stamp, textual_convention, row_pointer, display_string, row_status, truth_value) = mibBuilder.importSymbols('SNMPv2-TC', 'TimeInterval', 'TimeStamp', 'TextualConvention', 'RowPointer', 'DisplayString', 'RowStatus', 'TruthValue') jnx_l2_l3_vpn_mcast_mib = module_identity((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1)) jnxL2L3VpnMcastMIB.setRevisions(('2012-11-05 12:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setRevisionsDescriptions(('Initial version of the draft.',)) if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setLastUpdated('201211051200Z') if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setOrganization('IETF Layer-3 Virtual Private Networks Working Group.') if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setContactInfo(' Comments and discussion to l3vpn@ietf.org Jeffrey (Zhaohui) Zhang Juniper Networks, Inc. 10 Technology Park Drive Westford, MA 01886 USA Email: zzhang@juniper.net ') if mibBuilder.loadTexts: jnxL2L3VpnMcastMIB.setDescription('This MIB contains common managed object definitions for multicast in Layer 2 and Layer 3 VPNs, defined by [I-D.ietf-l2vpn-vpls-mcast] and RFC 6513/6514. Copyright (C) The Internet Society (2012).') class Jnxl2L3Vpnmcastprovidertunneltype(TextualConvention, Integer32): description = 'Types of provider tunnels used for multicast in a l2/l3vpn.' status = 'current' subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(0, 1, 2, 3, 4, 5, 6, 7)) named_values = named_values(('unconfigured', 0), ('rsvp-p2mp', 1), ('ldp-p2mp', 2), ('pim-ssm', 3), ('pim-asm', 4), ('pim-bidir', 5), ('ingress-replication', 6), ('ldp-mp2mp', 7)) jnx_l2_l3_vpn_mcast_objects = mib_identifier((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1)) jnx_l2_l3_vpn_mcast_pmsi_states = mib_identifier((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1)) jnx_l2_l3_vpn_mcast_pmsi_tunnel_attribute_table = mib_table((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1)) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeTable.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeTable.setDescription('This table is for advertised/received PMSI attributes, to be referred to by I-PMSI or S-PMSI table entries') jnx_l2_l3_vpn_mcast_pmsi_tunnel_attribute_entry = mib_table_row((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1)).setIndexNames((0, 'L2L3-VPN-MCAST-MIB', 'jnxL2L3VpnMcastPmsiTunnelAttributeFlags'), (0, 'L2L3-VPN-MCAST-MIB', 'jnxL2L3VpnMcastPmsiTunnelAttributeType'), (0, 'L2L3-VPN-MCAST-MIB', 'jnxL2L3VpnMcastPmsiTunnelAttributeLabel'), (0, 'L2L3-VPN-MCAST-MIB', 'jnxL2L3VpnMcastPmsiTunnelAttributeId')) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeEntry.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeEntry.setDescription("An entry in this table corresponds to an PMSI attribute that is advertised/received on this router. For BGP-based signaling (for I-PMSI via auto-discovery procedure, or for S-PMSI via S-PMSI A-D routes), they are just as signaled by BGP (RFC 6514 section 5, 'PMSI Tunnel attribute'). For UDP-based S-PMSI signaling for PIM-MVPN, they're derived from S-PMSI Join Message (RFC 6513 section 7.4.2, 'UDP-based Protocol').. Note that BGP-based signaling may be used for PIM-MVPN as well.") jnx_l2_l3_vpn_mcast_pmsi_tunnel_attribute_flags = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 1), octet_string().subtype(subtypeSpec=value_size_constraint(1, 1)).setFixedLength(1)) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeFlags.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeFlags.setDescription("For UDP-based S-PMSI signaling for PIM-MVPN, this is 0. For BGP-based I/S-PMSI signaling, per RFC 6514 section 5, 'PMSI Tunnel Attribute': The Flags field has the following format: 0 1 2 3 4 5 6 7 +-+-+-+-+-+-+-+-+ | reserved |L| +-+-+-+-+-+-+-+-+ This document defines the following flags: + Leaf Information Required (L)") jnx_l2_l3_vpn_mcast_pmsi_tunnel_attribute_type = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 2), jnx_l2_l3_vpn_mcast_provider_tunnel_type()) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeType.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeType.setDescription("For BGP-based I/S-PMSI signaling for either PIM or BGP-MVPN, per RFC 6514 section 5, 'PMSI Tunnel Attribute': The Tunnel Type identifies the type of the tunneling technology used to establish the PMSI tunnel. The type determines the syntax and semantics of the Tunnel Identifier field. This document defines the following Tunnel Types: 0 - No tunnel information present 1 - RSVP-TE P2MP LSP 2 - mLDP P2MP LSP 3 - PIM-SSM Tree 4 - PIM-SM Tree 5 - PIM-Bidir Tree 6 - Ingress Replication 7 - mLDP MP2MP LSP For UDP-based S-PMSI signaling for PIM-MVPN, RFC 6513 does not specify if a PIM provider tunnel is SSM, SM or Bidir, and an agent can use either type 3, 4, or 5 based on its best knowledge.") jnx_l2_l3_vpn_mcast_pmsi_tunnel_attribute_label = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 3), mpls_label()) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeLabel.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeLabel.setDescription("For BGP-based I/S-PMSI signaling, per RFC 6514 section 5, 'PMSI Tunnel Attribute': If the MPLS Label field is non-zero, then it contains an MPLS label encoded as 3 octets, where the high-order 20 bits contain the label value. Absence of MPLS Label is indicated by setting the MPLS Label field to zero. For UDP-based S-PMSI signaling for PIM-MVPN, this is not applicable for now, as RFC 6513 does not specify mpls encapsulation and tunnel aggregation with UDP-based signaling.") jnx_l2_l3_vpn_mcast_pmsi_tunnel_attribute_id = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 4), octet_string().subtype(subtypeSpec=value_size_constraint(0, 37))) if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeId.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelAttributeId.setDescription("For BGP-based signaling, as defined in RFC 6514 section 5, 'PMSI Tunnel Attribute'. For UDP-based S-PMSI signaling for PIM-MVPN, RFC 6513 only specifies the 'P-Group' address, and that is filled into the first four octets of this field.") jnx_l2_l3_vpn_mcast_pmsi_tunnel_pointer = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 5), row_pointer()).setMaxAccess('readonly') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelPointer.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelPointer.setDescription('If the tunnel exists in some MIB table, this is the row pointer to it.') jnx_l2_l3_vpn_mcast_pmsi_tunnel_if = mib_table_column((1, 3, 6, 1, 4, 1, 2636, 5, 11, 1, 1, 1, 1, 1, 6), row_pointer()).setMaxAccess('readonly') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelIf.setStatus('current') if mibBuilder.loadTexts: jnxL2L3VpnMcastPmsiTunnelIf.setDescription('If the tunnel has a corresponding interface, this is the row pointer to the ifName table.') mibBuilder.exportSymbols('L2L3-VPN-MCAST-MIB', jnxL2L3VpnMcastMIB=jnxL2L3VpnMcastMIB, jnxL2L3VpnMcastPmsiStates=jnxL2L3VpnMcastPmsiStates, jnxL2L3VpnMcastPmsiTunnelAttributeLabel=jnxL2L3VpnMcastPmsiTunnelAttributeLabel, jnxL2L3VpnMcastPmsiTunnelAttributeId=jnxL2L3VpnMcastPmsiTunnelAttributeId, jnxL2L3VpnMcastObjects=jnxL2L3VpnMcastObjects, JnxL2L3VpnMcastProviderTunnelType=JnxL2L3VpnMcastProviderTunnelType, jnxL2L3VpnMcastPmsiTunnelPointer=jnxL2L3VpnMcastPmsiTunnelPointer, jnxL2L3VpnMcastPmsiTunnelIf=jnxL2L3VpnMcastPmsiTunnelIf, jnxL2L3VpnMcastPmsiTunnelAttributeTable=jnxL2L3VpnMcastPmsiTunnelAttributeTable, PYSNMP_MODULE_ID=jnxL2L3VpnMcastMIB, jnxL2L3VpnMcastPmsiTunnelAttributeFlags=jnxL2L3VpnMcastPmsiTunnelAttributeFlags, jnxL2L3VpnMcastPmsiTunnelAttributeType=jnxL2L3VpnMcastPmsiTunnelAttributeType, jnxL2L3VpnMcastPmsiTunnelAttributeEntry=jnxL2L3VpnMcastPmsiTunnelAttributeEntry)
#map from dna to rna ''' Blood Group: A B AB O DNA: A T C G combination RNA: U,A, G, C combination respectively to DNA input: ATTCG output:UAAGC ''' rna_map={'A':'U','T':'A','C':'G','G':'C'} #dna=input('Enter DNA: ') dna="ATTCG" op="" for l in dna: op+=rna_map.get(l) print(op)
""" Blood Group: A B AB O DNA: A T C G combination RNA: U,A, G, C combination respectively to DNA input: ATTCG output:UAAGC """ rna_map = {'A': 'U', 'T': 'A', 'C': 'G', 'G': 'C'} dna = 'ATTCG' op = '' for l in dna: op += rna_map.get(l) print(op)
# Settings # Access credentials: CONSUMER_KEY = '' CONSUMER_SEC = '' ACCESS_TOK = '' ACCESS_SEC = '' USERLLIST = 'userlist.txt' OUTDIR = u'Output' OUTFILE = 'tweetdata.' # output filename prefix
consumer_key = '' consumer_sec = '' access_tok = '' access_sec = '' userllist = 'userlist.txt' outdir = u'Output' outfile = 'tweetdata.'
"""Given string S and a dictionary of words words, find the number of words[i] that is a subsequence of S.""" class Solution(object): def numMatchingSubseq(self, S, words): """ :type S: str :type words: List[str] :rtype: int """ count = 0 for word in words: if self.is_subsequence(word, S): count += 1 return count def is_subsequence(self, a, b): if len(a) == 0: return True i, j = 0, 0 while j < len(b): if a[i] == b[j]: i += 1 if i == len(a): return True j += 1 return False
"""Given string S and a dictionary of words words, find the number of words[i] that is a subsequence of S.""" class Solution(object): def num_matching_subseq(self, S, words): """ :type S: str :type words: List[str] :rtype: int """ count = 0 for word in words: if self.is_subsequence(word, S): count += 1 return count def is_subsequence(self, a, b): if len(a) == 0: return True (i, j) = (0, 0) while j < len(b): if a[i] == b[j]: i += 1 if i == len(a): return True j += 1 return False
bot_instance = None global_config = None reference_market = None default_time_frame = None def __init__(bot, config): global bot_instance bot_instance = bot global global_config global_config = config def get_bot(): return bot_instance def get_global_config(): return global_config def set_default_time_frame(time_frame): global default_time_frame default_time_frame = time_frame def get_default_time_frame(): return default_time_frame def get_reference_market(): global reference_market if reference_market is None: try: reference_market = next(iter(get_bot().get_exchange_traders().values())).get_trades_manager().get_reference() except StopIteration: reference_market = None return reference_market
bot_instance = None global_config = None reference_market = None default_time_frame = None def __init__(bot, config): global bot_instance bot_instance = bot global global_config global_config = config def get_bot(): return bot_instance def get_global_config(): return global_config def set_default_time_frame(time_frame): global default_time_frame default_time_frame = time_frame def get_default_time_frame(): return default_time_frame def get_reference_market(): global reference_market if reference_market is None: try: reference_market = next(iter(get_bot().get_exchange_traders().values())).get_trades_manager().get_reference() except StopIteration: reference_market = None return reference_market
class Animal (object): pass class Dog(Animal): def __init__(self,name): self.name=name class Cat(Animal): def __init__(self,name): self.name=name class Person(object): def __init__(self,name): self.name=name class Employee(Person): def __init__(self,name,salary): super(Employee,self).__init__(name) self.salary=salary class Fish(object): pass class Salmon(Fish): pass class Halibut(Fish): pass rover=Dog("Rover") satan=Cat("Satan") mary=Person("Mary") mary.pet=satan frank=Employee("Frank",120000) frank.pet=rover flipper=Fish() crouse=Salmon() harry=Halibut()
class Animal(object): pass class Dog(Animal): def __init__(self, name): self.name = name class Cat(Animal): def __init__(self, name): self.name = name class Person(object): def __init__(self, name): self.name = name class Employee(Person): def __init__(self, name, salary): super(Employee, self).__init__(name) self.salary = salary class Fish(object): pass class Salmon(Fish): pass class Halibut(Fish): pass rover = dog('Rover') satan = cat('Satan') mary = person('Mary') mary.pet = satan frank = employee('Frank', 120000) frank.pet = rover flipper = fish() crouse = salmon() harry = halibut()
"""The experimental package provides some analysis functions for aggregating darshan log data. The functions are not well tested. """
"""The experimental package provides some analysis functions for aggregating darshan log data. The functions are not well tested. """
# -*- coding: utf-8 -*- """ Document Library """ module = "doc" #============================================================================== # Settings resource = "setting" tablename = "%s_%s" % (module, resource) table = db.define_table(tablename, Field("audit_read", "boolean"), Field("audit_write", "boolean"), migrate=migrate) #============================================================================== resource = "document" tablename = "%s_%s" % (module, resource) table = db.define_table(tablename, timestamp, uuidstamp, authorstamp, deletion_status, Field("name", length=128, notnull=True, unique=True), Field("file", "upload", autodelete = True,), Field("url"), person_id, organisation_id, location_id, Field("date", "date"), comments, Field("entered", "boolean"), migrate=migrate ) table.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, "%s.name" % tablename)] #table.name.label = T("Name") table.name.comment = SPAN("*", _class="req") def shn_file_represent( file, table): if file: return A(table.file.retrieve(file)[0], _href=URL(r=request, f="download", args=[file])) else: return NONE table.file.represent = lambda file, table=table: shn_file_represent(file, table) table.url.label = T("URL") table.url.represent = lambda url: url and A(url,_href=url) or NONE table.url.requires = [IS_NULL_OR(IS_URL()),IS_NULL_OR(IS_NOT_IN_DB(db, "%s.url" % tablename))] table.person_id.label = T("Author") table.person_id.comment = shn_person_comment(T("Author"), T("The Author of this Document (optional)")) table.location_id.readable = table.location_id.writable = False table.entered.comment = DIV( _class="tooltip", _title="Entered" + "|" + Tstr("Has data from this Reference Document been entered into Sahana?") ) # ----------------------------------------------------------------------------- def document_represent(id): if not id: return NONE represent = shn_get_db_field_value(db = db, table = "doc_document", field = "name", look_up = id) #File #Website #Person return A ( represent, _href = URL(r=request, c="doc", f="document", args = [id], extension = ""), _target = "blank" ) DOCUMENT = Tstr("Reference Document") ADD_DOCUMENT = Tstr("Add Reference Document") document_comment = DIV( A( ADD_DOCUMENT, _class="colorbox", _href=URL(r=request, c="doc", f="document", args="create", vars=dict(format="popup")), _target="top", _title=Tstr("If you need to add a new document then you can click here to attach one."), ), DIV( _class="tooltip", _title=DOCUMENT + "|" + \ Tstr("A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document."), #Tstr("Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead."), ), #SPAN( I( T("If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.") ), # _style = "color:red" # ) ) # CRUD Strings LIST_DOCUMENTS = T("List Documents") s3.crud_strings[tablename] = Storage( title_create = ADD_DOCUMENT, title_display = T("Document Details"), title_list = LIST_DOCUMENTS, title_update = T("Edit Document"), title_search = T("Search Documents"), subtitle_create = T("Add New Document"), subtitle_list = DOCUMENT, label_list_button = LIST_DOCUMENTS, label_create_button = ADD_DOCUMENT, label_delete_button = T("Delete Document"), msg_record_created = T("Document added"), msg_record_modified = T("Document updated"), msg_record_deleted = T("Document deleted"), msg_list_empty = T("No Documents found")) document_id = db.Table(None, "document_id", Field("document_id", db.doc_document, requires = IS_NULL_OR(IS_ONE_OF(db, "doc_document.id", document_represent)), represent = document_represent, label = DOCUMENT, comment = document_comment, ondelete = "RESTRICT", ) ) #============================================================================== resource = "image" tablename = "%s_%s" % (module, resource) table = db.define_table(tablename, timestamp, uuidstamp, authorstamp, deletion_status, Field("name", length=128, notnull=True, unique=True), Field("image", "upload"), #metadata_id, Field("url"), person_id, organisation_id, location_id, Field("date", "date"), comments, migrate=migrate) table.name.requires = [IS_NOT_EMPTY(), IS_NOT_IN_DB(db, "%s.name" % tablename)] #table.name.label = T("Name") table.name.comment = SPAN("*", _class="req") table.url.label = "URL" table.person_id.label = T("Person") # upload folder needs to be visible to the download() function as well as the upload table.image.uploadfolder = os.path.join(request.folder, "uploads/images") IMAGE_EXTENSIONS = ["png", "PNG", "jpg", "JPG", "jpeg", "JPEG", "gif", "GIF", "tif", "TIF", "tiff", "TIFF", "bmp", "BMP", "raw", "RAW"] table.image.requires = IS_IMAGE(extensions=(IMAGE_EXTENSIONS)) ADD_IMAGE = Tstr("Add Photo") image_id = db.Table(None, "image_id", Field("image_id", db.doc_image, requires = IS_NULL_OR(IS_ONE_OF(db, "doc_image.id", "%(name)s")), represent = lambda id: (id and [DIV(A(IMG(_src=URL(r=request, c="default", f="download", args=db(db.doc_image.id == id).select(db.doc_image.image, limitby=(0, 1)).first().image), _height=40), _class="zoom", _href="#zoom-media_image-%s" % id), DIV(IMG(_src=URL(r=request, c="default", f="download", args=db(db.doc_image.id == id).select(db.doc_image.image, limitby=(0, 1)).first().image),_width=600), _id="zoom-media_image-%s" % id, _class="hidden"))] or [""])[0], label = T("Image"), comment = DIV(A(ADD_IMAGE, _class="colorbox", _href=URL(r=request, c="doc", f="image", args="create", vars=dict(format="popup")), _target="top", _title=ADD_IMAGE), DIV( _class="tooltip", _title=ADD_IMAGE + "|" + Tstr("Add an Photo."))), ondelete = "RESTRICT" )) # CRUD Strings LIST_IMAGES = T("List Photos") s3.crud_strings[tablename] = Storage( title_create = ADD_IMAGE, title_display = T("Photo Details"), title_list = LIST_IMAGES, title_update = T("Edit Photo"), title_search = T("Search Photos"), subtitle_create = T("Add New Photo"), subtitle_list = T("Photo"), label_list_button = LIST_IMAGES, label_create_button = ADD_IMAGE, label_delete_button = T("Delete Photo"), msg_record_created = T("Photo added"), msg_record_modified = T("Photo updated"), msg_record_deleted = T("Photo deleted"), msg_list_empty = T("No Photos found")) #============================================================================== # END - Following code is not utilised resource = "metadata" tablename = "%s_%s" % (module, resource) table = db.define_table(tablename, timestamp, uuidstamp, authorstamp, deletion_status, location_id, Field("description"), person_id, #Field("organisation.id", "reference org_organisation"), Field("source"), Field("sensitivity"), # Should be turned into a drop-down by referring to AAA's sensitivity table Field("event_time", "datetime"), Field("expiry_time", "datetime"), Field("url"), migrate=migrate) table.uuid.requires = IS_NOT_IN_DB(db, "%s.uuid" % tablename) table.event_time.requires = IS_NULL_OR(IS_DATETIME()) table.expiry_time.requires = IS_NULL_OR(IS_DATETIME()) table.url.requires = IS_NULL_OR(IS_URL()) ADD_METADATA = Tstr("Add Metadata") metadata_id = db.Table(None, "metadata_id", Field("metadata_id", db.doc_metadata, requires = IS_NULL_OR(IS_ONE_OF(db, "doc_metadata.id", "%(id)s")), represent = lambda id: (id and [db(db.doc_metadata.id==id).select()[0].name] or [NONE])[0], label = T("Metadata"), comment = DIV(A(ADD_METADATA, _class="colorbox", _href=URL(r=request, c="doc", f="metadata", args="create", vars=dict(format="popup")), _target="top", _title=ADD_METADATA), DIV( _class="tooltip", _title=ADD_METADATA + "|" + "Add some metadata for the file, such as Soure, Sensitivity, Event Time.")), ondelete = "RESTRICT" ))
""" Document Library """ module = 'doc' resource = 'setting' tablename = '%s_%s' % (module, resource) table = db.define_table(tablename, field('audit_read', 'boolean'), field('audit_write', 'boolean'), migrate=migrate) resource = 'document' tablename = '%s_%s' % (module, resource) table = db.define_table(tablename, timestamp, uuidstamp, authorstamp, deletion_status, field('name', length=128, notnull=True, unique=True), field('file', 'upload', autodelete=True), field('url'), person_id, organisation_id, location_id, field('date', 'date'), comments, field('entered', 'boolean'), migrate=migrate) table.name.requires = [is_not_empty(), is_not_in_db(db, '%s.name' % tablename)] table.name.comment = span('*', _class='req') def shn_file_represent(file, table): if file: return a(table.file.retrieve(file)[0], _href=url(r=request, f='download', args=[file])) else: return NONE table.file.represent = lambda file, table=table: shn_file_represent(file, table) table.url.label = t('URL') table.url.represent = lambda url: url and a(url, _href=url) or NONE table.url.requires = [is_null_or(is_url()), is_null_or(is_not_in_db(db, '%s.url' % tablename))] table.person_id.label = t('Author') table.person_id.comment = shn_person_comment(t('Author'), t('The Author of this Document (optional)')) table.location_id.readable = table.location_id.writable = False table.entered.comment = div(_class='tooltip', _title='Entered' + '|' + tstr('Has data from this Reference Document been entered into Sahana?')) def document_represent(id): if not id: return NONE represent = shn_get_db_field_value(db=db, table='doc_document', field='name', look_up=id) return a(represent, _href=url(r=request, c='doc', f='document', args=[id], extension=''), _target='blank') document = tstr('Reference Document') add_document = tstr('Add Reference Document') document_comment = div(a(ADD_DOCUMENT, _class='colorbox', _href=url(r=request, c='doc', f='document', args='create', vars=dict(format='popup')), _target='top', _title=tstr('If you need to add a new document then you can click here to attach one.')), div(_class='tooltip', _title=DOCUMENT + '|' + tstr('A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.'))) list_documents = t('List Documents') s3.crud_strings[tablename] = storage(title_create=ADD_DOCUMENT, title_display=t('Document Details'), title_list=LIST_DOCUMENTS, title_update=t('Edit Document'), title_search=t('Search Documents'), subtitle_create=t('Add New Document'), subtitle_list=DOCUMENT, label_list_button=LIST_DOCUMENTS, label_create_button=ADD_DOCUMENT, label_delete_button=t('Delete Document'), msg_record_created=t('Document added'), msg_record_modified=t('Document updated'), msg_record_deleted=t('Document deleted'), msg_list_empty=t('No Documents found')) document_id = db.Table(None, 'document_id', field('document_id', db.doc_document, requires=is_null_or(is_one_of(db, 'doc_document.id', document_represent)), represent=document_represent, label=DOCUMENT, comment=document_comment, ondelete='RESTRICT')) resource = 'image' tablename = '%s_%s' % (module, resource) table = db.define_table(tablename, timestamp, uuidstamp, authorstamp, deletion_status, field('name', length=128, notnull=True, unique=True), field('image', 'upload'), field('url'), person_id, organisation_id, location_id, field('date', 'date'), comments, migrate=migrate) table.name.requires = [is_not_empty(), is_not_in_db(db, '%s.name' % tablename)] table.name.comment = span('*', _class='req') table.url.label = 'URL' table.person_id.label = t('Person') table.image.uploadfolder = os.path.join(request.folder, 'uploads/images') image_extensions = ['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'gif', 'GIF', 'tif', 'TIF', 'tiff', 'TIFF', 'bmp', 'BMP', 'raw', 'RAW'] table.image.requires = is_image(extensions=IMAGE_EXTENSIONS) add_image = tstr('Add Photo') image_id = db.Table(None, 'image_id', field('image_id', db.doc_image, requires=is_null_or(is_one_of(db, 'doc_image.id', '%(name)s')), represent=lambda id: (id and [div(a(img(_src=url(r=request, c='default', f='download', args=db(db.doc_image.id == id).select(db.doc_image.image, limitby=(0, 1)).first().image), _height=40), _class='zoom', _href='#zoom-media_image-%s' % id), div(img(_src=url(r=request, c='default', f='download', args=db(db.doc_image.id == id).select(db.doc_image.image, limitby=(0, 1)).first().image), _width=600), _id='zoom-media_image-%s' % id, _class='hidden'))] or [''])[0], label=t('Image'), comment=div(a(ADD_IMAGE, _class='colorbox', _href=url(r=request, c='doc', f='image', args='create', vars=dict(format='popup')), _target='top', _title=ADD_IMAGE), div(_class='tooltip', _title=ADD_IMAGE + '|' + tstr('Add an Photo.'))), ondelete='RESTRICT')) list_images = t('List Photos') s3.crud_strings[tablename] = storage(title_create=ADD_IMAGE, title_display=t('Photo Details'), title_list=LIST_IMAGES, title_update=t('Edit Photo'), title_search=t('Search Photos'), subtitle_create=t('Add New Photo'), subtitle_list=t('Photo'), label_list_button=LIST_IMAGES, label_create_button=ADD_IMAGE, label_delete_button=t('Delete Photo'), msg_record_created=t('Photo added'), msg_record_modified=t('Photo updated'), msg_record_deleted=t('Photo deleted'), msg_list_empty=t('No Photos found')) resource = 'metadata' tablename = '%s_%s' % (module, resource) table = db.define_table(tablename, timestamp, uuidstamp, authorstamp, deletion_status, location_id, field('description'), person_id, field('source'), field('sensitivity'), field('event_time', 'datetime'), field('expiry_time', 'datetime'), field('url'), migrate=migrate) table.uuid.requires = is_not_in_db(db, '%s.uuid' % tablename) table.event_time.requires = is_null_or(is_datetime()) table.expiry_time.requires = is_null_or(is_datetime()) table.url.requires = is_null_or(is_url()) add_metadata = tstr('Add Metadata') metadata_id = db.Table(None, 'metadata_id', field('metadata_id', db.doc_metadata, requires=is_null_or(is_one_of(db, 'doc_metadata.id', '%(id)s')), represent=lambda id: (id and [db(db.doc_metadata.id == id).select()[0].name] or [NONE])[0], label=t('Metadata'), comment=div(a(ADD_METADATA, _class='colorbox', _href=url(r=request, c='doc', f='metadata', args='create', vars=dict(format='popup')), _target='top', _title=ADD_METADATA), div(_class='tooltip', _title=ADD_METADATA + '|' + 'Add some metadata for the file, such as Soure, Sensitivity, Event Time.')), ondelete='RESTRICT'))
""" Model Config for YOLO v1 """ class YoloConfig: def __init__(self, in_channels=3, split_size=7, num_boxes=2, num_classes=20): # * Define the model aechitecture. # * Each conv layer is a tuple (kernel_size, out_ch, stride, padding.) # * each conv block is a list [(conv1_params), ... , (convN_params), num_repeats] # * "maxpool" --> MaxPool2d with stride 2 and size 2. self.architecture = [ (7, 64, 2, 3), "maxpool", (3, 192, 1, 1), "maxpool", (1, 128, 1, 0), (3, 256, 1, 1), (1, 256, 1, 1), (3, 512, 1, 1), "maxpool", [(1, 256, 1, 0), (3, 1024, 1, 1), 2], (1, 512, 1, 0), (3, 1024, 1, 1), "maxpool", [(1, 512, 1, 0), (3, 1024, 1, 1), 2], (3, 1023, 1, 1), (3, 1024, 2, 1), (3, 1024, 1, 1), (3, 1024, 1, 1) ] self.in_channels = in_channels self.split_size = split_size self.num_boxes = num_boxes self.num_classes = num_classes
""" Model Config for YOLO v1 """ class Yoloconfig: def __init__(self, in_channels=3, split_size=7, num_boxes=2, num_classes=20): self.architecture = [(7, 64, 2, 3), 'maxpool', (3, 192, 1, 1), 'maxpool', (1, 128, 1, 0), (3, 256, 1, 1), (1, 256, 1, 1), (3, 512, 1, 1), 'maxpool', [(1, 256, 1, 0), (3, 1024, 1, 1), 2], (1, 512, 1, 0), (3, 1024, 1, 1), 'maxpool', [(1, 512, 1, 0), (3, 1024, 1, 1), 2], (3, 1023, 1, 1), (3, 1024, 2, 1), (3, 1024, 1, 1), (3, 1024, 1, 1)] self.in_channels = in_channels self.split_size = split_size self.num_boxes = num_boxes self.num_classes = num_classes
available_parts = ["computer", "monitor", "keyboard", "mouse", "mouse mat", "hdmi cable"] current_choice = "-" computer_parts = [] # create an empty list while current_choice != "0": if current_choice in "12345": print("Adding {}".format(current_choice)) if current_choice == "1": computer_parts.append("computer") elif current_choice == "2": computer_parts.append("monitor") elif current_choice == "3": computer_parts.append("keyboard") elif current_choice == "4": computer_parts.append("mouse") elif current_choice == "5": computer_parts.append("mouse mat") elif current_choice == "6": computer_parts.append("hdmi cable") else: # print("Please add options from the lists below:") # print("1: computer") # print("2: monitor") # print("3: keyboard") # print("4: mouse") # print("5: mouse mat") # print("6: hdmi cable") # print("0: to finish") print("Please add options from the list below:") for part in available_parts: print("{0}: {1}".format(available_parts.index(part) + 1 , part)) current_choice = input() print(computer_parts)
available_parts = ['computer', 'monitor', 'keyboard', 'mouse', 'mouse mat', 'hdmi cable'] current_choice = '-' computer_parts = [] while current_choice != '0': if current_choice in '12345': print('Adding {}'.format(current_choice)) if current_choice == '1': computer_parts.append('computer') elif current_choice == '2': computer_parts.append('monitor') elif current_choice == '3': computer_parts.append('keyboard') elif current_choice == '4': computer_parts.append('mouse') elif current_choice == '5': computer_parts.append('mouse mat') elif current_choice == '6': computer_parts.append('hdmi cable') else: print('Please add options from the list below:') for part in available_parts: print('{0}: {1}'.format(available_parts.index(part) + 1, part)) current_choice = input() print(computer_parts)
class PrecisionConfig(object): def __init__(self): self.BASE = 10 self.PRECISION_INTEGRAL = 8 self.PRECISION_FRACTIONAL = 8 self.Q = 293973345475167247070445277780365744413 self.PRECISION = self.PRECISION_INTEGRAL + self.PRECISION_FRACTIONAL assert(self.Q > self.BASE**self.PRECISION) self.INVERSE = 104491423396290281423421247963055991507 # inverse of BASE**FRACTIONAL_PRECISION self.KAPPA = 6 # leave room for five digits overflow before leakage assert((self.INVERSE * self.BASE**self.PRECISION_FRACTIONAL) % self.Q == 1) assert(self.Q > self.BASE**(2 * self.PRECISION + self.KAPPA))
class Precisionconfig(object): def __init__(self): self.BASE = 10 self.PRECISION_INTEGRAL = 8 self.PRECISION_FRACTIONAL = 8 self.Q = 293973345475167247070445277780365744413 self.PRECISION = self.PRECISION_INTEGRAL + self.PRECISION_FRACTIONAL assert self.Q > self.BASE ** self.PRECISION self.INVERSE = 104491423396290281423421247963055991507 self.KAPPA = 6 assert self.INVERSE * self.BASE ** self.PRECISION_FRACTIONAL % self.Q == 1 assert self.Q > self.BASE ** (2 * self.PRECISION + self.KAPPA)
# -*- coding: utf-8 -*- # @Author: chandan # @Date: 2017-07-08 00:49:31 # @Last Modified by: chandan # @Last Modified time: 2017-07-08 10:24:41 DATA_DIR = '/home/chandan/Documents/datasets/uah' MODEL_DIR = '/home/chandan/Dropbox/gridlock/models' SCORE_COLUMNS = 2, 10, 11, 12, 13
data_dir = '/home/chandan/Documents/datasets/uah' model_dir = '/home/chandan/Dropbox/gridlock/models' score_columns = (2, 10, 11, 12, 13)
class Calc: def add(self, number1, number2): return float(number1) + float(number2) def sub(self, number1, number2): return float(number1) - float(number2) def mul(self, number1, number2): return float(number1) * float(number2) def div(self, number1, number2): return float(number1) / float(number2) def mod(self, number1, number2): return float(number1) % float(number2)
class Calc: def add(self, number1, number2): return float(number1) + float(number2) def sub(self, number1, number2): return float(number1) - float(number2) def mul(self, number1, number2): return float(number1) * float(number2) def div(self, number1, number2): return float(number1) / float(number2) def mod(self, number1, number2): return float(number1) % float(number2)
# Problem name: Transform the equation # What it basically does is transform infix to postfix # PASSED def priority(a): if a=='^': return 3 elif a=='*' or a=='/': return 2 elif a=='+' or a=='-': return 1 else: #signifies brackets return 0 t=int(input()) while t: stack=[] string=input() newstring='' for i in range(0,len(string)): if string[i]>='a' and string[i]<='z': newstring+=string[i] elif string[i]=='(': stack.append('(') elif string[i]==')': temp=stack.pop() while temp!='(': newstring+=temp temp=stack.pop() else: #operators if priority(string[i])>priority(stack[-1]): stack.append(string[i]) else: temp=stack.pop() while priority(string[i])<=priority(stack[-1]): newstring+=temp temp=stack.pop() stack.append(string[i]) while len(stack)!=0: newstring+=stack.pop() print(newstring) t=t-1
def priority(a): if a == '^': return 3 elif a == '*' or a == '/': return 2 elif a == '+' or a == '-': return 1 else: return 0 t = int(input()) while t: stack = [] string = input() newstring = '' for i in range(0, len(string)): if string[i] >= 'a' and string[i] <= 'z': newstring += string[i] elif string[i] == '(': stack.append('(') elif string[i] == ')': temp = stack.pop() while temp != '(': newstring += temp temp = stack.pop() elif priority(string[i]) > priority(stack[-1]): stack.append(string[i]) else: temp = stack.pop() while priority(string[i]) <= priority(stack[-1]): newstring += temp temp = stack.pop() stack.append(string[i]) while len(stack) != 0: newstring += stack.pop() print(newstring) t = t - 1
# class used to parse card request in CSV format # throws validation errors if fields have errors class LineParser: # parses a comma seperated string into each field # expected line format is # <merc ref>,<amount>,<card>,<expiry month>,<expiry year>,<first name>,<last name>,<email>,<postal code> def parse( self, line ): tokens = str(line).strip().split(',') data = { "merchantRefNum": tokens[0].strip(), "amount": int(tokens[1]), "settleWithAuth": "true", "card": { "cardNum": tokens[2], "cardExpiry": { "month": tokens[3], "year": tokens[4] } }, "billingDetails": { "zip": tokens[8] } } return data
class Lineparser: def parse(self, line): tokens = str(line).strip().split(',') data = {'merchantRefNum': tokens[0].strip(), 'amount': int(tokens[1]), 'settleWithAuth': 'true', 'card': {'cardNum': tokens[2], 'cardExpiry': {'month': tokens[3], 'year': tokens[4]}}, 'billingDetails': {'zip': tokens[8]}} return data
sea=input("enter season") if sea=='spring': print("its time to plant") elif sea=='summer': print("its time to water") elif sea=='winter': print("its time to stay in") else: print("its time to harvest")
sea = input('enter season') if sea == 'spring': print('its time to plant') elif sea == 'summer': print('its time to water') elif sea == 'winter': print('its time to stay in') else: print('its time to harvest')
def is_orphan_dataset(datasets, pvc): if not datasets: return False for d in datasets: if 'dataset-' + d == pvc: return False return True def is_orphan_group(groups, pvc): if not groups: return False for d in groups: if 'project-' + d.get('name') == pvc: return False return True def is_orphan_user(users, pvc): if not users: return False for d in users: if 'claim-' + d == pvc: return False return True
def is_orphan_dataset(datasets, pvc): if not datasets: return False for d in datasets: if 'dataset-' + d == pvc: return False return True def is_orphan_group(groups, pvc): if not groups: return False for d in groups: if 'project-' + d.get('name') == pvc: return False return True def is_orphan_user(users, pvc): if not users: return False for d in users: if 'claim-' + d == pvc: return False return True
class Solution: def numDecodings(self, s: str) -> int: def is_letter(idx): c1 = s[idx - 1] c2 = s[idx] if c1 == "1": return c2 >= "0" and c2 <= "9" elif c1 == "2": return c2 >= "0" and c2 <= "6" return False if not s: return 0 if len(s) == 1: if s[0] == "0": return 0 return 1 buffer = [] if s[0] == "0": return 0 buffer.append(1) if s[1] == "0": if not is_letter(1): return 0 buffer.append(1) elif is_letter(1): buffer.append(2) else: buffer.append(1) for i in range(2, len(s)): if s[i] == "0": if is_letter(i): buffer.append(buffer[-2]) continue return 0 if is_letter(i): buffer.append(buffer[-1] + buffer[-2]) else: buffer.append(buffer[-1]) return buffer[-1]
class Solution: def num_decodings(self, s: str) -> int: def is_letter(idx): c1 = s[idx - 1] c2 = s[idx] if c1 == '1': return c2 >= '0' and c2 <= '9' elif c1 == '2': return c2 >= '0' and c2 <= '6' return False if not s: return 0 if len(s) == 1: if s[0] == '0': return 0 return 1 buffer = [] if s[0] == '0': return 0 buffer.append(1) if s[1] == '0': if not is_letter(1): return 0 buffer.append(1) elif is_letter(1): buffer.append(2) else: buffer.append(1) for i in range(2, len(s)): if s[i] == '0': if is_letter(i): buffer.append(buffer[-2]) continue return 0 if is_letter(i): buffer.append(buffer[-1] + buffer[-2]) else: buffer.append(buffer[-1]) return buffer[-1]
var1 = "Hello World" for character in var1: if character == ' ': print("There was a space, oh no") break print(character) for character in var1: if character == ' ': print("There was a space, lets skip this iteration") continue print(character) for character in var1: if character == ' ': pass print("Passing this over") print(character)
var1 = 'Hello World' for character in var1: if character == ' ': print('There was a space, oh no') break print(character) for character in var1: if character == ' ': print('There was a space, lets skip this iteration') continue print(character) for character in var1: if character == ' ': pass print('Passing this over') print(character)
cuts = [] def find_max_value(rod_size, prices, total_value): if rod_size < 0: return 0 elif rod_size == 0: return total_value else: max_val = 0 for cut, value in enumerate(prices, 1): max_val = max(max_val, find_max_value(rod_size - cut, prices, total_value + value)) return max_val prices = [1, 5, 8, 9, 10, 17, 17, 20] find_max_value(8, prices, 0)
cuts = [] def find_max_value(rod_size, prices, total_value): if rod_size < 0: return 0 elif rod_size == 0: return total_value else: max_val = 0 for (cut, value) in enumerate(prices, 1): max_val = max(max_val, find_max_value(rod_size - cut, prices, total_value + value)) return max_val prices = [1, 5, 8, 9, 10, 17, 17, 20] find_max_value(8, prices, 0)
""" --- Day 15: Dueling Generators --- Here, you encounter a pair of dueling generators. The generators, called generator A and generator B, are trying to agree on a sequence of numbers. However, one of them is malfunctioning, and so the sequences don't always match. As they do this, a judge waits for each of them to generate its next value, compares the lowest 16 bits of both values, and keeps track of the number of times those parts of the values match. The generators both work on the same principle. To create its next value, a generator will take the previous value it produced, multiply it by a factor (generator A uses 16807; generator B uses 48271), and then keep the remainder of dividing that resulting product by 2147483647. That final remainder is the value it produces next. To calculate each generator's first value, it instead uses a specific starting value as its "previous value" (as listed in your puzzle input). For example, suppose that for starting values, generator A uses 65, while generator B uses 8921. Then, the first five pairs of generated values are: --Gen. A-- --Gen. B-- 1092455 430625591 1181022009 1233683848 245556042 1431495498 1744312007 137874439 1352636452 285222916 In binary, these pairs are (with generator A's value first in each pair): 00000000000100001010101101100111 00011001101010101101001100110111 01000110011001001111011100111001 01001001100010001000010110001000 00001110101000101110001101001010 01010101010100101110001101001010 01100111111110000001011011000111 00001000001101111100110000000111 01010000100111111001100000100100 00010001000000000010100000000100 Here, you can see that the lowest (here, rightmost) 16 bits of the third value match: 1110001101001010. Because of this one match, after processing these five pairs, the judge would have added only 1 to its total. To get a significant sample, the judge would like to consider 40 million pairs. (In the example above, the judge would eventually find a total of 588 pairs that match in their lowest 16 bits.) After 40 million pairs, what is the judge's final count? Your puzzle answer was 573. --- Part Two --- In the interest of trying to align a little better, the generators get more picky about the numbers they actually give to the judge. They still generate values in the same way, but now they only hand a value to the judge when it meets their criteria: Generator A looks for values that are multiples of 4. Generator B looks for values that are multiples of 8. Each generator functions completely independently: they both go through values entirely on their own, only occasionally handing an acceptable value to the judge, and otherwise working through the same sequence of values as before until they find one. The judge still waits for each generator to provide it with a value before comparing them (using the same comparison method as before). It keeps track of the order it receives values; the first values from each generator are compared, then the second values from each generator, then the third values, and so on. Using the example starting values given above, the generators now produce the following first five values each: --Gen. A-- --Gen. B-- 1352636452 1233683848 1992081072 862516352 530830436 1159784568 1980017072 1616057672 740335192 412269392 These values have the following corresponding binary values: 01010000100111111001100000100100 01001001100010001000010110001000 01110110101111001011111010110000 00110011011010001111010010000000 00011111101000111101010001100100 01000101001000001110100001111000 01110110000001001010100110110000 01100000010100110001010101001000 00101100001000001001111001011000 00011000100100101011101101010000 Unfortunately, even though this change makes more bits similar on average, none of these values' lowest 16 bits match. Now, it's not until the 1056th pair that the judge finds the first match: --Gen. A-- --Gen. B-- 1023762912 896885216 00111101000001010110000111100000 00110101011101010110000111100000 This change makes the generators much slower, and the judge is getting impatient; it is now only willing to consider 5 million pairs. (Using the values from the example above, after five million pairs, the judge would eventually find a total of 309 pairs that match in their lowest 16 bits.) After 5 million pairs, but using this new generator logic, what is the judge's final count? Your puzzle answer was 294. Both parts of this puzzle are complete! They provide two gold stars: ** """ generator_a_factor = 16807 generator_b_factor = 48271 def _generator(factor, starting_value, predicate=lambda v: True): previous_value = starting_value while True: previous_value = (previous_value * factor) % 2147483647 if predicate(previous_value): yield previous_value def picky_generator_a(starting_value): return _generator( generator_a_factor, starting_value, predicate=lambda v: v % 4 == 0) def picky_generator_b(starting_value): return _generator( generator_b_factor, starting_value, predicate=lambda v: v % 8 == 0) def generator_a(starting_value): return _generator(generator_a_factor, starting_value) def generator_b(starting_value): return _generator(generator_b_factor, starting_value) def is_lower_16_bits_equal(a, b): return bin(a)[-16:] == bin(b)[-16:] def judge(number_of_pairs, generators): gen_a, gen_b = generators sum = 0 for i in range(number_of_pairs): val_a = next(gen_a) val_b = next(gen_b) if is_lower_16_bits_equal(val_a, val_b): sum += 1 return sum if __name__ == '__main__': gen_a_starting_value = 634 gen_b_starting_value = 301 gen_a = generator_a(gen_a_starting_value) gen_b = generator_b(gen_b_starting_value) picky_gen_a = picky_generator_a(gen_a_starting_value) picky_gen_b = picky_generator_b(gen_b_starting_value) print(f"part1: {judge(40_000_000, (gen_a, gen_b))}") print(f"part2: {judge(5_000_000, (picky_gen_a, picky_gen_b))}")
""" --- Day 15: Dueling Generators --- Here, you encounter a pair of dueling generators. The generators, called generator A and generator B, are trying to agree on a sequence of numbers. However, one of them is malfunctioning, and so the sequences don't always match. As they do this, a judge waits for each of them to generate its next value, compares the lowest 16 bits of both values, and keeps track of the number of times those parts of the values match. The generators both work on the same principle. To create its next value, a generator will take the previous value it produced, multiply it by a factor (generator A uses 16807; generator B uses 48271), and then keep the remainder of dividing that resulting product by 2147483647. That final remainder is the value it produces next. To calculate each generator's first value, it instead uses a specific starting value as its "previous value" (as listed in your puzzle input). For example, suppose that for starting values, generator A uses 65, while generator B uses 8921. Then, the first five pairs of generated values are: --Gen. A-- --Gen. B-- 1092455 430625591 1181022009 1233683848 245556042 1431495498 1744312007 137874439 1352636452 285222916 In binary, these pairs are (with generator A's value first in each pair): 00000000000100001010101101100111 00011001101010101101001100110111 01000110011001001111011100111001 01001001100010001000010110001000 00001110101000101110001101001010 01010101010100101110001101001010 01100111111110000001011011000111 00001000001101111100110000000111 01010000100111111001100000100100 00010001000000000010100000000100 Here, you can see that the lowest (here, rightmost) 16 bits of the third value match: 1110001101001010. Because of this one match, after processing these five pairs, the judge would have added only 1 to its total. To get a significant sample, the judge would like to consider 40 million pairs. (In the example above, the judge would eventually find a total of 588 pairs that match in their lowest 16 bits.) After 40 million pairs, what is the judge's final count? Your puzzle answer was 573. --- Part Two --- In the interest of trying to align a little better, the generators get more picky about the numbers they actually give to the judge. They still generate values in the same way, but now they only hand a value to the judge when it meets their criteria: Generator A looks for values that are multiples of 4. Generator B looks for values that are multiples of 8. Each generator functions completely independently: they both go through values entirely on their own, only occasionally handing an acceptable value to the judge, and otherwise working through the same sequence of values as before until they find one. The judge still waits for each generator to provide it with a value before comparing them (using the same comparison method as before). It keeps track of the order it receives values; the first values from each generator are compared, then the second values from each generator, then the third values, and so on. Using the example starting values given above, the generators now produce the following first five values each: --Gen. A-- --Gen. B-- 1352636452 1233683848 1992081072 862516352 530830436 1159784568 1980017072 1616057672 740335192 412269392 These values have the following corresponding binary values: 01010000100111111001100000100100 01001001100010001000010110001000 01110110101111001011111010110000 00110011011010001111010010000000 00011111101000111101010001100100 01000101001000001110100001111000 01110110000001001010100110110000 01100000010100110001010101001000 00101100001000001001111001011000 00011000100100101011101101010000 Unfortunately, even though this change makes more bits similar on average, none of these values' lowest 16 bits match. Now, it's not until the 1056th pair that the judge finds the first match: --Gen. A-- --Gen. B-- 1023762912 896885216 00111101000001010110000111100000 00110101011101010110000111100000 This change makes the generators much slower, and the judge is getting impatient; it is now only willing to consider 5 million pairs. (Using the values from the example above, after five million pairs, the judge would eventually find a total of 309 pairs that match in their lowest 16 bits.) After 5 million pairs, but using this new generator logic, what is the judge's final count? Your puzzle answer was 294. Both parts of this puzzle are complete! They provide two gold stars: ** """ generator_a_factor = 16807 generator_b_factor = 48271 def _generator(factor, starting_value, predicate=lambda v: True): previous_value = starting_value while True: previous_value = previous_value * factor % 2147483647 if predicate(previous_value): yield previous_value def picky_generator_a(starting_value): return _generator(generator_a_factor, starting_value, predicate=lambda v: v % 4 == 0) def picky_generator_b(starting_value): return _generator(generator_b_factor, starting_value, predicate=lambda v: v % 8 == 0) def generator_a(starting_value): return _generator(generator_a_factor, starting_value) def generator_b(starting_value): return _generator(generator_b_factor, starting_value) def is_lower_16_bits_equal(a, b): return bin(a)[-16:] == bin(b)[-16:] def judge(number_of_pairs, generators): (gen_a, gen_b) = generators sum = 0 for i in range(number_of_pairs): val_a = next(gen_a) val_b = next(gen_b) if is_lower_16_bits_equal(val_a, val_b): sum += 1 return sum if __name__ == '__main__': gen_a_starting_value = 634 gen_b_starting_value = 301 gen_a = generator_a(gen_a_starting_value) gen_b = generator_b(gen_b_starting_value) picky_gen_a = picky_generator_a(gen_a_starting_value) picky_gen_b = picky_generator_b(gen_b_starting_value) print(f'part1: {judge(40000000, (gen_a, gen_b))}') print(f'part2: {judge(5000000, (picky_gen_a, picky_gen_b))}')
class quickFind: _id = [] _count = 0 def __init__(self, N): self._id = list(range(0,N)) self._count = N #Quick-find def find(self, p): return self._id[p] def union(self,p, q): self._pID = self.find(p) self._qID = self.find(q) if (self._pID == self._qID): return None for i in self._id: if (self._id[i] == self._qID): self._id[i] = self._pID self._count = self._count-1 def connected(self, p, q): return self.find(p) == self.find(q) def count(self): return self._count class quickUnion: _id = [] _count = 0 def __init__(self, N): self._id = list(range(0,N)) self._count = N #Quick-union def find(self, p): while (self._id[p] != p): p = self._id[p] return p def union(self, p, q): self._rootP = self.find(p) self._rootQ = self.find(q) if (self._rootP == self._rootQ): return None self._id[self._rootQ] = self._rootP self._count = self._count-1 def connected(self, p, q): return self.find(p) == self.find(q) def count(self): return self._count class weightedQuickUnion: _id = [] _sz = [] _count = 0 def __init__(self, N): self._id = list(range(0,N)) self._count = N self._sz = [1]*N print(self._sz) #Quick-union def find(self, p): while (self._id[p] != p): p = self._id[p] return p def union(self, p, q): self._rootP = self.find(p) self._rootQ = self.find(q) if (self._rootP == self._rootQ): return None if (self._sz[self._rootP]<self._sz[self._rootQ]): self._id[self._rootP] = self._rootQ self._sz[self._rootQ] += self._sz[self._rootP] else: self._id[self._rootQ] = self._rootP self._sz[self._rootP] += self._sz[self._rootQ] self._count = self._count-1 def connected(self, p, q): return self.find(p) == self.find(q) def count(self): return self._count
class Quickfind: _id = [] _count = 0 def __init__(self, N): self._id = list(range(0, N)) self._count = N def find(self, p): return self._id[p] def union(self, p, q): self._pID = self.find(p) self._qID = self.find(q) if self._pID == self._qID: return None for i in self._id: if self._id[i] == self._qID: self._id[i] = self._pID self._count = self._count - 1 def connected(self, p, q): return self.find(p) == self.find(q) def count(self): return self._count class Quickunion: _id = [] _count = 0 def __init__(self, N): self._id = list(range(0, N)) self._count = N def find(self, p): while self._id[p] != p: p = self._id[p] return p def union(self, p, q): self._rootP = self.find(p) self._rootQ = self.find(q) if self._rootP == self._rootQ: return None self._id[self._rootQ] = self._rootP self._count = self._count - 1 def connected(self, p, q): return self.find(p) == self.find(q) def count(self): return self._count class Weightedquickunion: _id = [] _sz = [] _count = 0 def __init__(self, N): self._id = list(range(0, N)) self._count = N self._sz = [1] * N print(self._sz) def find(self, p): while self._id[p] != p: p = self._id[p] return p def union(self, p, q): self._rootP = self.find(p) self._rootQ = self.find(q) if self._rootP == self._rootQ: return None if self._sz[self._rootP] < self._sz[self._rootQ]: self._id[self._rootP] = self._rootQ self._sz[self._rootQ] += self._sz[self._rootP] else: self._id[self._rootQ] = self._rootP self._sz[self._rootP] += self._sz[self._rootQ] self._count = self._count - 1 def connected(self, p, q): return self.find(p) == self.find(q) def count(self): return self._count
def number(g): if not g or len(g) < 4 or any(len(r) != len(g[0]) for r in g): raise ValueError('Ill-formed grid') if g == [" _ ", "| |", "|_|", " "]: return '0' elif g == [" ", " |", " |", " "]: return '1' else: return '?' def grid(n): if n == '0': return [" _ ", "| |", "|_|", " "] elif n == '1': return [" ", " |", " |", " "] raise ValueError('Unknown digit')
def number(g): if not g or len(g) < 4 or any((len(r) != len(g[0]) for r in g)): raise value_error('Ill-formed grid') if g == [' _ ', '| |', '|_|', ' ']: return '0' elif g == [' ', ' |', ' |', ' ']: return '1' else: return '?' def grid(n): if n == '0': return [' _ ', '| |', '|_|', ' '] elif n == '1': return [' ', ' |', ' |', ' '] raise value_error('Unknown digit')
# py-data-structures <http://github.com/gwtw/py-data-structures> # Copyright 2016 Daniel Imms <http://www.growingwiththeweb.com> # Released under the MIT license <http://github.com/gwtw/py-data-structures/blob/master/LICENSE> def default_compare(a, b): if a < b: return -1 elif a > b: return 1 return 0
def default_compare(a, b): if a < b: return -1 elif a > b: return 1 return 0
# -*- coding: utf-8 -*- """ Created on Tue Apr 28 16:56:36 2020 @author: kshit """ ''' Pythagorean Triplets can be generated by the formula. Given any positive integers m and n where m > n > 0, the integers a = m^2 - n^2 b = 2*m*n c = m^2 + n^2 ''' def dicksinson_pythagorus(): for m in range(1,32): for n in range(1,m): a = m**2 - n**2 b = 2 * m * n c = m**2 + n**2 if a + b + c == 1000: return a*b*c if __name__ == '__main__': print(dicksinson_pythagorus())
""" Created on Tue Apr 28 16:56:36 2020 @author: kshit """ '\nPythagorean Triplets can be generated by the formula.\n\nGiven any positive integers m and n where m > n > 0, the integers\na = m^2 - n^2\nb = 2*m*n\nc = m^2 + n^2\n' def dicksinson_pythagorus(): for m in range(1, 32): for n in range(1, m): a = m ** 2 - n ** 2 b = 2 * m * n c = m ** 2 + n ** 2 if a + b + c == 1000: return a * b * c if __name__ == '__main__': print(dicksinson_pythagorus())
#DICTONARIES FOR VARIOUS POSSIBILITIES ones = { 0 : 'zero ', 1 : 'one ', 2 : 'two ', 3 : 'three ', 4 : 'four ', 5 : 'five ', 6 : 'six ', 7 : 'seven ', 8 : 'eight ', 9 : 'nine ' } prefix = { 2 : 'twen', # for -ty and -teen 3 : 'thir', 4 : 'four', 5 : 'fif', 6 : 'six', 7 : 'seven', 8 : 'eigh', 9 : 'nin' } suffix = { 1 : 'thousand ', 2 : 'lakh ', 3 : 'crore ' } #FOR SINGLE NUMBERS def once(num): return ones[num] #FOR PAIR OF NUMBERS def twice(n_10,n_1): if n_10 == 0 and n_1 == 0: return "" elif n_10 == 0: return once(n_1) elif n_10 == 1 and n_1 == 0: return "ten " elif n_10 == 1 and n_1 == 1: return "eleven " elif n_10 == 1 and n_1 == 2: return "twelve " elif n_10 == 1: return prefix[n_1]+"teen " elif n_10 == 2 and n_1 == 0: return prefix[n_10]+"ty " elif n_1 == 0: return prefix[n_10]+"ty " else: return prefix[n_10] + "ty " + once(n_1) #IN ORDER TO MAKE A UNIFORM SYSTEM OF CONVERSION def convert_to_odds(num): numbers = ["0"] res = "" for digits in num: numbers.append(digits) for i in range(len(num)+1): res = res + numbers[i] return res #MAIN PROGRAM while True: try: number = input('Enter number : ') if number == 'exit': break number = str(int(number)) answer = "" l = len(number) if(l > 3): if(l%2 == 0): number = convert_to_odds(number) l = len(number) n = (len(number)-3)//2 for f in range(0,(len(number)-3)//2): if int(number[2*f]) != 0 or int(number[2*f+1]) !=0: answer = answer + twice(int(number[2*f]),int(number[2*f+1])) + suffix[n] n-=1 if l-3 < 0 or number[l-3] == 0: answer = answer + "" elif l != 1: if int(number[l-3]) != 0: answer = answer + once(int(number[l-3])) + "hundered " if(l == 1): number = convert_to_odds(number) l = len(number) answer= answer + twice(int(number[l-2]),int(number[l-1])) print (answer) except ValueError: print("Type only Natural numbers without spaces") except KeyError: print("Excced limits!") except IndexError: print("Type Something!")
ones = {0: 'zero ', 1: 'one ', 2: 'two ', 3: 'three ', 4: 'four ', 5: 'five ', 6: 'six ', 7: 'seven ', 8: 'eight ', 9: 'nine '} prefix = {2: 'twen', 3: 'thir', 4: 'four', 5: 'fif', 6: 'six', 7: 'seven', 8: 'eigh', 9: 'nin'} suffix = {1: 'thousand ', 2: 'lakh ', 3: 'crore '} def once(num): return ones[num] def twice(n_10, n_1): if n_10 == 0 and n_1 == 0: return '' elif n_10 == 0: return once(n_1) elif n_10 == 1 and n_1 == 0: return 'ten ' elif n_10 == 1 and n_1 == 1: return 'eleven ' elif n_10 == 1 and n_1 == 2: return 'twelve ' elif n_10 == 1: return prefix[n_1] + 'teen ' elif n_10 == 2 and n_1 == 0: return prefix[n_10] + 'ty ' elif n_1 == 0: return prefix[n_10] + 'ty ' else: return prefix[n_10] + 'ty ' + once(n_1) def convert_to_odds(num): numbers = ['0'] res = '' for digits in num: numbers.append(digits) for i in range(len(num) + 1): res = res + numbers[i] return res while True: try: number = input('Enter number : ') if number == 'exit': break number = str(int(number)) answer = '' l = len(number) if l > 3: if l % 2 == 0: number = convert_to_odds(number) l = len(number) n = (len(number) - 3) // 2 for f in range(0, (len(number) - 3) // 2): if int(number[2 * f]) != 0 or int(number[2 * f + 1]) != 0: answer = answer + twice(int(number[2 * f]), int(number[2 * f + 1])) + suffix[n] n -= 1 if l - 3 < 0 or number[l - 3] == 0: answer = answer + '' elif l != 1: if int(number[l - 3]) != 0: answer = answer + once(int(number[l - 3])) + 'hundered ' if l == 1: number = convert_to_odds(number) l = len(number) answer = answer + twice(int(number[l - 2]), int(number[l - 1])) print(answer) except ValueError: print('Type only Natural numbers without spaces') except KeyError: print('Excced limits!') except IndexError: print('Type Something!')
class Solution: def twoSum(self, numbers, target): """ :type numbers: List[int] :type target: int :rtype: List[int] """ if not numbers or len(numbers) == 0: return [] i, j = 0, len(numbers) - 1 while i < j: if numbers[i] + numbers[j] == target: return [i+1, j+1] elif numbers[i] + numbers[j] > target: j -= 1 else: i += 1 return [] solution = Solution() print(solution.twoSum([2,7,11,15], 9))
class Solution: def two_sum(self, numbers, target): """ :type numbers: List[int] :type target: int :rtype: List[int] """ if not numbers or len(numbers) == 0: return [] (i, j) = (0, len(numbers) - 1) while i < j: if numbers[i] + numbers[j] == target: return [i + 1, j + 1] elif numbers[i] + numbers[j] > target: j -= 1 else: i += 1 return [] solution = solution() print(solution.twoSum([2, 7, 11, 15], 9))
m = [] with open('input', 'r') as f: for line in f: m.append(list(line.rstrip())) rows = len(m) cols = len(m[0]) def go(r, d): num_trees = 0 i, j = 0, 0 while i < rows: if m[i][j % cols] == '#': num_trees += 1 i += d j += r return num_trees total = 1 for r, d in [(1,1),(3,1),(5,1),(7,1),(1,2)]: result = go(r, d) total = total * result print(total) """ Right 1, down 1. Right 3, down 1. (This is the slope you already checked.) Right 5, down 1. Right 7, down 1. Right 1, down 2. """
m = [] with open('input', 'r') as f: for line in f: m.append(list(line.rstrip())) rows = len(m) cols = len(m[0]) def go(r, d): num_trees = 0 (i, j) = (0, 0) while i < rows: if m[i][j % cols] == '#': num_trees += 1 i += d j += r return num_trees total = 1 for (r, d) in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]: result = go(r, d) total = total * result print(total) '\n\n Right 1, down 1.\n Right 3, down 1. (This is the slope you already checked.)\n Right 5, down 1.\n Right 7, down 1.\n Right 1, down 2.\n'
def assert_is_in_range(x, constraint): minimum = constraint['min'] maximum = constraint['max'] assert minimum <= x , f'min:{minimum}, got:{x}' assert x <= maximum, f'max:{maximum}, got:{x}'
def assert_is_in_range(x, constraint): minimum = constraint['min'] maximum = constraint['max'] assert minimum <= x, f'min:{minimum}, got:{x}' assert x <= maximum, f'max:{maximum}, got:{x}'
# -*- coding: utf-8 -*- # noinspection PyClassHasNoInit class PlayerStates: AVAILABLE = 0x0 AFK = 0x1 PLAYING = 0x2 QUIT = 0xff # my own, not GGPO server's @staticmethod def codeToString(code): if code == 0: return 'AVAILABLE' elif code == 1: return 'AFK' elif code == 2: return 'PLAYING' elif code == 0xff: return 'QUIT' else: return 'Unknown (' + hex(code) + ')'
class Playerstates: available = 0 afk = 1 playing = 2 quit = 255 @staticmethod def code_to_string(code): if code == 0: return 'AVAILABLE' elif code == 1: return 'AFK' elif code == 2: return 'PLAYING' elif code == 255: return 'QUIT' else: return 'Unknown (' + hex(code) + ')'
""" Mostly constants related to consensus, or p2p connection that are not suitable to be in config """ # blocks bearing a timestamp that is slightly larger than current epoch will be broadcasted ALLOWED_FUTURE_BLOCKS_TIME_BROADCAST = 15 # blocks bearing a timestamp that is slightly larger than current epoch will be considered valid ALLOWED_FUTURE_BLOCKS_TIME_VALIDATION = 15 # Current minor block size is up to 6M gas / 4 (zero-byte gas) = 1.5M # Per-command size is now 128M so 128M / 1.5M = 85 MINOR_BLOCK_BATCH_SIZE = 50 MINOR_BLOCK_HEADER_LIST_LIMIT = 100 # max number of transactions from NEW_TRANSACTION_LIST command NEW_TRANSACTION_LIST_LIMIT = 1000 ROOT_BLOCK_BATCH_SIZE = 100 ROOT_BLOCK_HEADER_LIST_LIMIT = 500 SYNC_TIMEOUT = 30 BLOCK_UNCOMMITTED = 0 # The other slaves and the master may not have the block info BLOCK_COMMITTING = 1 # The block info is propagating to other slaves and the master BLOCK_COMMITTED = 2 # The other slaves and the master have received the block info
""" Mostly constants related to consensus, or p2p connection that are not suitable to be in config """ allowed_future_blocks_time_broadcast = 15 allowed_future_blocks_time_validation = 15 minor_block_batch_size = 50 minor_block_header_list_limit = 100 new_transaction_list_limit = 1000 root_block_batch_size = 100 root_block_header_list_limit = 500 sync_timeout = 30 block_uncommitted = 0 block_committing = 1 block_committed = 2
# Created by MechAviv # Intense Damage Skin | (2436645) if sm.addDamageSkin(2436645): sm.chat("'Intense Damage Skin' Damage Skin has been added to your account's damage skin collection.") sm.consumeItem()
if sm.addDamageSkin(2436645): sm.chat("'Intense Damage Skin' Damage Skin has been added to your account's damage skin collection.") sm.consumeItem()
# Ray-autopilot def autopilot_on_ray(): """ Speed up the dataset collection by running simulation and autopilot in Ray. 1. Port oatomobile on Carla latest (0.9.12+) version and Python 3.7. 2. Solve the problem of Ray supporting ABC. 3. Use Ray actor and task to make autopilot simulation parrallel and distributive. """ pass
def autopilot_on_ray(): """ Speed up the dataset collection by running simulation and autopilot in Ray. 1. Port oatomobile on Carla latest (0.9.12+) version and Python 3.7. 2. Solve the problem of Ray supporting ABC. 3. Use Ray actor and task to make autopilot simulation parrallel and distributive. """ pass
# Asking name name = input("What's your name? ") # Say Hello print(f"Hello, {name}!") print("I'm Moqi. Nice to meet you.")
name = input("What's your name? ") print(f'Hello, {name}!') print("I'm Moqi. Nice to meet you.")
def test_remote_channel(): pass
def test_remote_channel(): pass
class BinaryTreeNode: def __init__(self, value=None, left=None, right=None): self.value = value self.left = left self.right = right def __lt__(self, other): if other == None: return self return self.value < other.value def __str__(self): return str(self.value) def __repr__(self): return self.__str__()
class Binarytreenode: def __init__(self, value=None, left=None, right=None): self.value = value self.left = left self.right = right def __lt__(self, other): if other == None: return self return self.value < other.value def __str__(self): return str(self.value) def __repr__(self): return self.__str__()
class Solution: def compress(self, chars: List[str]) -> int: truncate=0 p=0 q=0 count=0 while p< len(chars): while q < len(chars): if chars[p] == chars[q]: count=count+1 else: break q+=1 if count>1: chars[truncate]=chars[p] truncate+=1 i=0; while i < len(str(count)): chars[truncate]=(str(count))[i] truncate+=1 i+=1 else: chars[truncate]=chars[p] truncate+=1 p=q count=0 return truncate;
class Solution: def compress(self, chars: List[str]) -> int: truncate = 0 p = 0 q = 0 count = 0 while p < len(chars): while q < len(chars): if chars[p] == chars[q]: count = count + 1 else: break q += 1 if count > 1: chars[truncate] = chars[p] truncate += 1 i = 0 while i < len(str(count)): chars[truncate] = str(count)[i] truncate += 1 i += 1 else: chars[truncate] = chars[p] truncate += 1 p = q count = 0 return truncate
projects = dict() employees = dict() tasks = dict()
projects = dict() employees = dict() tasks = dict()
''' Problem Statement : Turbo Sort Link : https://www.codechef.com/problems/TSORT score : accepted ''' numbers = [] for _ in range(int(input())): numbers.append(int(input())) numbers.sort() print(*numbers,sep='\n')
""" Problem Statement : Turbo Sort Link : https://www.codechef.com/problems/TSORT score : accepted """ numbers = [] for _ in range(int(input())): numbers.append(int(input())) numbers.sort() print(*numbers, sep='\n')
{ "targets": [ { "target_name": "ogg", "type": "static_library", "include_dirs": [ "1.3.2/libogg-1.3.2/include" ], "sources": [ "1.3.2/libogg-1.3.2/src/*.c" ], "direct_dependent_settings": { "include_dirs": [ "1.3.2/libogg-1.3.2/include" ] } } ] }
{'targets': [{'target_name': 'ogg', 'type': 'static_library', 'include_dirs': ['1.3.2/libogg-1.3.2/include'], 'sources': ['1.3.2/libogg-1.3.2/src/*.c'], 'direct_dependent_settings': {'include_dirs': ['1.3.2/libogg-1.3.2/include']}}]}
# Reference: # https://www.tellusxdp.com/ja/api-reference/ class APIException(Exception): """General unexpected response.""" pass class BadRequest(APIException): """Invalid request parameter, HTTP 400.""" pass class Unauthorized(APIException): """Authentication failed, HTTP 401.""" pass class Forbidden(APIException): """You do not have access to the resource, HTTP 403.""" pass class NotFound(APIException): """The resource does not exist, HTTP 404.""" pass class MethodNotAllowed(APIException): """Requested methods are not supported, HTTP 405.""" pass class RequestTimeout(APIException): """The request timed out, HTTP 408.""" pass class LengthRequired(APIException): """Length is not included in the request header, HTTP 411.""" pass class RequestEntityTooLarge(APIException): """Request entity too large, HTTP 413.""" pass class InternalServerError(APIException): """An internal error has occurred, HTTP 500.""" pass class ServiceUnavailable(APIException): """Unable to use the service for some reason, HTTP 503.""" pass
class Apiexception(Exception): """General unexpected response.""" pass class Badrequest(APIException): """Invalid request parameter, HTTP 400.""" pass class Unauthorized(APIException): """Authentication failed, HTTP 401.""" pass class Forbidden(APIException): """You do not have access to the resource, HTTP 403.""" pass class Notfound(APIException): """The resource does not exist, HTTP 404.""" pass class Methodnotallowed(APIException): """Requested methods are not supported, HTTP 405.""" pass class Requesttimeout(APIException): """The request timed out, HTTP 408.""" pass class Lengthrequired(APIException): """Length is not included in the request header, HTTP 411.""" pass class Requestentitytoolarge(APIException): """Request entity too large, HTTP 413.""" pass class Internalservererror(APIException): """An internal error has occurred, HTTP 500.""" pass class Serviceunavailable(APIException): """Unable to use the service for some reason, HTTP 503.""" pass
class Settings(object): def __init__(self): self.show_status_messages = True self.show_view_status = True self.auto_show_diagnostics_panel = True self.show_diagnostics_phantoms = False self.show_diagnostics_count_in_view_status = False self.show_diagnostics_in_view_status = True self.show_diagnostics_severity_level = 3 self.only_show_lsp_completions = False self.diagnostics_highlight_style = "underline" self.highlight_active_signature_parameter = True self.document_highlight_style = "stippled" self.document_highlight_scopes = { "unknown": "text", "text": "text", "read": "markup.inserted", "write": "markup.changed" } self.diagnostics_gutter_marker = "dot" self.complete_all_chars = False self.completion_hint_type = "auto" self.resolve_completion_for_snippets = False self.log_debug = True self.log_server = True self.log_stderr = False self.log_payloads = False
class Settings(object): def __init__(self): self.show_status_messages = True self.show_view_status = True self.auto_show_diagnostics_panel = True self.show_diagnostics_phantoms = False self.show_diagnostics_count_in_view_status = False self.show_diagnostics_in_view_status = True self.show_diagnostics_severity_level = 3 self.only_show_lsp_completions = False self.diagnostics_highlight_style = 'underline' self.highlight_active_signature_parameter = True self.document_highlight_style = 'stippled' self.document_highlight_scopes = {'unknown': 'text', 'text': 'text', 'read': 'markup.inserted', 'write': 'markup.changed'} self.diagnostics_gutter_marker = 'dot' self.complete_all_chars = False self.completion_hint_type = 'auto' self.resolve_completion_for_snippets = False self.log_debug = True self.log_server = True self.log_stderr = False self.log_payloads = False
class LinkedList: def __init__(self): self.head = None self.tail = None self.length = 0 def find(self, item): if self.head is None: return None return self.head.find(item) def find_by(self, fn): if self.head is None: return None return self.head.find_by(fn) def append(self, item): if self.head is None: l = LinkedListNode(item) self.head = l self.tail = l else: self.tail.append(item) self.tail = self.tail.next self.length += 1 def remove(self, item): node = self.find(item) if node is None: return None if node == self.head: if self.head == self.tail: self.tail = self.head.next self.head = self.head.next else: predecessor = self.head.find_predecessor(node) if node == self.tail: self.tail = predecessor predecessor.next = node.next self.length -= 1 return item class LinkedListNode: def __init__(self, item): self.value = item self.next = None def find(self, item): if self == None: return None if self.value == item: return self if self.next is None: return None return self.next.find(item) def find_by(self, fn): if self == None: return None if fn(self.value): return self if self.next is None: return None return self.next.find(fn) def find_predecessor(self, node): if self.next is None: return None if self.next == node: return self return self.next.find_predecessor(node) def append(self, item): self.next = LinkedListNode(item)
class Linkedlist: def __init__(self): self.head = None self.tail = None self.length = 0 def find(self, item): if self.head is None: return None return self.head.find(item) def find_by(self, fn): if self.head is None: return None return self.head.find_by(fn) def append(self, item): if self.head is None: l = linked_list_node(item) self.head = l self.tail = l else: self.tail.append(item) self.tail = self.tail.next self.length += 1 def remove(self, item): node = self.find(item) if node is None: return None if node == self.head: if self.head == self.tail: self.tail = self.head.next self.head = self.head.next else: predecessor = self.head.find_predecessor(node) if node == self.tail: self.tail = predecessor predecessor.next = node.next self.length -= 1 return item class Linkedlistnode: def __init__(self, item): self.value = item self.next = None def find(self, item): if self == None: return None if self.value == item: return self if self.next is None: return None return self.next.find(item) def find_by(self, fn): if self == None: return None if fn(self.value): return self if self.next is None: return None return self.next.find(fn) def find_predecessor(self, node): if self.next is None: return None if self.next == node: return self return self.next.find_predecessor(node) def append(self, item): self.next = linked_list_node(item)
arr = [11, 22, 33, 44, 55] print("Array is :",arr) res = arr[::-1] print("New array:",res) arr.reverse() print("After reversing Array using method reverse():",arr)
arr = [11, 22, 33, 44, 55] print('Array is :', arr) res = arr[::-1] print('New array:', res) arr.reverse() print('After reversing Array using method reverse():', arr)
CLEAN_BOARD = [ ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], # ------------------------------------------------- ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], # ------------------------------------------------- ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'] ] EASY_BOARD = [ ['0', '0', '0'] + ['5', '0', '3'] + ['4', '6', '1'], ['0', '7', '3'] + ['0', '0', '0'] + ['0', '0', '0'], ['1', '0', '0'] + ['2', '8', '0'] + ['0', '0', '0'], # ------------------------------------------------- ['3', '0', '0'] + ['4', '0', '0'] + ['0', '1', '0'], ['0', '9', '4'] + ['3', '0', '1'] + ['5', '7', '0'], ['0', '1', '0'] + ['0', '0', '7'] + ['0', '0', '8'], # ------------------------------------------------- ['0', '0', '0'] + ['0', '3', '2'] + ['0', '0', '5'], ['0', '0', '0'] + ['0', '0', '0'] + ['7', '2', '0'], ['5', '2', '6'] + ['7', '0', '9'] + ['0', '0', '0'] ] MEDIUM_BOARD = [ ['0', '0', '0'] + ['7', '0', '0'] + ['2', '9', '4'], ['0', '0', '0'] + ['8', '2', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '4'] + ['3', '6', '0'], # ------------------------------------------------- ['0', '0', '6'] + ['0', '0', '0'] + ['0', '7', '9'], ['4', '0', '0'] + ['0', '8', '0'] + ['0', '0', '6'], ['2', '1', '0'] + ['0', '0', '0'] + ['4', '0', '0'], # ------------------------------------------------- ['0', '6', '1'] + ['4', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '6', '2'] + ['0', '0', '0'], ['9', '4', '2'] + ['0', '0', '5'] + ['0', '0', '0'] ] HARD_BOARD = [ ['0', '4', '0'] + ['0', '0', '2'] + ['0', '0', '0'], ['0', '0', '8'] + ['4', '7', '0'] + ['0', '0', '5'], ['0', '0', '0'] + ['0', '0', '6'] + ['0', '7', '0'], # ------------------------------------------------- ['0', '6', '0'] + ['9', '1', '0'] + ['0', '5', '0'], ['3', '0', '0'] + ['0', '0', '0'] + ['0', '0', '8'], ['0', '0', '0'] + ['0', '0', '7'] + ['0', '0', '0'], # ------------------------------------------------- ['0', '9', '0'] + ['5', '4', '0'] + ['0', '1', '0'], ['0', '0', '6'] + ['0', '0', '0'] + ['9', '0', '0'], ['0', '0', '0'] + ['2', '0', '0'] + ['0', '0', '0'] ]
clean_board = [['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '0'] + ['0', '0', '0']] easy_board = [['0', '0', '0'] + ['5', '0', '3'] + ['4', '6', '1'], ['0', '7', '3'] + ['0', '0', '0'] + ['0', '0', '0'], ['1', '0', '0'] + ['2', '8', '0'] + ['0', '0', '0'], ['3', '0', '0'] + ['4', '0', '0'] + ['0', '1', '0'], ['0', '9', '4'] + ['3', '0', '1'] + ['5', '7', '0'], ['0', '1', '0'] + ['0', '0', '7'] + ['0', '0', '8'], ['0', '0', '0'] + ['0', '3', '2'] + ['0', '0', '5'], ['0', '0', '0'] + ['0', '0', '0'] + ['7', '2', '0'], ['5', '2', '6'] + ['7', '0', '9'] + ['0', '0', '0']] medium_board = [['0', '0', '0'] + ['7', '0', '0'] + ['2', '9', '4'], ['0', '0', '0'] + ['8', '2', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '0', '4'] + ['3', '6', '0'], ['0', '0', '6'] + ['0', '0', '0'] + ['0', '7', '9'], ['4', '0', '0'] + ['0', '8', '0'] + ['0', '0', '6'], ['2', '1', '0'] + ['0', '0', '0'] + ['4', '0', '0'], ['0', '6', '1'] + ['4', '0', '0'] + ['0', '0', '0'], ['0', '0', '0'] + ['0', '6', '2'] + ['0', '0', '0'], ['9', '4', '2'] + ['0', '0', '5'] + ['0', '0', '0']] hard_board = [['0', '4', '0'] + ['0', '0', '2'] + ['0', '0', '0'], ['0', '0', '8'] + ['4', '7', '0'] + ['0', '0', '5'], ['0', '0', '0'] + ['0', '0', '6'] + ['0', '7', '0'], ['0', '6', '0'] + ['9', '1', '0'] + ['0', '5', '0'], ['3', '0', '0'] + ['0', '0', '0'] + ['0', '0', '8'], ['0', '0', '0'] + ['0', '0', '7'] + ['0', '0', '0'], ['0', '9', '0'] + ['5', '4', '0'] + ['0', '1', '0'], ['0', '0', '6'] + ['0', '0', '0'] + ['9', '0', '0'], ['0', '0', '0'] + ['2', '0', '0'] + ['0', '0', '0']]
# Functions for sorting exercise def cmp(a, b): return (a > b) - (a < b) def mySort(numbers): numbers = bubbleSort(numbers) return numbers def bubbleSort(nums): """ :type nums: List[int] :rtype: List[int] """ ################################# # Selection Sort n = len(nums) if n<=1: return nums for i in range(n): smallest_val = nums[i] smallest_idx = i # find the smallest element for j in range(i,n): temp = nums[j] if temp <= smallest_val: smallest_idx = j smallest_val = temp # print(smallest_val) # swap it with nums[i] temp = nums[i] nums[i] = nums[smallest_idx] nums[smallest_idx] = temp ################################# return nums
def cmp(a, b): return (a > b) - (a < b) def my_sort(numbers): numbers = bubble_sort(numbers) return numbers def bubble_sort(nums): """ :type nums: List[int] :rtype: List[int] """ n = len(nums) if n <= 1: return nums for i in range(n): smallest_val = nums[i] smallest_idx = i for j in range(i, n): temp = nums[j] if temp <= smallest_val: smallest_idx = j smallest_val = temp temp = nums[i] nums[i] = nums[smallest_idx] nums[smallest_idx] = temp return nums
def check_all(array_all, expected): if len(array_all) < len(expected): print("Error: Output is too short to match expected results") return False item_i = 0 while item_i < len(array_all): if check_line(array_all[item_i], expected[0]): break item_i += 1 print("Detected offset: %d / %d" % (item_i, len(array_all))) if len(array_all)-item_i < len(expected): print("Error: Output offset to big...") return False for index in range(len(expected)): if not check_line(array_all[item_i+index], expected[index]): print("Error at index %d: PC=%s" % (index, array_all[item_i+index]["PC"])) return False return True def check_line(result_dic, pattern_dic, verbose=False): for key in pattern_dic: if pattern_dic[key] == "xx": continue elif pattern_dic[key][0] == 'z' or result_dic[key][0] == 'z': if pattern_dic[key] != result_dic[key]: return False else: continue elif key in result_dic and int(result_dic[key], 16) != int(pattern_dic[key], 16): if verbose: print("%s Error: %s does not match %s" % (key, pattern_dic[key],result_dic[key])) return False return True # Use this function to check the design # Arguments: # - dic_step: contains dictionnary with values of # r0...r7 FE FG FL C and PC registers # Return: # - True if et matches the expected values, # Else False non_reset_pc = "ffff" non_reset_pc_int = 0xffff def load_csv(filename): """ Load a csv file containning expected result it contains: r0, ..., r7, FE, FG, FL, C values are either 1 byte hexadecimals or xx for don't care vales """ output_list = [] with open(filename) as resfile: first_line = resfile.readline().strip() keys = first_line.split(",") line_count = 0 for line in resfile: line_count += 1 splitted = line.strip().split(",") if splitted==['']: continue elif len(splitted) != len(keys): print("Error in CSV file %s: %s" % (filename, splitted)) print("splitted is %d, keys are %d" % (len(splitted),len(keys))) continue line_dic = {} for i in range(len(splitted)): line_dic[keys[i]] = splitted[i]; output_list.append(line_dic) return output_list[:]
def check_all(array_all, expected): if len(array_all) < len(expected): print('Error: Output is too short to match expected results') return False item_i = 0 while item_i < len(array_all): if check_line(array_all[item_i], expected[0]): break item_i += 1 print('Detected offset: %d / %d' % (item_i, len(array_all))) if len(array_all) - item_i < len(expected): print('Error: Output offset to big...') return False for index in range(len(expected)): if not check_line(array_all[item_i + index], expected[index]): print('Error at index %d: PC=%s' % (index, array_all[item_i + index]['PC'])) return False return True def check_line(result_dic, pattern_dic, verbose=False): for key in pattern_dic: if pattern_dic[key] == 'xx': continue elif pattern_dic[key][0] == 'z' or result_dic[key][0] == 'z': if pattern_dic[key] != result_dic[key]: return False else: continue elif key in result_dic and int(result_dic[key], 16) != int(pattern_dic[key], 16): if verbose: print('%s Error: %s does not match %s' % (key, pattern_dic[key], result_dic[key])) return False return True non_reset_pc = 'ffff' non_reset_pc_int = 65535 def load_csv(filename): """ Load a csv file containning expected result it contains: r0, ..., r7, FE, FG, FL, C values are either 1 byte hexadecimals or xx for don't care vales """ output_list = [] with open(filename) as resfile: first_line = resfile.readline().strip() keys = first_line.split(',') line_count = 0 for line in resfile: line_count += 1 splitted = line.strip().split(',') if splitted == ['']: continue elif len(splitted) != len(keys): print('Error in CSV file %s: %s' % (filename, splitted)) print('splitted is %d, keys are %d' % (len(splitted), len(keys))) continue line_dic = {} for i in range(len(splitted)): line_dic[keys[i]] = splitted[i] output_list.append(line_dic) return output_list[:]