content
stringlengths
7
1.05M
# Ten fake company profiles used as stand-in API output.  Entries 1-9 carry a
# numeric suffix on every field value; the tenth entry has the bare prefixes.
MOCK_DATA = [
    {
        "symbol": f"sy{n}",
        "companyName": f"cn{n}",
        "exchange": f"ex{n}",
        "industry": f"in{n}",
        "website": f"ws{n}",
        "description": f"dc{n}",
        "CEO": f"ceo{n}",
        "issueType": f"is{n}",
        "sector": f"sc{n}",
    }
    for n in ["1", "2", "3", "4", "5", "6", "7", "8", "9", ""]
]
# Only used for PyTorch open source BUCK build
# @lint-ignore-every BUCKRESTRICTEDSYNTAX
def is_arvr_mode():
    # Guard: this file must only be evaluated by the OSS build, where the
    # "pt.is_oss" buckconfig flag is set to a non-"0" value.
    is_oss = read_config("pt", "is_oss", "0")
    if is_oss == "0":
        fail("This file is for open source pytorch build. Do not use it in fbsource!")
    return False
# Demonstration of CPython string interning and object identity.
# NOTE(review): `is` returning True for equal literals is a CPython
# implementation detail (constant folding/interning), not guaranteed behavior.

# word = "crazy_python"
# print(id(word))
# print(id("crazy" + "_" + "python"))
"""
output:
41899952
41899952
"""

first = 'crazy'
second = 'crazy'
third = 'crazy!!'
fourth = 'crazy!!'
fifth, sixth = "crazy!", "crazy!"

print(first is second)
print(third is fourth)
print(fifth is sixth)
"""
output:
True
True
True
"""

# Rebinding a name creates a brand-new list object, as the differing ids show;
# the generator expression still references the original list.
values = [1, 2, 3, 4]
print(id(values))
gen = (x for x in values)
values = [1, 2, 3, 4, 5]
print(id(values))
"""
output:
41768200
41767048
"""
class ValueParsingOptions(object, IDisposable):
    """Options for parsing strings into numbers with units.

    Auto-generated stub for a .NET (Revit API) class: every method body is an
    empty placeholder; the real implementation lives in the managed assembly.

    ValueParsingOptions()
    """

    def Dispose(self):
        """Dispose(self: ValueParsingOptions)"""
        pass

    def GetFormatOptions(self):
        """GetFormatOptions(self: ValueParsingOptions) -> FormatOptions

        Gets the FormatOptions to optionally override the default settings in
        the Units class.

        Returns: A copy of the FormatOptions.
        """
        pass

    def ReleaseUnmanagedResources(self, *args):
        """ReleaseUnmanagedResources(self: ValueParsingOptions, disposing: bool)"""
        pass

    def SetFormatOptions(self, formatOptions):
        """SetFormatOptions(self: ValueParsingOptions, formatOptions: FormatOptions)

        Sets the FormatOptions to optionally override the default settings in
        the Units class.

        formatOptions: The FormatOptions.
        """
        pass

    def __enter__(self, *args):
        """__enter__(self: IDisposable) -> object"""
        pass

    def __exit__(self, *args):
        """__exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object)"""
        pass

    def __init__(self, *args):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        pass

    def __repr__(self, *args):
        """__repr__(self: object) -> str"""
        pass

    # Read/write: the allowable range of values to be parsed.
    # Get: AllowedValues(self: ValueParsingOptions) -> AllowedValues
    AllowedValues = property(lambda self: object(),
                             lambda self, v: None,
                             lambda self: None)

    # Read-only: whether the .NET object represents a valid Revit entity.
    # Get: IsValidObject(self: ValueParsingOptions) -> bool
    IsValidObject = property(lambda self: object(),
                             lambda self, v: None,
                             lambda self: None)
"""Demonstrates the str.lstrip function: removing leading characters."""

# Example:
random_string = ' this is good '

# With no argument, only leading whitespace is removed.
print(random_string.lstrip())

# The argument 'sti' contains no space, so the leading ' ' never matches
# and no characters are removed.
print(random_string.lstrip('sti'))

# Including the space in the strip set removes every leading character drawn
# from {'s', ' ', 't', 'i'} until the first non-member ('g') is reached.
print(random_string.lstrip('s ti'))
# Diffusion 2D: settings script for a 2D diffusion simulation
# (Heun time stepping over a finite-element discretisation).

nx = 20      # number of elements in x
ny = nx      # number of elements in y
mx = nx + 1  # number of nodes in x
my = ny + 1  # number of nodes in y

# Initial values: nodes inside a rectangular patch (rows at 20-30 % of the
# grid height, columns at 50-80 % of the width) start at 1.0.
iv = {
    j * mx + i: 1.0
    for j in range(int(0.2 * my), int(0.3 * my))
    for i in range(int(0.5 * mx), int(0.8 * mx))
}
print("iv: ", iv)

config = {
    # output file of a diagram that shows data connections between solvers
    "solverStructureDiagramFile": "solver_structure.txt",
    # "csv" or "json", format of the lines in the log file; csv gives smaller files
    "logFormat": "csv",
    # scenario name to find the run in the log file
    "scenarioName": "diffusion",
    # log file about mappings between meshes; empty because there are no mappings here
    "mappingsBetweenMeshesLogFile": "",
    "Heun": {
        "initialValues": iv,
        "timeStepWidth": 1e-3,
        "endTime": 1.0,
        "timeStepOutputInterval": 100,
        "checkForNanInf": False,
        "inputMeshIsGlobal": True,
        "nAdditionalFieldVariables": 0,
        "additionalSlotNames": [],
        "dirichletBoundaryConditions": {},
        # vtp file with the Dirichlet BC nodes and values; None disables it
        "dirichletOutputFilename": None,
        "FiniteElementMethod": {
            "nElements": [nx, ny],
            "physicalExtent": [4.0, 4.0],
            "inputMeshIsGlobal": True,
            "prefactor": 0.1,
            # linear solver parameters
            "solverType": "gmres",
            "preconditionerType": "none",
            # relative tolerance of the residual norm w.r.t. the initial residual
            "relativeTolerance": 1e-15,
            "absoluteTolerance": 1e-10,  # absolute tolerance of the residual
            "maxIterations": 10000,
            "dumpFilename": "",
            "dumpFormat": "ascii",  # ascii, default or matlab
            "slotName": "",
        },
        "OutputWriter": [
            # NOTE(review): "binary" is the *string* "false" here but the bool
            # True below — preserved as found; confirm which form the solver expects.
            {"format": "Paraview", "outputInterval": 10, "filename": "out/filename",
             "binary": "false", "fixedFormat": False, "onlyNodalValues": True,
             "combineFiles": True, "fileNumbering": "incremental"},
            {"format": "PythonFile", "outputInterval": 10,
             "filename": "out/out_diffusion2d", "binary": True,
             "onlyNodalValues": True, "combineFiles": True,
             "fileNumbering": "incremental"},
        ],
    },
}
# Distribution metadata for the lightwood package.
__title__ = 'lightwood'
# NOTE(review): package name differs from __title__ — confirm this is intentional.
__package_name__ = 'mindsdb'
__version__ = '0.14.1'
__description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "jorge@mindsdb.com"
__author__ = 'MindsDB Inc'
__github__ = 'https://github.com/mindsdb/lightwood'
__pypi__ = 'https://pypi.org/project/lightwood'
__license__ = 'MIT'
__copyright__ = 'Copyright 2019- mindsdb'
# Problem: input: an unsorted array A[lo..hi]; | output: the (left) median of the given array
# Source: SCU COEN279 DAA HW3 Q3
# Note: the original decrease-and-conquer attempt returned wrong answers on
# several inputs (its own trailing comments record the mismatches).  The left
# median of n items is simply the ((n - 1) // 2)-th smallest element, computed
# here from a sorted copy so the caller's list is never mutated.
class solution():
    def left_median(self, A):
        """Return the left median of the non-empty list A.

        For odd n this is the middle element of the sorted order; for even n
        it is the lower of the two middle elements.  A is not modified.

        Raises:
            ValueError: if A is empty.
        """
        if not A:
            raise ValueError("left_median() arg is an empty sequence")
        return sorted(A)[(len(A) - 1) // 2]


t = solution()
t.left_median([0, 1, 3, 5])                            # 1
t.left_median([5, 1, 3, 8, 2])                         # 3
t.left_median([7, 0, 1, 12, 1, 45, 2])                 # 2
t.left_median([7, 0, 1, 12, 45, 2, 11, 8, 101, 14])    # 8
""" This module provides the functions and classes that help with pre-processing data — for example, splitting paragraphs into sentences. """
"""
Defines CPU Options for use in the CPU target
"""


class FastMathOptions(object):
    """
    Options for controlling fast math optimization.
    """

    def __init__(self, value):
        # Flag names recognised by LLVM, see
        # https://releases.llvm.org/7.0.0/docs/LangRef.html#fast-math-flags
        valid_flags = {
            'fast', 'nnan', 'ninf', 'nsz',
            'arcp', 'contract', 'afn', 'reassoc',
        }

        if isinstance(value, FastMathOptions):
            # Copy-construct from another options object.
            self.flags = value.flags.copy()
        elif value is True:
            # Plain True enables the umbrella 'fast' flag.
            self.flags = {'fast'}
        elif value is False:
            self.flags = set()
        elif isinstance(value, set):
            bad = value - valid_flags
            if bad:
                raise ValueError("Unrecognized fastmath flags: %s" % bad)
            self.flags = value
        elif isinstance(value, dict):
            bad = set(value.keys()) - valid_flags
            if bad:
                raise ValueError("Unrecognized fastmath flags: %s" % bad)
            # Keep only the flags explicitly switched on.
            self.flags = {flag for flag, enabled in value.items() if enabled}
        else:
            raise ValueError(
                "Expected fastmath option(s) to be either a bool, dict or set")

    def __bool__(self):
        return bool(self.flags)

    __nonzero__ = __bool__  # Python 2 alias

    def __repr__(self):
        return f"FastMathOptions({self.flags})"

    def __eq__(self, other):
        if type(other) is type(self):
            return self.flags == other.flags
        return NotImplemented


class ParallelOptions(object):
    """
    Options for controlling auto parallelization.
    """
    __slots__ = ("enabled", "comprehension", "reduction", "inplace_binop",
                 "setitem", "numpy", "stencil", "fusion", "prange")

    def __init__(self, value):
        # Every slot except "enabled" is an individually toggleable feature.
        features = [name for name in self.__slots__ if name != "enabled"]
        if isinstance(value, bool):
            # A single bool switches everything on or off at once.
            self.enabled = value
            for name in features:
                setattr(self, name, value)
        elif isinstance(value, dict):
            # A dict enables parallelization and overrides selected features;
            # the dict is consumed so leftovers can be reported as errors.
            self.enabled = True
            for name in features:
                setattr(self, name, value.pop(name, True))
            if value:
                raise NameError("Unrecognized parallel options: %s" % value.keys())
        elif isinstance(value, ParallelOptions):
            for name in self.__slots__:
                setattr(self, name, getattr(value, name))
        else:
            raise ValueError("Expect parallel option to be either a bool or a dict")

    def _get_values(self):
        """Get values as dictionary.
        """
        return {name: getattr(self, name) for name in self.__slots__}

    def __eq__(self, other):
        if type(other) is type(self):
            return self._get_values() == other._get_values()
        return NotImplemented


class InlineOptions(object):
    """
    Options for controlling inlining
    """

    def __init__(self, value):
        if isinstance(value, str):
            # Only the two literal policies are accepted as strings.
            accepted = value in ('always', 'never')
        else:
            # Anything callable acts as a cost model.
            accepted = hasattr(value, '__call__')
        if not accepted:
            raise ValueError("kwarg 'inline' must be one of the strings "
                             "'always' or 'never', or it can be a callable "
                             "that returns True/False. Found value %s" % value)
        self._inline = value

    @property
    def is_never_inline(self):
        """
        True if never inline
        """
        return self._inline == 'never'

    @property
    def is_always_inline(self):
        """
        True if always inline
        """
        return self._inline == 'always'

    @property
    def has_cost_model(self):
        """
        True if a cost model is provided
        """
        return not (self.is_always_inline or self.is_never_inline)

    @property
    def value(self):
        """
        The raw value
        """
        return self._inline

    def __eq__(self, other):
        if type(other) is type(self):
            return self.value == other.value
        return NotImplemented
# Parsers for model-generated dialogue sequences that use special delimiter
# tokens (<|belief|>, <|action|>, <|response|> ...) or their spaced OpenAI-GPT
# variants (< | belief | > ...).  The original file repeated the same
# strip/replace/split/dedupe dance in every function; that is factored into
# _collect_items.  get_action's original trailing `else: return []` was
# unreachable (the guard already ensured '<|action|>' is present) and is gone.


def _collect_items(tmp, end_marker, eot_marker, skip_empty=False):
    """Normalize an extracted span: drop the terminator tokens, split on
    commas, strip each item, and deduplicate while preserving order.

    skip_empty mirrors the action parsers, which skip items that are exactly
    '' *before* stripping (an item of ' . ' still strips to '' and is kept,
    matching the original behavior).
    """
    tmp = tmp.strip(' .,')
    tmp = tmp.replace(end_marker, '')
    tmp = tmp.replace(eot_marker, '')
    items = []
    for item in tmp.split(','):
        if skip_empty and item == '':
            continue
        item = item.strip(' .,')
        if item not in items:
            items.append(item)
    return items


def _strip_eos_tokens(tmp, tokenizer):
    """Re-encode a span and drop any end-of-sequence tokens the model emitted.

    The EOS encoding is computed once (the original re-encoded it on every
    loop iteration).
    """
    eos_ids = tokenizer.encode(tokenizer._eos_token)
    kept = [tok for tok in tokenizer.encode(tmp) if tok not in eos_ids]
    return tokenizer.decode(kept).strip(' ,.')


def get_belief(sent):
    """Extract deduplicated belief-state items from a generated sequence."""
    if '<|belief|>' not in sent:
        return []
    tmp = sent.strip(' ').split('<|belief|>')[-1].split('<|action|>')[0]
    return _collect_items(tmp, '<|endofbelief|>', '<|endoftext|>')


def get_belief_dbsearch(sent):
    """Like get_belief, but the span ends at <|endofbelief|> (DB-search format)."""
    if '<|belief|>' not in sent:
        return []
    tmp = sent.strip(' ').split('<|belief|>')[-1].split('<|endofbelief|>')[0]
    return _collect_items(tmp, '<|endofbelief|>', '<|endoftext|>')


def get_belief_openaigpt(sent):
    """get_belief for the OpenAI-GPT tokenization with spaced special tokens."""
    if '< | belief | >' not in sent:
        return []
    tmp = sent.strip(' ').split('< | belief | >')[-1].split('< | action | >')[0]
    return _collect_items(tmp, '< | endofbelief | >', '< | endoftext | >')


def get_response(sent, tokenizer):
    """Extract the delexicalized system response from a generated sequence."""
    if '<|response|>' not in sent:
        return ''
    tmp = sent.split('<|belief|>')[-1].split('<|action|>')[-1].split('<|response|>')[-1]
    tmp = tmp.strip(' .,')
    tmp = tmp.replace('<|endofresponse|>', '')
    tmp = tmp.replace('<|endoftext|>', '')
    return _strip_eos_tokens(tmp, tokenizer)


def get_response_openaigpt(sent, tokenizer):
    """get_response for the OpenAI-GPT tokenization; also repairs the extra
    spaces that tokenizer inserts inside bracketed placeholders."""
    if '< | response | >' not in sent:
        return ''
    tmp = sent.split('< | belief | >')[-1].split('< | action | >')[-1].split('< | response | >')[-1]
    tmp = tmp.strip(' .,')
    tmp = tmp.replace('< | endofresponse | >', '')
    tmp = tmp.replace('< | endoftext | >', '')
    response = _strip_eos_tokens(tmp, tokenizer)
    response = response.replace('[ ', '[')
    response = response.replace(' ]', ']')
    response = response.replace(' _ ', '_')
    response = response.replace('i d', 'id')
    return response


def get_action(sent):
    """Extract deduplicated dialogue-act items from a generated sequence."""
    if '<|action|>' not in sent:
        return []
    if '<|belief|>' in sent:
        tmp = sent.split('<|belief|>')[-1].split('<|response|>')[0].split('<|action|>')[-1].strip()
    else:
        tmp = sent.split('<|response|>')[0].split('<|action|>')[-1].strip()
    return _collect_items(tmp, '<|endofaction|>', '<|endoftext|>', skip_empty=True)


def get_action_openaigpt(sent):
    """get_action for the OpenAI-GPT tokenization with spaced special tokens."""
    if '< | belief | >' in sent:
        tmp = sent.split('< | belief | >')[-1].split('< | response | >')[0].split('< | action | >')[-1].strip()
    elif '< | action | >' in sent:
        tmp = sent.split('< | response | >')[0].split('< | action | >')[-1].strip()
    else:
        return []
    tmp = tmp.strip(' .,')
    tmp = tmp.replace('< | endofaction | >', '')
    tmp = tmp.replace('< | endoftext | >', '')
    new_action = []
    for act in tmp.split(','):
        if act == '':
            continue
        act = act.strip(' .,')
        # NOTE: membership is tested *before* the 'i d' -> 'id' repair,
        # exactly as in the original code.
        if act not in new_action:
            act = act.replace('i d', 'id')
            new_action.append(act)
    return new_action


def get_db_dynamically(predicted_text, goal, multiwoz_db):
    """Build the <|dbsearch|> context string for a predicted belief state by
    querying the MultiWOZ DB and checking bookability against the user goal."""
    gen_belief = get_belief_dbsearch(predicted_text)
    belief_domain = {}
    belief_book_domain = {}
    for bs in gen_belief:
        if bs in ['', ' ']:
            continue
        bs_domain = bs.split()[0]
        if 'book' in bs:
            # "<domain> book <slot> <value...>"
            bs_slot = bs.split()[2]
            bs_val = ' '.join(bs.split()[3:])
            if bs_domain not in belief_book_domain:
                belief_book_domain[bs_domain] = {}
            belief_book_domain[bs_domain][bs_slot] = bs_val
        else:
            # "<domain> <slot> <value...>"
            bs_slot = bs.split()[1]
            bs_val = ' '.join(bs.split()[2:])
            if bs_domain not in belief_domain:
                belief_domain[bs_domain] = {}
                belief_book_domain[bs_domain] = {}
            belief_domain[bs_domain][bs_slot] = bs_val

    db_text_tmp = []
    for dom in belief_domain:
        if dom not in ['restaurant', 'hotel', 'attraction', 'train']:
            continue
        domain_match = len(
            multiwoz_db.queryResultVenues(dom, belief_domain[dom], real_belief=True))
        if dom != 'train':
            if domain_match >= 5:
                domain_match_text = '>=5'
            else:
                domain_match_text = '={}'.format(domain_match)
        else:
            # Bucketize train matches.  NOTE(review): these compare with `==`
            # against single values (2, 5, 10, 40) yet render range labels like
            # '<3' — looks suspicious, but preserved as found; confirm upstream.
            if domain_match == 0:
                domain_match_text = '=0'
            elif domain_match == 2:
                domain_match_text = '<3'
            elif domain_match == 5:
                domain_match_text = '<6'
            elif domain_match == 10:
                domain_match_text = '<11'
            elif domain_match == 40:
                domain_match_text = '<41'
            else:
                domain_match_text = '>40'
        if 'fail_book' in goal[dom]:
            # Booking fails when any fail_book constraint appears in the belief.
            for item in goal[dom]['fail_book'].items():
                if item in belief_book_domain[dom].items():
                    domain_book_text = 'not available'
                    break
            else:
                domain_book_text = 'available'
        else:
            if domain_match == 0:
                domain_book_text = 'not available'
            else:
                domain_book_text = 'available'
        db_text_tmp.append('{} match{} booking={}'.format(
            dom, domain_match_text, domain_book_text))
    db_text = ' <|dbsearch|> {} <|endofdbsearch|>'.format(' , '.join(db_text_tmp))
    return db_text
"""
Created on 2016-01-13

@author: Wu Wenxiang (wuwenxiang.sh@gmail.com)
"""

# Global debug switch; importing modules read this to gate verbose output.
DEBUG = False
# NICOS setup: Helmholtz field coil for the KWS-1 instrument.

description = 'Helmholtz field coil'
group = 'optional'

includes = ['alias_B']

tango_base = 'tango://phys.kws1.frm2:10000/kws1/'

devices = dict(
    I_helmholtz = device('nicos.devices.entangle.PowerSupply',
        description = 'Current in coils',
        tangodevice = tango_base + 'gesupply/ps2',
        unit = 'A',
        fmtstr = '%.2f',
    ),
    B_helmholtz = device('nicos.devices.generic.CalibratedMagnet',
        currentsource = 'I_helmholtz',
        description = 'Magnet field',
        unit = 'T',
        fmtstr = '%.5f',
        # Linear calibration: slope only (previous slope: 0.003255221).
        calibration = (0.0032507550, 0, 0, 0, 0),
    ),
)

alias_config = {
    'B': {'B_helmholtz': 100},
}

extended = dict(
    representative = 'B_helmholtz',
)
class SymbolTable(object):
    """Maps Hack assembly symbols to RAM/ROM addresses.

    Pre-loaded with the platform's predefined symbols (SP, LCL, ARG, THIS,
    THAT, R0-R15, SCREEN, KBD); labels and variables are added via add_entry.
    """

    def __init__(self):
        self._symbols = {
            'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4,
            'SCREEN': 0x4000, 'KBD': 0x6000,
        }
        # Virtual registers R0..R15 map straight to RAM addresses 0..15.
        self._symbols.update({'R%d' % n: n for n in range(16)})

    def add_entry(self, symbol, address):
        """Bind symbol to address (overwrites any existing binding)."""
        self._symbols[symbol] = address

    def contains(self, symbol):
        """Return True if symbol already has an address."""
        return symbol in self._symbols

    def get_address(self, symbol):
        """Return the address bound to symbol (KeyError if absent)."""
        return self._symbols[symbol]
'''Demonstration of a Range class mimicking the built-in range.'''


class Range:
    """A class that mimics the built-in range class."""

    def __init__(self, start, stop=None, step=1):
        """Initialize a Range instance.

        Semantics are similar to the built-in range class.

        Raises:
            ValueError: if step is 0.
        """
        if step == 0:
            raise ValueError('step cannot be 0')

        if stop is None:                  # was `== None`; identity test is the idiom
            stop, start = start, 0        # special case of Range(n)

        # Calculate the effective length once.
        self._length = max(0, (stop - start + step - 1) // step)

        # Need knowledge of start and step (but not stop) for __getitem__.
        self._start = start
        self._step = step

    def __len__(self):
        """Return the number of entries in the range."""
        return self._length

    def __getitem__(self, k):
        """Return entry at index k (standard interpretation if negative).

        Raises:
            IndexError: if k is out of range after negative-index conversion.
        """
        if k < 0:
            k += len(self)                # convert negative index; uses __len__
        if not 0 <= k < self._length:
            raise IndexError('index out of range')
        return self._start + k * self._step

    def __repr__(self):
        """Debug representation (new; the original printed the default
        object repr in test())."""
        return f'Range(start={self._start}, step={self._step}, len={self._length})'


# ------------------------------------------------------------------------------------------
def test():
    r = Range(8, 140, 5)
    print(r)
    print(len(r))
    print(r[25])
    print(r[-1])


if __name__ == "__main__":
    test()
# File: question3.py
# Author: David Lechner
# Date: 11/19/2019
'''Ask some questions about goats'''

# NUM_GOATS is all caps because it is a constant: set once at the beginning
# of the program and never changed afterwards.
NUM_GOATS = 10  # This is how many goats I have

# input() returns a string; int() converts what the user typed to an integer.
# TRY IT: what happens for a letter, or a number with a decimal point?
answer = int(input('How many goats do you see?'))

# The three comparisons are mutually exclusive, so an if/elif/else chain
# expresses the same logic as three independent ifs.
if answer < NUM_GOATS:
    print('Some of your goats are missing!')
elif answer == NUM_GOATS:
    print('All of the goats are there.')
else:
    print('You have extra goats!')
# lec6.4-removeDups.py
# edX MITx 6.00.1x — Introduction to Computer Science and Programming Using Python
# Lecture 6, video 4
# Demonstrates why mutating a list while iterating over it misbehaves,
# and how to avoid it by iterating over a copy instead.


def removeDups(L1, L2):
    """Deliberately buggy: removes from L1 while iterating L1, so the element
    following each removal is skipped (the 2 survives in the demo below)."""
    for elem in L1:
        if elem in L2:
            # Removal shifts the remaining elements left; the iterator then
            # steps past the element that slid into the vacated slot.
            L1.remove(elem)


L1 = [1, 2, 3, 4]
L2 = [1, 2, 5, 6]
removeDups(L1, L2)
print(L1)  # [2, 3, 4] — the 2 was skipped, not removed


def removeDupsBetter(L1, L2):
    """Correct version: iterate over a snapshot of L1 so removals from the
    live list cannot disturb the iteration."""
    for elem in L1[:]:
        if elem in L2:
            L1.remove(elem)


L1 = [1, 2, 3, 4]
L2 = [1, 2, 5, 6]
removeDupsBetter(L1, L2)
print(L1)  # [3, 4]
def compareTriplets(a, b):
    """Compare rating triplets pairwise; return [alice_points, bob_points].

    A point goes to whichever side has the strictly larger rating at each
    position; ties award nothing.
    """
    alice = sum(1 for x, y in zip(a, b) if x > y)
    bob = sum(1 for x, y in zip(a, b) if y > x)
    return [alice, bob]


if __name__ == '__main__':
    a = list(map(int, input('::: ').strip().split()))
    b = list(map(int, input('::: ').strip().split()))
    print(compareTriplets(a, b))
#!/usr/bin/env python3 # -*- coding: utf-8 -*- def test(): message = int(input('Введите целое число:')) if message > 0: positive(message) else: negative(message) def negative(message): print(f'Число {message} отрицательное') def positive(message): print(f"Число {message} положительное") if __name__ == '__main__': test()
# Practice exercises, disabled by wrapping them in a bare triple-quoted string
# (a no-op expression statement).  Restore one snippet at a time to run it.

# file extension
'''n = input("enter file name with extension:")
f_ext = n.split('.')
x = f_ext[-1]
print(x)

#sum
n = input("enter one number:")
temp = n
temp1 = temp+temp
temp2 = temp+temp+temp
val = int(n)+int(temp1)+int(temp2)
print(val)

#Multiline comment
print("a string that you \"don\'t\" have to escape \n This \n is a ....... multi-line \n here doc string -------->")

#4
n = int(input("enter num"))
dif = n - 19
if(dif>0):
    print("value is ", 2 * dif)
else:
    print(dif)

#5
n = input("Enter string: ")
if(n[0:2] == "Is"):
    print("String is ", n)
else:
    n = "Is" + n
    print("String",n)

#9
import math
n = int(input("enter value:"))
x= hex(n)
print("hexa decimal value is", x)

#10
import math
n = int(input("enter value:"))
x= bin(n)
print("hexa decimal value is", x)

#7
import math
n = input("enter character:")
x= ord(n)
print("ASCII value is", x)

#6
sec=int(input("enter the number of seconds:"))
if(sec >= 86400):
    m = sec/60
    h = m/60
    d = h/24
    print("Time in Minutes is ", m,"mins")
    print("Time in Hours is ", h,"hrs")
    print("Days is", d,"days")
else:
    print("Seconds can\'t redable")

#8
x = int(input("enter first num:"))
y = int(input("enter second num:"))
f1 = float(x)
f2 = float(y)
if(x>y):
    print("largest integer value",x)
else:
    print("largest integer value",y)
if(f1>f2):
    print("largest float value",f1)
else:
    print("largest float value",f2)'''
"""IMC

O IMC - Índice de Massa Corporal é um critério da Organização Mundial de Saúde
para dar uma indicação sobre a condição de peso de uma pessoa adulta.
A fórmula é IMC = peso / (altura)2.

Elabore um algoritmo que leia o peso e a altura de um adulto e mostre sua
condição de acordo com a tabela abaixo:
- Abaixo de 18,5: Abaixo do peso
- Entre 18,5 e 25: Peso normal
- Entre 25 e 30: Acima do peso
- Acima de 30: obeso
"""

# Read the subject's measurements (height in metres, weight in kg).
altura = float(input('Altura: '))
peso = float(input('Peso: '))

# Body-mass index: weight divided by height squared.
imc = peso / (altura ** 2)

# Classify per the WHO table above; each elif already implies the lower bound.
if imc < 18.5:
    print('\nAbaixo do peso.')  # fixed user-facing typo: was 'Aaixo do peso.'
elif imc < 25:
    print('\nPeso normal.')
elif imc < 30:
    print('\nAcima do peso.')
else:
    print('\nObeso')
# Python — object-oriented programming with special ("dunder"/magic) methods.
# __repr__ and __str__ control string conversion, __add__ customizes '+',
# and __len__ customizes len(); any dunder can be (re)defined on a class.


class Employee:
    # Class-wide raise factor applied by apply_raise().
    raise_amt = 1.04

    def __init__(self, firstName, lastName, pay):
        self.firstName = firstName
        self.lastName = lastName
        self.pay = pay
        self.email = f"{firstName}.{lastName}@Company.com"

    def fullname(self):
        """Return 'First Last'."""
        return f"{self.firstName} {self.lastName}"

    def apply_raise(self):
        """Multiply pay by the class raise factor, truncating to int."""
        self.pay = int(self.pay * self.raise_amt)

    def __repr__(self):
        """Unambiguous, constructor-style representation."""
        return f"Employee('{self.firstName}', '{self.lastName}', {self.pay})"

    def __str__(self):
        """Readable representation: full name and email address."""
        return f"{self.fullname()} - {self.email}"

    def __add__(self, other):
        """employee_a + employee_b yields their combined pay."""
        return self.pay + other.pay

    def __len__(self):
        """len(employee) is the length of the full name."""
        return len(self.fullname())


employee_1 = Employee("Tom", "Hanks", 50000)
employee_2 = Employee("Ricky", "Martin", 60000)

# print() uses str(); repr() is the fallback when __str__ is missing.
print(employee_1)
print(str(employee_1))
print(repr(employee_1))
print(employee_1.__repr__())
print(employee_1.__str__())

print(employee_1 + employee_2)  # combined pay via __add__
print(len(employee_1))          # full-name length via __len__
# File: okta_consts.py
#
# Copyright (c) 2018-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.

# Asset-configuration keys
OKTA_BASE_URL = "base_url"
OKTA_API_TOKEN = "api_key"

# Actions whose REST endpoints return paginated results
OKTA_PAGINATED_ACTIONS_LIST = ['list_users', 'list_user_groups', 'list_providers', 'list_roles']

# Action status and validation messages
OKTA_RESET_PASSWORD_SUCC = "Successfully created one-time token for user to reset password"
OKTA_LIMIT_INVALID_MSG_ERR = "Please provide a valid positive integer value for 'limit' action parameter"
OKTA_LIMIT_NON_ZERO_POSITIVE_MSG_ERR = "Please provide a valid non-zero positive integer value for 'limit' action parameter"
OKTA_PAGINATION_MSG_ERR = "Error occurred while fetching paginated response for action: {action_name}. Error Details: {error_detail}"
OKTA_DISABLE_USER_SUCC = "Successfully disabled the user"
OKTA_ALREADY_DISABLED_USER_ERR = "User is already disabled"
OKTA_ENABLE_USER_SUCC = "Successfully enabled the user"
OKTA_ALREADY_ENABLED_USER_ERR = "User is already enabled"
OKTA_SET_PASSWORD_SUCC = "Successfully set user password"
OKTA_ASSIGN_ROLE_SUCC = "Successfully assigned role to user"
OKTA_ALREADY_ASSIGN_ROLE_ERR = "Role is already assigned to user"
OKTA_UNASSIGN_ROLE_SUCC = "Successfully unassigned role to user"
OKTA_ALREADY_UNASSIGN_ROLE_ERR = "Role is not assigned to user"
OKTA_ALREADY_ADDED_GROUP_ERR = "Group already added to organization"
OKTA_ADDED_GROUP_SUCCESS_MSG = "Group has been added successfully"
OKTA_GET_GROUP_SUCC = "Successfully retrieved group"
OKTA_GET_USER_SUCC = "Successfully retrieved user"
OKTA_TEST_CONNECTIVITY_FAILED = "Test Connectivity Failed"
OKTA_TEST_CONNECTIVITY_PASSED = "Test Connectivity Passed"
OKTA_INVALID_USER_MSG = "Please provide a valid user_id"
OKTA_CLEAR_USER_SESSIONS_SUCC = "Successfully cleared user sessions"
OKTA_SEND_PUSH_NOTIFICATION_ERR_MSG = "Please configure factor_type '{factor_type}' for the user '{user_id}'"

# DO NOT MODIFY!
# A fixed field used by Okta to identify the integration
OKTA_APP_USER_AGENT_BASE = "SplunkPhantom/"

UNEXPECTED_RESPONSE_MSG = "Unexpected response received"

# Constants relating to '_get_error_message_from_exception'
ERR_CODE_MSG = "Error code unavailable"
ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
TYPE_ERR_MSG = "Error occurred while connecting to the Okta Server. Please check the asset configuration and|or the action parameters"

# Constants relating to value_list checks
FACTOR_TYPE_VALUE_LIST = ["push", "sms (not yet implemented)", "token:software:totp (not yet implemented)"]
RECEIVE_TYPE_VALUE_LIST = ["Email", "UI"]
IDENTITY_PROVIDERS_TYPE_VALUE_LIST = ["SAML2", "FACEBOOK", "GOOGLE", "LINKEDIN", "MICROSOFT"]
ROLE_TYPE_VALUE_LIST = [
    "SUPER_ADMIN", "ORG_ADMIN", "API_ACCESS_MANAGEMENT_ADMIN", "APP_ADMIN",
    "USER_ADMIN", "MOBILE_ADMIN", "READ_ONLY_ADMIN", "HELP_DESK_ADMIN",
    "GROUP_MEMBERSHIP_ADMIN", "REPORT_ADMIN"]
VALUE_LIST_VALIDATION_MSG = "Please provide valid input from {} in '{}' action parameter"
"""
Array addition

Have the function ArrayAddition(arr) take the array of numbers stored in arr
and return the string true if any combination of numbers in the array
(excluding the largest number) can be added up to equal the largest number in
the array, otherwise return the string false. For example: if arr contains
[4, 6, 23, 10, 1, 3] the output should return true because 4 + 6 + 10 + 3 = 23.
The array will not be empty, will not contain all the same elements, and may
contain negative numbers.
"""
# Subset-sum via recursion; cf. https://www.geeksforgeeks.org/subset-sum-problem-dp-25/
# Fixes over the original:
#  * the overshoot cutoff (sumV > maxV) is only valid when no negative values
#    remain — with negatives an overshoot can still be brought back down.
#    The original always pruned and so wrongly rejected e.g. [-1, -2, -1].
#  * findingSuminArr no longer mutates the caller's list (it sorted/popped it).

# array = [4, 6, 23, 10, 1, 3]   # True
# array = [-1, -2, -1]           # True: the remaining -1 equals the max
array = [3, 5, -1, 8, 12]        # True


def searchSum(arr, sumV, ind, maxV, canPrune=True):
    """Return True if sumV plus some subset of arr[ind:] equals maxV.

    canPrune enables the sumV > maxV cutoff, sound only when every value in
    arr[ind:] is non-negative.  Defaults to True, matching the original
    behavior for non-negative input.
    """
    if sumV == maxV:
        return True
    if ind >= len(arr):
        return False
    if canPrune and sumV > maxV:
        return False  # remaining values are non-negative; no way back down
    # Either take arr[ind] or skip it.
    return (searchSum(arr, sumV + arr[ind], ind + 1, maxV, canPrune)
            or searchSum(arr, sumV, ind + 1, maxV, canPrune))


def findingSuminArr(arr):
    """Return True if some combination of arr's elements (excluding its
    largest) sums to that largest element.  arr is not modified."""
    values = sorted(arr)      # work on a sorted copy
    maxV = values.pop()       # largest element is the target
    # Overshoot pruning is only sound when no negatives remain.
    canPrune = not values or values[0] >= 0
    return searchSum(values, 0, 0, maxV, canPrune)


print(findingSuminArr(array))
"""
Find all unique triplets in nums that sum to zero.

The original attempt built a value -> [indices] table but then referenced the
undefined names i, j, k inside the inner loop, raising NameError as soon as a
matching complement was found.  Rewritten with the standard sort + two-pointer
scan: O(n^2) time, O(1) extra space beyond the output, no duplicate triplets.
"""


class Solution:
    def threeSum(self, nums):
        """Return a list of all unique [a, b, c] triplets with a + b + c == 0."""
        nums = sorted(nums)  # local sorted copy; caller's list is untouched
        n = len(nums)
        triplets = []
        for i in range(n - 2):
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate anchor values
            if nums[i] > 0:
                break     # anchor positive => all three values positive; stop
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    triplets.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1  # skip duplicate middle values
        return triplets


soln = Solution()
print(soln.threeSum([-1, 0, 1, 2, -1, -4]))
class Node:
    """Single node of a linked list, holding a value and next/prev links."""

    def __init__(self, dataval=None):
        self.dataval = dataval
        self.next = None
        self.prev = None

    def __str__(self):
        return str(self.dataval)


class MyList:
    """Linked list with head (`first`) and tail (`last`) pointers.

    Traversal uses the `next` chain; `prev` links are maintained by add()
    and append() but are not relied on for removal (as in the original).
    """

    def __init__(self):
        self.first = None
        self.last = None

    def add(self, dataval):
        """Prepend a value to the front of the list."""
        temp = Node(dataval)
        if self.first is None:
            self.first = temp
            self.last = temp
        else:
            temp.next = self.first
            # Bug fix: keep the back-link consistent — append() maintained
            # prev but add() previously did not.
            self.first.prev = temp
            self.first = temp

    def index(self, item):
        """Return the position of the first node holding `item`, or -1."""
        position = 0
        node = self.first
        # Walk the chain directly instead of calling get() per index
        # (O(n) instead of the original O(n^2)).
        while node is not None:
            if node.dataval == item:
                return position
            node = node.next
            position += 1
        return -1

    def remove(self, item):
        """Remove the first node holding `item`; no-op when absent."""
        index = self.index(item)
        if index == -1:
            # Bug fix: previously a missing item on a single-element list
            # fell into the `last == first` branch and cleared the list.
            return
        if self.last == self.first:
            self.last = None
            self.first = None
        elif index == 0:
            temp = self.first.next
            self.first.next = None
            self.first = temp
        else:
            oneBefore = self.get(index - 1)
            temp = oneBefore.next
            if self.last == temp:
                self.last = oneBefore
            oneBefore.next = temp.next
            temp.next = None

    def append(self, dataval):
        """Append a value to the end of the list."""
        newItem = Node(dataval)
        if self.first is None:
            self.first = newItem
            self.last = newItem
        else:
            self.last.next = newItem
            newItem.prev = self.last
            self.last = newItem

    def get(self, index):
        """Return the node at `index`, or None when out of range.

        Bug fix: the original bounds check used `and`
        (`index >= length and index < 0`), which can never be true, so
        out-of-range and negative indices walked off the list or returned
        the wrong node.
        """
        if index < 0 or index >= self.length():
            return None
        item = self.first
        for _ in range(index):
            item = item.next
        return item

    def __len__(self):
        return self.length()

    def isEmpty(self):
        return self.first is None

    def length(self):
        """Return the number of nodes (O(n) walk, as in the original)."""
        result = 0
        item = self.first
        while item is not None:
            result += 1
            item = item.next
        return result

    def pop(self):
        """Remove and return the tail node, or None when the list is empty.

        Bug fixes: the original crashed (AttributeError) on an empty list,
        and removed by *value*, so with duplicate values it unlinked the
        first match instead of the tail.
        """
        if self.last is None:
            return None
        last = self.last
        if self.first is last:
            self.first = None
            self.last = None
        else:
            # Find the tail's predecessor via the next-chain.
            prev = self.first
            while prev.next is not last:
                prev = prev.next
            prev.next = None
            self.last = prev
        return last

    def __str__(self):
        """Comma-joined values, or the literal string "empty"."""
        if self.first is None:
            return "empty"
        parts = []
        node = self.first
        while node is not None:
            parts.append(str(node))
            node = node.next
        return ",".join(parts)
# Layer-category definition — presumably consumed by an OWS layer registry
# that groups hazard layers under a titled category (confirm against the
# loader that imports this module).
category_layers = {
    "title": "Hazards",
    "abstract": "",
    "layers": [
        {
            # Dotted-path reference to an external layer config; "type":
            # "python" marks it as a Python module attribute to import.
            "include": "ows_refactored.hazards.burntarea.ows_provisional_ba_cfg.layers",
            "type": "python",
        },
    ]
}
class Logger:
    """Rate-limiting message logger: each distinct message may print at most
    once every 10 seconds.

    Time: O(1) per call; Space: O(M) over all distinct incoming messages.
    """

    def __init__(self):
        # message -> timestamp at which it was last allowed to print
        self.messages = {}

    def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
        """Return True (and record the timestamp) if `message` may print at
        `timestamp`, i.e. it is new or its last print was >= 10s ago."""
        last_printed = self.messages.get(message)
        if last_printed is not None and timestamp < last_printed + 10:
            # Seen recently — still inside the 10-second quiet window.
            return False
        self.messages[message] = timestamp
        return True


# Your Logger object will be instantiated and called as such:
# obj = Logger()
# param_1 = obj.shouldPrintMessage(timestamp,message)
def add_native_methods(clazz):
    """Attach placeholder implementations of native methods to `clazz`.

    Appears to be generated stubs for the native methods of a Windows AWT
    scrollbar peer (note the `sun_awt_windows_WComponentPeer` mangled name —
    confirm against the generator). Each stub simply raises
    NotImplementedError; the name suffixes encode the Java parameter types.
    """
    def getScrollbarSize__int__(a0):
        raise NotImplementedError()

    def setValues__int__int__int__int__(a0, a1, a2, a3, a4):
        raise NotImplementedError()

    def setLineIncrement__int__(a0, a1):
        raise NotImplementedError()

    def setPageIncrement__int__(a0, a1):
        raise NotImplementedError()

    def create__sun_awt_windows_WComponentPeer__(a0, a1):
        raise NotImplementedError()

    # getScrollbarSize is bound as a staticmethod; the others are bound as
    # ordinary functions (their first argument acts as the receiver).
    clazz.getScrollbarSize__int__ = staticmethod(getScrollbarSize__int__)
    clazz.setValues__int__int__int__int__ = setValues__int__int__int__int__
    clazz.setLineIncrement__int__ = setLineIncrement__int__
    clazz.setPageIncrement__int__ = setPageIncrement__int__
    clazz.create__sun_awt_windows_WComponentPeer__ = create__sun_awt_windows_WComponentPeer__
""" Avoid already-imported warning cause of we are importing this package from run wrapper for loading config. You can see documentation here: https://docs.pytest.org/en/latest/reference.html under section PYTEST_DONT_REWRITE """
# Read an integer from the user and report its parity.
number = int(input("Which number do you want to choose"))

# Even numbers leave no remainder when divided by two.
parity_message = (
    "This is an even number."
    if number % 2 == 0
    else "This is an odd number."
)
print(parity_message)
# Application-wide settings and constant strings for the Flow dashboard.
# Fix in this revision: JOURNAL.PTN_NUM_RESPONSE is now a raw string —
# '\d' in a non-raw literal is an invalid escape sequence (SyntaxWarning on
# modern Python).

# General info
AUTHOR_NAME = "Jeremy Gordon"
SITENAME = "Flow"
EMAIL_PREFIX = "[ Flow ] "
TAGLINE = "A personal dashboard to focus on what matters"
SECURE_BASE = "https://flowdash.co"

# Emails
APP_OWNER = "onejgordon@gmail.com"
ADMIN_EMAIL = APP_OWNER
DAILY_REPORT_RECIPS = [APP_OWNER]
SENDER_EMAIL = APP_OWNER
NOTIF_EMAILS = [APP_OWNER]

GCS_REPORT_BUCKET = "/flow_reports"
BACKGROUND_SERVICE = "default"

# Flags
NEW_USER_NOTIFICATIONS = False

# Journal questions every new user starts with (see JOURNAL.PATTERNS for the
# accepted answer formats per response_type).
DEFAULT_USER_SETTINGS = {
    'journals': {
        'questions': [
            {
                'name': "narrative",
                'text': "A few words on your day?",
                'response_type': "text",
                'parse_tags': True
            },
            {
                'name': "day_rating",
                'label': "Rating",
                'text': "How was the day?",
                'response_type': "slider",
                'chart': True,
                'chart_default': True,
                'tag_segment_chart': True,
                'color': '#dd0000'
            }
        ]
    }
}

# Strings — canned conversational replies, chosen from at random (presumably;
# confirm against callers).
HABIT_DONE_REPLIES = [
    "Well done!",
    "Nice work!",
    "Alright!",
    "Great!",
    "Great job!",
    "Keep it up!"
]

HABIT_COMMIT_REPLIES = [
    "Yeah, do it!",
    "You can do it!",
    "Can't wait!",
    "Great idea!",
    "You got this"
]

TASK_DONE_REPLIES = [
    "That didn't look hard!",
    "Just like that",
    "Fini",
    "OK!",
    "OK",
    "Roger",
    "Check",
    "Great"
]

COOKIE_NAME = "flow_session"


class HABIT():
    # Constants for the habit-tracking feature.
    HELP = "You can set habits to build, and track completion. Try saying 'new habit: run', 'habit progress', or 'commit to run tonight'"
    ACTIVE_LIMIT = 20


class EVENT():
    # Type
    PERSONAL = 1
    FAMILY = 2
    PROFESSIONAL = 3
    PUBLIC = 4


class JOURNAL():
    # Constants for the daily-journal feature.
    HELP = "You can set up daily questions to track anything you want over time. Try saying 'daily report'"

    # Timing — the journal window opens at START_HOUR and closes at END_HOUR
    # the next morning.
    START_HOUR = 21
    END_HOUR = 4

    # Patterns (regexes validating free-text answers)
    PTN_TEXT_RESPONSE = '.*'
    # Up to 4 integer digits with an optional decimal point and up to 2
    # decimal digits. Raw string: '\d' must not be a cooked-string escape.
    PTN_NUM_RESPONSE = r'\d{1,4}\.?\d{0,2}'

    # Response Types
    PATTERNS = {
        'text': PTN_TEXT_RESPONSE,
        'number': PTN_NUM_RESPONSE,
        'number_oe': PTN_NUM_RESPONSE,  # For input box entry
        'slider': PTN_NUM_RESPONSE
    }
    NUMERIC_RESPONSES = ['number', 'slider', 'number_oe']

    INVALID_REPLY = "I couldn't understand your answer."
    INVALID_SUFFIX_NUMERIC = "I'm expecting a number between 1 and 10."
    INVALID_TASK = "That didn't look like a task, please try again"
    TOP_TASK_PROMPT = "Enter a top task for tomorrow (or you can say 'done')"
    TOP_TASK_PROMPT_ADDTL = "Enter another top task for tomorrow (you can say 'done')"
    ALREADY_SUBMITTED_REPLY = "Sorry, you've already submitted today's journal."


class JOURNALTAG():
    # Types
    PERSON = 1
    HASHTAG = 2  # Activities, etc


class READABLE():
    # Type
    ARTICLE = 1
    BOOK = 2
    PAPER = 3

    LABELS = {
        ARTICLE: "Article",
        BOOK: "Book",
        PAPER: "Paper"
    }

    LOOKUP = {
        "article": 1,
        "book": 2,
        "paper": 3
    }


class TASK():
    # Status
    NOT_DONE = 1
    DONE = 2

    HELP = "You can set and track top tasks each day. Try saying 'add task remember the milk' or 'my tasks'"


class USER():
    # Role levels.
    USER = 1
    ADMIN = 2


class GOAL():
    HELP = "You can review your monthly and annual goals. Try saying 'view goals'"
    SET_INFO = "Bear with us as we're still in beta! Please visit flowdash.co to create monthly, annual, and long-term goals."
    DEFAULT_GOAL_SLOTS = 4


class REPORT():
    # Types
    HABIT_REPORT = 1
    TASK_REPORT = 2
    GOAL_REPORT = 3
    JOURNAL_REPORT = 4
    EVENT_REPORT = 5
    PROJECT_REPORT = 6
    TRACKING_REPORT = 7

    # Status
    CREATED = 1
    GENERATING = 2
    DONE = 3
    CANCELLED = 4
    ERROR = 5

    # STORAGE TYPES
    GCS_CLIENT = 1

    # Ftypes
    CSV = 1

    # NOTE(review): PROJECT_REPORT has no entry in TYPE_LABELS — confirm
    # whether that is intentional.
    TYPE_LABELS = {
        HABIT_REPORT: "Habit Report",
        TASK_REPORT: "Task Report",
        GOAL_REPORT: "Goal Report",
        JOURNAL_REPORT: "Journal Report",
        EVENT_REPORT: "Event Report",
        TRACKING_REPORT: "Tracking Report"
    }

    STATUS_LABELS = {
        CREATED: "Created",
        GENERATING: "Generating",
        DONE: "Done",
        CANCELLED: "Cancelled",
        ERROR: "Error"
    }

    EXTENSIONS = {CSV: "csv"}
class StandartIO:
    """Minimal text-file I/O helper (class name kept as-is for callers).

    Fix: all handles are now opened via `with`, so they are closed even when
    read/write raises; the originals leaked the handle on error.
    """

    def read(self, file_name):
        """Return the full contents of `file_name` as a string."""
        with open(file_name, "r") as f:
            return f.read()

    def write(self, file_name, data):
        """Overwrite `file_name` with `data`."""
        with open(file_name, "w") as f:
            f.write(data)

    def append(self, file_name, data, line):
        """Insert `data` into an existing file.

        line == 0: append `data` as a new final line.
        line  > 0: join `data` onto (1-based) line `line`, stripping that
        line's whitespace first.

        NOTE(review): an empty file raises IndexError on the line == 0 path
        (content[-1]) — preserved from the original; confirm whether callers
        ever pass an empty file.
        """
        with open(file_name, 'r') as f:
            content = f.readlines()

        if line == 0:
            content[-1] += '\n'
            content.append(data)
        else:
            content[line - 1] = content[line - 1].strip() + data + '\n'

        # Write them back to the file
        with open(file_name, 'w') as f:
            f.writelines(content)
# Read a score from stdin and print its grade band.
# Bug fix: the original used three independent `if` statements, so e.g. a
# score of 95 printed all three bands; `elif` makes the bands mutually
# exclusive (highest matching band wins).
a = input('输入你的分数: ')
a = int(a)
if a >= 90:
    print('优秀')
elif a >= 80:
    print('良好')
elif a >= 60:
    print('普通')
# Read an N x M character grid from stdin and print the area of the largest
# square (side >= 2) whose four corner cells hold the same character, or 1
# when no such square exists.
N, M = map(int, input().split())
grid = [input() for _ in range(N)]
max_length = min(N, M)


def is_same(k, x, y):
    """True when the four corners of the k-by-k square anchored at (x, y) match."""
    corners = (
        grid[x][y],
        grid[x][y + k - 1],
        grid[x + k - 1][y],
        grid[x + k - 1][y + k - 1],
    )
    return len(set(corners)) == 1


def is_square(n):
    """True if any n-by-n square placement on the grid has four equal corners."""
    return any(
        is_same(n, i, j)
        for i in range(N - n + 1)
        for j in range(M - n + 1)
    )


# Try the largest side first; the for/else falls through to 1 when no
# side >= 2 works.
for length in range(max_length, 1, -1):
    if is_square(length):
        print(length ** 2)
        break
else:
    print(1)
"""Pylons requires that packages have a lib.base and lib.helpers So we've added on here so we can run tests in the context of the tg package itself, pylons will likely remove this restriction before 1.0 and this package can then be removed. """
def KGI(serial):
    """Build a 14-character identifier from a 10-digit serial.

    Prepends the fixed enterprise prefix "362", then appends a check digit
    computed with alternating 3-7-1 weights: each payload digit is multiplied
    by its weight, the last digits of those products are summed, and the
    check digit is (10 - last digit of that sum), itself truncated to its
    last digit so a sum ending in 0 yields check digit 0.

    Returns None (after printing "input error") when the serial is not
    exactly 10 characters long.
    """
    serial_text = str(serial)
    if len(serial_text) != 10:
        print("input error")
        return None

    enterprise = "362"
    weights = "3713713713713"
    payload = enterprise + serial_text

    # Sum of the last digit of each digit*weight product.
    checksum = 0
    for digit, weight in zip(payload, weights):
        checksum += (int(digit) * int(weight)) % 10

    last = int(str(checksum)[-1])
    check_digit = str(10 - last)[-1]
    return payload + check_digit
class JadeError(Exception):
    """Exception carrying an RPC error code, a message, and a data payload."""

    # RPC error codes
    INVALID_REQUEST = -32600
    UNKNOWN_METHOD = -32601
    BAD_PARAMETERS = -32602
    INTERNAL_ERROR = -32603

    # Implementation specific error codes: -32000 to -32099
    USER_CANCELLED = -32000
    PROTOCOL_ERROR = -32001
    HW_LOCKED = -32002
    NETWORK_MISMATCH = -32003

    def __init__(self, code, message, data):
        self.code = code
        self.message = message
        self.data = data

    def __repr__(self):
        # Same rendering as the original string concatenation.
        return "JadeError: {} - {} (Data: {})".format(
            self.code, self.message, repr(self.data)
        )

    def __str__(self):
        return repr(self)
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution(object):
    def zigzagLevelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        Collect node values level by level (left-to-right) via a recursive
        walk into self.levels, then reverse every odd-depth level to produce
        the zigzag ordering.
        """
        self.levels = []
        self.recurse(root, 0)
        return [
            vals[::-1] if depth % 2 else vals
            for depth, vals in enumerate(self.levels)
        ]

    def recurse(self, node, lvl):
        """Append node.val to the bucket for depth `lvl`, then descend."""
        if node is None:
            return
        if len(self.levels) <= lvl:
            self.levels.append([])
        self.levels[lvl].append(node.val)
        self.recurse(node.left, lvl + 1)
        self.recurse(node.right, lvl + 1)
""" Dictionaries are lists of key value pairs. The key can be any object that doesn't change and the value can be any object. The key and values have a colon ':' between them. { Key:Value, Key:Value, Key:Value} """ # # Creating empty dictionaries # data = {} data = dict() # # Dictionary with three pairs using numbers as keys. # numbers = {1:'one', 2:'two', 3:'three'} # # Dictionary with four pairs using strings as keys. # data = {'FirstName':'Graham', 'LastName':'Chapman', 'FavoriteColor': 'blue', 'Character':'King Arthur'} # # Getting things out of a dictionary. # val = data['FirstName'] val = data['LastName'] # # Using the get method in case the key is not in the dictionary. # If the key is not there then it returns null which tests as False. # val = data.get('BirthDate') if not val: print("BirthDate is not one of the keys.") # # Changing things in the dictionary # data['FavoriteColor'] = 'green' data['Character'] = 'The French guy' # # Dictionary with lists # data = {'colors': ['yellow', 'blue', 'red'], 'cars':['Ford', 'Fiat', 'VW'], 'planes':['747', 'A320', 'DC9'] } # # Getting the key or getting the values # keys = data.keys() values = data.values() # # Looping through a dictionary # for key in data: print(data[key]) # # Looping through a dictionary by items. # Items returns two values each time. # The first one is the key, the second is the value. # for k, v in data.items(): print(k, v) ############################# print("Done")
#Day 1:
#Python program to check if a number is positive, negative, or zero.

#Step 1. Start
#Step 2. Read the number.
#Step 3. If the number is greater than zero, print "Positive Number".
#Step 4. Else if the number equals zero, print "Zero".
#Step 5. Else print "Negative Number".
#Step 6. Stop

#Code
num = int(input("Enter a Number: "))
if num > 0:
    print("Positive Number")
elif num == 0:
    # Bug fix: zero is neither positive nor negative; the original
    # printed "Negative Number" for 0.
    print("Zero")
else:
    print("Negative Number")
def sum(arr):
    """Recursively sum a list of numbers.

    Bug fix: the original base case was len(arr) == 1, so sum([]) raised
    IndexError; an empty list now returns 0 (the additive identity), which
    also makes the single-element case fall out naturally.

    NOTE: this shadows the builtin `sum`; the name is kept so existing
    callers keep working.
    """
    if not arr:
        return 0
    return arr[0] + sum(arr[1:])


print(sum([2, 2, 4, 6]))  # 14
# Date-range slugs for reporting windows.
WEEK0 = 'week0'
WEEK1 = 'week1'
MONTH0 = 'month0'
MONTH1 = 'month1'
DATE_RANGES = [WEEK0, WEEK1, MONTH0, MONTH1]

# Standard indicator slugs.
FORMS_SUBMITTED = 'forms_submitted'
CASES_TOTAL = 'cases_total'
CASES_ACTIVE = 'cases_active'
CASES_OPENED = 'cases_opened'
CASES_CLOSED = 'cases_closed'

# Older camelCase slug spellings, retained for backward compatibility.
LEGACY_TOTAL_CASES = 'totalCases'
LEGACY_CASES_UPDATED = 'casesUpdated'
LEGACY_FORMS_SUBMITTED = 'formsSubmitted'

CASE_SLUGS = [CASES_TOTAL, CASES_ACTIVE, CASES_OPENED, CASES_CLOSED]
STANDARD_SLUGS = [FORMS_SUBMITTED] + CASE_SLUGS
LEGACY_SLUGS = [LEGACY_TOTAL_CASES, LEGACY_FORMS_SUBMITTED, LEGACY_CASES_UPDATED]
# legacy slug -> current slug
LEGACY_SLUG_MAP = {
    LEGACY_TOTAL_CASES: CASES_TOTAL,
    LEGACY_CASES_UPDATED: CASES_ACTIVE,
    LEGACY_FORMS_SUBMITTED: FORMS_SUBMITTED,
}

# XForm namespace (xmlns) identifiers for specific deployed forms, keyed by
# project/app below.
PCI_CHILD_FORM = 'http://openrosa.org/formdesigner/85823851-3622-4E9E-9E86-401500A39354'
PCI_MOTHER_FORM = 'http://openrosa.org/formdesigner/366434ec56aba382966f77639a2414bbc3c56cbc'
AAROHI_CHILD_FORM = 'http://openrosa.org/formdesigner/09486EF6-04C8-480C-BA11-2F8887BBBADD'
AAROHI_MOTHER_FORM = 'http://openrosa.org/formdesigner/6C63E53D-2F6C-4730-AA5E-BAD36B50A170'

# https://www.commcarehq.org/a/cdc-mozambique-test/apps/view/a85dfcb5054dd6349ba696e8f8d5f425
# https://www.commcarehq.org/a/infomovel/apps/view/44fce3b36d9d43f7f30eaaab84df6cda
CDC_FindPatientForms = 'http://openrosa.org/formdesigner/DA10DCC2-8240-4101-B964-6F5424BD2B86'
CDC_RegisterContactForms = 'http://openrosa.org/formdesigner/c0671536f2087bb80e460d57f60c98e5b785b955'
CDC_NormalVisit = 'http://openrosa.org/formdesigner/74BD43B5-5253-4855-B195-F3F049B8F8CC'
CDC_FirstVisit = 'http://openrosa.org/formdesigner/66e768cc5f551f6c42f3034ee67a869b85bac826'
CDC_ContactRegistration = 'http://openrosa.org/formdesigner/e3aa9c0da42a616cbd28c8ce3d74f0d09718fe81'
CDC_PatientEducation = 'http://openrosa.org/formdesigner/58d56d542f35bd8d3dd16fbd31ee4e5a3a7b35d2'
CDC_ActivistaEducation = 'http://openrosa.org/formdesigner/b8532594e7d38cdd6c632a8249814ce5c043c03c'
CDC_BuscaActivaVisit = 'http://openrosa.org/formdesigner/52ca9bc2d99d28a07bc60f3a353a80047a0950a8'
CDC_RegisterPatientForms = "http://openrosa.org/formdesigner/1738a73d0c19b608912255c412674cb9c8b64629"

CUSTOM_FORM = 'custom_form'

# Indicator aggregation types.
TYPE_DURATION = 'duration'
TYPE_SUM = 'sum'

# Per-domain custom form indicators:
# domain -> {indicator name -> {'type': aggregation, 'xmlns': form id}}.
PER_DOMAIN_FORM_INDICATORS = {
    'aarohi': {
        'motherForms': {'type': TYPE_SUM, 'xmlns': AAROHI_MOTHER_FORM},
        'childForms': {'type': TYPE_SUM, 'xmlns': AAROHI_CHILD_FORM},
        'motherDuration': {'type': TYPE_DURATION, 'xmlns': AAROHI_MOTHER_FORM},
    },
    'pci-india': {
        'motherForms': {'type': TYPE_SUM, 'xmlns': PCI_MOTHER_FORM},
        'childForms': {'type': TYPE_SUM, 'xmlns': PCI_CHILD_FORM},
    },
    'infomovel': {
        # Supervisor App V1
        'FindPatientForms': {'type': TYPE_SUM, 'xmlns': CDC_FindPatientForms},
        'RegisterContactForms': {'type': TYPE_SUM, 'xmlns': CDC_RegisterContactForms},
        'NormalVisit': {'type': TYPE_SUM, 'xmlns': CDC_NormalVisit},
        'FirstVisit': {'type': TYPE_SUM, 'xmlns': CDC_FirstVisit},
        'ContactRegistration': {'type': TYPE_SUM, 'xmlns': CDC_ContactRegistration},
        'PatientEducation': {'type': TYPE_SUM, 'xmlns': CDC_PatientEducation},
        'ActivistaEducation': {'type': TYPE_SUM, 'xmlns': CDC_ActivistaEducation},
        'BuscaActivaVisit': {'type': TYPE_SUM, 'xmlns': CDC_BuscaActivaVisit},
        # Supervisor App V2
        # 'FindPatientForms': {'type': TYPE_SUM, 'xmlns': CDC_FindPatientForms},
        'FirstVisitForms': {'type': TYPE_SUM, 'xmlns': CDC_FirstVisit},
        'RegisterPatientForms': {'type': TYPE_SUM, 'xmlns': CDC_RegisterPatientForms},
        'BuscaActivaForms': {'type': TYPE_SUM, 'xmlns': CDC_BuscaActivaVisit},
        'HomeVisitForms': {'type': TYPE_SUM, 'xmlns': CDC_NormalVisit},
    },
    'cdc-mozambique-test': {
        'FindPatientForms': {'type': TYPE_SUM, 'xmlns': CDC_FindPatientForms},
        'FirstVisitForms': {'type': TYPE_SUM, 'xmlns': CDC_FirstVisit},
        'RegisterPatientForms': {'type': TYPE_SUM, 'xmlns': CDC_RegisterPatientForms},
        'BuscaActivaForms': {'type': TYPE_SUM, 'xmlns': CDC_BuscaActivaVisit},
        'HomeVisitForms': {'type': TYPE_SUM, 'xmlns': CDC_NormalVisit},
    },
}
# Reserved username for the call-center system account (presumably used to
# separate system-generated activity from real users — confirm against
# callers).
CALLCENTER_USER = 'callcenter-system'
# for no processing
# the generators for non essential parameters need to provide the
# default value when called with None


def nothing(strin):
    """Identity pass-through that substitutes 'default' when given None."""
    return 'default' if strin is None else strin


def prot_core(strin):
    """Split a comma-separated field into its component strings."""
    return strin.split(',')


# {'keyword in input file': (essential?, method to call (processing))}
# method should take exactly one string
# for non-essential parameters: method must return default value
parameters = {'michaels_test_message': (True, nothing)}
# Minimum distance to skip in meters for skeleton creation. skip_rate = 100 # seconds to skip while creating the estimate trail from raw trail jump_seconds = 100 # Distance vehicle can travel away from route, used in case of identifying trails that leave a skeleton and then join again. d1 = 1000 # meters # Distance to measure closeness towards skeleton points d2 = 60 # meters # minimum length of a route min_route_length = 8000 # meters # Define distance and angle that can be interpolared allowed_distance = 100 # meters allowed_time = 10 # seconds allowed_angle = 15 # degrees # Data file location data_location = 'G:/Repos/Trans-Portal' # BusStopFinderCodeLocation bsf_location = '../BusStopage/Busstop/' # Feature Extraction Code Location fa_location = '../RoadNatureDetection/' # archive location archive = './archive/'
# COVID-19 self-assessment questionnaire (Uzbek).
# Bug fix: in question 2 a missing comma caused implicit string
# concatenation, silently merging the "loss of smell/taste/appetite" and
# "sore throat" choices into one entry; the list now has its intended
# 8 choices.
QUESTIONS = [
    {
        "text": "Nechi yoshsiz?",
        "choices": ["18 yoki undan kichik", "19 - 64", "65 yoki undan katta"],
    },
    {
        "text": "Yaqinda siz ushbu alomatlardan birini boshdan kechirganmisiz?",
        "choices": [
            "Isitma yoki titroq",
            "Nafas olishda yengil yoki o'rtacha qiyinchiliklar",
            "Yangi yoki yomonlashib borayotgan yo'tal",
            "Hid, ta'm yoki ishtahani doimiy yo'qotish.",
            "Tomoq og'rigi",
            "Qusish yoki diareya",
            "Butun tanada og'riq",
            "Yuqoridagilardan hech qaysisi"
        ]
    },
    {
        "text": "Bularning qaysi biri sizga tegishli? ",
        "choices": [
            "O'rtacha va og'ir astma yoki o'pkaning surunkali kasalligi",
            "Immunitetni pasaytiradigan saraton kasalligi yoki dorilar",
            "Irsiy immunitet tanqisligi yoki OIV",
            "Yurak etishmovchiligi yoki oldingi yurak xuruji kabi jiddiy yurak kasalliklari",
            "Asoratlari bo'lgan diabet",
            "Dializga muhtoj bo'lgan buyrak yetishmovchiligi",
            "Jigar sirozi",
            "Haddan tashqari semirish",
            "Homiladorlik",
            "Yuqoridagilardan hech qaysisi",
        ],
    },
    {
        "text": "So'nggi 14 kun ichida xalqaro miqyosda sayohat qildingizmi?",
        "choices": [
            "Men xalqaro miqyosda sayohat qildim",
            "Men xalqaro miqyosda sayohat qilmaganman",
        ]
    },
    {
        "text": "So'nggi 14 kun ichida sizning COVID ‑ 19 ga ega bo'lgan boshqalarga nisbatan ta'siringiz qanday?",
        "choices": [
            "Men COVID‑19 bo'lgan odam bilan yashayman",
            "Men COVID ‑ 19 bo'lgan kishi bilan yaqin aloqada bo'ldim",
            "Men COVID ‑ 19 bo'lgan odamning yonida edim",
            "Men kamida 6 fut masofada edim va aksirish yoki yo'talga duch kelmadim.",
            "Men hech qanday ta'sirda bo'lmadim",
            "Men bilmayman.",
        ]
    },
    {
        "text": "Qaysi viloyatdasiz?",
        "choices": [
            "Andijon",
            "Buxoro",
            "Fargʻona",
            "Jizzax",
            "Xorazm",
            "Namangan",
            "Navoiy",
            "Qashqadaryo",
            "Qoraqalpogʻiston",
            "Samarqand",
            "Sirdaryo",
            "Surxondaryo",
            "Toshkent",
        ]
    }
]
'''
Given a binary tree, determine if it is height-balanced.

For this problem, a height-balanced binary tree is defined as:
a binary tree in which the left and right subtrees of every node
differ in height by no more than 1.
'''
class CinemaDatabaseMixin:
    """Marker mixin: spiders carrying this use the database's cinema table."""

    use_cinema_database = True


class ShowingDatabaseMixin:
    """Marker mixin: spiders carrying this use the database's showing table."""

    use_showing_database = True


class MovieDatabaseMixin:
    """Marker mixin: spiders carrying this use the database's movie table."""

    use_movie_database = True


def use_cinema_database(spider):
    """True when the spider opted into the cinema table via its marker attribute."""
    return hasattr(spider, "use_cinema_database")


def use_showing_database(spider):
    """True when the spider opted into the showing table via its marker attribute."""
    return hasattr(spider, "use_showing_database")


def use_movie_database(spider):
    """True when the spider opted into the movie table via its marker attribute."""
    return hasattr(spider, "use_movie_database")
def f1():
    """Print a marker showing that f1 was invoked; returns None."""
    print('call f1')


def f2():
    """Return a fixed placeholder string."""
    return 'some value'
"""API key storage for direct order retrieval.""" # Key example: # # KEYS = { # 'bittrex': { # 'key': 'mykey', # 'secret': 'mysecret' # } # } KEYS = { }
"""Rules for importing Nixpkgs packages.""" load("@bazel_tools//tools/build_defs/repo:utils.bzl", "workspace_and_buildfile") load("@bazel_skylib//lib:sets.bzl", "sets") load("@bazel_skylib//lib:versions.bzl", "versions") load("@bazel_tools//tools/cpp:cc_configure.bzl", "cc_autoconf_impl") load( "@bazel_tools//tools/cpp:lib_cc_configure.bzl", "get_cpu_value", "get_starlark_list", "write_builtin_include_directory_paths", ) load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") load(":private/location_expansion.bzl", "expand_location") load("//nixpkgs:git.bzl", "git_repo") def _get_include_dirs(repository_ctx, compiler): result = _execute_or_fail( repository_ctx, [ compiler, "-E", "-x", "c++", "-", "-v", ], ) gatheringInc = False gatheringSys = False out = [] for line in result.stderr.splitlines(): if line == '#include "..." search starts here:': gatheringInc = True gatheringSys = False elif line == "#include <...> search starts here:": gatheringInc = False gatheringSys = True elif line == "End of search list.": gatheringSys = False gatheringInc = False elif gatheringInc: out.append(line.strip()) elif gatheringSys: out.append(line.strip()) return out def _is_linker_option_supported(repository_ctx, compiler, option): result = repository_ctx.execute([ compiler, option, "-x", "c++", "-Werror", "-o", "/dev/null", Label("@io_tweag_rules_nixpkgs//nixpkgs/toolchains:test.cc"), ]) return result.return_code == 0 def _is_compiler_option_supported(repository_ctx, compiler, option): result = repository_ctx.execute([ compiler, option, "-c", "-x", "c++", "-Werror", "-o", "/dev/null", Label("@io_tweag_rules_nixpkgs//nixpkgs/toolchains:test.cc"), ]) return result.return_code == 0 def filter_empty(lst): out = [] for f in lst: f = f.strip() if f != "": out.append(f) return out def _read_nix_package_root(repository_ctx): nix_package_root = repository_ctx.os.environ.get("NIX_PACKAGE_ROOT") if nix_package_root == None: fail("If you are building inside nixpkgs, you must have a 
package that contains symlinks to all the dependencies in this build") return [ repository_ctx.path(nix_package_root + "/" + attribute_path).realpath for attribute_path in repository_ctx.attr.attribute_paths ] def _is_supported_platform(repository_ctx): return repository_ctx.which("nix-build") != None def _build_nixpkg(repository_ctx): repository = repository_ctx.attr.repository repositories = repository_ctx.attr.repositories if repository and repositories or not repository and not repositories: fail("Specify one of 'repository' or 'repositories' (but not both).") elif repository: repositories = {repository_ctx.attr.repository: "nixpkgs"} # Is nix supported on this platform? not_supported = not _is_supported_platform(repository_ctx) # Should we fail if Nix is not supported? if not_supported and repository_ctx.attr.fail_not_supported: fail("Platform is not supported: nix-build not found in PATH. See attribute fail_not_supported if you don't want to use Nix.") elif not_supported: return [] strFailureImplicitNixpkgs = ( "One of 'repositories', 'nix_file' or 'nix_file_content' must be provided. " + "The NIX_PATH environment variable is not inherited." ) expr_args = [] if not repositories: fail(strFailureImplicitNixpkgs) else: expr_args = ["-E", "import <nixpkgs>"] for attribute_path in repository_ctx.attr.attribute_paths: expr_args.extend([ "-A", attribute_path, ]) expr_args.extend([ # Creating an out link prevents nix from garbage collecting the store path. # nixpkgs uses `nix-support/` for such house-keeping files, so we mirror them # and use `bazel-support/`, under the assumption that no nix package has # a file named `bazel-support` in its root. # A `bazel clean` deletes the symlink and thus nix is free to garbage collect # the store path. 
"--out-link", "bazel-support/{}".format(repository_ctx.name), ]) expr_args.extend([ expand_location( repository_ctx = repository_ctx, string = opt, labels = None, attr = "nixopts", ) for opt in repository_ctx.attr.nixopts ]) for repo in repositories.keys(): path = str(repository_ctx.path(repo).dirname) + "/nix-file-deps" if repository_ctx.path(path).exists: content = repository_ctx.read(path) for f in content.splitlines(): # Hack: this is to register all Nix files as dependencies # of this rule (see issue #113) repository_ctx.path(repo.relative(":{}".format(f))) # If repositories is not set, leave empty so nix will fail # unless a pinned nixpkgs is set in the `nix_file` attribute. nix_path = [ "{}={}".format(prefix, repository_ctx.path(repo)) for (repo, prefix) in repositories.items() ] if not repositories: fail(strFailureImplicitNixpkgs) for dir in nix_path: expr_args.extend(["-I", dir]) nix_build_path = _executable_path( repository_ctx, "nix-build", extra_msg = "See: https://nixos.org/nix/", ) nix_build = [nix_build_path] + expr_args # Large enough integer that Bazel can still parse. We don't have # access to MAX_INT and 0 is not a valid timeout so this is as good # as we can do. The value shouldn't be too large to avoid errors on # macOS, see https://github.com/tweag/rules_nixpkgs/issues/92. 
timeout = 8640000 repository_ctx.report_progress("Building Nix derivation") exec_result = _execute_or_fail( repository_ctx, nix_build, failure_message = "Cannot build Nix attributes {}.".format( repository_ctx.attr.attribute_paths, ), quiet = repository_ctx.attr.quiet, timeout = timeout, environment = {"NIXPKGS_ALLOW_UNFREE": "1", "NIX_PROFILES": "/nix/var/nix/profiles/default"}, ) return exec_result.stdout.splitlines() def match_count(leftList, rightList): # match in order if len(leftList) == 0 or len(rightList) == 0: return 0 a = int(leftList[0] == rightList[0]) + match_count(leftList[1:], rightList[1:]) b = match_count(leftList[1:], rightList) c = match_count(leftList, rightList[1:]) return max(max(a, b), c) def _match_inputs_to_attributes(buildInputs, attr_map): name_with_paths = [] for path in buildInputs: # should be one of: # /nix/store/z4zqsz220zsrfdrllmxs2zapnmzp12g6-<name>-<version>-<suffix> # /nix/store/z4zqsz220zsrfdrllmxs2zapnmzp12g6-<name>-<version> if not path.startswith("/nix/store/") or len(path) < 44: fail("Unknown path type: %s" % path) name_with_paths.append((path[44:], path)) outputs = [] for attr_name, attr_path in attr_map.items(): found = False for name, path in name_with_paths: if attr_name == name: found = True outputs.append(path) break if not found: fail("No build input found to match attribute: {}".format(attr_name)) if len(outputs) != len(attr_map): fail("Unknown failure while matching attributes") return outputs def _nixpkgs_package_impl(repository_ctx): # If true, a BUILD file will be created from a template if it does not # exits. # However this will happen AFTER the nix-build command. 
    create_build_file_if_needed = False
    if repository_ctx.attr.build_file and repository_ctx.attr.build_file_content:
        fail("Specify one of 'build_file' or 'build_file_content', but not both.")
    elif repository_ctx.attr.build_file:
        repository_ctx.symlink(repository_ctx.attr.build_file, "BUILD")
    elif repository_ctx.attr.build_file_content:
        repository_ctx.file("BUILD", content = repository_ctx.attr.build_file_content)
    else:
        # No user supplied build file, we may create the default one.
        create_build_file_if_needed = True

    if "NIX_PACKAGE_ROOT" in repository_ctx.os.environ:
        # we're inside nix, use the provided NIX_PACKAGE_ROOT
        output_paths = _read_nix_package_root(repository_ctx)
    else:
        # we're outside nix so we should be able to build the packages directly
        output_paths = _build_nixpkg(repository_ctx)

    # ensure that the output is a directory (uses the POSIX `test -d`
    # binary, since repository_ctx has no direct is-directory check)
    test_path = repository_ctx.which("test")
    for output_path in output_paths:
        _execute_or_fail(
            repository_ctx,
            [test_path, "-d", output_path],
            failure_message = "nixpkgs_package '@{}' outputs a single file which is not supported by rules_nixpkgs. Please only use directories.".format(
                repository_ctx.name,
            ),
        )

    # Build a forest of symlinks (like new_local_package() does) to the
    # Nix store. One symlink per attribute path, named after the attribute.
    for attribute_path, output_path in zip(repository_ctx.attr.attribute_paths, output_paths):
        repository_ctx.symlink(output_path, attribute_path)

    # Create a default BUILD file only if it does not exist and is not
    # provided by `build_file` or `build_file_content`.
    if create_build_file_if_needed:
        p = repository_ctx.path("BUILD")
        if not p.exists:
            repository_ctx.template("BUILD", Label("@io_tweag_rules_nixpkgs//nixpkgs:BUILD.pkg"))

_nixpkgs_package = repository_rule(
    implementation = _nixpkgs_package_impl,
    environ = ["NIX_PACKAGE_ROOT"],
    configure = True,
    attrs = {
        "attribute_paths": attr.string_list(),  # name of packages to build e.g. pkgs.boost
        "repositories": attr.label_keyed_string_dict(),
        "repository": attr.label(),
        "build_file": attr.label(),
        "build_file_content": attr.string(),
        "nixopts": attr.string_list(),
        "quiet": attr.bool(),
        "fail_not_supported": attr.bool(default = True, doc = """
            If set to True (default) this rule will fail on platforms which do not support Nix (e.g. Windows). If set to False calling this rule will succeed but no output will be generated.
                                        """),
    },
)

def nixpkgs_package(
        name,
        attribute_paths,
        repository = None,
        repositories = {},
        build_file = None,
        build_file_content = "",
        nixopts = [],
        quiet = False,
        fail_not_supported = True,
        **kwargs):
    """Make the content of a Nixpkgs package available in the Bazel workspace.

    Exactly one of `repository` or `repositories` must be specified to
    provide the Nixpkgs checkout to build from.

    Args:
      name: A unique name for this repository.
      attribute_paths: List of attributes to build and symlink in the build directory
      repository: A repository label identifying which Nixpkgs to use.
        Equivalent to `repositories = { "nixpkgs": ...}`
      repositories: A dictionary mapping `NIX_PATH` entries to repository labels.

        Setting it to
        ```
        repositories = { "myrepo" : "//:myrepo" }
        ```
        for example would replace all instances of `<myrepo>` in the
        called nix code by the path to the target `"//:myrepo"`. See the
        [relevant section in the nix manual](https://nixos.org/nix/manual/#env-NIX_PATH) for more information.

        Specify one of `repository` or `repositories`.
      build_file: The file to use as the BUILD file for this repository.

        Its contents are copied into the file `BUILD` in root of the nix output folder. The Label does not need to be named `BUILD`, but can be.

        For common use cases we provide filegroups that expose certain files as targets:

        <dl>
          <dt><code>:bin</code></dt>
          <dd>Everything in the <code>bin/</code> directory.</dd>
          <dt><code>:lib</code></dt>
          <dd>All <code>.so</code> and <code>.a</code> files that can be found in subdirectories of <code>lib/</code>.</dd>
          <dt><code>:include</code></dt>
          <dd>All <code>.h</code> files that can be found in subdirectories of <code>bin/</code>.</dd>
        </dl>

        If you need different files from the nix package, you can reference them like this:
        ```
        package(default_visibility = [ "//visibility:public" ])
        filegroup(
            name = "our-docs",
            srcs = glob(["share/doc/ourpackage/**/*"]),
        )
        ```
        See the bazel documentation of
        [`filegroup`](https://docs.bazel.build/versions/master/be/general.html#filegroup)
        and
        [`glob`](https://docs.bazel.build/versions/master/be/functions.html#glob).
      build_file_content: Like `build_file`, but a string of the contents instead of a file name.
      nixopts: Extra flags to pass when calling Nix.
      quiet: Whether to hide the output of the Nix command.
      fail_not_supported: If set to `True` (default) this rule will fail on platforms which do not support Nix (e.g. Windows). If set to `False` calling this rule will succeed but no output will be generated.
    """
    kwargs.update(
        name = name,
        attribute_paths = attribute_paths,
        repository = repository,
        repositories = repositories,
        build_file = build_file,
        build_file_content = build_file_content,
        nixopts = nixopts,
        quiet = quiet,
        fail_not_supported = fail_not_supported,
    )

    # Because of https://github.com/bazelbuild/bazel/issues/7989 we can't
    # directly pass a dict from strings to labels to the rule (which we'd like
    # for the `repositories` arguments), but we can pass a dict from labels to
    # strings. So we swap the keys and the values (assuming they all are
    # distinct).
if "repositories" in kwargs: inversed_repositories = {value: key for (key, value) in kwargs["repositories"].items()} kwargs["repositories"] = inversed_repositories _nixpkgs_package(**kwargs) def _nixpkgs_cc_toolchain_config_impl(repository_ctx): cpu_value = get_cpu_value(repository_ctx) darwin = cpu_value == "darwin" # Generate the cc_toolchain workspace following the example from # `@bazel_tools//tools/cpp:unix_cc_configure.bzl`. # Uses the corresponding templates from `@bazel_tools` as well, see the # private attributes of the `_nixpkgs_cc_toolchain_config` rule. repository_ctx.symlink( repository_ctx.path(repository_ctx.attr._unix_cc_toolchain_config), "cc_toolchain_config.bzl", ) repository_ctx.symlink( repository_ctx.path(repository_ctx.attr._armeabi_cc_toolchain_config), "armeabi_cc_toolchain_config.bzl", ) cxx_builtin_include_directories = _get_include_dirs(repository_ctx, repository_ctx.attr.gcc) compile_flags = [] for flag in [ # Security hardening requires optimization. # We need to undef it as some distributions now have it enabled by default. "-U_FORTIFY_SOURCE", "-fstack-protector", # All warnings are enabled. Maybe enable -Werror as well? "-Wall", # Enable a few more warnings that aren't part of -Wall. "-Wthread-safety", "-Wself-assign", # Disable problematic warnings. "-Wunused-but-set-parameter", # has false positives "-Wno-free-nonheap-object", # Enable coloring even if there's no attached terminal. Bazel removes the # escape sequences if --nocolor is specified. "-fcolor-diagnostics", # Keep stack frames for debugging, even in opt mode. "-fno-omit-frame-pointer", ]: if _is_compiler_option_supported(repository_ctx, repository_ctx.attr.gcc, flag): compile_flags.append(flag) cxx_flags = [ "-x c++", "-std=c++0x", ] link_flags = [] for flag in [ "-Wl,-no-as-needed", "-no-as-needed", "-Wl,-z,relro,-z,now", "-z", # Have gcc return the exit code from ld. 
"-pass-exit-codes", ]: if _is_linker_option_supported(repository_ctx, repository_ctx.attr.gcc, flag): link_flags.append(flag) if darwin: link_flags.extend([ "-undefined dynamic_lookup", "-headerpad_max_install_names", ]) else: link_flags.extend(["-B${cc}/bin", "-L${cc}/lib"]) link_libs = [ "-lstdc++", "-lm", ] opt_compile_flags = [ # No debug symbols. # Maybe we should enable https://gcc.gnu.org/wiki/DebugFission for opt or # even generally? However, that can't happen here, as it requires special # handling in Bazel. "-g0", # Conservative choice for -O # -O3 can increase binary size and even slow down the resulting binaries. # Profile first and / or use FDO if you need better performance than this. "-O2", # Security hardening on by default. # Conservative choice; -D_FORTIFY_SOURCE=2 may be unsafe in some cases. "-D_FORTIFY_SOURCE=1", # Disable assertions "-DNDEBUG", # Removal of unused code and data at link time (can this increase binary # size in some cases?). "-ffunction-sections", "-fdata-sections", ] opt_link_flags = [] if not darwin: opt_link_flags.extend(["-Wl,--gc-sections", "-gc-sections"]) unfiltered_compile_flags = [ "-fno-canonical-system-headers", "-no-canonical-prefixes", "-Wno-builtin-macro-redefined", '-D__DATE__=\\\"redacted\\\"', '-D__TIMESTAMP__=\\\"redacted\\\"', '-D__TIME__=\\\"redacted\\\"', ] # Make C++ compilation deterministic. Use linkstamping instead of these # compiler symbols. 
dbg_compile_flags = ["-g"] if darwin: coverage_compile_flags = ["-fprofile-instr-generate", "-fcoverage-mapping"] else: coverage_compile_flags = ["--coverage"] if darwin: coverage_link_flags = ["-fprofile-instr-generate"] else: coverage_link_flags = ["--coverage"] supports_start_end_lib = False if repository_ctx.attr.ld.name.endswith("ld.gold"): link_flags.append("-fuse-ld=gold") supports_start_end_lib = True # TODO(micah) support ld.gold tool_paths = { "ar": repository_ctx.attr.ar, "cpp": repository_ctx.attr.cpp, "dwp": repository_ctx.attr.dwp, "gcc": repository_ctx.attr.gcc, "gcov": repository_ctx.attr.gcov, "ld": repository_ctx.attr.ld, "nm": repository_ctx.attr.nm, "objcopy": repository_ctx.attr.objcopy, "objdump": repository_ctx.attr.objdump, "strip": repository_ctx.attr.strip, } compile_flags = compile_flags cxx_flags = [] link_libs = [] opt_compile_flags = [] opt_link_flags = [] unfiltered_compile_flags = [] dbg_compile_flags = [] dbg_compile_flags = [] coverage_compile_flags = ["--coverage"] coverage_link_flags = ["--coverage"] supports_start_end_lib = supports_start_end_lib is_clang = repository_ctx.attr.is_clang # A module map is required for clang starting from Bazel version 3.3.0. 
    # https://github.com/bazelbuild/bazel/commit/8b9f74649512ee17ac52815468bf3d7e5e71c9fa
    needs_module_map = repository_ctx.attr.is_clang and versions.is_at_least("3.3.0", versions.get())
    if needs_module_map:
        # Generate module.modulemap covering the builtin include dirs by
        # invoking the helper script shipped with @bazel_tools.
        generate_system_module_map = [
            repository_ctx.path(repository_ctx.attr._generate_system_module_map),
        ]
        repository_ctx.file(
            "module.modulemap",
            _execute_or_fail(
                repository_ctx,
                generate_system_module_map + cxx_builtin_include_directories,
                "Failed to generate system module map.",
            ).stdout.strip(),
            executable = False,
        )
    cc_wrapper_src = (
        repository_ctx.attr._osx_cc_wrapper if darwin else repository_ctx.attr._linux_cc_wrapper
    )
    repository_ctx.template(
        "cc_wrapper.sh",
        repository_ctx.path(cc_wrapper_src),
        {
            "%{cc}": tool_paths["gcc"].name,
            "%{env}": "",
        },
    )
    if darwin:
        # On darwin, route the compiler through the wrapper script and use
        # the system libtool as `ar`.
        tool_paths["gcc"] = "cc_wrapper.sh"
        tool_paths["ar"] = "/usr/bin/libtool"
    write_builtin_include_directory_paths(
        repository_ctx,
        tool_paths["gcc"],
        cxx_builtin_include_directories,
    )

    # Instantiate the BUILD file for the toolchain repository from the
    # @bazel_tools template, substituting all collected flags and paths.
    repository_ctx.template(
        "BUILD.bazel",
        repository_ctx.path(repository_ctx.attr._build),
        {
            "%{cc_toolchain_identifier}": "local",
            "%{name}": cpu_value,
            "%{modulemap}": ("\":module.modulemap\"" if needs_module_map else "None"),
            "%{supports_param_files}": "0" if darwin else "1",
            "%{cc_compiler_deps}": get_starlark_list(
                [":builtin_include_directory_paths"] + (
                    [":cc_wrapper"] if darwin else []
                ),
            ),
            "%{compiler}": "compiler",
            "%{abi_version}": "local",
            "%{abi_libc_version}": "local",
            "%{host_system_name}": "local",
            "%{target_libc}": "macosx" if darwin else "local",
            "%{target_cpu}": cpu_value,
            "%{target_system_name}": "local",
            "%{tool_paths}": ",\n ".join(
                ['"%s": "%s"' % (k, repository_ctx.path(v)) for (k, v) in tool_paths.items()],
            ),
            "%{cxx_builtin_include_directories}": get_starlark_list(cxx_builtin_include_directories),
            "%{compile_flags}": get_starlark_list(compile_flags),
            "%{cxx_flags}": get_starlark_list(cxx_flags),
            "%{link_flags}": get_starlark_list(link_flags),
            "%{link_libs}": get_starlark_list(link_libs),
            "%{opt_compile_flags}": get_starlark_list(opt_compile_flags),
            "%{opt_link_flags}": get_starlark_list(opt_link_flags),
            "%{unfiltered_compile_flags}": get_starlark_list(unfiltered_compile_flags),
            "%{dbg_compile_flags}": get_starlark_list(dbg_compile_flags),
            "%{coverage_compile_flags}": get_starlark_list(coverage_compile_flags),
            "%{coverage_link_flags}": get_starlark_list(coverage_link_flags),
            "%{supports_start_end_lib}": repr(supports_start_end_lib),
        },
    )

_nixpkgs_cc_toolchain_config = repository_rule(
    _nixpkgs_cc_toolchain_config_impl,
    environ = ["NIX_PACKAGE_ROOT"],
    configure = True,
    attrs = {
        "fail_not_supported": attr.bool(),
        "ar": attr.label(mandatory = True),
        "cpp": attr.label(mandatory = True),
        "dwp": attr.label(),
        "gcc": attr.label(mandatory = True),
        "gcov": attr.label(mandatory = True),
        "ld": attr.label(mandatory = True),
        "nm": attr.label(mandatory = True),
        "objcopy": attr.label(mandatory = True),
        "objdump": attr.label(mandatory = True),
        "strip": attr.label(),
        "is_clang": attr.bool(default = False),
        "_unix_cc_toolchain_config": attr.label(
            default = Label("@bazel_tools//tools/cpp:unix_cc_toolchain_config.bzl"),
        ),
        "_armeabi_cc_toolchain_config": attr.label(
            default = Label("@bazel_tools//tools/cpp:armeabi_cc_toolchain_config.bzl"),
        ),
        "_generate_system_module_map": attr.label(
            default = Label("@bazel_tools//tools/cpp:generate_system_module_map.sh"),
        ),
        "_osx_cc_wrapper": attr.label(
            default = Label("@bazel_tools//tools/cpp:osx_cc_wrapper.sh.tpl"),
        ),
        "_linux_cc_wrapper": attr.label(
            default = Label("@bazel_tools//tools/cpp:linux_cc_wrapper.sh.tpl"),
        ),
        "_build": attr.label(
            default = Label("@bazel_tools//tools/cpp:BUILD.tpl"),
        ),
    },
)

def _loadNixStructFile(ctx, fname):
    # Parse a file containing a flat Nix attribute set of quoted strings,
    # e.g. `{ url = "..."; rev = "..."; }`, into a Starlark dict.
    # TODO(micah) make this actually robust, either find a json parser or just
    # make this more robust
    data = ctx.read(fname)
    data = data.replace("\n", "")
    data = data.replace("\r", "")
    # NOTE(review): duplicate of the first replace("\n", "") above — a
    # harmless no-op, likely unintended.
    data = data.replace("\n", "")
    data = data.replace(" ", "")
    if data[0] != "{" or data[-1] != "}":
fail("Malformed nix struct: {}".format(data)) remapped = {} data = data[1:-1] for line in data.split(";"): if line == "": continue if "=" not in line: fail('nix struct should be of the form a = "b";') key, value = line.split("=") if value[0] != '"' or value[-1] != '"': fail("Expected value to be quoted string, error parsing value:\n{}", value) value = value[1:-1] remapped[key] = value return remapped def _nixpkgs_git_repository_impl(ctx): if "NIX_PACKAGE_ROOT" in ctx.os.environ: ctx.file("BUILD", executable = False) ctx.file("WORKSPACE", executable = False) else: if ctx.attr.fetchGitFile != None: data = _loadNixStructFile(ctx, ctx.attr.fetchGitFile) commit = data.get("rev") remote = data.get("url") branch = data.get("ref") else: commit = ctx.attr.commit remote = ctx.attr.remote branch = ctx.attr.branch root = ctx.path(".") directory = str(root) git_repo( ctx, directory, branch = branch, commit = commit, remote = remote, shallow_since = ctx.attr.shallow_since, tag = ctx.attr.tag, init_submodules = ctx.attr.init_submodules, verbose = ctx.attr.verbose, strip_prefix = ctx.attr.strip_prefix, recursive_init_submodules = ctx.attr.recursive_init_submodules, ) workspace_and_buildfile(ctx) ctx.delete(ctx.path(".git")) nixpkgs_git_repository = repository_rule( _nixpkgs_git_repository_impl, environ = ["NIX_PACKAGE_ROOT"], attrs = { "commit": attr.string(), "remote": attr.string(), "fetchGitFile": attr.label( allow_single_file = True, doc = "file contraining a nix struct of the form " + "{url = \"...\"; rev = \"...\"; ref = \"...\"}", ), "shallow_since": attr.string( default = "", doc = "an optional date, not after the specified commit; the " + "argument is not allowed if a tag is specified (which allows " + "cloning with depth 1). 
Setting such a date close to the " + "specified commit allows for a more shallow clone of the " + "repository, saving bandwidth " + "and wall-clock time.", ), "verbose": attr.bool(default = False), "init_submodules": attr.bool( default = False, doc = "Whether to clone submodules in the repository.", ), "recursive_init_submodules": attr.bool( default = False, doc = "Whether to clone submodules recursively in the repository.", ), "build_file": attr.label( allow_single_file = True, doc = "The file to use as the BUILD file for this repository." + "This attribute is an absolute label (use '@//' for the main " + "repo). The file does not need to be named BUILD, but can " + "be (something like BUILD.new-repo-name may work well for " + "distinguishing it from the repository's actual BUILD files. " + "Either build_file or build_file_content must be specified.", ), "build_file_content": attr.string( default = 'filegroup(name = "srcs", srcs = glob(["**"]), visibility = ["//visibility:public"])', doc = "The content for the BUILD file for this repository. " + "Either build_file or build_file_content must be specified.", ), "workspace_file": attr.label( doc = "The file to use as the `WORKSPACE` file for this repository. " + "Either `workspace_file` or `workspace_file_content` can be " + "specified, or neither, but not both.", ), "workspace_file_content": attr.string( doc = "The content for the WORKSPACE file for this repository. " + "Either `workspace_file` or `workspace_file_content` can be " + "specified, or neither, but not both.", ), "tag": attr.string( default = "", doc = "tag in the remote repository to checked out." 
+ " Precisely one of branch, tag, or commit must be specified.", ), "strip_prefix": attr.string( default = "", doc = "A directory prefix to strip from the extracted files.", ), }, ) def _nixpkgs_cc_toolchain_impl(repository_ctx): cpu = get_cpu_value(repository_ctx) repository_ctx.file( "BUILD.bazel", executable = False, content = """\ package(default_visibility = ["//visibility:public"]) toolchain( name = "cc-toolchain-{cpu}", toolchain = "@{cc_toolchain_config}//:cc-compiler-{cpu}", toolchain_type = "@rules_cc//cc:toolchain_type", exec_compatible_with = [ "@platforms//cpu:x86_64", "@platforms//os:{os}", "@io_tweag_rules_nixpkgs//nixpkgs/constraints:support_nix", ], target_compatible_with = [ "@platforms//cpu:x86_64", "@platforms//os:{os}", ], ) toolchain( name = "cc-toolchain-armeabi-v7a", toolchain = "@{cc_toolchain_config}//:cc-compiler-armeabi-v7a", toolchain_type = "@rules_cc//cc:toolchain_type", exec_compatible_with = [ "@platforms//cpu:x86_64", "@platforms//os:{os}", "@io_tweag_rules_nixpkgs//nixpkgs/constraints:support_nix", ], target_compatible_with = [ "@platforms//cpu:arm", "@platforms//os:android", ], ) """.format( cc_toolchain_config = repository_ctx.attr.cc_toolchain_config, cpu = cpu, os = "osx" if cpu == "darwin" else "linux", ), ) _nixpkgs_cc_toolchain = repository_rule( _nixpkgs_cc_toolchain_impl, attrs = { "cc_toolchain_config": attr.string(), }, ) def nixpkgs_cc_configure( name, repositories = {}, repository = None, nixopts = [], quiet = False, fail_not_supported = True): """Use a CC toolchain from Nixpkgs. No-op if not a nix-based platform. By default, Bazel auto-configures a CC toolchain from commands (e.g. `gcc`) available in the environment. To make builds more hermetic, use this rule to specify explicitly which commands the toolchain should use. Specifically, it builds a Nix derivation that provides the CC toolchain tools in the `bin/` path and constructs a CC toolchain that uses those tools. 
Tools that aren't found are replaced by `${coreutils}/bin/false`. You can inspect the resulting `@<name>_info//:CC_TOOLCHAIN_INFO` to see which tools were discovered. This rule depends on [`rules_cc`](https://github.com/bazelbuild/rules_cc). **Note:** You need to configure `--crosstool_top=@<name>//:toolchain` to activate this toolchain. Args: repositories: dict of Label to string, Provides `<nixpkgs>` and other repositories. Specify one of `repositories` or `repository`. repository: Label, Provides `<nixpkgs>`. Specify one of `repositories` or `repository`. nixopts: optional, list of string, Extra flags to pass when calling Nix. Subject to location expansion, any instance of `$(location LABEL)` will be replaced by the path to the file ferenced by `LABEL` relative to the workspace root. quiet: bool, Whether to hide `nix-build` output. fail_not_supported: bool, Whether to fail if `nix-build` is not available. """ nixopts = list(nixopts) attribute_paths = [ "pkgs.clang", "pkgs.gcc.cc", "pkgs.binutils.bintools", ] # Invoke `toolchains/cc.nix` which generates `CC_TOOLCHAIN_INFO`. _nixpkgs_package( name = "{}_pkg".format(name), build_file = "@io_tweag_rules_nixpkgs//nixpkgs/toolchains:cc.BUILD", repositories = repositories, repository = repository, attribute_paths = attribute_paths, nixopts = nixopts, quiet = quiet, fail_not_supported = fail_not_supported, ) # Generate the `cc_toolchain_config` workspace. 
    _nixpkgs_cc_toolchain_config(
        name = "{}".format(name),
        ar = "@{}_pkg//:pkgs.binutils.bintools/bin/ar".format(name),
        cpp = "@{}_pkg//:pkgs.clang/bin/cpp".format(name),
        dwp = "@{}_pkg//:pkgs.binutils.bintools/bin/dwp".format(name),
        gcc = "@{}_pkg//:pkgs.clang/bin/c++".format(name),
        gcov = "@{}_pkg//:pkgs.gcc.cc/bin/gcov".format(name),
        ld = "@{}_pkg//:pkgs.binutils.bintools/bin/ld".format(name),
        nm = "@{}_pkg//:pkgs.binutils.bintools/bin/nm".format(name),
        objdump = "@{}_pkg//:pkgs.binutils.bintools/bin/objdump".format(name),
        objcopy = "@{}_pkg//:pkgs.binutils.bintools/bin/objcopy".format(name),
        strip = "@{}_pkg//:pkgs.binutils.bintools/bin/strip".format(name),
        is_clang = True,
        fail_not_supported = fail_not_supported,
    )

    # Generate the `cc_toolchain` workspace.
    _nixpkgs_cc_toolchain(
        name = "{}_toolchains".format(name),
        cc_toolchain_config = name,
    )

    # Legacy --crosstool_top support via //external:cc_toolchain.
    maybe(
        native.bind,
        name = "cc_toolchain",
        actual = "@{}//:toolchain".format(name),
    )
    native.register_toolchains("@{}_toolchains//:all".format(name))

def _readlink(repository_ctx, path):
    # Resolve `path` to its real (symlink-free) location.
    return repository_ctx.path(path).realpath

def nixpkgs_cc_autoconf_impl(repository_ctx):
    # Variant of Bazel's cc_autoconf that prefers the tools found in the
    # @nixpkgs_cc_toolchain workspace; falls back to the stock
    # auto-configuration when nix-build is unavailable.
    cpu_value = get_cpu_value(repository_ctx)
    if not _is_supported_platform(repository_ctx):
        cc_autoconf_impl(repository_ctx)
        return

    # Calling repository_ctx.path() on anything but a regular file
    # fails. So the roundabout way to do the same thing is to find
    # a regular file we know is in the workspace (i.e. the WORKSPACE
    # file itself) and then use dirname to get the path of the workspace
    # root.
    workspace_file_path = repository_ctx.path(
        Label("@nixpkgs_cc_toolchain//:WORKSPACE"),
    )
    workspace_root = _execute_or_fail(
        repository_ctx,
        ["dirname", workspace_file_path],
    ).stdout.rstrip()

    # Make a list of all available tools in the Nix derivation. Override
    # the Bazel autoconfiguration with the tools we found.
    bin_contents = _find_children(repository_ctx, workspace_root + "/bin")
    overriden_tools = {
        tool: _readlink(repository_ctx, entry)
        for entry in bin_contents
        for tool in [entry.rpartition("/")[-1]]  # Compute basename
    }
    cc_autoconf_impl(repository_ctx, overriden_tools = overriden_tools)

def _nixpkgs_python_toolchain_impl(repository_ctx):
    # Emit a py_runtime_pair + toolchain() wiring up the python runtimes
    # registered via the (string) runtime attributes below.
    cpu = get_cpu_value(repository_ctx)
    repository_ctx.file("BUILD.bazel", executable = False, content = """
load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair")
py_runtime_pair(
    name = "py_runtime_pair",
    py2_runtime = {python2_runtime},
    py3_runtime = {python3_runtime},
)
toolchain(
    name = "toolchain",
    toolchain = ":py_runtime_pair",
    toolchain_type = "@bazel_tools//tools/python:toolchain_type",
    exec_compatible_with = [
        "@platforms//cpu:x86_64",
        "@platforms//os:{os}",
        "@io_tweag_rules_nixpkgs//nixpkgs/constraints:support_nix",
    ],
    target_compatible_with = [
        "@platforms//cpu:x86_64",
        "@platforms//os:{os}",
    ],
)
""".format(
        python2_runtime = _label_string(repository_ctx.attr.python2_runtime),
        python3_runtime = _label_string(repository_ctx.attr.python3_runtime),
        os = {"darwin": "osx"}.get(cpu, "linux"),
    ))

_nixpkgs_python_toolchain = repository_rule(
    _nixpkgs_python_toolchain_impl,
    attrs = {
        # Using attr.string instead of attr.label, so that the repository rule
        # does not explicitly depend on the nixpkgs_package instances. This is
        # necessary, so that builds don't fail on platforms without nixpkgs.
        "python2_runtime": attr.string(),
        "python3_runtime": attr.string(),
    },
)

# Nix expression template producing a BUILD file with a py_runtime for a
# given nixpkgs python attribute path / interpreter path / version.
_python_nix_file_content = """
with import <nixpkgs>;

runCommand "bazel-nixpkgs-python-toolchain"
  {{ executable = false;
    # Pointless to do this on a remote machine.
    preferLocalBuild = true;
    allowSubstitutes = false;
  }}
  ''
    n=$out/BUILD.bazel
    mkdir -p "$(dirname "$n")"

    cat >>$n <<EOF
py_runtime(
    name = "runtime",
    interpreter_path = "${{{attribute_path}}}/{bin_path}",
    python_version = "{version}",
    visibility = ["//visibility:public"],
)
EOF
  ''
"""

def nixpkgs_python_configure(
        name = "nixpkgs_python_toolchain",
        python2_attribute_path = None,
        python2_bin_path = "bin/python",
        python3_attribute_path = "python3",
        python3_bin_path = "bin/python",
        repository = None,
        repositories = {},
        nix_file_deps = None,
        nixopts = [],
        fail_not_supported = True,
        quiet = False):
    """Define and register a Python toolchain provided by nixpkgs.

    Creates `nixpkgs_package`s for Python 2 or 3 `py_runtime` instances and a
    corresponding `py_runtime_pair` and `toolchain`. The toolchain is
    automatically registered and uses the constraint:

    ```
    "@io_tweag_rules_nixpkgs//nixpkgs/constraints:support_nix"
    ```

    Args:
      name: The name-prefix for the created external repositories.
      python2_attribute_path: The nixpkgs attribute path for python2.
      python2_bin_path: The path to the interpreter within the package.
      python3_attribute_path: The nixpkgs attribute path for python3.
      python3_bin_path: The path to the interpreter within the package.
      repository: See [`nixpkgs_package`](#nixpkgs_package-repository).
      repositories: See [`nixpkgs_package`](#nixpkgs_package-repositories).
      nix_file_deps: See [`nixpkgs_package`](#nixpkgs_package-nix_file_deps).
      nixopts: See [`nixpkgs_package`](#nixpkgs_package-nixopts).
      fail_not_supported: See [`nixpkgs_package`](#nixpkgs_package-fail_not_supported).
      quiet: See [`nixpkgs_package`](#nixpkgs_package-quiet).
    """
    python2_specified = python2_attribute_path and python2_bin_path
    python3_specified = python3_attribute_path and python3_bin_path
    if not python2_specified and not python3_specified:
        fail("At least one of python2 or python3 has to be specified.")

    # NOTE(review): `nix_file_deps` and the `nix_file_content` argument
    # passed below are not attributes of this file's `nixpkgs_package` /
    # `_nixpkgs_package` rule (it takes `attribute_paths` instead) — this
    # call path looks stale relative to the fork's API; confirm it is
    # exercised and still works.
    kwargs = dict(
        repository = repository,
        repositories = repositories,
        nix_file_deps = nix_file_deps,
        nixopts = nixopts,
        fail_not_supported = fail_not_supported,
        quiet = quiet,
    )
    python2_runtime = None
    if python2_attribute_path:
        python2_runtime = "@%s_python2//:runtime" % name
        nixpkgs_package(
            name = name + "_python2",
            nix_file_content = _python_nix_file_content.format(
                attribute_path = python2_attribute_path,
                bin_path = python2_bin_path,
                version = "PY2",
            ),
            **kwargs
        )
    python3_runtime = None
    if python3_attribute_path:
        python3_runtime = "@%s_python3//:runtime" % name
        nixpkgs_package(
            name = name + "_python3",
            nix_file_content = _python_nix_file_content.format(
                attribute_path = python3_attribute_path,
                bin_path = python3_bin_path,
                version = "PY3",
            ),
            **kwargs
        )
    _nixpkgs_python_toolchain(
        name = name,
        python2_runtime = python2_runtime,
        python3_runtime = python3_runtime,
    )
    native.register_toolchains("@%s//:toolchain" % name)

def nixpkgs_sh_posix_config(name, packages, **kwargs):
    # Build a Nix environment from `packages` and generate a .bzl file
    # (`nixpkgs_sh_posix.bzl`) mapping every executable found in its bin/
    # directory, for consumption by rules_sh's sh_posix_toolchain.
    nixpkgs_package(
        name = name,
        nix_file_content = """
with import <nixpkgs>;

let
  # `packages` might include lists, e.g. `stdenv.initialPath` is a list itself,
  # so we need to flatten `packages`.
  flatten = builtins.concatMap (x: if builtins.isList x then x else [x]);
  env = buildEnv {{
    name = "posix-toolchain";
    paths = flatten [ {} ];
  }};
  cmd_glob = "${{env}}/bin/*";
  os = if stdenv.isDarwin then "osx" else "linux";
in

runCommand "bazel-nixpkgs-posix-toolchain"
  {{ executable = false;
    # Pointless to do this on a remote machine.
preferLocalBuild = true; allowSubstitutes = false; }} '' n=$out/nixpkgs_sh_posix.bzl mkdir -p "$(dirname "$n")" cat >>$n <<EOF load("@rules_sh//sh:posix.bzl", "posix", "sh_posix_toolchain") discovered = {{ EOF for cmd in ${{cmd_glob}}; do if [[ -x $cmd ]]; then echo " '$(basename $cmd)': '$cmd'," >>$n fi done cat >>$n <<EOF }} def create_posix_toolchain(): sh_posix_toolchain( name = "nixpkgs_sh_posix", cmds = {{ cmd: discovered[cmd] for cmd in posix.commands if cmd in discovered }} ) EOF '' """.format(" ".join(packages)), build_file_content = """ load("//:nixpkgs_sh_posix.bzl", "create_posix_toolchain") create_posix_toolchain() """, **kwargs ) def _nixpkgs_sh_posix_toolchain_impl(repository_ctx): cpu = get_cpu_value(repository_ctx) repository_ctx.file("BUILD", executable = False, content = """ toolchain( name = "nixpkgs_sh_posix_toolchain", toolchain = "@{workspace}//:nixpkgs_sh_posix", toolchain_type = "@rules_sh//sh/posix:toolchain_type", exec_compatible_with = [ "@platforms//cpu:x86_64", "@platforms//os:{os}", "@io_tweag_rules_nixpkgs//nixpkgs/constraints:support_nix", ], target_compatible_with = [ "@platforms//cpu:x86_64", "@platforms//os:{os}", ], ) """.format( workspace = repository_ctx.attr.workspace, os = {"darwin": "osx"}.get(cpu, "linux"), )) _nixpkgs_sh_posix_toolchain = repository_rule( _nixpkgs_sh_posix_toolchain_impl, attrs = { "workspace": attr.string(), }, ) def nixpkgs_sh_posix_configure( name = "nixpkgs_sh_posix_config", packages = ["stdenv.initialPath"], **kwargs): """Create a POSIX toolchain from nixpkgs. Loads the given Nix packages, scans them for standard Unix tools, and generates a corresponding `sh_posix_toolchain`. Make sure to call `nixpkgs_sh_posix_configure` before `sh_posix_configure`, if you use both. Otherwise, the local toolchain will always be chosen in favor of the nixpkgs one. Args: name: Name prefix for the generated repositories. packages: List of Nix attribute paths to draw Unix tools from. nix_file_deps: See nixpkgs_package. 
repositories: See nixpkgs_package. repository: See nixpkgs_package. nixopts: See nixpkgs_package. fail_not_supported: See nixpkgs_package. """ nixpkgs_sh_posix_config( name = name, packages = packages, **kwargs ) # The indirection is required to avoid errors when `nix-build` is not in `PATH`. _nixpkgs_sh_posix_toolchain( name = name + "_toolchain", workspace = name, ) native.register_toolchains( "@{}//:nixpkgs_sh_posix_toolchain".format(name + "_toolchain"), ) def _execute_or_fail(repository_ctx, arguments, failure_message = "", *args, **kwargs): """Call repository_ctx.execute() and fail if non-zero return code.""" result = repository_ctx.execute(arguments, *args, **kwargs) if result.return_code: outputs = dict( failure_message = failure_message, arguments = arguments, return_code = result.return_code, stderr = result.stderr, ) fail(""" {failure_message} Command: {arguments} Return code: {return_code} Error output: {stderr} """.format(**outputs)) return result def _find_children(repository_ctx, target_dir): find_args = [ _executable_path(repository_ctx, "find"), "-L", target_dir, "-maxdepth", "1", # otherwise the directory is printed as well "-mindepth", "1", # filenames can contain \n "-print0", ] exec_result = _execute_or_fail(repository_ctx, find_args) return exec_result.stdout.rstrip("\000").split("\000") def _executable_path(repository_ctx, exe_name, extra_msg = ""): """Try to find the executable, fail with an error.""" path = repository_ctx.which(exe_name) if path == None: fail("Could not find the `{}` executable in PATH.{}\n" .format(exe_name, " " + extra_msg if extra_msg else "")) return path def _cp(repository_ctx, src, dest = None): """Copy the given file into the external repository root. Args: repository_ctx: The repository context of the current repository rule. src: The source file. Must be a Label if dest is None. dest: Optional, The target path within the current repository root. By default the relative path to the repository root is preserved. 
Returns: The dest value """ if dest == None: if type(src) != "Label": fail("src must be a Label if dest is not specified explicitly.") dest = "/".join([ component for component in [src.workspace_root, src.package, src.name] if component ]) repository_ctx.template(dest, src, executable = False) return dest def _label_string(label): """Convert the given (optional) Label to a string.""" if not label: return "None" else: return '"%s"' % label
# Odoo addon manifest for the ZKTeco biometric attendance demo module (UDP).
# NOTE(review): the data paths below spell the directory "secuirty";
# presumably the addon folder on disk carries the same misspelling — confirm
# before renaming, since these paths must match the real directory layout.
{
    "name" : "ZK Biometric Device Integration Kware (ZKTECO) Demo (UDP)",
    "version" : "1.0",
    "author" : "JUVENTUD PRODUCTIVA VENEZOLANA",
    "category" : "HR",
    "website" : "https://www.youtube.com/channel/UCTj66IUz5M-QV15Mtbx_7yg",
    "description": "Module for the connection between odoo and zkteco devices for the control of employee assistance. This module is a demo version to test the compatibility of your device with our module.d Odoo.",
    'license': 'AGPL-3',
    # Requires the core app and the HR app to be installed first.
    "depends" : ["base","hr"],
    # Views and access-control records loaded on install.
    "data" : [
        "views/biometric_machine_view.xml",
        "secuirty/res_groups.xml",
        "secuirty/ir.model.access.csv"
    ],
    'images': ['static/images/zk_screenshot.jpg'],
    "active": True,
    "installable": True,
}
# Auto-generated IronPython stub for the .NET/WPF value type
# HierarchicalVirtualizationHeaderDesiredSizes.  Bodies are intentionally
# empty `pass` statements: only the signatures and docstrings matter, for
# tooling such as autocompletion and static analysis.
class HierarchicalVirtualizationHeaderDesiredSizes(object):
    """ HierarchicalVirtualizationHeaderDesiredSizes(logicalSize: Size,pixelSize: Size) """

    def Equals(self,*__args):
        """
        Equals(self: HierarchicalVirtualizationHeaderDesiredSizes,comparisonHeaderSizes: HierarchicalVirtualizationHeaderDesiredSizes) -> bool
        Equals(self: HierarchicalVirtualizationHeaderDesiredSizes,oCompare: object) -> bool
        """
        pass

    def GetHashCode(self):
        """ GetHashCode(self: HierarchicalVirtualizationHeaderDesiredSizes) -> int """
        pass

    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==y """
        pass

    @staticmethod
    def __new__(self,logicalSize,pixelSize):
        """
        __new__(cls: type,logicalSize: Size,pixelSize: Size)
        __new__[HierarchicalVirtualizationHeaderDesiredSizes]() -> HierarchicalVirtualizationHeaderDesiredSizes
        """
        pass

    def __ne__(self,*args):
        pass

    # Read-only struct properties; the stub generator models .NET getters via
    # property() with placeholder lambdas.
    LogicalSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: LogicalSize(self: HierarchicalVirtualizationHeaderDesiredSizes) -> Size """

    PixelSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get: PixelSize(self: HierarchicalVirtualizationHeaderDesiredSizes) -> Size """
# Task 1, variant 10.
# Print the occupation and the stage name under which Richard Jenkins is
# known, then wait until the user presses Enter before exiting.
# Author: Kolganov Nikita Sergeevich
# 29.05.2016

# Fix: corrected typos in the printed Russian text —
# "извествен" -> "известен", "Американсикй" -> "Американский".
print("Ричард Дженкинс известен по имени Ричард Дейл. Американский актер кино, театра и телевидения")
input("\n\nНажмите Enter для выхода.")
# Advent of Code 2015, day 1: '(' moves one floor up, ')' one floor down.
# Part 1: the final floor.  Part 2: the 1-based position of the first
# character that takes Santa into the basement (floor -1).
#
# Fixes over the original version:
#   * the file is closed via a context manager instead of being leaked;
#   * only ')' decrements the floor — the original's bare `else` also
#     counted the trailing newline as a "down" move, skewing both answers;
#   * `and` / `is None` replace the bitwise `&` and the `== None` comparison.
with open("1/input.txt", "rt") as f:
    line = f.readline()

floor = 0
firstBasement = None
for position, char in enumerate(line, start=1):
    if char == "(":
        floor += 1
    elif char == ")":
        floor -= 1
    # Record only the FIRST time we dip below ground level.
    if floor < 0 and firstBasement is None:
        firstBasement = position

print("answer 1 - " + str(floor))
print("answer 2 - " + str(firstBasement))
# Read the start and end hour of a game and print how long it lasted.
# A game lasts at least 1 and at most 24 hours: when the end hour is not
# after the start hour, the game rolled over midnight, so add a full day.
tokens = input().split()
start_hour, end_hour = int(tokens[0]), int(tokens[1])
if end_hour <= start_hour:
    end_hour += 24
print(f"O JOGO DUROU {end_hour - start_hour} HORA(S)")
# -*- coding: utf-8 -*-
# Compute the volume of a sphere of radius R: V = (4/3) * pi * R^3,
# printed with exactly three decimal places.
PI = 3.14159  # constant fixed by the problem statement

radius = float(input())
volume = 4 / 3 * PI * radius ** 3
print("VOLUME = %.3f" % volume)
# Print every line of textfile.txt.
# Fix: use a `with` block so the file handle is released even if printing
# raises; the original leaked the handle on error (and carried a stray
# trailing semicolon).
with open("textfile.txt") as f:
    for line in f:
        # `line` keeps its trailing newline and print() appends another,
        # so the output stays double-spaced exactly like the original.
        print(line)
# search_data: The data to be used in a search POST.
# Form payload for the CNPq/Lattes curriculum search endpoint.  All values
# are strings ('true'/'false', '0'/'1', ...) because the server expects a
# url-encoded HTML form, not typed JSON.
search_data = {
    'metodo': 'buscar',
    'acao': '',
    # "resumo*" fields are free-text filters; empty string means no filter.
    'resumoFormacao': '',
    'resumoAtividade': '',
    'resumoAtuacao': '',
    'resumoProducao': '',
    'resumoPesquisador': '',
    'resumoIdioma': '',
    'resumoPresencaDGP': '',
    'resumoModalidade': 'Bolsas+de+PQ+de+categorias0',
    'modoIndAdhoc': '',
    'buscaAvancada': '0',
    'filtros.buscaNome': 'true',
    # '.' as the name query matches every researcher.
    'textoBusca': '.',
    'buscarDoutores': 'true',
    'buscarDemais': 'true',
    'buscarBrasileiros': 'true',
    'buscarEstrangeiros': 'true',
    'paisNascimento': '0',
    'textoBuscaTodas': '',
    'textoBuscaFrase': '',
    'textoBuscaQualquer': '',
    'textoBuscaNenhuma': '',
    'textoExpressao': '',
    'buscarDoutoresAvancada': 'true',
    'buscarBrasileirosAvancada': 'true',
    'buscarEstrangeirosAvancada': 'true',
    'paisNascimentoAvancada': '0',
    # Only CVs updated within the last 48 months.
    'filtros.atualizacaoCurriculo': '48',
    'quantidadeRegistros': '20',
    # "visualiza*" flags select which CV sections the result page shows.
    'filtros.visualizaEnderecoCV': 'true',
    'filtros.visualizaFormacaoAcadTitCV': 'true',
    'filtros.visualizaAtuacaoProfCV': 'true',
    'filtros.visualizaAreasAtuacaoCV': 'true',
    'filtros.visualizaIdiomasCV': 'true',
    'filtros.visualizaPremiosTitulosCV': 'true',
    'filtros.visualizaSoftwaresCV': 'true',
    'filtros.visualizaProdutosCV': 'true',
    'filtros.visualizaProcessosCV': 'true',
    'filtros.visualizaTrabalhosTecnicosCV': 'true',
    'filtros.visualizaOutrasProdTecCV': 'true',
    'filtros.visualizaArtigosCV': 'true',
    'filtros.visualizaLivrosCapitulosCV': 'true',
    'filtros.visualizaTrabEventosCV': 'true',
    'filtros.visualizaTxtJornalRevistaCV': 'true',
    'filtros.visualizaOutrasProdBibCV': 'true',
    'filtros.visualizaProdArtCultCV': 'true',
    'filtros.visualizaOrientacoesConcluidasCV': 'true',
    'filtros.visualizaOrientacoesAndamentoCV': 'true',
    'filtros.visualizaDemaisTrabalhosCV': 'true',
    'filtros.visualizaDadosComplementaresCV': 'true',
    'filtros.visualizaOutrasInfRelevantesCV': 'true',
    'filtros.radioPeriodoProducao': '1',
    'filtros.visualizaPeriodoProducaoCV': '',
    'filtros.categoriaNivelBolsa': '',
    'filtros.modalidadeBolsa': '0',
    # '0' in the code-style filters below means "any".
    'filtros.nivelFormacao': '0',
    'filtros.paisFormacao': '0',
    'filtros.regiaoFormacao': '0',
    'filtros.ufFormacao': '0',
    'filtros.nomeInstFormacao': '',
    'filtros.conceitoCurso': '',
    'filtros.buscaAtuacao': 'false',
    'filtros.codigoGrandeAreaAtuacao': '0',
    'filtros.codigoAreaAtuacao': '0',
    'filtros.codigoSubareaAtuacao': '0',
    'filtros.codigoEspecialidadeAtuacao': '0',
    'filtros.orientadorCNPq': '',
    'filtros.idioma': '0',
    'filtros.grandeAreaProducao': '0',
    'filtros.areaProducao': '0',
    'filtros.setorProducao': '0',
    'filtros.naturezaAtividade': '0',
    'filtros.paisAtividade': '0',
    'filtros.regiaoAtividade': '0',
    'filtros.ufAtividade': '0',
    'filtros.nomeInstAtividade': '',
}

# params_payload: The params to be used in a GET pagination.
# 'registros' is a "first;page_size" window; 'query' selects partition 1 for
# both foreign ('e') and Brazilian ('b') nationalities.
params_payload = {
    'metodo': 'forwardPaginaResultados',
    'registros': '1;1000',
    'query': ('( +idx_particao:1 +idx_nacionalidade:e) or '
              '( +idx_particao:1 +idx_nacionalidade:b)'),
    'analise': 'cv',
    'tipoOrdenacao': 'null',
    'paginaOrigem': 'index.do',
    'mostrarScore': 'false',
    'mostrarBandeira': 'false',
    'modoIndAdhoc': 'null',
}
class Solution(object):
    def destCity(self, paths):
        """Return the terminal city of a non-branching chain of routes.

        `paths` is a list of [cityA, cityB] direct routes that form one
        line without loops; the destination is the only city that never
        appears as an origin.

        Improvement: the original probed a *list* of origins for every
        destination (O(n^2)); a set membership test makes this O(n).
        Returns None (implicitly) only for malformed input with no sink.
        """
        origins = {path[0] for path in paths}
        for path in paths:
            if path[1] not in origins:
                return path[1]
class AsyncRunner:
    # Sketch of an asyncio-based RPC runner: calling the instance builds an
    # RPC message from a facade method and awaits the round trip.
    # NOTE(review): relies on a `self.connection` attribute that is never
    # assigned in this file — presumably injected by a subclass or factory;
    # confirm before use.
    async def __call__(self, facade_method, *args, **kwargs):
        await self.connection.rpc(facade_method(*args, **kwargs))


class ThreadedRunner:
    # Placeholder for a thread-based counterpart to AsyncRunner.
    pass

# Methods are descriptors??
# get is called with params
# set gets called with the result?
# This could let us fake the protocol we want
# while decoupling the protocol from the RPC and the IO/Process context

# The problem is leaking the runtime impl details to the top levels of the API
# with async def By handling the Marshal/Unmarshal side of RPC as a protocol we
# can leave the RPC running to a specific delegate without altering the method
# signatures. This still isn't quite right though as async is co-op
# multitasking and the methods still need to know not to block or they will
# pause other execution
class ListNode:
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None


def make_list(values: list[int]) -> list[ListNode]:
    """Build a linked list from values; return all nodes in order."""
    nodes = [ListNode(value) for value in values]
    for left, right in zip(nodes, nodes[1:]):
        left.next = right
    return nodes


def same_values(head: ListNode, values: list[int]) -> bool:
    """Return True iff the list starting at head holds exactly `values`."""
    cursor = head
    for expected in values:
        if cursor is None or cursor.val != expected:
            return False
        cursor = cursor.next
    # The list must also END here — extra trailing nodes fail the check.
    return cursor is None
"""
Default disk quota assigned to each new user
Units: Bytes (Base 2 - 1 kiB = 1024 bytes)
Default: 100 MiB
Note: Make migrations when updating value
"""
DEFAULT_QUOTA = 104857600  # == 100 * 1024 * 1024 bytes

"""
How long the 'Change Password' URL should be valid, in days.
Current: 1 day (24 Hours)
"""
PASSWORD_RESET_TIMEOUT_DAYS = 1

"""
External server url
TODO: CHANGE IN PRODUCTION
"""
#EXTERNAL_URL = 'http://cloud.domain.com/'
EXTERNAL_URL = 'http://127.0.0.1:8000/'  # local development default

"""
Email settings
Note: Leave EMAIL_HOST blank to disable sending emails.
TODO: CHANGE IN PRODUCTION
"""
FROM_EMAIL_ADDRESS = 'CS Cloud <cs_cloud@example.com>'
EMAIL_USE_TLS = True
EMAIL_HOST = ""  # blank -> outgoing mail disabled
EMAIL_PORT = 25
EMAIL_HOST_USER = "cs_cloud"
# SECURITY NOTE(review): a real credential must not live in source control;
# load it from an environment variable or a secrets store in production.
EMAIL_HOST_PASSWORD = "Cs.cl0ud.p@ssw0rd"
# Always-passing placeholder tests (group "d"); typically used to exercise
# test discovery or parallel execution rather than real behavior.


def test_d_1():
    assert True


def test_d_2():
    assert True


def test_d_3():
    assert True
#!/usr/bin/env python3


class Node():
    """Search-graph node; positions are (y, x) tuples."""

    # Neighbour offset sequences, keyed by traversal-order variant.
    # Any unknown order (including the default None) uses _DEFAULT_OFFSETS.
    # Variants 1 and 2 are intentionally identical, as in the original.
    _OFFSET_ORDERS = {
        0: [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)],
        1: [(1, -1), (1, 0), (0, -1), (1, 1), (-1, -1), (0, 1), (-1, 0), (-1, 1)],
        2: [(1, -1), (1, 0), (0, -1), (1, 1), (-1, -1), (0, 1), (-1, 0), (-1, 1)],
        3: [(0, 1), (0, -1), (-1, 0), (1, 0), (1, 1), (1, -1), (-1, -1), (-1, 1)],
        4: [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)],
    }
    _DEFAULT_OFFSETS = [(1, 1), (1, 0), (0, 1), (1, -1), (-1, 1), (0, -1), (-1, 0), (-1, -1)]

    def __init__(self, parent=None, position=None):
        self.parent = parent  # parent node
        self.position = position  # (y, x)
        self.distance_from_start = 0
        self.distance_to_end = 0
        self.cost = 0
        self.move = 0  # move counter

    def __str__(self):
        return "None" if self.position is None else str(self.position)

    def __eq__(self, node):
        return self.position == node.position

    # unused
    def is_position(self, x, y):
        # NOTE: compares position[0] (the y coordinate) against the x
        # argument, mirroring the original's parameter naming.
        return self.position[0] == x and self.position[1] == y

    def get_neighbor_positions(self, order=None):
        """Return the 8-connected neighbours of this node's position.

        Skips the parent's position (no immediate backtracking) and any
        coordinate with a negative component.
        """
        offsets = self._OFFSET_ORDERS.get(order, self._DEFAULT_OFFSETS)
        parent_pos = None
        if self.parent is not None:
            parent_pos = self.parent.position

        neighbors = []
        for dy, dx in offsets:
            y = self.position[0] + dy
            x = self.position[1] + dx
            if parent_pos is not None and parent_pos[0] == y and parent_pos[1] == x:
                continue
            if x < 0 or y < 0:  # skip negative positions
                continue
            neighbors.append((y, x))
        return neighbors
class ContactList(list):
    def search(self, name):
        """Return all contacts that contain the search value in their name."""
        return [contact for contact in self if name in contact.name]


class Contact:
    """A named e-mail contact; every instance self-registers in all_contacts."""

    all_contacts = ContactList()

    def __init__(self, name, email):
        self.name = name
        self.email = email
        Contact.all_contacts.append(self)


class Supplier(Contact):
    def order(self, order):
        print(
            "If this were a real system we would send "
            "'{}' order to '{}'".format(order, self.name)
        )


class Friend(Contact):
    def __init__(self, name, email, phone):
        # Fix: delegate to Contact.__init__ so Friend instances are also
        # registered in Contact.all_contacts — the original duplicated the
        # name/email assignments and silently skipped registration.
        super().__init__(name, email)
        self.phone = phone


class MailSender:
    """Mixin that adds e-mail sending to any class exposing self.email."""

    def send_mail(self, message):
        print("Sending mail to " + self.email)
        # Add e-mail logic here


class EmailableContact(Contact, MailSender):
    pass
registry = set()


def register(active=True):
    """Parametrized decorator factory: registers (or unregisters) functions.

    With active=True the decorated function is added to `registry`; with
    active=False it is discarded.  The function itself is returned unchanged.
    """

    def decorate(func):
        print('Running registry (active=%s)->decorate(%s)' % (active, func))
        # add/discard chosen up-front; discard tolerates missing entries.
        action = registry.add if active else registry.discard
        action(func)
        return func

    return decorate


@register(active=False)
def f1():
    print('Running f1()')


@register()  # need parentheses, as parametrized decorator
def f2():
    print('Running f2()')


def f3():
    print('Running f3()')


if __name__ == '__main__':
    f1()
    f2()
    f3()
    print(registry)  # {<function f2 at 0x102c441e0>}
'''
Description:
Given a collection of numbers that might contain duplicates,
return all possible unique permutations.

Example:
Input: [1,1,2]
Output: [[1,1,2], [1,2,1], [2,1,1]]
'''


class Solution:
    # @param num, a list of integer
    # @return a list of lists of integers
    def permuteUnique(self, num):
        """Return every distinct permutation of num (sorts num in place)."""
        if len(num) == 0:
            return []
        if len(num) == 1:
            return [num]

        # Sorting groups duplicates so equal pivots can be skipped.
        num.sort()

        result = []
        for index, pivot in enumerate(num):
            if index > 0 and pivot == num[index - 1]:
                # Same pivot as the previous position -> duplicate branch.
                continue
            remainder = num[:index] + num[index + 1:]
            for tail in self.permuteUnique(remainder):
                result.append([pivot] + tail)
        return result


def test_bench():
    test_data = [1, 1, 2]
    print(Solution().permuteUnique(test_data))
    return


if __name__ == '__main__':
    test_bench()
# -*- coding: utf-8 -*-
"""
1669. Merge In Between Linked Lists

Given two linked lists list1 (size n) and list2 (size m), remove list1's
nodes from the ath to the bth (inclusive) and splice list2 in their place.
Return the head of the resulting list.

Constraints:
    3 <= list1.length <= 10^4
    1 <= a <= b < list1.length - 1
    1 <= list2.length <= 10^4
"""


# Definition for singly-linked list.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def mergeInBetween(self, list1: ListNode, a: int, b: int, list2: ListNode) -> ListNode:
        """Replace list1[a..b] with list2 and return the new head."""
        # Node immediately after the removed span (index b + 1).
        tail = list1
        for _ in range(b + 1):
            tail = tail.next

        # Walk a dummy-headed cursor to the node just before index a.
        dummy = ListNode(next=list1)
        prev = dummy
        for _ in range(a):
            prev = prev.next

        # Splice: prev -> list2 ... list2_end -> tail.
        prev.next = list2
        while prev.next:
            prev = prev.next
        prev.next = tail

        return dummy.next
## Baekjoon 9498
## Exam grade
## Conditionals / implementation


def examScore(n):
    """Print the letter grade for a 0-100 exam score.

    90-100 -> A, 80-89 -> B, 70-79 -> C, 60-69 -> D, otherwise F.
    """
    if 89 < n < 101:
        print('A')
    elif 79 < n:
        print('B')
    elif 69 < n:
        print('C')
    elif 59 < n:
        print('D')
    else:
        print('F')


if __name__ == "__main__":
    N = int(input())
    examScore(N)

# Reference short-code:
#   print("FFFFFFDCBAA"[int(input())//10])
# 0-5: F, 6: D, 7: C, 8: B, 9: A, 10: A
# "string"[index]; the // operator divides and keeps only the integer part.
def x1(y):
    """Recursion demo: each level below 10 adds 4 (1 + 3) to the base case."""
    if y >= 10:
        return y
    return x1(y + 1) + 1 + 3


def x2(y):
    """Recursion demo: each level below 10 adds 3 to the base case."""
    if y >= 10:
        return y
    return x2(y + 1) + 3


# Evaluate both for effect, as in the original (results are discarded).
x1(5)
x2(5)
# This file is used with the GYP meta build system.
# http://code.google.com/p/gyp
# To build try this:
#   svn co http://gyp.googlecode.com/svn/trunk gyp
#   ./gyp/gyp -f make --depth=`pwd` libexpat.gyp
#   make
#   ./out/Debug/test
{
  'target_defaults': {
    'default_configuration': 'Debug',
    'configurations': {
      # TODO: hoist these out and put them somewhere common, because
      # RuntimeLibrary MUST MATCH across the entire project
      'Debug': {
        'defines': [ 'DEBUG', '_DEBUG' ],
        'msvs_settings': {
          'VCCLCompilerTool': {
            'RuntimeLibrary': 1, # static debug
          },
        },
      },
      'Release': {
        'defines': [ 'NDEBUG' ],
        'msvs_settings': {
          'VCCLCompilerTool': {
            'RuntimeLibrary': 0, # static release
          },
        },
      }
    },
    'msvs_settings': {
      'VCCLCompilerTool': {
      },
      'VCLibrarianTool': {
      },
      'VCLinkerTool': {
        'GenerateDebugInformation': 'true',
      },
    },
  },
  'targets': [
    {
      'variables': { 'target_arch%': 'ia32' }, # default for node v0.6.x
      # Static expat library built from the three core translation units.
      'target_name': 'expat',
      'product_prefix': 'lib',
      'type': 'static_library',
      'sources': [
        'lib/xmlparse.c',
        'lib/xmltok.c',
        'lib/xmlrole.c',
      ],
      'defines': [ 'PIC', 'HAVE_EXPAT_CONFIG_H' ],
      'include_dirs': [
        '.',
        'lib',
      ],
      # Settings inherited by targets that depend on 'expat'.
      'direct_dependent_settings': {
        'include_dirs': [
          '.',
          'lib',
        ],
        'conditions': [
          ['OS=="win"', {
            'defines': [ 'XML_STATIC' ]
          }]
        ],
      },
    },
    {
      # Small executable exercising the library (prints the expat version).
      'target_name': 'version',
      'type': 'executable',
      'dependencies': [ 'expat' ],
      'sources': [ 'version.c' ]
    },
  ]
}
# 简单的while循环 # 从1开始, 打印小于等于5的整数 current_number = 1 while current_number <= 5: print(current_number) current_number += 1 # 让用户选择何时退出 prompt = "\nTell me something, and I will repeat it back to you:" prompt += "\nEnter 'quit' to end the program. " message = "" while message != 'quit': message = input(prompt) if message != 'quit': # 不是quit才打印信息 print(message) # 使用标志 # 在要求很多条件都满足才继续运行的程序中, 可定义一个变量, 用于判断整个程序是否处于活动状态. # 这个变量被称为标志, 充当了程序的交通信号灯. # 你可让程序在标志为True时继续运行, 并在任何事件导致标志的值为False时让程序停止运行. # 这样, 在while语句中就只需检查一个条件——标志的当前值是否为True, # 并将所有测试(是否发生了应将标志设置为False的事件)都放在其他地方, 从而让程序变得更为整洁. prompt = "\nTell me something, and I will repeat it back to you:" prompt += "\nEnter 'quit' to end the program. " active = True while active: message = input(prompt) if message == 'quit': active = False else: print(message) # 使用break退出循环 # 要立即退出while循环, 不再运行循环中余下的代码, 也不管条件测试的结果如何, 可使用break语句. # break语句用于控制程序流程, 可使用它来控制哪些代码行将执行, # 哪些代码行不执行, 从而让程序按你的要求执行你要执行的代码. # 在任何Python循环中都可使用break语句 prompt = "\nPlease enter the name of a city you have visited:" prompt += "\n(Enter 'quit' when you are finished.) " while True: city = input(prompt) if city == 'quit': break else: print("I'd love to go to " + city.title() + "!") # 在循环中使用continue # 要返回到循环开头, 并根据条件测试结果决定是否继续执行循环, 可使用continue语句. # 它不像break语句那样不再执行余下的代码并退出整个循环. # 示例: 打印1-10范围内的奇数 print('\n') print('1-10内的奇数如下: ') current_number = 0 while current_number < 10: current_number += 1 if current_number % 2 == 0: continue print(current_number)
# Demonstrate equality (==, compares contents) versus identity (`is`,
# compares object identity): two separately built lists are equal but
# are not the same object.
x = [1, 2, 3]
y = [1, 2, 3]
for outcome in (x == y, x is y):
    print(outcome)
class KostenPlaatsError(Exception):
    """Exception for cost-centre (kostenplaats) related failures."""


class ServerError(Exception):
    """Exception for generic server-side failures."""


class FaultCodeNotFoundError(Exception):
    """Exception when faultcode not found"""


class TwinfieldFaultCode(Exception):
    """Exception when Twinfield has raised a faultcode"""


class SelectOfficeError(Exception):
    """Exception when selecting office in twinfield is not succesful"""


class EnvironmentVariablesError(Exception):
    """Exception when not all env variables are set"""


class LoginSessionError(Exception):
    """Exception when response from login is not valid"""
# Equal
# https://www.interviewbit.com/problems/equal/
#
# Given an array A of integers, find indices satisfying
# A[A1] + A[B1] = A[C1] + A[D1] with
#   A1 < B1, C1 < D1, A1 < C1, B1 != D1, B1 != C1.
# Among all valid answers return the lexicographically smallest tuple
# [A1, B1, C1, D1]; return [] when no solution exists.
#
# Example: [3, 4, 7, 1, 2, 9, 8] -> [0, 2, 3, 5]  (3+7 == 1+9)
#
# Approach: enumerate index pairs in lexicographic order and remember, for
# each pair-sum, only the FIRST pair seen (which is the smallest).  A later
# pair with the same sum and disjoint indices forms a candidate answer.


class Solution:
    def intersect(self, l1, l2):
        """Return the elements of l1 that also occur in l2."""
        return [item for item in l1 if item in l2]

    # @param A : list of integers
    # @return a list of integers
    def equal(self, A):
        first_pair_by_sum = {}
        best = []
        size = len(A)
        for i in range(size):
            for j in range(i + 1, size):
                pair_sum = A[i] + A[j]
                candidate = [i, j]
                stored = first_pair_by_sum.get(pair_sum)
                if stored is None:
                    # First pair with this sum — keep it as the left half.
                    first_pair_by_sum[pair_sum] = candidate
                elif not self.intersect(stored, candidate):
                    combined = stored + candidate
                    if not best or combined < best:
                        best = combined
        return best


if __name__ == "__main__":
    s = Solution()
    print(s.equal([3, 4, 7, 1, 2, 9, 8]))
# GYP binding for a Node.js native addon that wraps a Go shared library.
{
  "targets": [
    {
      "target_name": "gomodule_addon",
      "sources": ["nodegomodule.cc"],
      # Header search path: two directories above this binding file.
      "include_dirs": [
        "<(module_root_dir)/../../"
      ],
      # Pre-built Go c-shared library the addon links against.
      "libraries": ["<(module_root_dir)/../../../gomodule/build/gomodule.so"]
    }
  ]
}
{ "targets": [ { # OpenSSL has a lot of config options, with some default options # enabling known insecure algorithms. What's a good combinations # of openssl config options? # ./config no-asm no-shared no-ssl2 no-ssl3 no-hw no-zlib no-threads # ? # See also http://codefromthe70s.org/sslimprov.aspx "target_name": "openssl", "type": "static_library", # The list of sources I computed on Windows via: # >cd bru_modules\openssl\1.0.1j\openssl-1.0.1j # >perl Configure VC-WIN32 no-asm no-ssl2 no-ssl3 no-hw # >call ms\\do_ms.bat # >nmake /n /f ms\nt.mak > nmake.log # >cd bru_modules\openssl # where the *.gyp is located # >~\bru\makefile2gyp.py 1.0.1j\openssl-1.0.1j\nmake.log "sources": [ "1.0.1j/openssl-1.0.1j/crypto/aes/aes_cbc.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_cfb.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_core.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_ctr.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_ige.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_misc.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_ofb.c", "1.0.1j/openssl-1.0.1j/crypto/aes/aes_wrap.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_bitstr.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_bool.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_bytes.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_d2i_fp.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_digest.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_dup.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_enum.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_gentm.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_i2d_fp.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_int.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_mbstr.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_object.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_octet.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_print.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_set.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_sign.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_strex.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_strnid.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_time.c", 
"1.0.1j/openssl-1.0.1j/crypto/asn1/a_type.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_utctm.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_utf8.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/a_verify.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/ameth_lib.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn1_err.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn1_gen.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn1_lib.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn1_par.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn_mime.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn_moid.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/asn_pack.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/bio_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/bio_ndef.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/d2i_pr.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/d2i_pu.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/evp_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/f_enum.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/f_int.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/f_string.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/i2d_pr.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/i2d_pu.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/n_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/nsseq.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/p5_pbe.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/p5_pbev2.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/p8_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_bitst.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_crl.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_req.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_spki.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_x509.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/t_x509a.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_dec.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_enc.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_fre.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_new.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_prn.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_typ.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/tasn_utl.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_algor.c", 
"1.0.1j/openssl-1.0.1j/crypto/asn1/x_attrib.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_bignum.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_crl.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_exten.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_info.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_long.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_name.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_nx509.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_pubkey.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_req.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_sig.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_spki.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_val.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_x509.c", "1.0.1j/openssl-1.0.1j/crypto/asn1/x_x509a.c", "1.0.1j/openssl-1.0.1j/crypto/bf/bf_cfb64.c", "1.0.1j/openssl-1.0.1j/crypto/bf/bf_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/bf/bf_enc.c", "1.0.1j/openssl-1.0.1j/crypto/bf/bf_ofb64.c", "1.0.1j/openssl-1.0.1j/crypto/bf/bf_skey.c", "1.0.1j/openssl-1.0.1j/crypto/bf/bftest.c", "1.0.1j/openssl-1.0.1j/crypto/bio/b_dump.c", "1.0.1j/openssl-1.0.1j/crypto/bio/b_print.c", "1.0.1j/openssl-1.0.1j/crypto/bio/b_sock.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bf_buff.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bf_nbio.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bf_null.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bio_cb.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bio_err.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bio_lib.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_acpt.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_bio.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_conn.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_dgram.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_fd.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_file.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_log.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_mem.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_null.c", "1.0.1j/openssl-1.0.1j/crypto/bio/bss_sock.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_add.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_asm.c", 
"1.0.1j/openssl-1.0.1j/crypto/bn/bn_blind.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_const.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_ctx.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_depr.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_div.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_err.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_exp.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_exp2.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_gcd.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_gf2m.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_kron.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_lib.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_mod.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_mont.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_mpi.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_mul.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_nist.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_prime.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_print.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_rand.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_recp.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_shift.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_sqr.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_sqrt.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_word.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bn_x931p.c", "1.0.1j/openssl-1.0.1j/crypto/bn/bntest.c", "1.0.1j/openssl-1.0.1j/crypto/bn/exptest.c", "1.0.1j/openssl-1.0.1j/crypto/buffer/buf_err.c", "1.0.1j/openssl-1.0.1j/crypto/buffer/buf_str.c", "1.0.1j/openssl-1.0.1j/crypto/buffer/buffer.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/camellia.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_cbc.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_cfb.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_ctr.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_misc.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_ofb.c", "1.0.1j/openssl-1.0.1j/crypto/camellia/cmll_utl.c", "1.0.1j/openssl-1.0.1j/crypto/cast/c_cfb64.c", "1.0.1j/openssl-1.0.1j/crypto/cast/c_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/cast/c_enc.c", 
"1.0.1j/openssl-1.0.1j/crypto/cast/c_ofb64.c", "1.0.1j/openssl-1.0.1j/crypto/cast/c_skey.c", "1.0.1j/openssl-1.0.1j/crypto/cast/casttest.c", "1.0.1j/openssl-1.0.1j/crypto/cmac/cm_ameth.c", "1.0.1j/openssl-1.0.1j/crypto/cmac/cm_pmeth.c", "1.0.1j/openssl-1.0.1j/crypto/cmac/cmac.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_att.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_cd.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_dd.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_enc.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_env.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_err.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_ess.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_io.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_lib.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_pwri.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_sd.c", "1.0.1j/openssl-1.0.1j/crypto/cms/cms_smime.c", "1.0.1j/openssl-1.0.1j/crypto/comp/c_rle.c", "1.0.1j/openssl-1.0.1j/crypto/comp/c_zlib.c", "1.0.1j/openssl-1.0.1j/crypto/comp/comp_err.c", "1.0.1j/openssl-1.0.1j/crypto/comp/comp_lib.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_api.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_def.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_err.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_lib.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_mall.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_mod.c", "1.0.1j/openssl-1.0.1j/crypto/conf/conf_sap.c", "1.0.1j/openssl-1.0.1j/crypto/constant_time_test.c", "1.0.1j/openssl-1.0.1j/crypto/cpt_err.c", "1.0.1j/openssl-1.0.1j/crypto/cryptlib.c", "1.0.1j/openssl-1.0.1j/crypto/cversion.c", "1.0.1j/openssl-1.0.1j/crypto/des/cbc_cksm.c", "1.0.1j/openssl-1.0.1j/crypto/des/cbc_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/cfb64ede.c", "1.0.1j/openssl-1.0.1j/crypto/des/cfb64enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/cfb_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/des_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/des_old.c", "1.0.1j/openssl-1.0.1j/crypto/des/des_old2.c", 
"1.0.1j/openssl-1.0.1j/crypto/des/destest.c", "1.0.1j/openssl-1.0.1j/crypto/des/ecb3_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/ecb_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/ede_cbcm_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/enc_read.c", "1.0.1j/openssl-1.0.1j/crypto/des/enc_writ.c", "1.0.1j/openssl-1.0.1j/crypto/des/fcrypt.c", "1.0.1j/openssl-1.0.1j/crypto/des/fcrypt_b.c", "1.0.1j/openssl-1.0.1j/crypto/des/ofb64ede.c", "1.0.1j/openssl-1.0.1j/crypto/des/ofb64enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/ofb_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/pcbc_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/qud_cksm.c", "1.0.1j/openssl-1.0.1j/crypto/des/rand_key.c", "1.0.1j/openssl-1.0.1j/crypto/des/read2pwd.c", "1.0.1j/openssl-1.0.1j/crypto/des/rpc_enc.c", "1.0.1j/openssl-1.0.1j/crypto/des/set_key.c", "1.0.1j/openssl-1.0.1j/crypto/des/str2key.c", "1.0.1j/openssl-1.0.1j/crypto/des/xcbc_enc.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_ameth.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_check.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_depr.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_err.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_gen.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_key.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_lib.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_pmeth.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dh_prn.c", "1.0.1j/openssl-1.0.1j/crypto/dh/dhtest.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_ameth.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_depr.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_err.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_gen.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_key.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_lib.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_ossl.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_pmeth.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_prn.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_sign.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsa_vrf.c", "1.0.1j/openssl-1.0.1j/crypto/dsa/dsatest.c", 
"1.0.1j/openssl-1.0.1j/crypto/dso/dso_beos.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_dl.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_dlfcn.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_err.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_lib.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_null.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_openssl.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_vms.c", "1.0.1j/openssl-1.0.1j/crypto/dso/dso_win32.c", "1.0.1j/openssl-1.0.1j/crypto/ebcdic.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec2_mult.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec2_oct.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec2_smpl.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_ameth.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_check.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_curve.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_cvt.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_err.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_key.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_mult.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_oct.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_pmeth.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ec_print.c", "1.0.1j/openssl-1.0.1j/crypto/ec/eck_prn.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_mont.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_nist.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_nistp224.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_nistp256.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_nistp521.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_nistputil.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_oct.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ecp_smpl.c", "1.0.1j/openssl-1.0.1j/crypto/ec/ectest.c", "1.0.1j/openssl-1.0.1j/crypto/ecdh/ecdhtest.c", "1.0.1j/openssl-1.0.1j/crypto/ecdh/ech_err.c", "1.0.1j/openssl-1.0.1j/crypto/ecdh/ech_key.c", "1.0.1j/openssl-1.0.1j/crypto/ecdh/ech_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ecdh/ech_ossl.c", "1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecdsatest.c", "1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecs_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecs_err.c", 
"1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecs_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecs_ossl.c", "1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecs_sign.c", "1.0.1j/openssl-1.0.1j/crypto/ecdsa/ecs_vrf.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_all.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_cnf.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_cryptodev.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_ctrl.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_dyn.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_err.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_fat.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_init.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_lib.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_list.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_openssl.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_rdrand.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_rsax.c", "1.0.1j/openssl-1.0.1j/crypto/engine/eng_table.c", "1.0.1j/openssl-1.0.1j/crypto/engine/enginetest.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_asnmth.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_cipher.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_dh.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_digest.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_dsa.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_ecdh.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_ecdsa.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_pkmeth.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_rand.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_rsa.c", "1.0.1j/openssl-1.0.1j/crypto/engine/tb_store.c", "1.0.1j/openssl-1.0.1j/crypto/err/err.c", "1.0.1j/openssl-1.0.1j/crypto/err/err_all.c", "1.0.1j/openssl-1.0.1j/crypto/err/err_prn.c", "1.0.1j/openssl-1.0.1j/crypto/evp/bio_b64.c", "1.0.1j/openssl-1.0.1j/crypto/evp/bio_enc.c", "1.0.1j/openssl-1.0.1j/crypto/evp/bio_md.c", "1.0.1j/openssl-1.0.1j/crypto/evp/bio_ok.c", "1.0.1j/openssl-1.0.1j/crypto/evp/c_all.c", "1.0.1j/openssl-1.0.1j/crypto/evp/c_allc.c", "1.0.1j/openssl-1.0.1j/crypto/evp/c_alld.c", 
"1.0.1j/openssl-1.0.1j/crypto/evp/digest.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_aes.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_aes_cbc_hmac_sha1.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_bf.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_camellia.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_cast.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_des.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_des3.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_idea.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_null.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_old.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_rc2.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_rc4.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_rc4_hmac_md5.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_rc5.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_seed.c", "1.0.1j/openssl-1.0.1j/crypto/evp/e_xcbc_d.c", "1.0.1j/openssl-1.0.1j/crypto/evp/encode.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_acnf.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_cnf.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_enc.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_err.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_fips.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_key.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_lib.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_pbe.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/evp/evp_test.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_dss.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_dss1.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_ecdsa.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_md4.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_md5.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_mdc2.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_null.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_ripemd.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_sha.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_sha1.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_sigver.c", "1.0.1j/openssl-1.0.1j/crypto/evp/m_wp.c", "1.0.1j/openssl-1.0.1j/crypto/evp/names.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p5_crpt.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p5_crpt2.c", 
"1.0.1j/openssl-1.0.1j/crypto/evp/p_dec.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p_enc.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p_lib.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p_open.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p_seal.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p_sign.c", "1.0.1j/openssl-1.0.1j/crypto/evp/p_verify.c", "1.0.1j/openssl-1.0.1j/crypto/evp/pmeth_fn.c", "1.0.1j/openssl-1.0.1j/crypto/evp/pmeth_gn.c", "1.0.1j/openssl-1.0.1j/crypto/evp/pmeth_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ex_data.c", "1.0.1j/openssl-1.0.1j/crypto/fips_ers.c", "1.0.1j/openssl-1.0.1j/crypto/hmac/hm_ameth.c", "1.0.1j/openssl-1.0.1j/crypto/hmac/hm_pmeth.c", "1.0.1j/openssl-1.0.1j/crypto/hmac/hmac.c", "1.0.1j/openssl-1.0.1j/crypto/hmac/hmactest.c", "1.0.1j/openssl-1.0.1j/crypto/idea/i_cbc.c", "1.0.1j/openssl-1.0.1j/crypto/idea/i_cfb64.c", "1.0.1j/openssl-1.0.1j/crypto/idea/i_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/idea/i_ofb64.c", "1.0.1j/openssl-1.0.1j/crypto/idea/i_skey.c", "1.0.1j/openssl-1.0.1j/crypto/idea/ideatest.c", "1.0.1j/openssl-1.0.1j/crypto/krb5/krb5_asn.c", "1.0.1j/openssl-1.0.1j/crypto/lhash/lh_stats.c", "1.0.1j/openssl-1.0.1j/crypto/lhash/lhash.c", "1.0.1j/openssl-1.0.1j/crypto/md4/md4_dgst.c", "1.0.1j/openssl-1.0.1j/crypto/md4/md4_one.c", "1.0.1j/openssl-1.0.1j/crypto/md4/md4test.c", "1.0.1j/openssl-1.0.1j/crypto/md5/md5_dgst.c", "1.0.1j/openssl-1.0.1j/crypto/md5/md5_one.c", "1.0.1j/openssl-1.0.1j/crypto/md5/md5test.c", "1.0.1j/openssl-1.0.1j/crypto/mdc2/mdc2_one.c", "1.0.1j/openssl-1.0.1j/crypto/mdc2/mdc2dgst.c", "1.0.1j/openssl-1.0.1j/crypto/mdc2/mdc2test.c", "1.0.1j/openssl-1.0.1j/crypto/mem.c", "1.0.1j/openssl-1.0.1j/crypto/mem_clr.c", "1.0.1j/openssl-1.0.1j/crypto/mem_dbg.c", "1.0.1j/openssl-1.0.1j/crypto/modes/cbc128.c", "1.0.1j/openssl-1.0.1j/crypto/modes/ccm128.c", "1.0.1j/openssl-1.0.1j/crypto/modes/cfb128.c", "1.0.1j/openssl-1.0.1j/crypto/modes/ctr128.c", "1.0.1j/openssl-1.0.1j/crypto/modes/cts128.c", "1.0.1j/openssl-1.0.1j/crypto/modes/gcm128.c", 
"1.0.1j/openssl-1.0.1j/crypto/modes/ofb128.c", "1.0.1j/openssl-1.0.1j/crypto/modes/xts128.c", "1.0.1j/openssl-1.0.1j/crypto/o_dir.c", "1.0.1j/openssl-1.0.1j/crypto/o_fips.c", "1.0.1j/openssl-1.0.1j/crypto/o_init.c", "1.0.1j/openssl-1.0.1j/crypto/o_str.c", "1.0.1j/openssl-1.0.1j/crypto/o_time.c", "1.0.1j/openssl-1.0.1j/crypto/objects/o_names.c", "1.0.1j/openssl-1.0.1j/crypto/objects/obj_dat.c", "1.0.1j/openssl-1.0.1j/crypto/objects/obj_err.c", "1.0.1j/openssl-1.0.1j/crypto/objects/obj_lib.c", "1.0.1j/openssl-1.0.1j/crypto/objects/obj_xref.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_asn.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_cl.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_err.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_ext.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_ht.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_prn.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_srv.c", "1.0.1j/openssl-1.0.1j/crypto/ocsp/ocsp_vfy.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_all.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_err.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_info.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_lib.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_oth.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_pk8.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_pkey.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_seal.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_sign.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_x509.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pem_xaux.c", "1.0.1j/openssl-1.0.1j/crypto/pem/pvkfmt.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_add.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_asn.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_attr.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_crpt.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_crt.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_decr.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_init.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_key.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_kiss.c", 
"1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_mutl.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_npas.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_p8d.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_p8e.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/p12_utl.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs12/pk12err.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/bio_pk7.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pk7_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pk7_attr.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pk7_doit.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pk7_lib.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pk7_mime.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pk7_smime.c", "1.0.1j/openssl-1.0.1j/crypto/pkcs7/pkcs7err.c", "1.0.1j/openssl-1.0.1j/crypto/pqueue/pqueue.c", "1.0.1j/openssl-1.0.1j/crypto/rand/md_rand.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_egd.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_err.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_lib.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_nw.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_os2.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_unix.c", "1.0.1j/openssl-1.0.1j/crypto/rand/rand_win.c", "1.0.1j/openssl-1.0.1j/crypto/rand/randfile.c", "1.0.1j/openssl-1.0.1j/crypto/rand/randtest.c", "1.0.1j/openssl-1.0.1j/crypto/rc2/rc2_cbc.c", "1.0.1j/openssl-1.0.1j/crypto/rc2/rc2_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/rc2/rc2_skey.c", "1.0.1j/openssl-1.0.1j/crypto/rc2/rc2cfb64.c", "1.0.1j/openssl-1.0.1j/crypto/rc2/rc2ofb64.c", "1.0.1j/openssl-1.0.1j/crypto/rc2/rc2test.c", #"1.0.1j/openssl-1.0.1j/crypto/rc4/rc4_enc.c", #"1.0.1j/openssl-1.0.1j/crypto/rc4/rc4_skey.c", #"1.0.1j/openssl-1.0.1j/crypto/rc4/rc4_utl.c", "1.0.1j/openssl-1.0.1j/crypto/rc4/rc4test.c", "1.0.1j/openssl-1.0.1j/crypto/ripemd/rmd_dgst.c", "1.0.1j/openssl-1.0.1j/crypto/ripemd/rmd_one.c", "1.0.1j/openssl-1.0.1j/crypto/ripemd/rmdtest.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_ameth.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_chk.c", 
"1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_crpt.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_depr.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_eay.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_err.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_gen.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_lib.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_none.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_null.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_oaep.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_pk1.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_pmeth.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_prn.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_pss.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_saos.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_sign.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_ssl.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_test.c", "1.0.1j/openssl-1.0.1j/crypto/rsa/rsa_x931.c", "1.0.1j/openssl-1.0.1j/crypto/seed/seed.c", "1.0.1j/openssl-1.0.1j/crypto/seed/seed_cbc.c", "1.0.1j/openssl-1.0.1j/crypto/seed/seed_cfb.c", "1.0.1j/openssl-1.0.1j/crypto/seed/seed_ecb.c", "1.0.1j/openssl-1.0.1j/crypto/seed/seed_ofb.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha1_one.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha1dgst.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha1test.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha256.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha256t.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha512.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha512t.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha_dgst.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha_one.c", "1.0.1j/openssl-1.0.1j/crypto/sha/shatest.c", "1.0.1j/openssl-1.0.1j/crypto/srp/srp_lib.c", "1.0.1j/openssl-1.0.1j/crypto/srp/srp_vfy.c", "1.0.1j/openssl-1.0.1j/crypto/srp/srptest.c", "1.0.1j/openssl-1.0.1j/crypto/stack/stack.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_asn1.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_conf.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_err.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_req_print.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_req_utils.c", 
"1.0.1j/openssl-1.0.1j/crypto/ts/ts_rsp_print.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_rsp_sign.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_rsp_utils.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_rsp_verify.c", "1.0.1j/openssl-1.0.1j/crypto/ts/ts_verify_ctx.c", "1.0.1j/openssl-1.0.1j/crypto/txt_db/txt_db.c", "1.0.1j/openssl-1.0.1j/crypto/ui/ui_compat.c", "1.0.1j/openssl-1.0.1j/crypto/ui/ui_err.c", "1.0.1j/openssl-1.0.1j/crypto/ui/ui_lib.c", "1.0.1j/openssl-1.0.1j/crypto/ui/ui_openssl.c", "1.0.1j/openssl-1.0.1j/crypto/ui/ui_util.c", "1.0.1j/openssl-1.0.1j/crypto/uid.c", "1.0.1j/openssl-1.0.1j/crypto/whrlpool/wp_block.c", "1.0.1j/openssl-1.0.1j/crypto/whrlpool/wp_dgst.c", "1.0.1j/openssl-1.0.1j/crypto/whrlpool/wp_test.c", "1.0.1j/openssl-1.0.1j/crypto/x509/by_dir.c", "1.0.1j/openssl-1.0.1j/crypto/x509/by_file.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_att.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_cmp.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_d2.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_def.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_err.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_ext.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_lu.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_obj.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_r2x.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_req.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_set.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_trs.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_txt.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_v3.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_vfy.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509_vpm.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509cset.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509name.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509rset.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509spki.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x509type.c", "1.0.1j/openssl-1.0.1j/crypto/x509/x_all.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/pcy_cache.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/pcy_data.c", 
"1.0.1j/openssl-1.0.1j/crypto/x509v3/pcy_lib.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/pcy_map.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/pcy_node.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/pcy_tree.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_addr.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_akey.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_akeya.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_alt.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_asid.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_bcons.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_bitst.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_conf.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_cpols.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_crld.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_enum.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_extku.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_genn.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_ia5.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_info.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_int.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_lib.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_ncons.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_ocsp.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_pci.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_pcia.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_pcons.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_pku.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_pmaps.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_prn.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_purp.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_skey.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_sxnet.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3_utl.c", "1.0.1j/openssl-1.0.1j/crypto/x509v3/v3err.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/e_gost_err.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost2001.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost2001_keyx.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost89.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost94_keyx.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_ameth.c", 
"1.0.1j/openssl-1.0.1j/engines/ccgost/gost_asn1.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_crypt.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_ctl.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_eng.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_keywrap.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_md.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_params.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_pmeth.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gost_sign.c", "1.0.1j/openssl-1.0.1j/engines/ccgost/gosthash.c", "1.0.1j/openssl-1.0.1j/engines/e_4758cca.c", "1.0.1j/openssl-1.0.1j/engines/e_aep.c", "1.0.1j/openssl-1.0.1j/engines/e_atalla.c", "1.0.1j/openssl-1.0.1j/engines/e_capi.c", "1.0.1j/openssl-1.0.1j/engines/e_chil.c", "1.0.1j/openssl-1.0.1j/engines/e_cswift.c", "1.0.1j/openssl-1.0.1j/engines/e_gmp.c", "1.0.1j/openssl-1.0.1j/engines/e_nuron.c", "1.0.1j/openssl-1.0.1j/engines/e_padlock.c", "1.0.1j/openssl-1.0.1j/engines/e_sureware.c", "1.0.1j/openssl-1.0.1j/engines/e_ubsec.c", # these are from ssl/Makefile, not sure why these didn't # show up in the Windows nt.mak file. "1.0.1j/openssl-1.0.1j/ssl/*.c" ], "sources!": [ # exclude various tests that provide an impl of main(): "1.0.1j/openssl-1.0.1j/crypto/*/*test.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha256t.c", "1.0.1j/openssl-1.0.1j/crypto/sha/sha512t.c", "1.0.1j/openssl-1.0.1j/crypto/*test.c", "1.0.1j/openssl-1.0.1j/ssl/*test.c", "1.0.1j/openssl-1.0.1j/ssl/ssl_task*.c" ], "direct_dependent_settings": { "include_dirs": [ "1.0.1j/openssl-1.0.1j/include" ] }, "include_dirs": [ "1.0.1j/openssl-1.0.1j/include", "1.0.1j/openssl-1.0.1j/crypto", # e.g. cryptlib.h "1.0.1j/openssl-1.0.1j/crypto/asn1", # e.g. asn1_locl.h "1.0.1j/openssl-1.0.1j/crypto/evp", # e.g. evp_locl.h "1.0.1j/openssl-1.0.1j/crypto/modes", "1.0.1j/openssl-1.0.1j" # e.g. 
e_os.h ], "defines": [ # #defines shared across platforms copied from ms\nt.mak "OPENSSL_NO_RC4", "OPENSSL_NO_RC5", "OPENSSL_NO_MD2", "OPENSSL_NO_SSL2", "OPENSSL_NO_SSL3", "OPENSSL_NO_KRB5", "OPENSSL_NO_HW", "OPENSSL_NO_JPAKE", "OPENSSL_NO_DYNAMIC_ENGINE" ], "conditions": [ ["OS=='win'", { "defines": [ # from ms\nt.mak "OPENSSL_THREADS", "DSO_WIN32", "OPENSSL_SYSNAME_WIN32", "WIN32_LEAN_AND_MEAN", "L_ENDIAN", "_CRT_SECURE_NO_DEPRECATE", "NO_WINDOWS_BRAINDEATH" # for cversion.c ], "link_settings" : { "libraries" : [ # external libs (from nt.mak) "-lws2_32.lib", "-lgdi32.lib", "-ladvapi32.lib", "-lcrypt32.lib", "-luser32.lib" ] } }], ["OS=='mac'", { "defines": [ "OPENSSL_NO_EC_NISTP_64_GCC_128", "OPENSSL_NO_GMP", "OPENSSL_NO_JPAKE", "OPENSSL_NO_MD2", "OPENSSL_NO_RC5", "OPENSSL_NO_RFC3779", "OPENSSL_NO_SCTP", "OPENSSL_NO_SSL2", "OPENSSL_NO_SSL3", "OPENSSL_NO_STORE", "OPENSSL_NO_UNIT_TEST", "NO_WINDOWS_BRAINDEATH" ] }], ["OS=='iOS'", { "defines": [ "OPENSSL_NO_EC_NISTP_64_GCC_128", "OPENSSL_NO_GMP", "OPENSSL_NO_JPAKE", "OPENSSL_NO_MD2", "OPENSSL_NO_RC5", "OPENSSL_NO_RFC3779", "OPENSSL_NO_SCTP", "OPENSSL_NO_SSL2", "OPENSSL_NO_SSL3", "OPENSSL_NO_STORE", "OPENSSL_NO_UNIT_TEST", "NO_WINDOWS_BRAINDEATH" ] }], ["OS=='linux'", { "defines": [ # from Linux Makefile after ./configure "DSO_DLFCN", "HAVE_DLFCN_H", "L_ENDIAN", # TODO: revisit! "TERMIO", # otherwise with clang 3.5 on Ubuntu it get errors around # ROTATE() macro's inline asm. Error I had not got on # Centos with clang 3.4. # Note that this is only a problem with cflags -no-integrated-as # which was necessary for clang 3.4. Messy. TODO: revisit "OPENSSL_NO_INLINE_ASM", "NO_WINDOWS_BRAINDEATH" # for cversion.c, otherwise error (where is buildinf.h?) 
], "link_settings" : { "libraries" : [ "-ldl" ] } }] ] }, { "target_name": "ssltest", "type": "executable", "test": { "cwd": "1.0.1j/openssl-1.0.1j/test" }, "defines": [ # without these we get linker errors since the test assumes # by default that SSL2 & 3 was built "OPENSSL_NO_RC4", "OPENSSL_NO_RC5", "OPENSSL_NO_MD2", "OPENSSL_NO_SSL2", "OPENSSL_NO_SSL3", "OPENSSL_NO_KRB5", "OPENSSL_NO_HW", "OPENSSL_NO_JPAKE", "OPENSSL_NO_DYNAMIC_ENGINE" ], "include_dirs": [ "1.0.1j/openssl-1.0.1j" # e.g. e_os.h ], "sources": [ # note how the ssl test depends on many #defines set via # ./configure. Do these need to be passed to the test build # explicitly? Apparently not. "1.0.1j/openssl-1.0.1j/ssl/ssltest.c" ], "dependencies": [ "openssl" ], # this disables building the example on iOS "conditions": [ ["OS=='iOS'", { "type": "none" } ] ] } # compile one of the (interactive) openssl demo apps to verify correct # compiler & linker settings in upstream gyp target: # P.S.: I dont think this test can compile on Windows, so this is not # suitable as a cross-platform test. #{ # "target_name": "demos-easy_tls", # "type": "executable", # not suitable as a test, just building this to see if it links #"test": { # "cwd": "1.0.1j/openssl-1.0.1j/demos/easy_tls" #}, # "include_dir": [ "1.0.1j/openssl-1.0.1j/demos/easy_tls" ], # "sources": [ # "1.0.1j/openssl-1.0.1j/demos/easy_tls/test.c", # "1.0.1j/openssl-1.0.1j/demos/easy_tls/easy-tls.c" # ], # "dependencies": [ "openssl" ] #} ] }
def make_matrix(rows=0, columns=0, list_of_list=None):
    ''' (int, int, list of list) -> list of list (i.e. matrix)

    Return a matrix (list of row lists) built from "list_of_list" when given;
    otherwise prompt the user for values of a rows x columns matrix.
    Return None (after printing a message) if the rows are not all the same
    length.
    '''
    # None replaces the old mutable default [[]] (a shared-object pitfall);
    # both spellings are still treated as "no matrix supplied".
    if list_of_list is None or list_of_list == [[]]:
        return make_matrix_manually(rows, columns)
    width = size_of(list_of_list[0])
    for row in list_of_list:
        if size_of(row) != width:
            print('The number of columns in every row should be equal, but isn\'t!')
            return None
    return list_of_list


def make_matrix_manually(rows=0, columns=0):
    ''' (int, int) -> list of list (i.e. matrix)

    Prompt the user to type in values for each row and return a matrix with
    dimensions rows x columns, or None if a row has the wrong length.
    '''
    matrix = []
    for i in range(rows):
        print('Type in values for ROW', i+1, 'seperated by commas: ', end='')
        current_row = convert_str_into_list(input())
        if size_of(current_row) != columns:
            print('Number of values different then declared columns!')
            return None
        matrix.append(current_row)
    return matrix


def make_Id_matrix(size=1):
    ''' (int) -> list of list (i.e. matrix)

    Return the size x size identity matrix (1.0 on the diagonal, 0.0
    elsewhere; floats, matching the historical string-based builder).
    '''
    return [[1.0 if row == col else 0.0 for col in range(size)]
            for row in range(size)]


def convert_str_into_list(string):
    ''' (str) -> list of numbers

    Return a list of floats parsed from a comma-separated string.
    Spaces are ignored and empty items (e.g. a trailing comma) are skipped.
    '''
    # str.replace + split does in one C-level pass what the previous
    # character-by-character deletion loop did quadratically.
    items = string.replace(' ', '').split(',')
    return [float(item) for item in items if item != '']


def convert_into_list(*values):
    ''' (items separated by commas) -> list

    Return the positional arguments collected into a list.
    '''
    return list(values)


def size_of(list):
    ''' (list) -> int

    Return the number of entries (items) in a given list.
    Thin wrapper over len(), kept for backward compatibility.
    '''
    return len(list)


def add_matrix(matrix1, matrix2):
    ''' (list of list, list of list) -> list of list

    Return the elementwise sum of matrix1 and matrix2, or None (after
    printing a message) if their dimensions differ.
    '''
    if size_of(matrix1) != size_of(matrix2):
        print('Error: matrices do not have the same dimentions (size)!')
        return None
    matrix_sum = []
    for row1, row2 in zip(matrix1, matrix2):
        if size_of(row1) != size_of(row2):
            print('Error: matrices do not have the same dimentions (size)!')
            return None
        matrix_sum.append([x + y for x, y in zip(row1, row2)])
    return matrix_sum


def neg_matrix(matrix1):
    ''' (list of list) -> list of list

    Return the elementwise negation of matrix1.
    '''
    return [[-entry for entry in row] for row in matrix1]


def substract_matrix(matrix1, matrix2):
    ''' (list of list, list of list) -> list of list

    Return matrix1 - matrix2, or None if their dimensions differ.
    '''
    return add_matrix(matrix1, neg_matrix(matrix2))


def multiply_matrix_by_float(arg1, matrix1):
    ''' (number, list of list) -> list of list

    Return matrix1 scaled elementwise by arg1.
    '''
    return [[arg1 * entry for entry in row] for row in matrix1]


def multiply_matrix_by_matrix(matrix1, matrix2):
    ''' (list of list, list of list) -> list of list

    Return the matrix product matrix1 x matrix2, or [] (after printing a
    message) if the inner dimensions do not match.
    '''
    if size_of(matrix1[0]) != size_of(matrix2):
        print('Error: The number of columns in matrix1 has to be equal to the number of rows in matrix2!')
        return []
    # zip(*matrix2) iterates the columns of matrix2.
    return [[sum(a * b for a, b in zip(row, col)) for col in zip(*matrix2)]
            for row in matrix1]
# date: 2019.09.24
# https://stackoverflow.com/questions/58085910/python-convert-u0048-style-unicode-to-normal-string/58086131#58086131
# '#U0048' tokens are '\uXXXX' escapes spelled with a '#U' prefix; swap the
# prefix back, then let the raw_unicode_escape codec interpret the escapes.
escaped = '#U0048#U0045#U004C#U004C#U004F'.replace('#U', '\\u')
decoded = escaped.encode().decode('raw_unicode_escape')
print(decoded)
# Read two integers from one line of stdin and report whether their
# product is even or odd.
a, b = (int(token) for token in input().split())
print("Even" if a * b % 2 == 0 else "Odd")
# encoding = utf-8
# Demonstrates Python file-open modes:
#  * A handle opened with 'w' is write-only; calling read() on it raises
#    io.UnsupportedOperation: not readable while it is still open in 'w'.
#  * 'r+' vs 'w+': the important difference is that 'w+' truncates the
#    file on open while 'r+' does not.
# with-blocks replace the previous bare open()/close() pairs so the
# handles are closed even on error (f2 was never closed at all before).
with open('listing11-4.txt', 'w') as f:
    f.write('Hello, ')
with open('listing11-4.txt', 'r+') as f:
    print(f.read(3))
# NOTE(review): assumes listing11-5.txt already exists on disk — confirm.
with open('listing11-5.txt', 'r') as f2:
    print(f2.read())
# seek(offset, whence) quick reference:
#   seek(0)    -> move to the beginning of the file
#   seek(0, 2) -> move to the end of the file (offset 0 from the end)
# In a UTF-8 text file one Chinese character occupies 3 bytes, so byte
# offsets (seek/tell/truncate) do not line up with character counts.
# with-block replaces the previous open()/close() pair so the handle is
# closed even if an error occurs.
with open('写文件02', mode='w', encoding='utf-8') as f:
    f.write('哇哈哈哈哈压缩盖伦')
    f.seek(9)
    print(f.tell())
    # truncate(size) cuts the file down to `size` bytes measured from the
    # start, independent of where the cursor currently is.
    f.truncate(12)
# Default constants for an "sss"-service API client.
SSS_VERSION = '1.0'          # API version string sent to the service
SSS_FORMAT = 'json'          # wire format for requests/responses
SERVICE_TYPE = 'sss'         # service type name — presumably used for catalog lookup; verify against caller
ENDPOINT_TYPE = 'publicURL'  # which endpoint variant to select (Keystone-style catalog naming)
AUTH_TYPE = "identity"       # authentication backend identifier — TODO confirm expected values
ACTION_PREFIX = ""           # prefix prepended to action URLs; empty by default
# Extract the BLEU score from an mteval-style 'NIST.result' report.
# Bug fix: the file must be read as ONE string. The previous readlines()
# produced a list, so .index('BLEU score = ') compared whole lines (and
# would normally raise ValueError), and float(output[i+13:i+19]) passed a
# *list slice* to float(), a guaranteed TypeError.
with open('NIST.result', 'r') as fd:
    output = fd.read()
# 'BLEU score = ' is 13 characters; the score is the next 6 characters.
BLEUStrIndex = output.index('BLEU score = ')
blu_new = float(output[BLEUStrIndex + 13:BLEUStrIndex + 19])
class Solution:
    def strWithout3a3b(self, A, B):
        """
        :type A: int
        :type B: int
        :rtype: str

        Build a string containing exactly A 'a' characters and B 'b'
        characters with no "aaa" or "bbb" substring (LeetCode 984).

        Strategy (unchanged from the original): emit min(A, B) two-letter
        pairs led by the majority letter, then prepend the surplus majority
        letters to the leading pairs one at a time so no run of three forms.

        Fixes in this revision: the two mirror-image branches are collapsed
        into one helper, and the bare `except:` clauses (which could mask
        unrelated errors) are replaced with an explicit bounds check.
        """
        if A < B:
            return self._fill(B, A, 'b', 'a')
        return self._fill(A, B, 'a', 'b')

    @staticmethod
    def _fill(count_major, count_minor, major, minor):
        """Assemble the answer given the majority/minority letter counts."""
        parts = []
        # Pair the letters while both remain; each pair starts with the
        # majority letter.
        while count_major > 0 and count_minor > 0:
            count_major -= 1
            count_minor -= 1
            parts.append(major + minor)
        # Distribute the surplus majority letters over the leading pairs
        # (at most one extra per pair keeps runs below three).
        idx = 0
        while count_major > 0:
            count_major -= 1
            if idx < len(parts):
                parts[idx] = major + parts[idx]
            else:
                parts.append(major)
            idx += 1
        return ''.join(parts)


S = Solution()
a = S.strWithout3a3b(4, 1)
print(a)
# Modules may be imported here.
"""
@param string line  a single line of test input, of the form "A-B"
@return string      the decimal string of A minus B
"""
def solution(line):
    """Return str(a - b) for input of the form 'a-b'.

    Python integers have arbitrary precision, so the plain int()
    conversion handles operands of any length (the judge's inputs run to
    hundreds of digits) — no manual digit-by-digit borrowing is needed.
    The previous hand-rolled long-subtraction attempt was left commented
    out and has been removed as dead code.
    """
    # Indentation uses 4 spaces, per PEP 8.
    a, b = line.strip().split('-')
    return str(int(a) - int(b))


if __name__ == '__main__':
    aa = solution("1231231237812739878951331231231237812739878951331231231237812739878951331231231237812739878951331231231237812739878951331231231237812739870-89513312312312378127398789513312312312378127398789513312312312378127398789513")
    print(aa)
# Given an array of integers (sorted) and integer Val. Implement a function
# that takes A and Val as input parameters and returns the lower bound of
# Val, i.e. the greatest element strictly smaller than Val.
# ex- A = [-1, -1, 2, 3, 5], Val = 4 -> Ans = 3, as 3 is smaller than 4.
from bisect import bisect_left


def lowerBound(arr, key):
    """Return the greatest element of sorted `arr` strictly smaller than `key`.

    Returns None when no element is smaller than `key`.

    Fixes in this revision:
    - the original binary search looped forever when `key` was present
      (the == branch moved neither pointer),
    - `e = len(arr)` could index one past the end,
    - both are avoided by using bisect_left, which finds the insertion
      point in O(log n); the element just before it is the answer.
    """
    idx = bisect_left(arr, key)
    return arr[idx - 1] if idx > 0 else None


print(lowerBound([-1, -1, 2, 3, 5], 4))
# Expected parsed platform facts for a Cisco Catalyst 9300 running
# IOS-XE 17.06.01 — presumably the golden output for a pyATS/Genie parser
# unit test; TODO confirm against the test module that imports this.
expected_output = {
    'model': 'C9300-24P',     # hardware model string
    'os': 'iosxe',            # operating system family
    'platform': 'cat9k',      # platform series
    'version': '17.06.01',    # software version
}
# Newton's law of universal gravitation: F = G * m1 * m2 / r^2
GRAVITATIONAL_CONSTANT = 6.673*(10**(-11))

first_mass = float(input("Enter first mass - unit kg\n"))
second_mass = float(input("Enter second mass - unit kg\n"))
separation = float(input("Enter radius - unit metres\n"))

attraction = GRAVITATIONAL_CONSTANT * first_mass * second_mass / (separation**2)
print("The force of gravity acting on the bodies is",round(attraction,5),"N")
#Section 1: Terminology
# 1) What is a recursive function?
# A recursive function is a function that calls itself.
#
# 2) What happens if there is no base case defined in a recursive function?
# The recursion never stops, so the call stack grows until Python raises a
# RecursionError.
#
# 3) What is the first thing to consider when designing a recursive function?
# The base case: the condition under which the function stops recursing.
#
# 4) How do we put data into a function call?
# By passing arguments (parameters) to the call.
#
# 5) How do we get data out of a function call?
# By returning a value (or printing it).

#Section 2: Reading
# Read the following function definitions and function calls.
# Then determine the values of the variables q1-q20.
#a1 = 8
#a2 = 8
#a3 = -1
#b1 = 2
#b2 = 2
#b3 = 4
#c1 = -2
#c2 = 4
#c3 = 45
#d1 = 6
#d2 = 8
#d3 = 2

#Section 3: Programming
#Write a script that asks the user to enter a series of numbers.
#When the user types in nothing, it should return the average of all the odd numbers
#that were typed in.
# (Grading rubric retained from the original assignment.)
# +2 base case is present (MUST BE LABELED)
# +2 recursive case is present (MUST BE LABELED)
# -1 base case returns sum/ct (or equivalent)
# -2 recursive case filters even numbers
# -1 recursive case increments sum and ct correctly
# -1 recursive case returns correct recursive call
# +1 main function present AND called

# NOTE(review): the original used Python 2's raw_input() and referenced
# undefined names (avgodd, n, avgOfodd), so it crashed on Python 3; the
# rewrite below keeps the required structure (labeled base/recursive case,
# main present and called) but actually runs. The defaulted parameters
# accumulate the running sum and count across recursive calls, so the
# original no-argument call type1() still works.
def type1(total=0, count=0):
    """Recursively read numbers; return the average of the odd entries."""
    entry = input("Next: ")
    # base case: a blank entry ends the series
    if entry == "":
        return total / count if count else 0
    value = int(entry)
    # recursive case: fold odd values into the running sum and count
    if value % 2 != 0:
        return type1(total + value, count + 1)
    return type1(total, count)


def main():
    print("The average of odd numbers is {}".format(type1()))


if __name__ == '__main__':
    main()
class Constants:  # pylint: disable=too-few-public-methods
    """Well-known endpoints, tenants, and claim names used by Bot Framework
    authentication."""

    # TO CHANNEL FROM BOT: login URL prefix
    TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX = "https://login.microsoftonline.com/"

    # TO CHANNEL FROM BOT: login URL token endpoint path
    TO_CHANNEL_FROM_BOT_TOKEN_ENDPOINT_PATH = "/oauth2/v2.0/token"

    # TO CHANNEL FROM BOT: default tenant from which to obtain a token for
    # bot-to-channel communication
    DEFAULT_CHANNEL_AUTH_TENANT = "botframework.com"

    # Issuer expected on tokens arriving from the channel
    TO_BOT_FROM_CHANNEL_TOKEN_ISSUER = "https://api.botframework.com"

    # OpenID metadata documents for emulator and channel traffic
    TO_BOT_FROM_EMULATOR_OPEN_ID_METADATA_URL = (
        "https://login.microsoftonline.com/common/v2.0/.well-known/openid-configuration")
    TO_BOT_FROM_CHANNEL_OPEN_ID_METADATA_URL = (
        "https://login.botframework.com/v1/.well-known/openidconfiguration")

    # JWT signing algorithms accepted during token validation
    ALLOWED_SIGNING_ALGORITHMS = ["RS256", "RS384", "RS512"]

    # JWT claim names
    AUTHORIZED_PARTY = "azp"
    AUDIENCE_CLAIM = "aud"
    ISSUER_CLAIM = "iss"
def digit_sum(value):
    """Return the sum of the decimal digits of a non-negative integer.

    Fix: the original stripped digits with `a = int(a / 10)`; true division
    goes through float and silently corrupts integers larger than 2**53.
    Floor division via divmod is exact for any size of int.
    """
    total = 0
    while value > 0:
        value, digit = divmod(value, 10)
        total += digit
    return total


if __name__ == '__main__':
    a = int(input(">>"))
    print(digit_sum(a))
# Field-name -> wire-type maps describing the UTP/STK trading API message
# layouts. Type names are strings: "string", "int", "double", "char", "bool".
# Presumably consumed by a generic message encoder/decoder — TODO confirm
# against the module that imports these tables.

# Level-2 stock market data snapshot: session identity, last-trade stats,
# daily price limits, previous-session references, and ten levels of
# ask/bid depth.
STKMarketData_t = {
    "trading_day": "string",
    "update_time": "string",
    "update_millisec": "int",
    "update_sequence": "int",
    "instrument_id": "string",
    "exchange_id": "string",
    "exchange_inst_id": "string",
    "instrument_status": "int",
    "last_price": "double",
    "volume": "int",
    "last_volume": "int",
    "turnover": "double",
    "open_interest": "int",
    "open_price": "double",
    "highest_price": "double",
    "lowest_price": "double",
    "close_price": "double",
    "settlement_price": "double",
    "average_price": "double",
    "change_price": "double",
    "change_markup": "double",
    "change_swing": "double",
    "upper_limit_price": "double",
    "lower_limit_price": "double",
    # previous-session reference values
    "pre_settlement_price": "double",
    "pre_close_price": "double",
    "pre_open_interest": "int",
    "pre_delta": "double",
    "curr_delta": "double",
    # best quote on each side
    "best_ask_price": "double",
    "best_ask_volume": "int",
    "best_bid_price": "double",
    "best_bid_volume": "int",
    # ten levels of market depth (ask/bid price and volume per level)
    "ask_price1": "double",
    "ask_volume1": "int",
    "bid_price1": "double",
    "bid_volume1": "int",
    "ask_price2": "double",
    "ask_volume2": "int",
    "bid_price2": "double",
    "bid_volume2": "int",
    "ask_price3": "double",
    "ask_volume3": "int",
    "bid_price3": "double",
    "bid_volume3": "int",
    "ask_price4": "double",
    "ask_volume4": "int",
    "bid_price4": "double",
    "bid_volume4": "int",
    "ask_price5": "double",
    "ask_volume5": "int",
    "bid_price5": "double",
    "bid_volume5": "int",
    "ask_price6": "double",
    "ask_volume6": "int",
    "bid_price6": "double",
    "bid_volume6": "int",
    "ask_price7": "double",
    "ask_volume7": "int",
    "bid_price7": "double",
    "bid_volume7": "int",
    "ask_price8": "double",
    "ask_volume8": "int",
    "bid_price8": "double",
    "bid_volume8": "int",
    "ask_price9": "double",
    "ask_volume9": "int",
    "bid_price9": "double",
    "bid_volume9": "int",
    "ask_price10": "double",
    "ask_volume10": "int",
    "bid_price10": "double",
    "bid_volume10": "int",
    "md_source": "string",
}

# Error envelope attached to failed requests/responses.
ERRORMSGINFO = {
    "error_code": "int",
    "error_message": "string",
    "response_code": "int",
    "response_string": "string",
    "utp_server_id": "int",
    "oms_server_id": "int",
}
ERRORMSGINFO_t = ERRORMSGINFO

# Login request: developer and user credentials.
ReqUtpLoginField = {
    "developer_code": "string",
    "developer_license": "string",
    "user_id": "string",
    "user_password": "string",
    "user_one_time_password": "string",
    "user_ca_info": "string",
}
ReqUtpLoginField_t = ReqUtpLoginField

# Login response: session key plus last-login audit info.
RspUtpLoginField = {
    "response_code": "int",
    "response_string": "string",
    "session_public_key": "string",
    "utp_checking_server_id": "string",
    "utp_checking_server_time": "int",
    "last_login_ip_address": "string",
    "last_login_time": "int",
    "session_encrypted": "bool",
}
RspUtpLoginField_t = RspUtpLoginField

# Logout response.
RspUtpLogoutField = {
    "response_code": "int",
    "response_string": "string",
    "utp_server_id": "int",
}
RspUtpLogoutField_t = RspUtpLogoutField

# Subscribe / unsubscribe to a routing key (market data topic).
ReqSubscribeField = {
    "routing_key": "string",
}
ReqSubscribeField_t = ReqSubscribeField

RspSubscribeField = {
    "response_code": "int",
    "response_string": "string",
    "routing_key": "string",
}
RspSubscribeField_t = RspSubscribeField

ReqUnSubscribeField = {
    "routing_key": "string",
}
ReqUnSubscribeField_t = ReqUnSubscribeField

RspUnSubscribeField = {
    "response_code": "int",
    "response_string": "string",
    "routing_key": "string",
}
RspUnSubscribeField_t = RspUnSubscribeField

# User/password authentication request and response.
# NOTE(review): "Passwor" (missing 'd') is kept as-is — renaming would break
# every consumer of this table.
ReqAuthUserPassworField = {
    "user_id": "string",
    "password": "string",
    "save_int": "int",
    "save_double": "double",
    "save_string": "string",
}
ReqAuthUserPassworField_t = ReqAuthUserPassworField

RspAuthUserPassworField = {
    "response_code": "int",
    "response_string": "string",
}
RspAuthUserPassworField_t = RspAuthUserPassworField

# Order insertion payload: instrument identity, order flags (single-char
# codes), prices, and volumes.
ReqOrderInsertData = {
    "client_id": "string",
    "commodity_id": "string",
    "instrument_id": "string",
    "order_type": "char",
    "order_mode": "char",
    "order_way": "char",
    "valid_datetime": "string",
    "is_riskorder": "char",
    "direct": "char",
    "offset": "char",
    "hedge": "char",
    "order_price": "double",
    "trigger_price": "double",
    "order_vol": "int",
    "min_matchvol": "int",
    "save_int": "int",
    "save_double": "double",
    "save_string": "string",
}
ReqOrderInsertData_t = ReqOrderInsertData

# Order insertion routing header.
ReqOrderInsertField = {
    "oms_server_id": "int",
    "exchange_id": "string",
}
ReqOrderInsertField_t = ReqOrderInsertField

# Order insertion acknowledgement: server-assigned identifiers and state.
RspOrderInsertField = {
    "response_code": "int",
    "response_string": "string",
    "utp_server_id": "int",
    "oms_server_id": "int",
    "order_stream_id": "int",
    "order_id": "int",
    "local_id": "string",
    "trade_id": "string",
    "insert_id": "string",
    "insert_datetime": "string",
    "order_state": "char",
}
RspOrderInsertField_t = RspOrderInsertField

# Exchange query request/response.
ReqQryExchangeField = {
    "oms_server_id": "int",
}
ReqQryExchangeField_t = ReqQryExchangeField

RspQryExchangeField = {
    "response_code": "int",
    "response_string": "string",
    "utp_server_id": "int",
    "oms_server_id": "int",
    "exchange_id": "string",
    "exchange_name": "string",
    "exchange_status": "char",
}
RspQryExchangeField_t = RspQryExchangeField

# Instrument query request/response: static contract definition including
# trading limits and lifecycle dates.
ReqQryInstrumentField = {
    "oms_server_id": "int",
    "exchange_id": "string",
    "product_id": "string",
    "instrument_id": "string",
    "product_class": "char",
}
ReqQryInstrumentField_t = ReqQryInstrumentField

RspQryInstrumentField = {
    "response_code": "int",
    "response_string": "string",
    "utp_server_id": "int",
    "oms_server_id": "int",
    "product_id": "string",
    "product_name": "string",
    "exchange_id": "string",
    "product_class": "char",
    "instrument_id": "string",
    "instrument_name": "string",
    "instrument_class": "char",
    "instrument_status": "char",
    "delivery_year": "int",
    "delivery_month": "int",
    "volume_multiple": "double",
    "price_tick": "double",
    "price_tick_dividend": "int",
    "max_marketorder_volume": "int",
    "min_marketorder_volume": "int",
    "max_limitorder_volume": "int",
    "min_limitorder_volume": "int",
    "create_date": "string",
    "open_date": "string",
    "expire_date": "string",
    "last_trading_date": "string",
    "start_delivery_date": "string",
    "end_delivery_date": "string",
    "first_notice_date": "string",
}
RspQryInstrumentField_t = RspQryInstrumentField
#!/usr/bin/evn python3
# NOTE(review): "evn" above is a typo for "env" — the shebang as written will
# fail if this file is ever executed directly. Left unchanged here because it
# is behavior-relevant; fix deliberately.
# coding=utf-8
#######################
# This configuration file is for the production environment
#######################

"""
# server configuration
"""
server = {
    # 67108864    64M
    # 134217728   128M
    # 268435456   256M
    # 536870912   512M
    # 1073741824  1G
    "max_buffer_size": 268435456,
    "port": 8899,
}

"""
# App configuration
"""
app = {
    "debug": False,
    "autoreload": True,
    "ui_modules": {},
    # default handler class, e.g. for 404 pages
    "default_handler_class": None,
    "serve_traceback": False,
    "template_path": "template",
    "compiled_template_cache": True,
    "autoescape": None,
    "cookie_secret": "b6fc5e14875abbb980979e5d9ee99891",
    "login_url": "/api/admin/login/",
    "xsrf_cookies": True,
    "xsrf_cookie_version": 2,
    "static_hash_cache": False,
    "static_path": "../static",
    "static_url_prefix": "/static/",
    # image upload path
    "upload_path": "/usr/local/src/web/netdisk/upload/"
}

"""
# redis configuration
"""
redis = {
    "master": {
        "default": {
            "host": "127.0.0.1",
            "port": 6379,
            "db": 0,
            "exp": 86400
        },
    },
}

"""
# orm configuration
"""
orm = {
    "sql_echo": False
}

"""
# mysql configuration
"""
mysql = {
    "master": {
        "default": {
            "username": "root",
            "hostname": "127.0.0.1",
            "port": "3306",
            "database": "mydb",
            "password": "mima",
            "charset": "utf8mb4"
        },
    },
}

"""
# Session configuration
"""
session = {
    # session cookie name
    "name": "sid",
    # domain
    "domain": None,
    # path
    "path": "/",
    # expiry time (seconds)
    "expires": 3600,
    # expiry time when "remember me" is active
    "rem_session_exp": 86400
}

"""
# Net disk configuration
"""
net_disk = {
    # Root directory of the net disk, maps to a folder on the system.
    # NOTE: the trailing slash is required.
    "base_dir": "/usr/local/src/web/netdisk/"
}

"""
# Music feature configuration
"""
music = {
    # NOTE: only 3-character suffixes are supported
    "type": [".mp3", ".ogg"]
}

"""
# Logging configuration
"""
log = {
    # Log storage path. NOTE: the trailing slash is required.
    "path": "/usr/local/src/web/netdisk/log/",
    # Log record format
    "format": "%(asctime)s - %(levelname)s - %(name)s ==*== %(message)s ==*==",
    # Log file name prefix
    "file": "tornado",
    # Whether the logging feature is disabled
    "disabled": False,
    # Whether logs also propagate to the console
    "propagate": False,
    #############
    # Log levels
    #############
    # CRITICAL 50
    # ERROR 40
    # WARNING 30
    # INFO 20
    # DEBUG 10
    # NOTSET 0
    #############
    "level": "INFO",
    #############
    # `when` is a string defined as follows:
    #############
    # "S": seconds
    # "M": minutes
    # "H": hours
    # "D": days
    # "W[0-6]": weekday (0=Monday)
    #############
    "when": "D",
    # Rotated-file suffix, in strftime format; keep it consistent with the
    # `when` unit.
    # "%Y-%m-%d_%H-%M-%S.log"
    "suffix": "%Y-%m-%d.log",
    # interval: how many `when` units to wait before rolling over to a new
    # log file
    "interval": 1,
    # Number of rotated log files to keep
    "backup_count": 10
}

"""
# Login password / access-code configuration
"""
login = {
    # Maximum number of attempts
    "max_try_count": 5,
    # How many seconds before another attempt is allowed
    "timeout": 1800
}

"""
# Mail configuration
"""
mail = {
    # Server connection timeout (seconds)
    "timeout": 15,
}
# Read seven integers from the user and store them in a single list-of-lists
# that keeps even and odd values apart; at the end, print each group in
# ascending order. (Original prompt: "Crie um programa onde o usuário possa
# digitar sete valores numéricos e cadastre-os em uma lista única que mantenha
# separados os valores pares e ímpares.")
valores = [[], []]
for _ in range(7):
    numero = int(input('Digite um número: '))
    # index 0 holds the evens, index 1 the odds — numero % 2 picks the slot
    valores[numero % 2].append(numero)
print('='*30)
valores[0].sort()  # sorts the evens in place (permanent ordering)
print(f'Os valores pares são {valores[0]}')
print(f'Os valores impares são {sorted(valores[1])}')  # temporary ordering; original list kept
import math


class Circle:
    """A circle positioned at (x, y) with the given radius."""

    def __init__(self, x, y, radius):
        # Initialize the instance attributes.
        self.radius = radius
        self.x = x
        self.y = y
        # Cache the circumference. (The original calcPerimeter had no return
        # statement, so this attribute ended up None; it also computed
        # x + y, which is not a perimeter at all.)
        self.perimeter = self.calcPerimeter()

    def calcPerimeter(self):
        """Compute the circumference (2 * pi * r), cache it, and return it."""
        self.perimeter = 2 * math.pi * self.radius
        return self.perimeter


p = Circle(1, 2, 12)
p.calcPerimeter()
print(p.perimeter)
class Other:
    """Simple value object with a single required string field ``name``."""

    def __init__(self, _name):
        self._name = _name

    @property
    def name(self):
        """The stored name string."""
        return self._name

    @staticmethod
    def decode(data):
        """Build an Other from a dict; raise if ``name`` is not a string.

        Fix: the original checked ``isinstance(f_name, unicode)`` — a
        Python-2-only name that raises NameError on Python 3 (which the
        rest of this file targets); ``str`` is the Python 3 text type.
        """
        f_name = data["name"]
        if not isinstance(f_name, str):
            raise Exception("not a string")
        return Other(f_name)

    def encode(self):
        """Serialize to a dict; raise if name is missing."""
        data = dict()
        if self._name is None:
            raise Exception("name: is a required field")
        data["name"] = self._name
        return data

    def __repr__(self):
        return "<Other name:{!r}>".format(self._name)
'''
Python can be used to manipulate files.
Reference: https://www.petanikode.com/python-file/
Written: 14-02-2021
'''
# Open the target file for writing ('w' replaces any existing contents).
target = open('file.txt', 'w')
# Prompt the user for the text to store.
text = input('masukkan kata >>> ')
# Commit the input to the target file.
target.write(text)
# Echo what was written.
# NOTE: the file is never closed explicitly (as in the original) — the
# contents flush when the interpreter exits.
print('menulis isi file dengan output = {isi}'.format(isi=text))
# EX 1: Bank Exercise
#
# Create a Bank, an Account, and a Customer class.
# All classes should be in a single file.
# The bank class should be able to hold many accounts, with the ability to
# add new ones. The Account and Customer classes hold their relevant details.


class Bank:
    """Holds a collection of Account objects."""

    def __init__(self):
        self.accounts = list()

    def add_account(self, new_entry):
        """Register an Account with this bank."""
        self.accounts.append(new_entry)

    def print_accounts(self):
        """Print the id of every registered account."""
        for account in self.accounts:
            print("account number: ", account.id)


class Account:
    """A bank account identified by a numeric id."""

    # NOTE: the parameter shadows the builtin `id`; kept for caller
    # compatibility with the original signature.
    def __init__(self, id):
        self.id = id

    def __repr__(self):
        return str(self.__dict__)

    def __str__(self):
        return str(self.__dict__)


class Customer:
    """A customer with personal details and an associated Account."""

    def __init__(self, name, age, sex, account):
        self.name = name
        self.age = age
        self.sex = sex
        self.account = account

    def __repr__(self):
        return str(self.__dict__)


def main():
    """Demo: build accounts/customers and list the bank's accounts."""
    one = Account(1)
    two = Account(2)
    three = Account(3)

    cus1 = Customer("peter", 23, "helicopter", one)
    cus2 = Customer("Cris", 12, "m?", two)
    cus3 = Customer("NameHere", 99, "sex", three)

    print(cus1)  # this requires repr and str for it to work
    print("customer one: ", cus1.name, cus1.age, cus1.sex, cus1.account.id)
    print("customer two: ", cus2.name, cus2.age, cus2.sex, cus2.account.id)
    print("customer three: ", cus3.name, cus3.age, cus3.sex, cus3.account.id)

    bankOne = Bank()
    bankOne.add_account(one)
    bankOne.add_account(two)
    bankOne.add_account(three)
    bankOne.print_accounts()


# Fix: the original called main() unconditionally, so the demo ran on every
# import; the guard keeps script behavior identical while making the classes
# importable.
if __name__ == "__main__":
    main()