content stringlengths 7 1.05M |
|---|
# MD5 round-constant table: T[i] = floor(2**32 * abs(sin(i + 1))) for i = 0..63.
# NOTE(review): values match the T[1..64] table in RFC 1321 (e.g. T[0] == 0xD76AA478) — confirm intended use.
T = [3614090360, 3905402710, 606105819, 3250441966,
     4118548399, 1200080426, 2821735955, 4249261313,
     1770035416, 2336552879, 4294925233, 2304563134,
     1804603682, 4254626195, 2792965006, 1236535329,
     4129170786, 3225465664, 643717713, 3921069994,
     3593408605, 38016083, 3634488961, 3889429448,
     568446438, 3275163606, 4107603335, 1163531501,
     2850285829, 4243563512, 1735328473, 2368359562,
     4294588738, 2272392833, 1839030562, 4259657740,
     2763975236, 1272893353, 4139469664, 3200236656,
     681279174, 3936430074, 3572445317, 76029189,
     3654602809, 3873151461, 530742520, 3299628645,
     4096336452, 1126891415, 2878612391, 4237533241,
     1700485571, 2399980690, 4293915773, 2240044497,
     1873313359, 4264355552, 2734768916, 1309151649,
     4149444226, 3174756917, 718787259, 3951481745]
|
def driver():
    """Read two numbers from the user and print them in ascending order.

    Bug fixes vs. the original:
    - inputs are converted to int, so "10" no longer sorts before "9"
      (the original compared the raw input strings lexicographically);
    - equal inputs are handled (the original appended nothing when
      age1 == age2);
    - the local result list no longer shadows the builtin ``list``.
    """
    numbers = []
    age1 = int(input("Enter the first number:"))
    age2 = int(input("Enter the second number:"))

    def swap(num1, num2):
        # Append the pair in ascending order; equal values keep input order.
        if num1 <= num2:
            numbers.append(num1)
            numbers.append(num2)
        else:
            numbers.append(num2)
            numbers.append(num1)

    swap(age1, age2)
    print("the swapped result (from least to greatest)", numbers)


if __name__ == "__main__":
    driver()
# Connection settings for the logs database.
# NOTE(review): credentials are hardcoded (user 'root', password line commented
# out) — these should come from environment variables or a secrets store
# before production use.
db_config = {
    # 'host': 'localhost',
    # 'port': 1337,
    # 'unix_socket': '/var/lib/mysql/mysql.sock',
    'user': 'root',
    # 'passwd': 'password,
    'db': 'logs_db'
}
|
def Ro_decimal_constr(x_par):
    """
    Compute the non-linear constraint value for the Ro parameter.

    :param list x_par: parameter list — either [beta, r, pop] or [beta, r].
    :return: the Ro value (beta * pop / r when pop is given, beta / r otherwise).
    :rtype: float
    """
    beta, r = x_par[0], x_par[1]
    if len(x_par) == 3:
        pop = x_par[2]
        return beta * pop / r
    return beta / r
# Vehicle make name -> numeric code.
# NOTE(review): gaps in the numbering and the make list resemble NHTSA/FARS
# vehicle make codes — confirm against the source dataset's codebook.
make_numbers = {'American Motors': 1,
                'Jeep/Kaiser-Jeep/Willys Jeep': 2,
                'AM General': 3,
                'Chrysler': 6,
                'Dodge': 7,
                'Imperial': 8,
                'Plymouth': 9,
                'Eagle': 10,
                'Ford': 12,
                'Lincoln': 13,
                'Mercury': 14,
                'Buick/Opel': 18,
                'Cadillac': 19,
                'Chevrolet': 20,
                'Oldsmobile': 21,
                'Pontiac': 22,
                'GMC': 23,
                'Saturn': 24,
                'Grumman': 25,
                'Coda': 26,
                'Other Domestic': 29,
                'Volkswagon': 30,
                'Alfa Romeo': 31,
                'Audi': 32,
                'Austin/Austin-Healey': 33,
                'BMW': 34,
                'Datsun/Nissan': 35,
                'Fiat': 36,
                'Honda': 37,
                'Isuzu': 38,
                'Jaguar': 39,
                'Lancia': 40,
                'Mazda': 41,
                'Mercedes-Benz': 42,
                'MG': 43,
                'Peugeot': 44,
                'Porsche': 45,
                'Renault': 46,
                'Saab': 47,
                'Subaru': 48,
                'Toyota': 49,
                'Triumph': 50,
                'Volvo': 51,
                'Mitsubishi': 52,
                'Suzuki': 53,
                'Acura': 54,
                'Hyundai': 55,
                'Merkur': 56,
                'Yugo': 57,
                'Infiniti': 58,
                'Lexus': 59,
                'Daihatsu': 60,
                'Land Rover': 62,
                'Kia': 63,
                'Daewoo': 64,
                'Smart': 65,
                'Mahindra': 66,
                'Scion': 67,
                'Other Imports': 69,
                'BSA': 70,
                'Ducati': 71,
                'Harley-Davidson': 72,
                'Kawasaki': 73,
                'Moto Guzzi': 74,
                'Norton': 75,
                'Yamaha': 76,
                'Victory': 77,
                'Other Make Moped': 78,
                'Other Make Motored Cycle': 79,
                'Brockway': 80,
                'Diamond Reo/Reo': 81,
                'Freightliner': 82,
                'FWD': 83,
                'International Harvester/Navistar': 84,
                'Kenworth': 85,
                'Mack': 86,
                'Peterbilt': 87,
                'Iveco/Magirus': 88,
                'White/Autocar, White/GMC': 89,
                'Bluebird': 90,
                'Eagle Coach': 91,
                'Gillig': 92,
                'MCI': 93,
                'Thomas Built': 94,
                'Not Reported': 97,
                'Other Make': 98,
                'Unknown': 99
                }
# Vehicle model/body category -> numeric code (same coding scheme as above —
# TODO confirm against the dataset's codebook).
model_numbers = {
    'Automobiles': 0,
    'Other(Autos)': 1,
    'Unknown (Autos)': 2,
    'Light Trucks': 3,
    'Other (Light Trucks)': 4,
    'Unknown (LT)': 5,
    'Other (LSV or NEV)': 6,
    'Unknown (LSV OR NEV)': 7,
    'Motorcycles': 8,
    'Electric Motorcycle': 9,
    'Unknown cc (Motorcycles)': 10,
    'All-Terrain Vehicles': 11,
    'Unknown cc (ATV)Unkown (motored cycle)': 12,
    'Other Make (Med/Heavy Trucks)': 13,
    'Motor Home': 14,
    'Med/Heavy Van-Based Vehicle': 15,
    'Med/Heavy Pickup': 16,
    'Med/Heavy Trucks - CBE': 17,
    'Med/Heavy Trucks - COE': 18,
    'Med/Heavy Trucks - COE (low entry)': 19,
    'Med/Heavy Trucks - COE (high entry)': 20,
    'Med/Heavy Trucks - Unknown engine location': 21,
    'Med/Heavy Trucks - COE (entry position unknown)': 22,
    'Other (Med/Heavy Trucks)': 23,
    'Other Make (Buses)': 24,
    'Buses': 25,
    'Other (Bus)': 26,
    'Unknown (Bus)': 27,
    'Not Reported': 28,
    'Other (Vehicle)': 29
}
stateDict = {'AL': '1', 'AK': '2', 'AZ': '4', 'AR': '5', 'CA': '6', 'CO': '8', 'CT': '9', 'DE': '10', 'DC': '11', 'FL': '12', 'GA': '14', 'HI': '15', 'ID': '16', 'IL': '17', 'IN': '18', 'IA': '19', 'KS': '20', 'KY': '21', 'LA': '22', 'ME': '23', 'MD': '24', 'MA': '25', 'MI': '26', 'MN': '27', 'MS': '28', 'MO': '29', 'MT': '30', 'NE': '31', 'NV': '32', 'NH': '33', 'NJ': '34', 'NM': '35', 'NY': '36', 'NC': '37', 'ND': '38', 'OH': '39', 'OK': '40', 'OR': '41', 'PA': '42', 'RI': '44', 'SC': '45', 'SD': '46', 'TN': '47', 'TX': '48', 'UT': '49', 'VT': '50', 'VA': '51', 'WA': '53', 'WV': '54', 'WI': '55', 'WY': '56'}
# FIPS state code -> percentage share (values sum to ~100; presumably each
# state's share of records in the dataset — TODO confirm source).
percentDict = {
    1: 2.48,
    2: 0.18,
    4: 2.42,
    5: 1.47,
    6: 9.73,
    8: 1.72,
    9: 0.77,
    10: 0.35,
    11: 0.06,
    12: 8.41,
    13: 4.30,
    15: 0.25,
    16: 0.61,
    17: 2.85,
    18: 2.35,
    19: 0.88,
    20: 1.05,
    21: 2.21,
    22: 2.08,
    23: 0.42,
    24: 1.49,
    25: 0.89,
    26: 2.88,
    27: 1.10,
    28: 1.80,
    29: 2.50,
    30: 0.46,
    31: 0.63,
    32: 0.91,
    33: 0.29,
    34: 1.59,
    35: 0.89,
    36: 2.77,
    37: 3.95,
    38: 0.30,
    39: 3.23,
    40: 1.84,
    41: 1.22,
    42: 3.39,
    44: 0.14,
    45: 2.74,
    46: 0.30,
    47: 2.79,
    48: 10.08,
    49: 0.79,
    50: 0.15,
    51: 2.12,
    53: 1.55,
    54: 0.72,
    55: 1.60,
    56: 0.32,
}
# Fallback/average percentage used when a state is not in percentDict —
# TODO confirm how callers use this.
percentAvg = 0.87
# Model year (as a string key) -> percentage share of records for that year.
# Years absent from the dict had no records. Values are percentages (sum ~100).
agePctDict = {
    "1923": 0.0007,
    "1927": 0.0007,
    "1928": 0.0014,
    "1929": 0.0033,
    "1930": 0.0014,
    "1931": 0.0014,
    "1932": 0.0049,
    "1933": 0.0007,
    "1934": 0.0019,
    "1936": 0.0007,
    "1937": 0.0021,
    "1938": 0.0007,
    "1939": 0.0005,
    "1940": 0.0042,
    "1941": 0.0026,
    "1942": 0.0005,
    "1945": 0.0007,
    "1946": 0.0026,
    "1947": 0.0021,
    "1948": 0.0012,
    "1949": 0.0014,
    "1950": 0.0021,
    "1951": 0.0007,
    "1952": 0.0014,
    "1954": 0.0026,
    "1955": 0.0007,
    "1956": 0.0021,
    "1957": 0.0035,
    "1958": 0.0035,
    "1959": 0.0007,
    "1960": 0.0007,
    "1961": 0.0021,
    "1962": 0.0033,
    "1963": 0.0040,
    "1964": 0.0061,
    "1965": 0.0098,
    "1966": 0.0107,
    "1967": 0.0184,
    "1968": 0.0168,
    "1969": 0.0165,
    "1970": 0.0235,
    "1971": 0.0168,
    "1972": 0.0191,
    "1973": 0.0205,
    "1974": 0.0275,
    "1975": 0.0273,
    "1976": 0.0291,
    "1977": 0.0312,
    "1978": 0.0419,
    "1979": 0.0485,
    "1980": 0.0408,
    "1981": 0.0508,
    "1982": 0.0627,
    "1983": 0.0767,
    "1984": 0.1230,
    "1985": 0.1524,
    "1986": 0.1904,
    "1987": 0.1932,
    "1988": 0.2880,
    "1989": 0.3248,
    "1990": 0.4264,
    "1991": 0.5042,
    "1992": 0.6249,
    "1993": 0.8579,
    "1994": 1.2075,
    "1995": 1.5372,
    "1996": 1.6099,
    "1997": 2.3112,
    "1998": 2.7998,
    "1999": 3.4210,
    "2000": 4.2897,
    "2001": 4.5612,
    "2002": 5.0798,
    "2003": 5.5447,
    "2004": 5.7146,
    "2005": 6.1470,
    "2006": 6.1405,
    "2007": 6.1072,
    "2008": 5.0095,
    "2009": 3.2556,
    "2010": 3.3213,
    "2011": 3.7244,
    "2012": 4.2890,
    "2013": 4.6695,
    "2014": 4.9049,
    "2015": 5.0565,
    "2016": 3.4043,
    "2017": 1.4311,
    "2018": 0.1200,
}
# Fallback/average percentage for years missing from agePctDict —
# TODO confirm how callers use this.
ageAvg = 1.4487
# Precomputed "top ten" summaries (highest percentage shares), keyed by
# state abbreviation, model year, model category and make respectively.
# The values are full-precision versions of the rounded shares above.
topTenStates = {'TX': 10.079759161352108, 'CA': 9.734905060267915, 'FL': 8.414906085509836, 'GA': 4.301355882442033, 'NC': 3.9460163526086585, 'PA': 3.3898226098465165, 'OH': 3.2283470071091203, 'MI': 2.8816288298028505, 'IL': 2.845046333944595, 'TN': 2.7949492854762834}
topTenYears = {2005: 6.1470243518257455, 2006: 6.140500085048477, 2007: 6.107179722578857, 2004: 5.714558668303947, 2003: 5.544694722567206, 2002: 5.079840714686823, 2015: 5.056539761910864, 2008: 5.009471837303427, 2014: 4.904850559339372, 2013: 4.669510936302186}
topTenModels = {
    "Automobiles": 41.301405,
    "Light Trucks": 39.180319,
    "Motorcycles": 9.518905,
    "Med/Heavy Trucks - COE": 5.799607,
    "Med/Heavy Trucks - Unknown engine location": 1.640387,
    "Med/Heavy Trucks - CBE": 0.629126,
    "Unknown cc (Motorcycles)": 0.392155,
    "All-Terrain Vehicles": 0.260272,
    "Buses": 0.183145,
    "Med/Heavy Trucks - COE (low entry)": 0.154718
}
topTenMakes = {"Ford": 14.727600,
               "Chevrolet": 14.258552,
               "Toyota": 8.214285,
               "Honda": 7.517353,
               "Dodge": 6.421044,
               "Datsun/Nissan": 4.922559,
               "Harley-Davidson": 3.934599,
               "GMC": 3.142833,
               "Jeep/Kaiser-Jeep/Willys Jeep": 2.622289,
               "Freightliner": 2.437513
               }
# Read three segment lengths and report whether they can form a triangle.
# Bug fixes vs. the original:
# - the third condition tested `a < b + a` instead of `c < a + b`;
# - the three independent `if`s could print the success message up to three
#   times, and the `else` was attached only to the last `if`;
# - removed a leftover debug print of abs(b - c).
a = int(input('Digite o comprimento de uma reta: '))
b = int(input('Digite o comprimento de outra reta: '))
c = int(input('Digite o comprimento de mais uma reta: '))
# Triangle inequality: every side must be strictly shorter than the sum of
# the other two.
if a < b + c and b < a + c and c < a + b:
    print('As retas {}, {} e {} formam um triângulo.'.format(a, b, c))
else:
    print('As retas {}, {} e {} NÂO formam um triângulo.'.format(a, b, c))
def twoNumberSum(array, targetSum):
    """Return a pair of values from array that sums to targetSum.

    The pair is returned as [later_value, earlier_value]; an empty list is
    returned when no such pair exists. Runs in O(n) time using a lookup of
    previously seen values.
    """
    seen = {}
    for position, value in enumerate(array):
        complement = targetSum - value
        if complement in seen:
            return [value, complement]
        seen[value] = position
    return []
# Calcular todos em:
# KM HM DAM M DM CM MM
# Read a length in metres and print it converted to the other metric units.
# Bug fixes vs. the original:
# - metres printed `var * 10000`, but the multipliers/divisors of every other
#   line show the input is already in metres, so metres is just `var`;
# - the `/ 100` line was labelled "micrometro" although its own comment (#HM)
#   and the division show it is the hectometre conversion.
var = float(input("Digite um valor: "))
print("O valor em decimetro fica {}".format(var * 10))    # DM
print("O valor em centimetros fica {}".format(var * 100))  # CM
print("O valor e milimetros fica {}".format(var * 1000))   # MM
print("O valor em metros fica {}".format(var))             # M
print("O valor em decametro fica {}".format(var / 10))     # DAM
print("O valor em hectometro fica {}".format(var / 100))   # HM
print("O valor em Kilometro fica {}".format(var / 1000))   # KM
#!/usr/bin/env python3
#encoding=utf-8
#------------------------------------------------
# Usage: python3 simplest_class.py
# Description: class attribute and method
#------------------------------------------------
class Rec:
    """Empty namespace object: attributes are attached to it at runtime."""
if __name__ == '__main__':
    # Demonstration of class vs. instance attribute namespaces.
    Rec.name = 'Bob'            # plain class-attribute assignment — like a C struct
    Rec.age = 40
    print(Rec.name)
    x = Rec()                   # instances inherit class attributes
    y = Rec()
    print(x.name)               # name is found on the class via inheritance search
    print(y.name)
    x.name = 'Sue'              # assignment through x creates an attribute on x only
    print(Rec.name, x.name, y.name)
    print()
    print('the dict of Rec class')
    print(list(Rec.__dict__.keys()))
    print()
    print('the dict of Rec class without __xxx__ methods')
    print(list(x for x in Rec.__dict__ if not x.startswith('__')))
    print()
    print('the dict of object x')
    print(list(x.__dict__.keys()))
    print()
    print('the dict of object y')
    print(list(y.__dict__.keys()))
    print()
    '''
    Here, the class's namespace dictionary shows the name and age attributes we assigned
    to it, x has its own name, and y is still empty. Because of this model, an attribute can
    often be fetched by either dictionary indexing or attribute notation, but only if it's
    present on the object in question—attribute notation kicks off inheritance search, but
    indexing looks in the single object only (as we'll see later, both have valid roles):
    '''
    print('the difference between attribute notation and dictionary indexing')
    print('the name of x is ', x.name, x.__dict__['name'])
    print('the age of x by attribute notation is %s' % x.age)  # attribute fetch checks classes too
    try:
        # Indexing x.__dict__ does not perform inheritance search, so 'age'
        # (stored on the class) is not found here.
        print('the age of x by dictionary indexing is %s' % x.__dict__['age'])
    except KeyError:  # narrowed from Exception: only the dict lookup can fail
        print('x has not age key in x.__dict__')
    '''
    To facilitate inheritance search on attribute fetches, each instance has a link to its class
    that Python creates for us—it's called __class__, if you want to inspect it:
    '''
    print()
    print('x.__class__ is %s' % str(x.__class__))
    '''
    Classes also have a __bases__ attribute, which is a tuple of references to their superclass
    objects—in this example just the implied object root class in Python 3.X we'll explore
    later (you'll get an empty tuple in 2.X instead):
    '''
    print()
    print('The __bases__ of Rec class is %s' % Rec.__bases__)
    '''
    Even methods, normally created by a def nested in a class, can be created completely
    independently of any class object. The following, for example, defines a simple function
    outside of any class that takes one argument:
    '''
    def uppername(obj):
        return obj.name.upper()  # still needs a self-like argument (obj)
    '''
    There is nothing about a class here yet—it's a simple function, and it can be called as
    such at this point, provided we pass in an object obj with a name attribute, whose value
    in turn has an upper method—our class instances happen to fit the expected interface,
    and kick off string uppercase conversion:
    '''
    print()
    print('the uppercase name of x is ', uppername(x))
    '''
    If we assign this simple function to an attribute of our class, though, it becomes a
    method, callable through any instance, as well as through the class name itself as long
    as we pass in an instance manually
    '''
    Rec.method = uppername  # now it's a class's method!
    print('after add uppername method to Rec class')
    print('the uppercase name of x with x.method is ', x.method())          # run method to process x
    print('the uppercase name of y with y.method is ', y.method())          # the same, but pass y to self
    # Bug fix: this line printed "with Rec.method(x)" but actually called
    # y.method() — it now demonstrates the call through the class as labelled.
    print('the uppercase name of x with Rec.method(x) is ', Rec.method(x))
|
class SOG:
    """Skeleton of the SOG training driver (all behavior still unimplemented)."""

    def __init__(self, *args):
        """Accept the training hyper-parameters (network, criterion, optimizer,
        epoch count, sampling distribution/number, etc.) and initialize the
        components. Currently a stub.
        """
        pass

    def train(self, train_loader):
        """Top-level training API: run training, validation and checkpoint
        saving so that self.model ends up trained. The train_loader yields
        X, y, c and fake_c. Currently a stub.
        """
        pass
|
def slices(num_string, slice_size):
    """Return every run of slice_size consecutive digits of num_string.

    :param str num_string: string of decimal digits.
    :param int slice_size: length of each slice.
    :return: list of slices, each a list of ints, in order of appearance.
    :raises ValueError: if slice_size exceeds the length of num_string.
    """
    if slice_size > len(num_string):
        raise ValueError("slice_size cannot exceed the length of num_string")
    return [[int(digit) for digit in num_string[i:i + slice_size]]
            for i in range(len(num_string) - slice_size + 1)]


def largest_product(num_string, slice_size):
    """Return the largest product of any slice_size consecutive digits.

    Bug fix: the original seeded its accumulator with 1, so inputs whose
    every product is 0 (e.g. "0000") incorrectly reported 1; max() over the
    actual products returns 0 in that case.

    :raises ValueError: if slice_size exceeds the length of num_string
        (propagated from slices()).
    """
    from math import prod  # local import: this snippet has no module imports
    return max(prod(s) for s in slices(num_string, slice_size))
# Sentinel layer name for components whose layer could not be determined.
# Fix: the original preceded this with `global LAYER_UNKNOWN`, which is a
# meaningless no-op at module scope and was removed.
LAYER_UNKNOWN = 'unknown'


class Design(object):
    """Container pairing a system's layers with its detected design smells."""

    def __init__(self, layers, smells) -> None:
        super().__init__()
        # Stored as given; no copies are taken.
        self.layers = layers
        self.smells = smells
"""
Initialization file for tweets library module.
These exist here in lib because some of them are useful as helper functions for
other scripts (such as getting available campaigns). However, they could be moved to
utils/reporting/ as individual scripts. And they could be called directly or
with make, to avoid having multiple ways of calling something.
"""
|
"""
Coin change
given coins of different denominations and a total amount of money.
Write a function to compute the number of combinations that make up that amount.
You may assume that you have infinite number of each kind of coin.
Input: amount = 25, coins = [1, 2, 5]
"""
class Solution:
def change(self, amount: int, coins: List[int]) -> int:
dic = {0:1}
for coin in coins:
for i in range(amount+1):
dic[i] =dic.get(i,0) + dic.get(i-coin,0)
return dic.get(amount,0)
amount = 25
coins = [1, 2, 5]
a = Solution()
a.change(amount, coins)
# 42 |
# BMI (IMC) calculator: read weight (kg) and height (m), classify the result.
p = float(input('Digite seu peso: '))    # weight in kilograms
a = float(input('Digite sua altura: '))  # height in metres
imc = p / (a**2)  # body mass index = weight / height^2
# Classification bands (boundaries inclusive on the upper end of each band).
if imc <= 18.5:
    print('Seu imc é {:.2f} você está abaixo do peso'.format(imc))
elif 18.5 < imc <= 25.0:
    print('Seu imc é {:.2f} você está no peso ideal'.format(imc))
elif 25.0 < imc <= 30.0:
    print('Seu imc é {:.2f} você está no sobrepeso'.format(imc))
elif 30.0 < imc <= 40.0:
    print('Seu imc é {:.2f} você está obeso'.format(imc))
elif imc > 40.0:
    print('Seu imc é {:.2f} você é obeso mórbido'.format(imc))
|
class RecentCounter:
    """Count pings received in the last 3000 milliseconds.

    ping(t) records a ping at time t and returns how many pings occurred in
    the window [t - 3000, t]. Calls are assumed to use strictly increasing t,
    so a deque of timestamps with amortized O(1) trimming suffices.
    """

    def __init__(self):
        # Local import: this snippet never imported deque at module level,
        # which made the original raise NameError here.
        from collections import deque
        self.slide_window = deque()

    def ping(self, t: int) -> int:
        """Record a ping at time t and return the count within the window."""
        self.slide_window.append(t)
        # Drop timestamps that have fallen out of the 3000 ms window.
        while self.slide_window and self.slide_window[0] < t - 3000:
            self.slide_window.popleft()
        return len(self.slide_window)


# Your RecentCounter object will be instantiated and called as such:
# obj = RecentCounter()
# param_1 = obj.ping(t)
# --------------
# Code starts here
# Combine the two cohorts, then adjust the roster and show each step.
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']
new_class = [*class_1, *class_2]
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove("Carla Gentry")
print(new_class)
# Code ends here
# --------------
# Code starts here
# Course -> mark; compute the mark list, total and percentage.
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
# Idiom fix: build the mark list directly from the dict values instead of a
# manual append loop (dict order is insertion order, so the result is identical).
marks = list(courses.values())
print(marks)
print(courses['Math'])
print(courses['English'])
print(courses['History'])
print(courses['French'])
print(courses['Science'])
# Idiom fix: sum() over the values replaces the hand-written five-term addition.
total = sum(courses.values())
print(total)
percentage = (total * 100 / 500)  # 500 = maximum possible marks (5 courses x 100)
print(percentage)
# Code ends here
# --------------
# Code starts here
# Find the student with the highest mathematics mark.
mathematics = {'Geoffrey Hinton': 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65,
               'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66,
               'Peter Warden': 75}
max_marks_scored = max(mathematics, key=lambda student: mathematics[student])
topper = max_marks_scored
print(topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
# Code starts here
# Split once and reuse the pieces.
parts = topper.split()
# Create variable first_name
first_name = parts[0]
print(first_name)
# Create variable last_name holding the second word
last_name = parts[1]
print(last_name)
# Concatenate as "last first"
full_name = f"{last_name} {first_name}"
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
|
# Register the default style for RadialMenuWidget: no border and no background.
widget = WidgetDefault()
widget.border = "None"
widget.background = "None"
commonDefaults["RadialMenuWidget"] = widget
def generateRadialMenuWidget(file, screen, menu, parentName):
    """Emit the C initialization code for one leRadialMenuWidget.

    Writes leRadialMenuWidget_New(), its property setters, one
    leImageScaleWidget per menu item, the event-callback registrations and
    the addChild() hookup into *file*.

    :param file: code-writer object (has write()/writeNewLine()).
    :param screen: owning screen, forwarded to generateBaseWidget.
    :param menu: radial-menu model object queried for all properties.
    :param parentName: C variable name of the parent widget.
    """
    name = menu.getName()
    file.write(" %s = leRadialMenuWidget_New();" % (name))
    generateBaseWidget(file, screen, menu)
    # Setters are emitted only when the value differs from the listed default.
    writeSetInt(file, name, "NumberOfItemsShown", menu.getItemsShown(), 5)
    writeSetBoolean(file, name, "HighlightProminent", menu.getHighlightProminent(), False)
    writeSetInt(file, name, "Theta", menu.getTheta(), 0)
    width = menu.getSize().width
    height = menu.getSize().height
    touchX = menu.getTouchX()
    touchY = menu.getTouchY()
    touchWidth = menu.getTouchWidth()
    touchHeight = menu.getTouchHeight()
    # NOTE(review): setTouchArea is emitted twice — here unconditionally with
    # percent-scaled width/height, and again below with the raw percentages
    # only when they differ from defaults. One of the two is likely redundant
    # or wrong; confirm against the leRadialMenuWidget API.
    file.write(" %s->fn->setTouchArea(%s, %d, %d, %d, %d);" % (name, name, touchX, touchY, width * touchWidth / 100, height * touchHeight / 100))
    # Map the designer enum names onto the C enum literals.
    ellipseType = menu.getEllipseType().toString()
    if ellipseType == "Default":
        ellipseType = "LE_RADIAL_MENU_ELLIPSE_TYPE_DEFAULT"
    elif ellipseType == "Orbital":
        ellipseType = "LE_RADIAL_MENU_ELLIPSE_TYPE_OBITAL"
    else:
        ellipseType = "LE_RADIAL_MENU_ELLIPSE_TYPE_ROLLODEX"
    writeSetLiteralString(file, name, "EllipseType", ellipseType, "LE_RADIAL_MENU_ELLIPSE_TYPE_DEFAULT")
    writeSetBoolean(file, name, "DrawEllipse", menu.getEllipseVisible(), True)
    scaleMode = menu.getSizeScale().toString()
    if scaleMode == "Off":
        scaleMode = "LE_RADIAL_MENU_INTERPOLATE_OFF"
    elif scaleMode == "Gradual":
        scaleMode = "LE_RADIAL_MENU_INTERPOLATE_GRADUAL"
    else:
        scaleMode = "LE_RADIAL_MENU_INTERPOLATE_PROMINENT"
    writeSetLiteralString(file, name, "ScaleMode", scaleMode, "LE_RADIAL_MENU_INTERPOLATE_GRADUAL")
    blendMode = menu.getAlphaScale().toString()
    if blendMode == "Off":
        blendMode = "LE_RADIAL_MENU_INTERPOLATE_OFF"
    elif blendMode == "Gradual":
        blendMode = "LE_RADIAL_MENU_INTERPOLATE_GRADUAL"
    else:
        blendMode = "LE_RADIAL_MENU_INTERPOLATE_PROMINENT"
    writeSetLiteralString(file, name, "BlendMode", blendMode, "LE_RADIAL_MENU_INTERPOLATE_GRADUAL")
    # NOTE(review): min/max shadow the builtins within this function.
    min = menu.getMinSizePercent()
    max = menu.getMaxSizePercent()
    if min != 30 or max != 100:
        file.write(" %s->fn->setScaleRange(%s, %d, %d);" % (name, name, min, max))
    min = menu.getMinAlphaAmount()
    max = menu.getMaxAlphaAmount()
    if min != 128 or max != 255:
        file.write(" %s->fn->setBlendRange(%s, %d, %d);" % (name, name, min, max))
    touchX = menu.getTouchX()
    touchY = menu.getTouchY()
    touchWidth = menu.getTouchWidth()
    touchHeight = menu.getTouchHeight()
    if touchX != 0 or touchY != 75 or touchWidth != 100 or touchHeight != 50:
        file.write(" %s->fn->setTouchArea(%s, %d, %d, %d, %d);" % (name, name, touchX, touchY, touchWidth, touchHeight))
    x = menu.getLocation(False).x
    y = menu.getLocation(False).y
    width = menu.getSize().width
    height = menu.getSize().height
    # Item widgets are initially positioned at the menu's center point.
    xp = x + width / 2;
    yp = y + height / 2;
    items = menu.getItemList()
    if len(items) > 0:
        # One image-scale child widget per menu item.
        for idx, item in enumerate(items):
            varName = "%s_image_%d" % (name, idx)
            file.write(" %s = leImageScaleWidget_New();" % (varName))
            imageName = craftAssetName(item.image)
            if imageName != "NULL":
                file.write(" %s->fn->setImage(%s, %s);" % (varName, varName, imageName))
                file.write(" %s->fn->setTransformWidth(%s, %d);" % (varName, varName, item.currentSize.width))
                file.write(" %s->fn->setTransformHeight(%s, %s);" % (varName, varName, item.currentSize.height))
                file.write(" %s->fn->setStretchEnabled(%s, LE_TRUE);" % (varName, varName))
                file.write(" %s->fn->setPreserveAspectEnabled(%s, LE_TRUE);" % (varName, varName))
            else:
                # No image asset: render as a filled, outlined placeholder.
                file.write(" %s->fn->setBackgroundType(%s, LE_WIDGET_BACKGROUND_FILL);" % (varName, varName))
                file.write(" %s->fn->setBorderType(%s, LE_WIDGET_BORDER_LINE);" % (varName, varName))
            # Hide items that are not at the prominent position (t == 270)
            # when the menu shows fewer items than it has.
            if not (item.t == 270 and menu.getItemsShown() < len(items)):
                file.write(" %s->fn->setVisible(%s, LE_FALSE);" % (varName, varName))
            file.write(" %s->fn->setPosition(%s, %d, %d);" % (varName, varName, xp, yp))
            file.write(" %s->fn->setSize(%s, %d, %d);" % (varName, varName, item.originalSize.width, item.originalSize.height))
            if item.originalAlphaAmount != 255:
                file.write(" %s->fn->setAlphaAmount(%s, %d);" % (varName, varName, item.originalAlphaAmount));
            file.write(" %s->fn->addWidget(%s, (leWidget*)%s);" % (name, name, varName))
    writeEvent(file, name, menu, "ItemSelectedEvent", "ItemSelectedEventCallback", "OnItemSelected")
    writeEvent(file, name, menu, "ItemProminenceChangedEvent", "ItemProminenceChangedEvent", "OnItemProminenceChanged")
    file.write(" %s->fn->addChild(%s, (leWidget*)%s);" % (parentName, parentName, name))
    file.writeNewLine()
def generateRadialMenuEvent(screen, widget, event, genActions):
    """Return the C source text of a radial-menu event callback.

    Emits the function header matching *event* (item selected / prominence
    changed) followed by the generated action body.

    :param screen: owning screen (unused here; kept for a uniform signature).
    :param widget: widget model; supplies the callback's name and C type.
    :param event: event model; event.name selects which header is written.
    :param genActions: forwarded to generateActions for the body.
    :return: the callback source as a string.
    """
    text = ""
    if event.name == "ItemSelectedEvent":
        text += "void %s_OnItemSelected(%s)\n" % (widget.getName(), getWidgetVariableName(widget))
    if event.name == "ItemProminenceChangedEvent":
        text += "void %s_OnItemProminenceChanged(%s)\n" % (widget.getName(), getWidgetVariableName(widget))
    text += generateActions(widget, event, genActions, None, None)
    return text
def generateRadialMenuAction(text, variables, owner, event, action):
    """Stub: radial-menu widgets contribute no action code yet.

    Kept for a uniform generator interface; always returns None.
    (Fix: removed the dead local assignment ``i = 0``.)
    """
    return None
def find_divisor(numbers):
    """Scan for the first pair where one number evenly divides another and
    return their integer quotient; return 0 when no such pair exists.

    Each candidate divider is taken from the elements after the current one
    (scanned in reverse); diagnostic prints are emitted along the way.
    """
    for idx, value in enumerate(numbers):
        remaining = numbers[idx + 1:]
        print("len", len(remaining))
        for candidate in reversed(remaining):
            if value % candidate == 0:
                print("found {} and {}. Rest: {}".format(
                    value, candidate, value % candidate))
                return int(value / candidate)
    return 0
with open("input", "r") as f:
input = f.read()
lines = input.split("\n")
sum = 0
for line in lines:
if not len(line):
continue
numbers = sorted([int(x) for x in line.split("\t")], reverse=True)
sum = sum + find_divisor(numbers)
print("CS", sum)
|
class University:
    """Plain record describing one university ranking entry."""

    def __init__(self, name, country, world_rank):
        # Attributes are stored exactly as supplied by the caller.
        self.world_rank = world_rank
        self.country = country
        self.name = name
# FROM ATUS 2016
# ACTIVITY FILE
# TUCASEID: ATUS person ID
# TUACTIVITY_N: Activity line number
# TRTCCTOT_LN: Total time spent during activity providing secondary childcare for all children < 13 (in minutes)
# TRTEC_LN: Time spent providing eldercare by activity (in minutes)
# TUACTDUR: Duration of activity in minutes
# TUCC5: Was at least one of your own household children < 13 in your care during this activity?
# TUCC5B: Was at least one of your non-own household children < 13 in your care during this activity?
# TUSTARTTIM: Activity start time
# TUSTOPTIME: Activity stop time
# TUTIER1CODE: Lexicon Tier 1: 1st and 2nd digits of 6-digit activity code
# TUTIER2CODE: Lexicon Tier 2: 3rd and 4th digits of 6-digit activity code
# TUTIER3CODE: Lexicon Tier 3: 5th and 6th digits of 6-digit activity code
# TEWHERE
# Subset of the columns above actually extracted from the activity file,
# in the order they are read.
ACTIVITY_FIELDS = [
    'TUCASEID',
    'TUACTIVITY_N',
    'TUSTARTTIM',
    'TUSTOPTIME',
    'TUACTDUR',
    'TEWHERE',
    'TUTIER1CODE',
    'TUTIER2CODE',
    'TUTIER3CODE'
]
ACTIVITY_CODE = {
'01': {'Personal Care': {
'01': {'Sleeping': {
'01': 'Sleeping',
'02': 'Sleeplessness',
'99': 'NEC',
}},
'02': {'Grooming': {
'01': 'Washing, dressing and grooming oneself',
'99': 'NEC',
}},
'03': {'Health-related Self Care': {
'01': 'Health-related Self Care',
'99': 'NEC',
}},
'04': {'Personal Activities': {
'01': 'Personal/Private activities',
'99': 'NEC',
}},
'05': {'Personal Care Emergencies': {
'01': 'Personal Emergencies',
'99': 'NEC',
}},
'99': {'Personal Care, NEC': {
'99': 'NEC',
}},
}},
'02': {'Household Activities': {
'01': {'Housework': {
'01': 'Interior cleaning',
'02': 'Laundry',
'03': 'Sewing, repairing, & maintaining textiles',
'04': 'Storing interior hh items, inc. food',
'99': 'NEC',
}},
'02': {'Food & Drink Prep., Presentation, & Clean-up': {
'01': 'Food and drink preparation',
'02': 'Food presentation',
'03': 'Kitchen and food clean-up',
'99': 'NEC',
}},
'03': {'Interior Maintenance, Repair, & Decoration': {
'01': 'Interior arrangement, decoration, & repairs',
'02': 'Building and repairing furniture',
'03': 'Heating and cooling',
'99': 'NEC',
}},
'04': {'Exterior Maintenance, Repair, & Decoration': {
'01': 'Exterior cleaning',
'02': 'Exterior repair, improvements, & decoration',
'99': 'NEC',
}},
'05': {'Lawn, Garden, and Houseplants': {
'01': 'Lawn, garden, and houseplant care',
'02': 'Ponds, pools, and hot tubs',
'99': 'NEC',
}},
'06': {'Animals and Pets': {
'01': 'Care for animals and pets (not veterinary care)',
'02': 'Walking / exercising / playing with animals',
'99': 'NEC',
}},
'07': {'Vehicles': {
'01': 'Vehicle repair and maintenance (by self)',
'99': 'NEC',
}},
'08': {'Appliances, Tools, and Toys': {
'01': 'Appliance, tool, and toy set-up, repair, & maintenance (by self)',
'99': 'NEC',
}},
'09': {'Household Management': {
'01': 'Financial management',
'02': 'Household & personal organization and planning',
'03': 'HH & personal mail & messages (except e-mail)',
'04': 'HH & personal e-mail and messages',
'05': 'Home security',
'99': 'NEC',
}},
'99': {'Household Activities, NEC': {
'99': 'NEC',
}},
}},
'03': {'Caring For & Helping Household (HH) Members': {
'01': {'Caring For & Helping HH Children': {
'01': 'Physical care for hh children',
'02': 'Reading to/with hh children',
'03': 'Playing with hh children, not sports',
'04': 'Arts and crafts with hh children',
'05': 'Playing sports with hh children',
'06': 'Talking with/listening to hh children',
'08': 'Organization & planning for hh children',
'09': 'Looking after hh children (as a primary activity)',
'10': 'Attending hh childrens events',
'11': 'Waiting for/with hh children',
'12': 'Picking up/dropping off hh children',
'99': 'NEC'
}},
'02': {'Activities Related to HH Childrens Education': {
'01': 'Homework (hh children)',
'02': 'Meetings and school conferences (hh children)',
'03': 'Home schooling of hh children',
'04': 'Waiting associated with hh childrens education',
'99': 'NEC'
}},
'03': {'Activities Related to HH Childrens Health': {
'01': 'Providing medical care to hh children',
'02': 'Obtaining medical care for hh children',
'03': 'Waiting associated with hh childrens health',
'99': 'NEC'
}},
'04': {'Caring for Household Adults': {
'01': 'Physical care for hh adults',
'02': 'Looking after hh adult (as a primary activity)',
'03': 'Providing medical care to hh adult',
'04': 'Obtaining medical and care services for hh adult',
'05': 'Waiting associated with caring for household adults',
'99': 'NEC'
}},
'05': {'Helping Household Adults': {
'01': 'Helping hh adults',
'02': 'Organization & planning for hh adults',
'03': 'Picking up/dropping off hh adult',
'04': 'Waiting associated with helping hh adults',
'99': 'NEC'
}},
'99': {'Caring For & Helping Household (HH) Members, NEC': {
'99': 'NEC',
}},
}},
'04': {'Caring For & Helping NonHousehold (NonHH) Members': {
'01': {'Caring For & Helping NonHH Children': {
'01': 'Physical care for nonhh children',
'02': 'Reading to/with nonhh children',
'03': 'Playing with nonhh children, not sports',
'04': 'Arts and crafts with nonhh children',
'05': 'Playing sports with nonhh children',
'06': 'Talking with/listening to nonhh children',
'08': 'Organization & planning for nonhh children',
'09': 'Looking after nonhh children (as a primary activity)',
'10': 'Attending nonhh childrens events',
'11': 'Waiting for/with nonhh children',
'12': 'Dropping off/picking up nonhh children',
'99': 'NEC'
}},
'02': {'Activities Related to NonHH Childrens Education': {
'01': 'Homework (nonhh children)',
'02': 'Meetings and school conferences (nonhh children)',
'03': 'Home schooling of nonhh children',
'04': 'Waiting associated with nonhh childrens education',
'99': 'NEC'
}},
'03': {'Activities Related to NonHH Childrens Health': {
'01': 'Providing medical care to nonhh children',
'02': 'Obtaining medical care for nonhh children',
'03': 'Waiting associated with nonhh childrens health',
'99': 'NEC'
}},
'04': {'Caring for NonHousehold Adults': {
'01': 'Physical care for nonhh adults',
'02': 'Looking after nonhh adult (as a primary activity)',
'03': 'Providing medical care to nonhh adult',
'04': 'Obtaining medical and care services for nonhh adult',
'05': 'Waiting associated with caring for nonhh adults',
'99': 'NEC'
}},
'05': {'Helping NonHousehold Adults': {
'01': 'Housework, cooking, & shopping assistance for nonhh adults',
'02': 'House & lawn maintenance & repair assistance for nonhh adults',
'03': 'Animal & pet care assistance for nonhh adults',
'04': 'Vehicle & appliance maintenance/repair assistance for nonhh adults',
'05': 'Financial management assistance for nonhh adults',
'06': 'Household management & paperwork assistance for nonhh adults',
'07': 'Picking up/dropping off nonhh adult',
'08': 'Waiting associated with helping nonhh adults',
'99': 'NEC'
}},
'99': {'Caring For & Helping NonHH Members, NEC': {
'99': 'NEC',
}},
}},
'05': {'Work & Work Related Activities': {
'01': {'Working': {
'01': 'Work, main job',
'02': 'Work, other job(s)',
'03': 'Security procedures related to work',
'04': 'Waiting associated with working',
'99': 'NEC'
}},
'02': {'Work-Related Activities': {
'01': 'Socializing, relaxing, and leisure as part of job',
'02': 'Eating and drinking as part of job',
'03': 'Sports and exercise as part of job',
'04': 'Security procedures as part of job',
'05': 'Waiting associated with work-related activities',
'99': 'NEC'
}},
'03': {'Other Income-generating Activities': {
'01': 'Income-generating hobbies, crafts, and food',
'02': 'Income-generating performances',
'03': 'Income-generating services',
'04': 'Income-generating rental property activities',
'05': 'Waiting associated with other income-generating activities',
'99': 'NEC'
}},
'04': {'Job Search and Interviewing': {
'01': 'Job search activities',
'03': 'Job interviewing',
'04': 'Waiting associated with job search or interview',
'05': 'Security procedures rel. to job search/interviewing',
'99': 'NEC'
}},
'99': {'Work and Work-Related Activities, NEC': {
'99': 'NEC',
}},
}},
'06': {'Education': {
'01': {'Taking Class': {
'01': 'Taking class for degree, certification, or licensure',
'02': 'Taking class for personal interest',
'03': 'Waiting associated with taking classes',
'04': 'Security procedures rel. to taking classes',
'99': 'NEC'
}},
'02': {'Extracurricular School Activities (Except Sports)': {
'01': 'Extracurricular club activities',
'02': 'Extracurricular music & performance activities',
'03': 'Extracurricular student government activities',
'04': 'Waiting associated with extracurricular activities',
'99': 'NEC'
}},
'03': {'Research/Homework': {
'01': 'Research/homework for class for degree, certification, or licensure',
'02': 'Research/homework for class for pers. Interest',
'03': 'Waiting associated with research/homework',
'99': 'NEC'
}},
'04': {'Registration/Administrative activities': {
'01': 'Administrative activities: class for degree, certification, or licensure',
'02': 'Administrative activities: class for personal interest',
'03': 'Waiting associated w/ admin. activities (education)',
'99': 'NEC'
}},
'99': {'Education, NEC': {
'99': 'NEC',
}},
}},
'07': {'Consumer Purchases': {
'01': {'Shopping (Store, Telephone, Internet)': {
'01': 'Grocery shopping',
'02': 'Purchasing gas',
'03': 'Purchasing food (not groceries)',
'04': 'Shopping, except groceries, food and gas',
'05': 'Waiting associated with shopping',
'99': 'NEC'
}},
'02': {'Researching Purchases': {
'01': 'Comparison shopping',
'99': 'NEC'
}},
'03': {'Security Procedures Rel. to Consumer Purchases': {
'01': 'Security procedures rel. to consumer purchases',
'99': 'NEC'
}},
'99': {'Consumer Purchases, NEC': {
'99': 'NEC',
}},
}},
'08': {'Professional & Personal Care Services': {
'01': {'Childcare Services': {
'01': 'Using paid childcare services',
'02': 'Waiting associated w/purchasing childcare svcs',
'99': 'NEC'
}},
'02': {'Financial Services and Banking': {
'01': 'Banking',
'02': 'Using other financial services',
'03': 'Waiting associated w/banking/financial services',
'99': 'NEC'
}},
'03': {'Legal Services': {
'01': 'Using legal services',
'02': 'Waiting associated with legal services',
'99': 'NEC'
}},
'04': {'Medical and Care Services': {
'01': 'Using health and care services outside the home',
'02': 'Using in-home health and care services',
'03': 'Waiting associated with medical services',
'99': 'NEC'
}},
'05': {'Personal Care Services': {
'01': 'Using personal care services',
'02': 'Waiting associated w/personal care services',
'99': 'NEC'
}},
'06': {'Real Estate': {
'01': 'Activities rel. to purchasing/selling real estate',
'02': 'Waiting associated w/purchasing/selling real estate',
'99': 'NEC'
}},
'07': {'Veterinary Services (excluding grooming)': {
'01': 'Using veterinary services',
'02': 'Waiting associated with veterinary services',
'99': 'NEC'
}},
'08': {'Security Procedures Rel. to Professional/Personal Svcs.': {
'01': 'Security procedures rel. to professional/personal svcs.',
'99': 'NEC'
}},
'99': {'Professional and Personal Services, NEC': {
'99': 'NEC',
}},
}},
'09': {'Household Services': {
'01': {'Household Services (not done by self)': {
'01': 'Using interior cleaning services',
'02': 'Using meal preparation services',
'03': 'Using clothing repair and cleaning services',
'04': 'Waiting associated with using household services',
'99': 'NEC'
}},
'02': {'Home Maint/Repair/Décor/Construction (not done by self)': {
'01': 'Using home maint/repair/décor/construction svcs',
'02': 'Waiting associated w/ home main/repair/décor/constr',
'99': 'NEC'
}},
'03': {'Pet Services (not done by self, not vet)': {
'01': 'Using pet services',
'02': 'Waiting associated with pet services',
'99': 'NEC'
}},
'04': {'Lawn & Garden Services (not done by self)': {
'01': 'Using lawn and garden services',
'02': 'Waiting associated with using lawn & garden services',
'99': 'NEC'
}},
'05': {'Vehicle Maint. & Repair Services (not done by self)': {
'01': 'Using vehicle maintenance or repair services',
'02': 'Waiting associated with vehicle main. or repair svcs',
'99': 'NEC'
}},
'99': {'Household Services, NEC': {
'99': 'NEC',
}},
}},
'10': {'Government Services & Civic Obligations': {
'01': {'Using Government Services': {
'01': 'Using police and fire services',
'02': 'Using social services',
'03': 'Obtaining licenses & paying fines, fees, taxes',
'99': 'NEC'
}},
'02': {'Civic Obligations & Participation': {
'01': 'Civic obligations & participation',
'99': 'NEC'
}},
'03': {'Waiting Associated w/Govt Svcs or Civic Obligations': {
'04': 'Waiting associated with using government services',
'05': 'Waiting associated with civic obligations & participation',
'99': 'NEC'
}},
'04': {'Security Procedures Rel. to Govt Svcs/Civic Obligations': {
'01': 'Security procedures rel. to govt svcs/civic obligations',
'99': 'NEC'
}},
'99': {'Government Services, NEC': {
'99': 'NEC',
}},
}},
'11': {'Eating and Drinking': {
'01': {'Eating and Drinking': {
'01': 'Eating and drinking',
'99': 'NEC'
}},
'02': {'Waiting associated with eating & drinking': {
'01': 'Waiting associated with eating & drinking',
'99': 'NEC'
}},
'99': {'Eating and Drinking, NEC': {
'99': 'NEC',
}},
}},
'12': {'Socializing, Relaxing, and Leisure': {
'01': {'Socializing and Communicating': {
'01': 'Socializing and communicating with others',
'99': 'NEC'
}},
'02': {'Attending or Hosting Social Events': {
'01': 'Attending or hosting parties/receptions/ceremonies',
'02': 'Attending meetings for personal interest (not volunteering)',
'99': 'NEC'
}},
'03': {'Relaxing and Leisure': {
'01': 'Relaxing, thinking',
'02': 'Tobacco and drug use',
'03': 'Television and movies (not religious)',
'04': 'Television (religious)',
'05': 'Listening to the radio',
'06': 'Listening to/playing music (not radio)',
'07': 'Playing games',
'08': 'Computer use for leisure (exc. Games)',
'09': 'Arts and crafts as a hobby',
'10': 'Collecting as a hobby',
'11': 'Hobbies, except arts & crafts and collecting',
'12': 'Reading for personal interest',
'13': 'Writing for personal interest',
'99': 'NEC'
}},
'04': {'Arts and Entertainment (other than sports)': {
'01': 'Attending performing arts',
'02': 'Attending museums',
'03': 'Attending movies/film',
'04': 'Attending gambling establishments',
'05': 'Security procedures rel. to arts & entertainment',
'99': 'NEC'
}},
'05': {'Waiting Associated with Socializing, Relaxing, and Leisure': {
'01': 'Waiting assoc. w/socializing & communicating',
'02': 'Waiting assoc. w/attending/hosting social events',
'03': 'Waiting associated with relaxing/leisure',
'04': 'Waiting associated with arts & entertainment',
'99': 'NEC'
}},
'99': {'Socializing, Relaxing, and Leisure, NEC': {
'99': 'NEC',
}},
}},
'13': {'Sports, Exercise, and Recreation': {
'01': {'Participating in Sports, Exercise, or Recreation': {
'01': 'Doing Aerobics',
'02': 'Playing baseball',
'03': 'Playing basketball',
'04': 'Biking',
'05': 'Playing billiards',
'06': 'Boating',
'07': 'Bowling',
'08': 'Climbing, spelunking, caving',
'09': 'Dancing',
'10': 'Participating in equestrian sports',
'11': 'Fencing',
'12': 'Fishing',
'13': 'Playing football',
'14': 'Golfing',
'15': 'Doing gymnastics',
'16': 'Hiking',
'17': 'Playing hockey',
'18': 'Hunting',
'19': 'Participating in martial arts',
'20': 'Playing racquet sports',
'21': 'Participating in rodeo competitions',
'22': 'Rollerblading',
'23': 'Playing rugby',
'24': 'Running',
'25': 'Skiing, ice skating, snowboarding',
'26': 'Playing soccer',
'27': 'Softball',
'28': 'Using cardiovascular equipment',
'29': 'Vehicle touring/racing',
'30': 'Playing volleyball',
'31': 'Walking',
'32': 'Participating in water sports',
'33': 'Weightlifting/strength training',
'34': 'Working out, unspecified',
'35': 'Wrestling',
'36': 'Doing yoga',
'99': 'NEC'
}},
'02': {'Attending Sporting/Recreational Events': {
'01': 'Watching aerobics',
'02': 'Watching baseball',
'03': 'Watching basketball',
'04': 'Watching biking',
'05': 'Watching billiards',
'06': 'Watching boating',
'07': 'Watching bowling',
'08': 'Watching climbing, spelunking, caving',
'09': 'Watching dancing',
'10': 'Watching equestrian sports',
'11': 'Watching fencing',
'12': 'Watching fishing',
'13': 'Watching football',
'14': 'Watching golfing',
'15': 'Watching gymnastics',
'16': 'Watching hockey',
'17': 'Watching martial arts',
'18': 'Watching racquet sports',
'19': 'Watching rodeo competitions',
'20': 'Watching rollerblading',
'21': 'Watching rugby',
'22': 'Watching running',
'23': 'Watching skiing, ice skating, snowboarding',
'24': 'Watching soccer',
'25': 'Watching softball',
'26': 'Watching vehicle touring/racing',
'27': 'Watching volleyball',
'28': 'Watching walking',
'29': 'Watching water sports',
'30': 'Watching weightlifting/strength training',
'31': 'Watching people working out, unspecified',
'32': 'Watching wrestling',
'99': 'NEC'
}},
'03': {'Waiting Associated with Sports, Exercise, & Recreation': {
'01': 'Waiting related to playing sports or exercising',
'02': 'Waiting related to attending sporting events',
'99': 'NEC'
}},
'04': {'Security Procedures Rel. to Sports, Exercise, & Recreation': {
'01': 'Security related to playing sports or exercising',
'02': 'Security related to attending sporting events',
'99': 'NEC'
}},
'99': {'Sports, Exercise, & Recreation, NEC': {
'99': 'NEC',
}},
}},
'14': {'Religious and Spiritual Activities': {
'01': {'Religious/Spiritual Practices': {
'01': 'Attending religious services',
'02': 'Participation in religious practices',
'03': 'Waiting associated w/religious & spiritual activities',
'04': 'Security procedures rel. to religious & spiritual activities',
'05': 'Religious education activities',
}},
'99': {'Religious and Spiritual Activities, NEC': {
'99': 'NEC',
}},
}},
'15': {'Volunteer Activities': {
'01': {'Administrative & Support Activities': {
'01': 'Computer Use',
'02': 'Organizing and preparing',
'03': 'Reading',
'04': 'Telephone calls (except hotline counseling)',
'05': 'Writing',
'06': 'Fundraising',
'99': 'NEC'
}},
'02': {'Social Service & Care Activities (Except Medical)': {
'01': 'Food preparation, presentation, clean-up',
'02': 'Collecting & delivering clothing & other goods',
'03': 'Providing care',
'04': 'Teaching, leading, counseling, mentoring',
'99': 'NEC'
}},
'03': {'Indoor & Outdoor Maintenance, Building, & Clean-up Activities': {
'01': 'Building houses, wildlife sites, & other structures',
'02': 'Indoor & outdoor maintenance, repair, & clean-up',
'99': 'NEC'
}},
'04': {'Participating in Performance & Cultural Activities': {
'01': 'Performing',
'02': 'Serving at volunteer events & cultural activities',
'99': 'NEC'
}},
'05': {'Attending Meetings, Conferences, & Training': {
'01': 'Attending meetings, conferences, & training',
'99': 'NEC'
}},
'06': {'Public Health & Safety Activities': {
'01': 'Public health activities',
'02': 'Public safety activities',
'99': 'NEC'
}},
'07': {'Waiting Associated with Volunteer Activities': {
'01': 'Waiting associated with volunteer activities',
'99': 'NEC'
}},
'08': {'Security Procedures related to volunteer activities': {
'01': 'Security Procedures related to volunteer activities',
'99': 'NEC'
}},
'99': {'Volunteer Activities, NEC': {
'99': 'NEC',
}},
}},
'16': {'Telephone Calls': {
'01': {'Telephone Calls (to or from)': {
'01': 'Telephone calls to/from family members',
'02': 'Telephone calls to/from friends, neighbors, or acquaintances',
'03': 'Telephone calls to/from education services providers',
'04': 'Telephone calls to/from salespeople',
'05': 'Telephone calls to/from professional or personal care svcs providers',
'06': 'Telephone calls to/from household services providers',
'07': 'Telephone calls to/from paid child or adult care providers',
'08': 'Telephone calls to/from government officials',
'99': 'NEC'
}},
'02': {'Waiting Associated with Telephone Calls': {
'01': 'Waiting associated with telephone calls',
'99': 'NEC'
}},
'99': {'Telephone Calls, NEC': {
'99': 'NEC',
}},
}},
'18': {'Traveling': {
'01': {'Travel Related to Personal Care': {
'01': 'Travel related to personal care',
'99': 'NEC'
}},
'02': {'Travel Related to Household Activities': {
'01': 'Travel related to housework',
'02': 'Travel related to food & drink prep., clean-up, & presentation',
'03': 'Travel related to interior maintenance, repair, & decoration',
'04': 'Travel related to exterior maintenance, repair, & decoration',
'05': 'Travel related to lawn, garden, and houseplant care',
'06': 'Travel related to care for animals and pets (not vet care)',
'07': 'Travel related to vehicle care & maintenance (by self)',
'08': 'Travel related to appliance, tool, and toy set-up, repair, & maintenance (by self)',
'09': 'Travel related to household management',
'99': 'NEC'
}},
'03': {'Travel Related to Caring For & Helping HH Members': {
'01': 'Travel related to caring for & helping hh children',
'02': 'Travel related to hh childrens education',
'03': 'Travel related to hh childrens health',
'04': 'Travel related to caring for hh adults',
'05': 'Travel related to helping hh adults',
'99': 'NEC'
}},
'04': {'Travel Related to Caring For & Helping Nonhh Members': {
'01': 'Travel related to caring for and helping nonhh children',
'02': 'Travel related to nonhh childrens education',
'03': 'Travel related to nonhh childrens health',
'04': 'Travel related to caring for nonhh adults',
'05': 'Travel related to helping nonhh adults',
'99': 'NEC'
}},
'05': {'Travel Related to Work': {
'01': 'Travel related to working',
'02': 'Travel related to work-related activities',
'03': 'Travel related to income-generating activities',
'04': 'Travel related to job search & interviewing',
'99': 'NEC'
}},
'06': {'Travel Related to Education': {
'01': 'Travel related to taking class',
'02': 'Travel related to extracurricular activities (ex. Sports)',
'03': 'Travel related to research/homework',
'04': 'Travel related to registration/administrative activities',
'99': 'NEC'
}},
'07': {'Travel Related to Consumer Purchases': {
'01': 'Travel related to grocery shopping',
'02': 'Travel related to purchasing gas',
'03': 'Travel related to purchasing food (not groceries)',
'04': 'Travel related to shopping, ex groceries, food, and gas',
'99': 'NEC'
}},
'08': {'Travel Related to Using Professional and Personal Care Services': {
'01': 'Travel related to using childcare services',
'02': 'Travel related to using financial services and banking',
'03': 'Travel related to using legal services',
'04': 'Travel related to using medical services',
'05': 'Travel related to using personal care services',
'06': 'Travel related to using real estate services',
'07': 'Travel related to using veterinary services',
'99': 'NEC'
}},
'09': {'Travel Related to Using Household Services': {
'01': 'Travel related to using household services',
'02': 'Travel related to using home main./repair/décor./construction svcs',
'03': 'Travel related to using pet services (not vet)',
'04': 'Travel related to using lawn and garden services',
'05': 'Travel related to using vehicle maintenance & repair services',
'99': 'NEC'
}},
'10': {'Travel Related to Using Govt Services & Civic Obligations': {
'01': 'Travel related to using government services',
'02': 'Travel related to civic obligations & participation',
'99': 'NEC'
}},
'11': {'Travel Related to Eating and Drinking': {
'01': 'Travel related to eating and drinking',
'99': 'NEC'
}},
'12': {'Travel Related to Socializing, Relaxing, and Leisure': {
'01': 'Travel related to socializing and communicating',
'02': 'Travel related to attending or hosting social events',
'03': 'Travel related to relaxing and leisure',
'04': 'Travel related to arts and entertainment',
'05': 'Travel as a form of entertainment',
'99': 'NEC'
}},
'13': {'Travel Related to Sports, Exercise, & Recreation': {
'01': 'Travel related to participating in sports/exercise/recreation',
'02': 'Travel related to attending sporting/recreational events',
'99': 'NEC'
}},
'14': {'Travel Related to Religious/Spiritual Activities': {
'01': 'Travel related to religious/spiritual practices',
'99': 'NEC'
}},
'15': {'Travel Related to Volunteer Activities': {
'01': 'Travel related to volunteering',
'99': 'NEC'
}},
'16': {'Travel Related to Telephone Calls': {
'01': 'Travel related to phone calls',
'99': 'NEC'
}},
'18': {'Security Procedures Related to Traveling': {
'01': 'Security procedures related to traveling',
'99': 'NEC'
}},
'99': {'Traveling, NEC': {
'99': 'NEC',
}},
}},
'50': {'Data Codes': {
'01': {'Unable to Code': {
'01': 'Insufficient detail in verbatim',
'03': 'Missing travel or destination',
'05': 'Respondent refused to provide information/"none of your business"',
'06': 'Gap/cannott remember',
'07': 'Unable to code activity at 1st tier',
}},
'99': {'Data codes, NEC': {
'99': 'NEC',
}},
}}
}
# '99': {'Caring For & Helping Household (nonHH) Members': {
# '01': {'Sleeping': {
# '01': 'Sleeping',
# '02': 'Sleeping',
# '03': 'Sleeping',
# '04': 'Sleeping',
# '05': 'Sleeping',
# '06': 'Sleeping',
# '99': 'NEC'
# }},
# '02': {'Sleeping': {
# '01': 'Sleeping',
# '02': 'Sleeping',
# '03': 'Sleeping',
# '04': 'Sleeping',
# '05': 'Sleeping',
# '06': 'Sleeping',
# '99': 'NEC'
# }},
# '03': {'Sleeping': {
# '01': 'Sleeping',
# '02': 'Sleeping',
# '03': 'Sleeping',
# '04': 'Sleeping',
# '05': 'Sleeping',
# '06': 'Sleeping',
# '99': 'NEC'
# }},
# '04': {'Sleeping': {
# '01': 'Sleeping',
# '02': 'Sleeping',
# '03': 'Sleeping',
# '04': 'Sleeping',
# '05': 'Sleeping',
# '06': 'Sleeping',
# '99': 'NEC'
# }},
# '05': {'Sleeping': {
# '01': 'Sleeping',
# '02': 'Sleeping',
# '03': 'Sleeping',
# '04': 'Sleeping',
# '05': 'Sleeping',
# '06': 'Sleeping',
# '99': 'NEC'
# }},
# '06': {'Sleeping': {
# '01': 'Sleeping',
# '02': 'Sleeping',
# '03': 'Sleeping',
# '04': 'Sleeping',
# '05': 'Sleeping',
# '06': 'Sleeping',
# '99': 'NEC'
# }},
# '99': {'Household Activities, NEC': {
# '99': 'NEC',
# }},
# }},
# FROM ATUS activity file: maps the TEWHERE location code of an activity to a
# human-readable label. Codes 12-21 (and 99) describe a mode of transportation
# rather than a physical place.
ACTIVITY_WHERE = {1: "Respondent's home or yard",
                  2: "Respondent's workplace",
                  3: "Someone else's home",
                  4: "Restaurant or bar",
                  5: "Place of worship",
                  6: "Grocery store",
                  7: "Other store/mall",
                  8: "School",
                  9: "Outdoors away from home",
                  10: "Library",
                  11: "Other place",
                  12: "Car, truck, or motorcycle (driver)",
                  13: "Car, truck, or motorcycle (passenger)",
                  14: "Walking",
                  15: "Bus",
                  16: "Subway/train",
                  17: "Bicycle",
                  18: "Boat/ferry",
                  19: "Taxi/limousine service",
                  20: "Airplane",
                  21: "Other mode of transportation",
                  30: "Bank",
                  31: "Gym/health club",
                  32: "Post Office",
                  89: "Unspecified place",
                  99: "Unspecified mode of transportation"
                  }
# FROM ATUS 2016
# WHO FILE
# TUACTIVITY_N: Activity line number
# TULINENO: ATUS person line number
# TUWHO_CODE: Who was in the room with you / Who accompanied you?
# Maps TUWHO_CODE values to who was present during / accompanied the activity.
# NOTE(review): codes 18 and 19 both map to "Alone" — presumably two distinct
# survey codes collapsed to one label; confirm against the ATUS codebook.
ACTIVITY_WHO = {18: "Alone",
                19: "Alone",
                20: "Spouse",
                21: "Unmarried partner",
                22: "Own household child",
                23: "Grandchild",
                24: "Parent",
                25: "Brother/sister",
                26: "Other related person",
                27: "Foster child",
                28: "Housemate/roommate",
                29: "Roomer/boarder",
                30: "Other nonrelative",
                40: "Own nonhousehold child < 18",
                51: "Parents (not living in household)",
                52: "Other nonhousehold family members < 18",
                53: "Other nonhousehold family members 18 and older (including parents-in-law)",
                54: "Friends",
                56: "Neighbors/acquaintances",
                57: "Other nonhousehold children < 18",
                58: "Other nonhousehold adults 18 and older",
                59: "Boss or manager",
                60: "People whom I supervise",
                61: "Co-workers",
                62: "Customers"
                }
# PERSON PROPERTIES DEVELOPED FOR LOCATION PERSON MATCHING
# # https://www.bls.gov/tus/overview.htm
# # https://factfinder.census.gov/faces/tableservices/jsf/pages/productview.xhtml?pid=DEC_10_DP_DPDP1&src=pt
# (cumulative probability, life-stage label) pairs; thresholds are cumulative,
# so each stage's own share is the difference from the previous entry (shown in
# the trailing comments). Presumably sampled by comparing a uniform draw
# against the first threshold that covers it — confirm with the caller.
LIFECYCLES = [(0.24, 'Dependent'), # 24%
              (0.38, 'Pre-family'), # 14%
              (0.54, 'Early Family'), # 16%
              (0.70, 'Late Family'), # 16%
              (0.84, 'Post Family'), # 14%
              (1.00, 'Senior') # 16%
              ]
# (cumulative probability, class label, value) triples; the trailing number is
# presumably an income measure (units unclear — TODO confirm against the CPS
# Household Income notebook referenced below).
SOCIAL_CLASSES = [(0.01, 'Upper', 440), # 1%
                  (0.16, 'Upper Middle', 147), # 15%
                  (0.46, 'Lower Middle', 75), # 30%
                  (0.76, 'Working', 35), # 30%
                  (0.89, 'Working Poor', 20), # 12%
                  (1.00, 'Underclass', 0) # 12%
                  ] # from CPS Household Income jupyter notebook
# Flat list of psychographic trait labels, arranged as opposing pairs
# (active/sedentary, introvert/extrovert, technophile/technophobe, ...).
PSYCHOGRAPHICS = ['active',
                  'sedentary',
                  'healthful',
                  'apathetic',
                  'environmentalist',
                  'extractionist',
                  'introvert',
                  'extrovert',
                  'stressed',
                  'relaxed',
                  'technophile',
                  'technophobe',
                  'republican',
                  'democrat',
                  'entertainment',
                  'enrichment',
                  'status',
                  'humble',
                  'planner',
                  'impulse',
                  ]
|
#!/usr/bin/env python
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes carry an extra `random` pointer.

        O(1) extra space, three passes:
          1. interleave a copy after each original node (A -> A' -> B -> B'),
          2. wire each copy's `random` through its original's neighbour,
          3. unweave the copies, restoring the original list.
        """
        if head is None:  # bug fix: original crashed on an empty list (head.next)
            return None
        # Pass 1: splice a copy directly after each original node.
        curr = head
        while curr:
            node = Node(curr.val, curr.next, None)
            curr.next, curr = node, curr.next
        # Pass 2: the copy of curr.random sits right after it, so it is
        # curr.random.next. Bug fixes vs. original: guard a None random, and
        # advance curr (the original reassigned curr.next instead of curr,
        # which mis-wired randoms and eventually dereferenced None).
        curr = head
        while curr:
            copy = curr.next
            copy.random = curr.random.next if curr.random else None
            curr = copy.next
        # Pass 3: detach the copies and restore the original next pointers.
        ret = head.next
        curr = head
        while curr:
            copy = curr.next
            curr.next = copy.next
            copy.next = curr.next.next if curr.next else None
            curr = curr.next
        return ret
|
# Read integers until the user answers 'N'; reject duplicates, then print the
# accepted values in ascending order.
valores = []
continuar = True
while continuar:
    n = int(input('Digite um valor: '))
    if n in valores:
        print('Valor duplicado... não foi add')
    else:
        valores.append(n)
        print('Valor add')
    op = ' '
    while op not in 'SN':
        op = str(input('Deseja continuar [S/N]? ')).strip().upper()
    continuar = op != 'N'
print(sorted(valores))
|
def _responses_path(
    config: "Config",
    sim_runner: "FEMRunner",
    sim_params: "SimParams",
    response_type: "ResponseType",
) -> str:
    """Return the path of FEM responses generated with the given parameters.

    Delegates to the runner's output-path builder, requesting a NumPy
    ``.npy`` file keyed by the single response type.
    """
    out_path = sim_runner.sim_out_path(
        config=config,
        sim_params=sim_params,
        ext="npy",
        response_types=[response_type],
    )
    return out_path
def det(a):
    """Determinant of a 3x3 matrix given as nested sequences (Sarrus' rule)."""
    positive = (
        a[0][0] * a[1][1] * a[2][2]
        + a[0][1] * a[1][2] * a[2][0]
        + a[0][2] * a[1][0] * a[2][1]
    )
    negative = (
        a[0][2] * a[1][1] * a[2][0]
        + a[0][1] * a[1][0] * a[2][2]
        + a[0][0] * a[1][2] * a[2][1]
    )
    return positive - negative
def unit_normal(a, b, c):
    """Unit normal vector of the plane defined by 3-D points a, b and c."""
    nx = det([[1, a[1], a[2]], [1, b[1], b[2]], [1, c[1], c[2]]])
    ny = det([[a[0], 1, a[2]], [b[0], 1, b[2]], [c[0], 1, c[2]]])
    nz = det([[a[0], a[1], 1], [b[0], b[1], 1], [c[0], c[1], 1]])
    # Normalize; raises ZeroDivisionError for degenerate (collinear) points,
    # matching the original behavior.
    norm = (nx ** 2 + ny ** 2 + nz ** 2) ** 0.5
    return nx / norm, ny / norm, nz / norm
def dot(a, b):
    """Dot product of two 3-vectors (only the first three components)."""
    return sum(a[i] * b[i] for i in range(3))
def cross(a, b):
    """Cross product a x b of two 3-vectors, returned as a tuple."""
    return (
        a[1] * b[2] - a[2] * b[1],
        a[2] * b[0] - a[0] * b[2],
        a[0] * b[1] - a[1] * b[0],
    )
def poly_area(poly):
    """Area of a planar polygon given as a sequence of 3-D points.

    Sums the cross products of consecutive vertex pairs (wrapping around)
    and projects the total onto the polygon's unit normal.

    :raises ValueError: if fewer than 3 points are given (not a plane).
    """
    if len(poly) < 3:  # not a plane - no area
        raise ValueError("Not a plane, need >= 3 points")
    total = [0, 0, 0]
    # Bug fix: the original detected the wrap-around index with `i is
    # len(poly) - 1`, an identity comparison that only works while CPython
    # caches small ints — it silently breaks for large polygons. Pair each
    # vertex with its successor explicitly instead.
    for vi1, vi2 in zip(poly, list(poly[1:]) + [poly[0]]):
        prod = cross(vi1, vi2)
        total[0] += prod[0]
        total[1] += prod[1]
        total[2] += prod[2]
    result = dot(total, unit_normal(poly[0], poly[1], poly[2]))
    return abs(result / 2)
|
# Input-event type codes (note: no event here uses code 4).
KEY_PRESS = 0
MOUSE_DOWN = 1
MOUSE_UP = 2
MOUSE_DOUBLE_CLICK = 3
MOUSE_MOVE = 5
SCROLL_DOWN = 6
SCROLL_UP = 7
# Scroll amount applied per scroll event.
SCROLL_STEP = 1
# Modifier key names and delays between synthesized events
# (presumably seconds — TODO confirm against the event player).
CTRL = 'ctrl'
SHIFT = 'shift'
ALT = 'alt'
MODIFIER_KEYS = (CTRL, SHIFT, ALT,)
MODIFIER_KEYS_PRESS_DELAY = .4
EVENTS_DELAY = .05
# Mouse button identifiers.
LEFT = "left"
MIDDLE = "middle"
RIGHT = "right"
# Quality presets (0-100, presumably JPEG-style — confirm with the encoder)
# and the matching image scale factors.
HIGH_QUALITY = 75
MEDIUM_QUALITY = 60
LOW_QUALITY = 40
HIGH_SCALE = 70/100
MEDIUM_SCALE = 50/100
LOW_SCALE = 40/100
|
# Celsius to Fahrenheit conversion: F = C * 9/5 + 32
print("Give the Number of Celcius: ")
degrees_c = float(input())
F = degrees_c * 9 / 5 + 32
print("The result is: ")
print(F)
|
# Read a percentile score (0-100) and print the matching grade.
# NOTE(review): the mapping awards "A" to the LOWEST scores (<= 30) — this
# looks inverted for a percentile scale; confirm the intended grading.
score = float(input("백분위(0~100)점수를 입력해 주세요 >>"))
if score > 70:
    print("당신의 학점은 C입니다.")
elif score > 30:
    print("당신의 학점은 B입니다.")
else:
    print("당신의 학점은 A입니다.")
# -*- coding: utf-8 -*-
"""
ParaMol MM_engines subpackage.
Contains modules related to the ParaMol representation of MM engines.
"""
# Submodules re-exported by `from ParaMol.MM_engines import *`.
__all__ = ['openmm', 'resp']
#Software By AwesomeWithRex
def read_file(filename):
    """Return the lines of *filename* as a list (trailing newlines kept)."""
    with open(filename) as handle:
        return handle.readlines()
def get_template():
    """Read template.html from the working directory and return its lines."""
    with open('template.html', 'r') as handle:
        return handle.readlines()
def put_in_body(file, template):
    """Splice the lines of *file* into *template* at the '|b|' placeholder.

    The placeholder line (the last template line containing '|b|', or line 0
    if none is found) has '|b|' replaced by the file's text, with each newline
    rendered as itself followed by '<br/>\\t' so the generated HTML keeps both
    the source break and a visual break. Mutates *template* in place and
    returns it.
    """
    body_tag = 0
    for index, line in enumerate(template):
        if '|b|' in line:
            body_tag = index
    # Single join+replace instead of the original per-character accumulation
    # loop (which appended every char and then re-appended '<br/>\t' after
    # each newline); the output is identical.
    formatted_text = ''.join(file).replace('\n', '\n<br/>\t')
    template[body_tag] = template[body_tag].replace('|b|', formatted_text)
    for line in template:  # debug echo of the generated document
        print(line)
    return template
def save_template(name_of_doc, saved_doc_file):
    """Write the given sequence of lines to the file *name_of_doc*."""
    with open(name_of_doc, 'w') as handle:
        handle.writelines(saved_doc_file)
def put_in_title():
    # Placeholder: inserting the document title is not implemented yet.
    pass
def main():
    """Render text.txt through template.html and save it as the.html."""
    content = read_file('text.txt')
    template = get_template()
    formatted_template = put_in_body(content, template)
    save_template('the.html', formatted_template)


if __name__ == '__main__':
    main()
|
class Node:
    """A singly linked list element holding `data` and a `next` pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Minimal singly linked list: append, prepend, insert-after, delete."""

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print each element's data on its own line, head to tail."""
        node = self.head
        while node is not None:
            print(node.data)
            node = node.next

    def append(self, data):
        """Add a node holding *data* at the tail."""
        fresh = Node(data)
        if self.head is None:
            self.head = fresh
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = fresh

    def prepend(self, data):
        """Add a node holding *data* at the head."""
        fresh = Node(data)
        fresh.next = self.head
        self.head = fresh

    def insert_after_node(self, prev_node, data):
        """Insert a node holding *data* right after *prev_node*."""
        if not prev_node:
            print("previous Node not in the list")
            return
        fresh = Node(data)
        fresh.next, prev_node.next = prev_node.next, fresh

    def delete_node(self, key):
        """Remove the first node whose data equals *key* (no-op if absent)."""
        node = self.head
        if node is not None and node.data == key:
            self.head = node.next
            return
        trailer = None
        while node is not None and node.data != key:
            trailer = node
            node = node.next
        if node is None:
            return
        trailer.next = node.next
# Demo: build A-D, drop the head, insert after the new second node, print.
llist = LinkedList()
for letter in ("A", "B", "C", "D"):
    llist.append(letter)
#llist.prepend("E")
llist.delete_node("A")
llist.insert_after_node(llist.head.next, "E")
#print(llist.head.data)
llist.print_list()
|
# -*- coding: utf-8 -*-
def in_segregation(x0, R, n, N=None):
    """Return the actual indium concentration in the nth layer.

    Segregation model: within the well the concentration builds up as
    ``x0 * (1 - R**n)``; when the well thickness ``N`` is given, the layer
    is taken to lie beyond the well and the segregated indium decays as
    ``x0 * (1 - R**N) * R**(n - N)``.

    Params
    ------
    x0 : float
        the indium concentration between 0 and 1
    R : float
        the segregation coefficient
    n : int
        the current layer
    N : int, optional
        number of layers in the well
    """
    # Bug fix: `if N:` silently treated N == 0 like "not given"; test for
    # None explicitly so a zero-thickness well uses the decay branch.
    if N is not None:
        return x0 * (1 - R ** N) * R ** (n - N)
    return x0 * (1 - R ** n)
|
class Solution:
    def angleClock(self, hour: int, minutes: int) -> float:
        """Smaller angle in degrees between the clock hands at hour:minutes."""
        minute_angle = minutes * 6                         # 360 deg / 60 min
        hour_angle = ((hour * 30) + (minutes * 0.5)) % 360  # 30 deg/h plus drift
        diff = abs(hour_angle - minute_angle)
        # Report the smaller of the two angles formed by the hands.
        return diff if diff <= 180 else 360 - diff
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'variables': {
    'chromium_code': 1,
    # All generated .pb.{h,cc} and _pb2.py files land under this directory.
    'protoc_out_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out',
  },
  # Two targets: the protoc generation rule, and a library wrapping the
  # generated C++ sources so other targets can link them.
  'targets': [
    {
      # Protobuf compiler / generate rule for sync.proto. This is used by
      # test code in net, which is why it's isolated into its own .gyp file.
      'target_name': 'sync_proto',
      'type': 'none',
      'sources': [
        'sync.proto',
        'encryption.proto',
        'app_specifics.proto',
        'autofill_specifics.proto',
        'bookmark_specifics.proto',
        'extension_specifics.proto',
        'nigori_specifics.proto',
        'password_specifics.proto',
        'preference_specifics.proto',
        'session_specifics.proto',
        'test.proto',
        'theme_specifics.proto',
        'typed_url_specifics.proto',
      ],
      'rules': [
        {
          'rule_name': 'genproto',
          'extension': 'proto',
          'inputs': [
            '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
          ],
          # Each .proto yields Python bindings plus a C++ header/source pair.
          'outputs': [
            '<(PRODUCT_DIR)/pyproto/sync_pb/<(RULE_INPUT_ROOT)_pb2.py',
            '<(protoc_out_dir)/chrome/browser/sync/protocol/<(RULE_INPUT_ROOT).pb.h',
            '<(protoc_out_dir)/chrome/browser/sync/protocol/<(RULE_INPUT_ROOT).pb.cc',
          ],
          'action': [
            '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
            '--proto_path=.',
            './<(RULE_INPUT_ROOT)<(RULE_INPUT_EXT)',
            '--cpp_out=<(protoc_out_dir)/chrome/browser/sync/protocol',
            '--python_out=<(PRODUCT_DIR)/pyproto/sync_pb',
          ],
          'message': 'Generating C++ and Python code from <(RULE_INPUT_PATH)',
        },
      ],
      'dependencies': [
        '../../../../third_party/protobuf/protobuf.gyp:protoc#host',
      ],
    },
    {
      # Library of the protoc-generated C++ sources (note: test.proto has no
      # entry here — it is only used for its Python bindings).
      'target_name': 'sync_proto_cpp',
      'type': '<(library)',
      'sources': [
        '<(protoc_out_dir)/chrome/browser/sync/protocol/sync.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/sync.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/encryption.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/encryption.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/app_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/app_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/autofill_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/autofill_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/bookmark_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/bookmark_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/extension_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/extension_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/nigori_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/nigori_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/password_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/password_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/preference_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/preference_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/session_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/session_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/theme_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/theme_specifics.pb.h',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/typed_url_specifics.pb.cc',
        '<(protoc_out_dir)/chrome/browser/sync/protocol/typed_url_specifics.pb.h',
      ],
      'export_dependent_settings': [
        '../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
        'sync_proto',
      ],
      'dependencies': [
        '../../../../third_party/protobuf/protobuf.gyp:protobuf_lite',
        'sync_proto',
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          '<(protoc_out_dir)',
        ],
      },
      # This target exports a hard dependency because it includes generated
      # header files.
      'hard_dependency': 1,
    },
  ],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
    """Fingerprint the Foosun CMS by probing two characteristic files."""
    checks = (
        ("sysImages/css/PagesCSS.css", "foosun"),
        ("Tags.html", "Foosun"),
    )
    for path, product in checks:
        whatweb.recog_from_file(pluginname, path, product)
|
# Read the withdrawal amount and the account balance.
x, y = map(float, input().split())
# A withdrawal succeeds only when the amount is a positive multiple of 5 and
# the balance covers the amount plus the 0.50 transaction fee.
# NOTE(review): `x + 0.5 >= y` rejects a withdrawal that would leave exactly
# zero — confirm that is the intended rule.
if x + 0.5 >= y or x % 5 != 0 or y <= 0:
    # Invalid request: report the unchanged balance to two decimals.
    print("%.2f" % y)
else:
    # Debit the amount plus the fee and report the new balance.
    print("%.2f" % (y - x - 0.50))
|
#--------------------------------------
# Open and Parse BF File
#--------------------------------------
fileName = input("Enter name of Brainf*** file here: ")
validCommands = [">", "<", "+", "-", ".", ",", "[", "]"]
# Keep only the eight Brainf*** command characters; everything else is a
# comment by definition of the language.
with open(fileName, "r") as sourceFile:
    programCode = [ch for line in sourceFile for ch in line if ch in validCommands]
#--------------------------------------
# Find Indexes of Matching Brackets
#--------------------------------------
# Stack-match '[' with ']' so the interpreter can jump between them.
bracketPositions = []
openIndex = []
for position, command in enumerate(programCode):
    if command == "[":
        openIndex.append(position)
    elif command == "]":
        bracketPositions.append([openIndex.pop(), position])
#--------------------------------------
# Set Up BF Cells and Pointers
#--------------------------------------
maxCells = 5000            # size of the circular tape
# Idiomatic pre-sized tape; replaces the original manual append loop and its
# throwaway counter variable.
memCells = [0] * maxCells
memPointer = 0             # current tape cell
instructionPointer = 0     # index into programCode
#--------------------------------------
# Define BF Commands
#--------------------------------------
def moveRight():
    """'>' — advance the data pointer, wrapping past the tape end."""
    global memPointer
    memPointer = (memPointer + 1) % maxCells
def moveLeft():
    """'<' — retreat the data pointer, wrapping below zero."""
    global memPointer
    memPointer = (memPointer - 1) % maxCells
def incrementCell():
    """'+' — add one to the current cell (unbounded, unlike classic BF)."""
    memCells[memPointer] += 1
def decrementCell():
    """'-' — subtract one from the current cell."""
    memCells[memPointer] -= 1
def outputValue():
    """'.' — print the current cell as a character, no newline."""
    print(chr(memCells[memPointer]), end="")
def takeInput():
    """',' — prompt for a line and store its first character's code."""
    print()
    memCells[memPointer] = ord(input(">")[0])
def openBracket():
    """'[' — jump to the matching ']' when the current cell is zero."""
    global instructionPointer
    if memCells[memPointer] == 0:
        for pair in bracketPositions:
            if pair[0] == instructionPointer:
                instructionPointer = pair[1]
def closeBracket():
    """']' — jump back to the matching '[' when the cell is non-zero."""
    global instructionPointer
    if memCells[memPointer] != 0:
        for pair in bracketPositions:
            if pair[1] == instructionPointer:
                instructionPointer = pair[0]
#--------------------------------------
# Execute BF Code
#--------------------------------------
# Dispatch table instead of an if-chain; programCode only ever contains the
# eight valid commands, so every lookup hits.
commandTable = {
    ">": moveRight,
    "<": moveLeft,
    "+": incrementCell,
    "-": decrementCell,
    ".": outputValue,
    ",": takeInput,
    "[": openBracket,
    "]": closeBracket,
}
while instructionPointer != len(programCode):
    commandTable[programCode[instructionPointer]]()
    instructionPointer += 1
|
'''
ARIS
Author: 𝓟𝓱𝓲𝓵.𝓔𝓼𝓽𝓲𝓿𝓪𝓵 @ 𝓕𝓻𝓮𝓮.𝓯𝓻
Date:<2018-05-18 15:52:50>
Released under the MIT License
'''
class Struct:
    """Recursively wrap a dict so its keys become attributes.

    Nested dicts become nested ``Struct`` instances; lists whose elements are
    all dicts become lists of ``Struct``; everything else is kept as-is.
    """

    def __init__(self, rawdat):
        # Fix: the original assigned ``rawdat`` itself as __dict__, aliasing
        # the caller's dict — the recursive rewrites below then mutated the
        # caller's data. Copy first so the input is left untouched.
        self.__dict__ = dict(rawdat)
        for key, value in rawdat.items():
            if isinstance(value, dict):
                self.__dict__[key] = Struct(value)
            elif isinstance(value, list) and all(type(item) is dict for item in value):
                self.__dict__[key] = [Struct(item) for item in value]
|
# Registry mapping input-module names to their implementing classes.
_registered_input_modules_types = {}

def register(name, class_type):
    """Register an input-module class under *name*.

    Raises:
        RuntimeError: if *name* is already registered.
    """
    if name in _registered_input_modules_types:
        # Fix: "Dublicate" -> "Duplicate" in the error message.
        raise RuntimeError("Duplicate input module name: " + name)
    _registered_input_modules_types[name] = class_type

def load_modules(agent, input_link_config):
    """Instantiate one registered input module per entry of the config dict.

    Each key of *input_link_config* selects a registered module type, which
    is constructed as ``module_type(agent, module_config)``.

    Raises:
        RuntimeError: if the config is not a dict or names an unknown module.
    """
    # get input modules configuration from Parameter Server
    if not isinstance(input_link_config, dict):
        raise RuntimeError("Input link configuration is not valid.")
    input_modules = []
    # Fix: dict.iteritems() is Python 2 only; use items() on Python 3.
    for module_name, module_config in input_link_config.items():
        module_type = _registered_input_modules_types.get(module_name)
        if module_type is None:
            # Fix: the original mixed a "{}" placeholder with the % operator,
            # which raised TypeError instead of the intended message.
            raise RuntimeError(
                "Input module {} type is unknown.".format(module_name))
        input_modules.append(module_type(agent, module_config))
    return input_modules
|
def find_missing(array):
    """Return the integers absent from the span ``array[0]..array[-1]``.

    The span endpoints follow the original contract: first and last element
    of *array* (which need not be sorted), inclusive.

    Fixes: returns [] for an empty list (the original raised IndexError on
    ``array[0]``) and uses a set for O(1) membership instead of the original
    O(n) scan per candidate.
    """
    if not array:
        return []
    present = set(array)
    return [x for x in range(array[0], array[-1] + 1) if x not in present]
# Demo: unsorted input; the span is defined by the endpoints 2 and 10,
# so this prints [3, 5, 6, 8, 9].
lst = [2, 4, 1, 7, 10]
print(find_missing(lst))
#! /root/anaconda3/bin/python
"""
@如果一个对象同时实现了特殊方法__iter__()和__next__(),那么该对象也被称为迭代器对象,如果该对象用于for-in语句,for-in语句首先会调用特殊方法__iter__()返回一个可迭代对象,然后不断调用该迭代对象的特殊方法__next__()返回了下一次迭代的值,直到遇到StopIteration时退出循环。
"""
class MyIterator(object):
    """Self-iterating object: yields 1..6, then raises StopIteration.

    Because it defines both __iter__() and __next__(), the instance is its
    own iterator; a for-in loop calls __iter__() once, then __next__()
    repeatedly until StopIteration ends the loop.
    """

    def __init__(self):
        # Counter holding the last value produced (0 before iteration starts).
        self.data = 0

    def __iter__(self):
        print('我是self', self)
        return self

    def __next__(self):
        # Guard clause: stop once six values have been produced.
        if self.data > 5:
            raise StopIteration()
        self.data += 1
        return self.data
# Constructing the object alone does not call __iter__().
print('我是MyIterator()', MyIterator())
# for-in calls __iter__() once, then __next__() until StopIteration: 1..6.
for item in MyIterator():
    print(item)
# The loop variable outlives the loop, so this prints the last value (6).
print(item)
|
def trigger():
    """Return SQL that (re)creates the ``trg_mensagem_ticket_solucao`` trigger.

    After a ``mensagem_ticket`` row is inserted with ``solucao`` set, the
    parent ``ticket`` row is stamped as solved (``solucionado_id`` plus the
    solution date/time). The trigger is dropped first so the statement is
    safe to re-run.
    """
    return """
CREATE OR REPLACE FUNCTION trg_mensagem_ticket_solucao()
RETURNS TRIGGER AS $$
BEGIN
IF (NEW.solucao) THEN
UPDATE ticket SET solucionado_id = NEW.id, data_solucao = NOW(), hora_solucao = NOW() WHERE id = NEW.ticket_id;
END IF;
RETURN NEW;
END
$$ LANGUAGE plpgsql;
DROP TRIGGER IF EXISTS trg_mensagem_ticket_solucao ON mensagem_ticket;
CREATE TRIGGER trg_mensagem_ticket_solucao
AFTER INSERT ON mensagem_ticket
FOR EACH ROW EXECUTE PROCEDURE trg_mensagem_ticket_solucao();
"""
|
S1 = "Hello Python"
print(S1)            # Prints complete string
print(S1[0])         # Prints first character of the string
print(S1[2:5])       # Prints characters starting from 3rd to 5th
print(S1[2:])        # Prints string starting from 3rd character
print(S1 * 2)        # Prints string two times
print(S1 + "Thanks") # Prints concatenated string
# CodeCombat script: `pet` and `hero` are globals supplied by the game engine
# — TODO confirm; they are not defined in this file.
def onSpawn():
    # Walk the pet back and forth forever between two fixed waypoints.
    while True:
        pet.moveXY(48, 8)
        pet.moveXY(12, 8)
# Register the handler so the pet starts patrolling when it spawns.
pet.on("spawn", onSpawn)
# Meanwhile the hero shouts encouragement forever.
while True:
    hero.say("Run!!!")
    hero.say("Faster!")
|
def valid_parentheses(parens):
    """Are the parentheses validly balanced?

    >>> valid_parentheses("()")
    True
    >>> valid_parentheses("()()")
    True
    >>> valid_parentheses("(()())")
    True
    >>> valid_parentheses(")()")
    False
    >>> valid_parentheses("())")
    False
    >>> valid_parentheses("((())")
    False
    >>> valid_parentheses(")()(")
    False
    """
    # Running nesting depth: '(' adds one, ')' removes one. Any other
    # character raises KeyError, exactly as in the original mapping.
    delta = {"(": 1, ")": -1}
    depth = 0
    for ch in parens:
        depth += delta[ch]
        if depth < 0:
            # A closer appeared before its opener.
            return False
    return depth == 0
|
# To print Fibonacci Series upto n numbers and replace all prime numbers and multiples of 5 by 0
# Checking for prime numbers
def isprime(numb):
    """Return True if ``numb`` is prime, else False.

    Fixes the original implementation: its ``else: return True`` was attached
    to the inner ``if``, so it returned True as soon as the *first* candidate
    divisor failed to divide ``numb`` (reporting 9, 25, ... as prime), and it
    fell through returning None for ``numb < 2``. Trial division now runs up
    to sqrt(numb), which is sufficient for primality.
    """
    if numb < 2:
        return False
    for i in range(2, int(numb ** 0.5) + 1):
        if numb % i == 0:
            return False
    return True
# Finding out the fibonacci numbers
def fibonacci_series(n):
    """Print Fibonacci terms, substituting 0 for primes and multiples of 5.

    Contract (unusual — kept as-is): the two seed terms ``1 1`` are printed
    up front, then the loop emits ``n + 1`` further terms, so the caller
    passes ``total_terms - 3``.
    NOTE(review): for n == 1 only a single "1" is printed, and for n < 1 the
    two seeds are still printed — the requested count is not honoured for
    small inputs; confirm against the intended spec before relying on it.
    """
    flag = 0
    a,b = 1,1
    if n == 1:
        print(a)
    else:
        print(a, end = " ")
        print(b, end = " ")
        # Inclusive counter: the loop body runs n + 1 times.
        while flag <= n:
            c = a + b
            a,b = b,c
            flag += 1
            # Replace primes and multiples of 5 by 0, per the exercise.
            if c % 5 == 0 or isprime(c):
                print(0, end = " ")
            else:
                print(c, end = " ")
# The number of fibonacci terms required
n1 = int(input("Enter the value of n: "))
# Offset compensates for the two seed terms printed up front and the
# inclusive loop counter inside fibonacci_series (it emits n + 1 terms).
# NOTE(review): for n1 <= 3 this yields n <= 0 and the printed count is
# wrong — confirm expected behaviour for small inputs.
n = n1 - 3
fibonacci_series(n)
class Solution:
    def reorderLogFiles(self, logs: List[str]) -> List[str]:
        """Reorder logs so letter-logs come first, sorted by content then by
        identifier; digit-logs follow in their original relative order
        (sorted() is stable, and all digit-logs share the same key)."""
        def sort_key(log):
            identifier, rest = log.split(None, 1)
            if rest[0].isalpha():
                return (0, rest, identifier)
            return (1,)
        return sorted(logs, key=sort_key)
|
# -*- coding: utf-8 -*-
"""
reV Econ utilities
"""
def lcoe_fcr(fixed_charge_rate, capital_cost, fixed_operating_cost,
             annual_energy_production, variable_operating_cost):
    """Calculate the Levelized Cost of Electricity (LCOE) using the
    fixed-charge-rate method:

    LCOE = ((fixed_charge_rate * capital_cost + fixed_operating_cost)
            / annual_energy_production + variable_operating_cost)

    Parameters
    ----------
    fixed_charge_rate : float | np.ndarray
        Fixed charge rate (unitless)
    capital_cost : float | np.ndarray
        Capital cost (aka Capital Expenditures) ($)
    fixed_operating_cost : float | np.ndarray
        Fixed annual operating cost ($/year)
    annual_energy_production : float | np.ndarray
        Annual energy production (kWh for year)
        (can be calculated as capacity * cf * 8760)
    variable_operating_cost : float | np.ndarray
        Variable operating cost ($/kWh)

    Returns
    -------
    lcoe : float | np.ndarray
        LCOE in $/MWh
    """
    # Annualized fixed costs, spread over the year's energy production.
    annualized_fixed = fixed_charge_rate * capital_cost + fixed_operating_cost
    lcoe_per_kwh = annualized_fixed / annual_energy_production \
        + variable_operating_cost
    # Convert $/kWh to $/MWh.
    return lcoe_per_kwh * 1000
|
"""
Base Exception
MLApp Exception - inherit from Base Exception
"""
class MLAppBaseException(Exception):
    """Root of the MLApp exception hierarchy; stores the error message."""
    def __init__(self, message):
        self.message = message

class FrameworkException(MLAppBaseException):
    """Internal framework error."""
    _PREFIX = "ML APP ERROR"
    def __init__(self, message=None):
        # Fix: always record the message. The original stored it only for
        # non-None str values, leaving self.message unset otherwise, which
        # made __str__ raise AttributeError.
        self.message = message
    def __str__(self):
        return "[%s] %s\n" % (self._PREFIX, str(self.message))

class UserException(MLAppBaseException):
    """Error attributable to user code or configuration.

    Subclasses only override ``_PREFIX``; the formatting logic lives here
    once instead of being duplicated in every class (as in the original).
    """
    _PREFIX = "USER ERROR"
    def __str__(self):
        return "[%s] %s\n" % (self._PREFIX, str(self.message))

class FlowManagerException(UserException):
    """Error raised by the flow manager."""
    _PREFIX = "FLOW MANAGER ERROR"

class DataManagerException(UserException):
    """Error raised by the data manager."""
    _PREFIX = "DATA MANAGER ERROR"

class ModelManagerException(UserException):
    """Error raised by the model manager."""
    _PREFIX = "MODEL MANAGER ERROR"

class JobManagerException(UserException):
    """Error raised by the job manager."""
    _PREFIX = "JOB MANAGER ERROR"

class PipelineManagerException(UserException):
    """Error raised by the pipeline manager."""
    _PREFIX = "PIPELINE MANAGER ERROR"

class EnvironmentException(UserException):
    """Error caused by the runtime environment."""
    _PREFIX = "ENVIRONMENT ERROR"

class IoManagerException(FlowManagerException, DataManagerException, ModelManagerException, JobManagerException):
    """I/O error catchable as any of the four manager exception types."""
    _PREFIX = "IO MANAGER ERROR"

class ConfigError(FlowManagerException, DataManagerException, ModelManagerException, JobManagerException):
    """Configuration error catchable as any of the four manager types."""
    _PREFIX = "CONFIG ERROR"

class ConfigKeyError(ConfigError):
    """A required configuration key is missing."""
    _PREFIX = "KEY ERROR"

class ConfigValueError(ConfigError):
    """A configuration value is invalid."""
    _PREFIX = "VALUE ERROR"
|
"""
link: https://leetcode-cn.com/problems/split-array-largest-sum
problem: 将 nums 分割成 m 个连续子串,求所有分割方式中,子串集最大值的最小值
solution: 二分。计算分割状态复杂度很高,可以反过来考虑,假设给定解为 x,可以在 O(n) 的时间复杂度内检查nums是否有该解。
所以可以二分枚举解,时间复杂为 O(log(sum(nums)) * n)
"""
class Solution:
    def splitArray(self, nums: List[int], m: int) -> int:
        """Split nums into m contiguous subarrays minimising the largest
        subarray sum.

        Binary-search the answer: for a candidate cap x, a greedy O(n) pass
        counts how many pieces are forced; feasible iff that count <= m.
        Overall O(n * log(sum(nums))).
        """
        def feasible(cap: int) -> bool:
            # Greedily pack elements until the cap would be exceeded.
            pieces, running = 1, 0
            for value in nums:
                if running + value > cap:
                    pieces += 1
                    running = value
                else:
                    running += value
            return pieces <= m
        lo, hi = max(nums), sum(nums)
        while lo < hi:
            mid = (lo + hi) // 2
            if feasible(mid):
                hi = mid
            else:
                lo = mid + 1
        return lo
# Accumulate ten user-supplied integers and the sum of the even ones.
user_list = []
list_sum = 0
for _ in range(10):
    userInput = input("Enter any 2-digit number: ")
    # Guard clause: reject non-integer input and move on to the next prompt.
    try:
        number = int(userInput)
    except ValueError:
        print("Incorrect value. That's not an int!")
        continue
    user_list.append(number)
    if number % 2 == 0:
        list_sum += number
print("user_list: {}".format(user_list))
print("The sum of the even numbers in user_list is: {}.".format(list_sum))
|
load("@io_bazel_rules_docker//container:pull.bzl", "container_pull")

def containers():
    """Pull the base container images this workspace depends on."""
    # NOTE(review): pinned by tag; consider pinning by digest for
    # reproducible builds.
    container_pull(
        name = "alpine_linux_amd64",
        registry = "index.docker.io",
        repository = "library/alpine",
        tag = "3.14.2",
    )
|
# Django-style choices: (stored value, human-readable label).
# Lifecycle states of a documentation build.
BUILD_STATE = (
    ('triggered', 'Triggered'),
    ('building', 'Building'),
    ('finished', 'Finished'),
)
# Output formats a build can produce.
BUILD_TYPES = (
    ('html', 'HTML'),
    ('pdf', 'PDF'),
    ('epub', 'Epub'),
    ('man', 'Manpage'),
)
|
class Solution:
    def getDecimalValue(self, head: ListNode) -> int:
        """Return the decimal value of the binary number stored in the
        linked list (most-significant bit first)."""
        value, _ = self.getDecimalValueHelper(head)
        return value

    def getDecimalValueHelper(self, head: ListNode):
        """Recurse to the tail; return (value of this sublist, its bit length)."""
        if head is None:
            return (0, 0)
        rest_value, rest_bits = self.getDecimalValueHelper(head.next)
        # This node's bit is weighted by the number of bits after it.
        return (rest_value + head.val * (1 << rest_bits), rest_bits + 1)
|
# Copyright (C) 2009 Duncan McGreggor <duncan@canonical.com>
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
# Copyright (C) 2012 New Dream Network, LLC (DreamHost)
# Licenced under the txaws licence available at /LICENSE in the txaws source.
__all__ = ["REGION_US", "REGION_EU", "EC2_US_EAST", "EC2_US_WEST",
"EC2_ASIA_PACIFIC", "EC2_EU_WEST", "EC2_SOUTH_AMERICA_EAST", "EC2_ALL_REGIONS"]
# These old EC2 variable names are maintained for backwards compatibility.
REGION_US = "US"
REGION_EU = "EU"
EC2_ENDPOINT_US = "https://us-east-1.ec2.amazonaws.com/"
EC2_ENDPOINT_EU = "https://eu-west-1.ec2.amazonaws.com/"
SQS_ENDPOINT_US = "https://sqs.us-east-1.amazonaws.com/"
# These are the new EC2 variables.
EC2_US_EAST = [
{"region": "US East (Northern Virginia) Region",
"endpoint": "https://ec2.us-east-1.amazonaws.com"}]
EC2_US_WEST = [
{"region": "US West (Oregon) Region",
"endpoint": "https://ec2.us-west-2.amazonaws.com"},
{"region": "US West (Northern California) Region",
"endpoint": "https://ec2.us-west-1.amazonaws.com"}]
EC2_US = EC2_US_EAST + EC2_US_WEST
EC2_ASIA_PACIFIC = [
{"region": "Asia Pacific (Singapore) Region",
"endpoint": "https://ec2.ap-southeast-1.amazonaws.com"},
{"region": "Asia Pacific (Tokyo) Region",
"endpoint": "https://ec2.ap-northeast-1.amazonaws.com"}]
EC2_EU_WEST = [
{"region": "EU (Ireland) Region",
"endpoint": "https://ec2.eu-west-1.amazonaws.com"}]
EC2_EU = EC2_EU_WEST
EC2_SOUTH_AMERICA_EAST = [
{"region": "South America (Sao Paulo) Region",
"endpoint": "https://ec2.sa-east-1.amazonaws.com"}]
EC2_SOUTH_AMERICA = EC2_SOUTH_AMERICA_EAST
EC2_ALL_REGIONS = EC2_US + EC2_ASIA_PACIFIC + EC2_EU + EC2_SOUTH_AMERICA
# This old S3 variable is maintained for backwards compatibility.
S3_ENDPOINT = "https://s3.amazonaws.com/"
# These are the new S3 variables.
S3_US_DEFAULT = [
{"region": "US Standard *",
"endpoint": "https://s3.amazonaws.com"}]
S3_US_WEST = [
{"region": "US West (Oregon) Region",
"endpoint": "https://s3-us-west-2.amazonaws.com"},
{"region": "US West (Northern California) Region",
"endpoint": "https://s3-us-west-1.amazonaws.com"}]
S3_ASIA_PACIFIC = [
{"region": "Asia Pacific (Singapore) Region",
"endpoint": "https://s3-ap-southeast-1.amazonaws.com"},
{"region": "Asia Pacific (Tokyo) Region",
"endpoint": "https://s3-ap-northeast-1.amazonaws.com"}]
S3_US = S3_US_DEFAULT + S3_US_WEST
S3_EU_WEST = [
{"region": "EU (Ireland) Region",
"endpoint": "https://s3-eu-west-1.amazonaws.com"}]
S3_EU = S3_EU_WEST
S3_SOUTH_AMERICA_EAST = [
{"region": "South America (Sao Paulo) Region",
"endpoint": "s3-sa-east-1.amazonaws.com"}]
S3_SOUTH_AMERICA = S3_SOUTH_AMERICA_EAST
S3_ALL_REGIONS = S3_US + S3_ASIA_PACIFIC + S3_EU + S3_SOUTH_AMERICA
|
"""
224. Basic Calculator
Example 1:
Input: "1 + 1"
Output: 2
Example 2:
Input: " 2-1 + 2 "
Output: 3
Example 3:
Input: "(1+(4+5+2)-3)+(6+8)"
Output: 23
"""
class Solution:
    def calculate(self, s):
        """Evaluate an expression of non-negative integers, '+', '-', spaces
        and parentheses.

        :type s: str
        :rtype: int

        A stack holds (partial result, sign) pairs across '(' boundaries.
        """
        self.stack = []
        result, sign = 0, 1
        idx, length = 0, len(s)
        while idx < length:
            ch = s[idx]
            if ch in "+-":
                sign = 1 if ch == "+" else -1
            elif ch == "(":
                # Save outer state; start a fresh sub-expression.
                self.stack.append(result)
                self.stack.append(sign)
                sign, result = 1, 0
            elif ch == ")":
                # Apply the sign that preceded '(' and add the outer result.
                result = self.stack.pop() * result
                result += self.stack.pop()
            elif ch.isdigit():
                number = 0
                while idx < length and s[idx].isdigit():
                    number = number * 10 + int(s[idx])
                    idx += 1
                result += sign * number
                # Step back so the shared increment lands on the right char.
                idx -= 1
            idx += 1
        return result
class Solution:
    def calculate(self, s):
        """Evaluate '+'/'-' expressions with parentheses using a stack of
        effective signs: the top of ``signs`` is the sign to apply to the
        next number, accounting for every enclosing '-'/'('.
        """
        total = 0
        signs = [1, 1]
        index, length = 0, len(s)
        while index < length:
            ch = s[index]
            if ch.isdigit():
                begin = index
                while index < length and s[index].isdigit():
                    index += 1
                total += signs.pop() * int(s[begin:index])
                continue
            if ch in '+-(':
                # '-' flips the current effective sign; '+'/'(' copy it.
                signs.append(-signs[-1] if ch == '-' else signs[-1])
            elif ch == ')':
                signs.pop()
            index += 1
        return total
# For each of n test cases, read an integer b and a float p, then print the
# estimate (60 * b) / p with a +/- (60 / p) tolerance band.
# Fix: the original bound its results to names `min` and `max`, shadowing
# the builtins of the same name.
n = int(input())
for _ in range(n):
    b, p = input().split()
    b = int(b)
    p = float(p)
    expected = (60 * b) / p
    tolerance = 60 / p
    lowest = expected - tolerance
    highest = expected + tolerance
    # Same three values, in the same order, as the original output.
    print(lowest, expected, highest)
# Read two integers and print the larger one.
a = int(input('First number'))
b = int(input('Second number'))
# Ties print b, matching the original else-branch.
print(b if a <= b else a)
|
'''
QUESTÃO 1:
Desenvolva um gerador de tabuada, capaz de gerar a tabuada de qualquer número
inteiro entre 1 a 10. O usuário deve informar de qual numero ele deseja ver a tabuada.
A saída deve ser conforme o exemplo abaixo:
Tabuada de 5:
5 X 1 = 5
5 X 2 = 10
5 X 3 = 15
...
5 X 10 = 50
Obs: A entrada só deve aceitar de 1 a 10, se for um numero diferente perguntar
novamente até que seja dentro do intervalo correto.
'''
# Multiplication-table generator: keep asking until the number is in 1..10.
num = int(input("Informe um número inteiro, de 1 a 10: "))
while not (1 <= num <= 10):
    num = int(input("Informe um número inteiro válido, de 1 a 10: "))
print("Tabuada de {}:".format(num))
for multiplier in range(1, 11):
    print("{} X {} = {}".format(num, multiplier, num * multiplier))
|
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
async def test_should_clear_cookies(context, page, server):
    """clearCookies() removes cookies from both the context and the page.

    ``context``/``page``/``server`` are fixtures supplied by the suite's
    conftest — presumably a Playwright BrowserContext/Page and a test HTTP
    server; confirm against the project's conftest.
    """
    await page.goto(server.EMPTY_PAGE)
    await context.addCookies(
        [{"url": server.EMPTY_PAGE, "name": "cookie1", "value": "1"}]
    )
    assert await page.evaluate("document.cookie") == "cookie1=1"
    await context.clearCookies()
    assert await context.cookies() == []
    # Reload so the page re-reads its cookie jar from the (now empty) context.
    await page.reload()
    assert await page.evaluate("document.cookie") == ""
async def test_should_isolate_cookies_when_clearing(context, page, server):
    """Clearing cookies in one browser context must not affect another."""
    another_context = await browser.newContext()
    await context.addCookies(
        [{"url": server.EMPTY_PAGE, "name": "page1cookie", "value": "page1value"}]
    )
    await another_context.addCookies(
        [{"url": server.EMPTY_PAGE, "name": "page2cookie", "value": "page2value"}]
    )
    # Each context sees only its own cookie.
    assert len(await context.cookies()) == 1
    assert len(await another_context.cookies()) == 1
    # Clearing the first context leaves the second untouched, and vice versa.
    await context.clearCookies()
    assert len(await context.cookies()) == 0
    assert len(await another_context.cookies()) == 1
    await another_context.clearCookies()
    assert len(await context.cookies()) == 0
    assert len(await another_context.cookies()) == 0
    await another_context.close()
|
"""
RedPocket Exceptions
"""
class RedPocketException(Exception):
    """Base API Exception."""
    def __init__(self, message: str = ""):
        # Fix: forward to Exception.__init__ so str(exc) and exc.args carry
        # the message; the original skipped it, so str(exc) was always "".
        super().__init__(message)
        self.message = message

class RedPocketAuthError(RedPocketException):
    """Invalid Account Credentials"""

class RedPocketAPIError(RedPocketException):
    """Error returned from API Call"""
    def __init__(self, message: str = "", return_code: int = -1):
        super().__init__(message=message)
        # API-specific numeric status accompanying the message.
        self.return_code = return_code
|
# pylint: disable=C0111
# Test modules re-exported by this package; keep in sync with the test files.
__all__ = ["test_dataset",
           "test_label_smoother",
           "test_noam_optimizer",
           "test_tokenizer",
           "test_transformer",
           "test_transformer_data_batching",
           "test_transformer_dataset",
           "test_transformer_positional_encoder",
           "test_vocabulary",
           "test_word2vec",
           "test_data",
           "test_cnn"]
|
# range(): generates a sequence of numbers.
# range() with 1 argument: yields the numbers from zero up to argument - 1.
for i in range(10):
    print(i)
print('--------------------------------')
# range() with 2 arguments: starts at the first argument (inclusive)
# and stops before the second argument (exclusive).
for j in range(5, 15):
    print(j)
print('--------------------------------')
# range() with 3 arguments:
#   1st: lower bound (inclusive)
#   2nd: upper bound (exclusive)
#   3rd: step (how far the sequence advances each iteration)
for k in range(1, 22, 3):
    print(k)
print('--------------------------------')
# A negative step counts down: 10, 9, ..., 1.
for n in range(10, 0, -1):
    print(n)
# Forward type declarations (no-ops at runtime, kept from the original).
distancia1: float
distancia2: float
distancia3: float
maiorD: float
# Read three distances and report the largest one.
print("Digite as tres distancias: ")
distancia1 = float(input())
distancia2 = float(input())
distancia3 = float(input())
# max() of the three values is equivalent to the original if/elif chain.
maiorD = max(distancia1, distancia2, distancia3)
print(f"MAIOR DISTANCIA = {maiorD:.2f}")
|
#
# PySNMP MIB module ALTIGA-GLOBAL-REG (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ALTIGA-GLOBAL-REG
# Produced by pysmi-0.3.4 at Wed May 1 11:21:16 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): auto-generated by pysmi from the ALTIGA-GLOBAL-REG ASN.1
# source — regenerate rather than hand-edit.
# Pull the SMI/TC building blocks out of the pysnmp MIB builder.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, ModuleIdentity, Bits, NotificationType, ObjectIdentity, TimeTicks, MibIdentifier, iso, Integer32, Counter32, Counter64, Unsigned32, IpAddress, enterprises, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "ModuleIdentity", "Bits", "NotificationType", "ObjectIdentity", "TimeTicks", "MibIdentifier", "iso", "Integer32", "Counter32", "Counter64", "Unsigned32", "IpAddress", "enterprises", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity under the Altiga enterprise arc (1.3.6.1.4.1.3076).
altigaGlobalRegModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 1, 1, 1))
altigaGlobalRegModule.setRevisions(('2005-01-05 00:00', '2003-10-20 00:00', '2003-04-25 00:00', '2002-07-10 00:00',))

if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: altigaGlobalRegModule.setRevisionsDescriptions(('Added the new MIB Modules(65 to 67)', 'Added the new MIB Modules(58 to 64)', 'Added the new MIB Modules(54 to 57)', 'Updated with new header',))
if mibBuilder.loadTexts: altigaGlobalRegModule.setLastUpdated('200501050000Z')
if mibBuilder.loadTexts: altigaGlobalRegModule.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: altigaGlobalRegModule.setContactInfo('Cisco Systems 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-cvpn3000@cisco.com')
if mibBuilder.loadTexts: altigaGlobalRegModule.setDescription('The Altiga Networks central registration module. Acronyms The following acronyms are used in this document: ACE: Access Control Encryption BwMgmt: Bandwidth Management CTCP: Cisco Transmission Control Protocol DHCP: Dynamic Host Configuration Protocol DNS: Domain Name Service FTP: File Transfer Protocol FW: Firewall HTTP: HyperText Transfer Protocol ICMP: Internet Control Message Protocol IKE: Internet Key Exchange IP: Internet Protocol LBSSF: Load Balance Secure Session Failover L2TP: Layer-2 Tunneling Protocol MIB: Management Information Base NAT: Network Address Translation NTP: Network Time Protocol PPP: Point-to-Point Protocol PPTP: Point-to-Point Tunneling Protocol SEP: Scalable Encryption Processor SNMP: Simple Network Management Protocol SSH: Secure Shell Protocol SSL: Secure Sockets Layer UDP: User Datagram Protocol VPN: Virtual Private Network NAC: Network Admission Control ')
# OID registration tree under the Altiga enterprise arc; one MibIdentifier
# per registered sub-module (auto-generated — do not hand-edit).
altigaRoot = MibIdentifier((1, 3, 6, 1, 4, 1, 3076))
altigaReg = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1))
altigaModules = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1))
alGlobalRegModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 1))
alCapModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 2))
alMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 3))
alComplModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 4))
alVersionMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 6))
alAccessMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 7))
alEventMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 8))
alAuthMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 9))
alPptpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 10))
alPppMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 11))
alHttpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 12))
alIpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 13))
alFilterMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 14))
alUserMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 15))
alTelnetMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 16))
alFtpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 17))
alTftpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 18))
alSnmpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 19))
alIpSecMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 20))
alL2tpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 21))
alSessionMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 22))
alDnsMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 23))
alAddressMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 24))
alDhcpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 25))
alWatchdogMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 26))
alHardwareMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 27))
alNatMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 28))
alLan2LanMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 29))
alGeneralMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 30))
alSslMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 31))
alCertMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 32))
alNtpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 33))
alNetworkListMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 34))
alSepMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 35))
alIkeMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 36))
alSyncMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 37))
alT1E1MibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 38))
alMultiLinkMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 39))
alSshMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 40))
alLBSSFMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 41))
alDhcpServerMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 42))
alAutoUpdateMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 43))
alAdminAuthMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 44))
alPPPoEMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 45))
alXmlMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 46))
alCtcpMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 47))
alFwMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 48))
alGroupMatchMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 49))
alACEServerMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 50))
alNatTMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 51))
alBwMgmtMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 52))
alIpSecPreFragMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 53))
alFipsMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 54))
alBackupL2LMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 55))
alNotifyMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 56))
alRebootStatusMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 57))
alAuthorizationModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 58))
alWebPortalMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 59))
alWebEmailMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 60))
alPortForwardMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 61))
alRemoteServerMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 62))
alWebvpnAclMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 63))
alNbnsMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 64))
alSecureDesktopMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 65))
alSslTunnelClientMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 66))
alNacMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 1, 67))
# Sibling arcs for generic objects, products, capabilities, etc.
altigaGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 2))
altigaProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 3))
altigaCaps = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 4))
altigaReqs = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 5))
altigaExpr = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 6))
# Hardware registration arcs.
altigaHw = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2))
altigaVpnHw = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1))
altigaVpnChassis = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1))
altigaVpnIntf = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 2))
altigaVpnEncrypt = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 3))
vpnConcentrator = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1))
vpnRemote = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 2))
vpnClient = MibIdentifier((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 3))
# Hardware revision object identities (auto-generated — do not hand-edit).
vpnConcentratorRev1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1, 1))
if mibBuilder.loadTexts: vpnConcentratorRev1.setStatus('current')
if mibBuilder.loadTexts: vpnConcentratorRev1.setDescription("The first revision of Altiga's VPN Concentrator hardware. 603e PPC processor. C10/15/20/30/50/60.")
vpnConcentratorRev2 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 1, 2))
if mibBuilder.loadTexts: vpnConcentratorRev2.setStatus('current')
if mibBuilder.loadTexts: vpnConcentratorRev2.setDescription("The second revision of Altiga's VPN Concentrator hardware. 740 PPC processor. C10/15/20/30/50/60.")
vpnRemoteRev1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 2, 1))
if mibBuilder.loadTexts: vpnRemoteRev1.setStatus('current')
if mibBuilder.loadTexts: vpnRemoteRev1.setDescription("The first revision of Altiga's VPN Concentrator (Remote) hardware. 8240 PPC processor.")
vpnClientRev1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 3076, 1, 2, 1, 1, 3, 1))
if mibBuilder.loadTexts: vpnClientRev1.setStatus('current')
if mibBuilder.loadTexts: vpnClientRev1.setDescription("The first revision of Altiga's VPN Hardware Client hardware. 8260 PPC processor.")
# Export every registered symbol so other MIB modules can import them
# (auto-generated — do not hand-edit).
mibBuilder.exportSymbols("ALTIGA-GLOBAL-REG", PYSNMP_MODULE_ID=altigaGlobalRegModule, alNatTMibModule=alNatTMibModule, alWebEmailMibModule=alWebEmailMibModule, alEventMibModule=alEventMibModule, alPptpMibModule=alPptpMibModule, alAccessMibModule=alAccessMibModule, alDhcpMibModule=alDhcpMibModule, alIkeMibModule=alIkeMibModule, alHttpMibModule=alHttpMibModule, alSepMibModule=alSepMibModule, alMibModule=alMibModule, altigaVpnHw=altigaVpnHw, altigaExpr=altigaExpr, alHardwareMibModule=alHardwareMibModule, altigaGeneric=altigaGeneric, alRebootStatusMibModule=alRebootStatusMibModule, alSslMibModule=alSslMibModule, alVersionMibModule=alVersionMibModule, altigaVpnChassis=altigaVpnChassis, alSyncMibModule=alSyncMibModule, altigaHw=altigaHw, alPppMibModule=alPppMibModule, vpnRemote=vpnRemote, alGroupMatchMibModule=alGroupMatchMibModule, alNotifyMibModule=alNotifyMibModule, alCapModule=alCapModule, altigaReg=altigaReg, altigaRoot=altigaRoot, altigaReqs=altigaReqs, vpnClient=vpnClient, alIpSecPreFragMibModule=alIpSecPreFragMibModule, alL2tpMibModule=alL2tpMibModule, alAutoUpdateMibModule=alAutoUpdateMibModule, alSshMibModule=alSshMibModule, alSslTunnelClientMibModule=alSslTunnelClientMibModule, alAddressMibModule=alAddressMibModule, alLan2LanMibModule=alLan2LanMibModule, alSecureDesktopMibModule=alSecureDesktopMibModule, alDhcpServerMibModule=alDhcpServerMibModule, altigaVpnEncrypt=altigaVpnEncrypt, alPortForwardMibModule=alPortForwardMibModule, alT1E1MibModule=alT1E1MibModule, alAuthorizationModule=alAuthorizationModule, vpnRemoteRev1=vpnRemoteRev1, vpnConcentratorRev1=vpnConcentratorRev1, alFwMibModule=alFwMibModule, altigaProducts=altigaProducts, alPPPoEMibModule=alPPPoEMibModule, alFilterMibModule=alFilterMibModule, alCertMibModule=alCertMibModule, alTelnetMibModule=alTelnetMibModule, alGlobalRegModule=alGlobalRegModule, alWebPortalMibModule=alWebPortalMibModule, alNacMibModule=alNacMibModule, alCtcpMibModule=alCtcpMibModule, vpnClientRev1=vpnClientRev1, 
vpnConcentrator=vpnConcentrator, alGeneralMibModule=alGeneralMibModule, alAuthMibModule=alAuthMibModule, alACEServerMibModule=alACEServerMibModule, alNetworkListMibModule=alNetworkListMibModule, altigaCaps=altigaCaps, alWebvpnAclMibModule=alWebvpnAclMibModule, altigaVpnIntf=altigaVpnIntf, alSessionMibModule=alSessionMibModule, alIpSecMibModule=alIpSecMibModule, alFipsMibModule=alFipsMibModule, alTftpMibModule=alTftpMibModule, vpnConcentratorRev2=vpnConcentratorRev2, alSnmpMibModule=alSnmpMibModule, alFtpMibModule=alFtpMibModule, alBackupL2LMibModule=alBackupL2LMibModule, alAdminAuthMibModule=alAdminAuthMibModule, alXmlMibModule=alXmlMibModule, alLBSSFMibModule=alLBSSFMibModule, alWatchdogMibModule=alWatchdogMibModule, alDnsMibModule=alDnsMibModule, alBwMgmtMibModule=alBwMgmtMibModule, altigaModules=altigaModules, alMultiLinkMibModule=alMultiLinkMibModule, alNtpMibModule=alNtpMibModule, alNbnsMibModule=alNbnsMibModule, alRemoteServerMibModule=alRemoteServerMibModule, alNatMibModule=alNatMibModule, altigaGlobalRegModule=altigaGlobalRegModule, alComplModule=alComplModule, alIpMibModule=alIpMibModule, alUserMibModule=alUserMibModule)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is part of the nmeta2 suite
.
It defines a custom traffic classifier
.
To create your own custom classifier, copy this example to a new
file in the same directory and update the code as required.
Call it from nmeta by specifying the name of the file (without the
.py) in main_policy.yaml
.
Classifiers are called per packet, so performance is important
.
"""
class Classifier(object):
    """
    A custom traffic classifier module loaded by nmeta2
    """
    def __init__(self, logger):
        """
        Keep a reference to the logger supplied by nmeta2
        """
        self.logger = logger

    def classifier(self, flow):
        """
        Simple statistical classifier that separates 'bandwidth hog'
        flows from more interactive ones, so QoS can treat them
        differently.
        .
        Receives a Flow object holding the current flow context and
        returns a dict with a 'qos_treatment' key once enough packets
        have been seen (empty dict otherwise).
        .
        Only works on TCP.
        """
        #*** Classify once a flow has accumulated this many packets:
        packet_limit = 7
        #*** Decision thresholds:
        size_limit = 1200
        ratio_limit = 0.3
        #*** Classification result (stays empty until finalised):
        verdict = {}
        if flow.packet_count >= packet_limit and not flow.finalised:
            #*** Enough packets accumulated; classify exactly once:
            self.logger.debug("Reached max packets count, finalising")
            flow.finalised = 1
            #*** Gather the per-flow statistics we decide on:
            biggest_packet = flow.max_packet_size()
            slowest_gap = flow.max_interpacket_interval()
            fastest_gap = flow.min_interpacket_interval()
            if slowest_gap and fastest_gap:
                #*** Ratio of smallest to largest directional
                #*** interpacket delta; a ratio cancels out base RTT:
                gap_ratio = float(fastest_gap) / \
                                         float(slowest_gap)
            else:
                #*** Guard against dividing by zero:
                gap_ratio = 0
            self.logger.debug("max_packet_size=%s interpacket_ratio=%s",
                                    biggest_packet, gap_ratio)
            #*** Decide the QoS treatment from the statistics:
            if (biggest_packet > size_limit and
                        gap_ratio < ratio_limit):
                #*** Large packets sent back-to-back: bandwidth hog:
                verdict['qos_treatment'] = 'constrained_bw'
            else:
                #*** Looks interactive; leave at default priority:
                verdict['qos_treatment'] = 'default_priority'
            self.logger.debug("Decided on results %s", verdict)
        return verdict
|
# Counting exercise: read a block of text from the user and report how many
# times each letter of the alphabet appears, as a per-letter tally
# (case-insensitive; '.', ',' and spaces are stripped before counting).
print('Please input the content:')
text = input('Enter content:')
cleaned = text.lower().replace('.', '').replace(',', '').replace(' ', '')
# One zeroed counter per lowercase letter a..z, in alphabetical order.
counts = {chr(code): 0 for code in range(ord('a'), ord('z') + 1)}
for ch in cleaned:
    if ch in counts:
        counts[ch] += 1
for letter, amount in counts.items():
    print('{0} 出现次数:{1:^2} 次'.format(letter, amount))
|
# Since any modulus should lay between 0 and 101, we can record all
# possible modulus at any given point in the calculation. The possible
# set of values of next step can be calculated using the previous set.
# Since there's guaranteed to be an answer, we will eventually make
# modulus 0 possible. We then backtrack to fill in all these operators.
N = int(input())                     # number of operands
A = list(map(int, input().split()))  # the operands themselves
# Operators to print between operands; '*' is a safe default because once
# the running value is 0 (mod 101), multiplying keeps it 0.
op = ['*'] * (N - 1)
# possible[i][x] is falsy when value x (mod 101) is unreachable using the
# first i+1 operands; otherwise it holds (operator, previous value) so the
# chosen operators can be reconstructed backwards.
possible = [[None] * 101 for i in range(N)]
possible[0][A[0]] = True
end = N - 1
for i in range(N - 1):
    if possible[i][0]:
        # 0 is already reachable here; the remaining default '*' operators
        # keep the value at 0, so stop the forward pass early.
        end = i
        break
    for x in range(101):
        if possible[i][x]:
            # Extend every reachable value by the next operand under each
            # operator, recording how we got there for backtracking.
            possible[i + 1][(x + A[i + 1]) % 101] = ('+', x)
            possible[i + 1][(x + 101 - A[i + 1]) % 101] = ('-', x)
            possible[i + 1][(x * A[i + 1]) % 101] = ('*', x)
# Backtrack from value 0 at step `end`, filling in the operators used.
x = 0
for i in range(end, 0, -1):
    op[i - 1] = possible[i][x][0]
    x = possible[i][x][1]
# Interleave operands and operators into the final expression string.
print(''.join(str(x) for t in zip(A, op) for x in t) + str(A[-1]))
|
# File: koodous_consts.py
#
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
# Fallback strings used when the Phantom platform returns no error details.
PHANTOM_ERR_CODE_UNAVAILABLE = "Error code unavailable"
PHANTOM_ERR_MSG_UNAVAILABLE = "Unknown error occurred. Please check the asset configuration and|or action parameters."
# Vault lookup failure messages.
VAULT_ERR_INVALID_VAULT_ID = "Invalid Vault ID"
VAULT_ERR_FILE_NOT_FOUND = "Vault file could not be found with supplied Vault ID"
# Koodous REST API base endpoint.
KOODOUS_BASE_URL = 'https://api.koodous.com'
# Action status and parameter-validation messages.
KOODOUS_SUCC_TEST_CONNECTIVITY = "Test connectivity passed"
KOODOUS_ERR_TEST_CONNECTIVITY = "Test Connectivity Failed"
KOODOUS_ERR_INVALID_ATTEMPT_PARAM = "Attempts must be integer number. Error: {0}"
KOODOUS_ERR_GET_REPORT_PARAMS = "Must specify either 'sha256' or 'vault_id'"
KOODOUS_ERR_UPLOADING_URL = "Error retrieving upload URL"
|
"""
Conditional expression Evaluated to one of two expressions depending on a boolean.
e.g: result = true_value if condition else false_value
"""
def sequence_class(immutable):
return tuple if immutable else list
seq = sequence_class(immutable=True)
t = seq("OrHasson")
print(t)
print(type(t))
|
def print_two(*args):
    """Unpack exactly two positional arguments and print them."""
    first, second = args
    print(f"arg1 : {first},arg2 : {second}")


def print_two_again(arg1, arg2):
    """Print two named arguments (no unpacking needed)."""
    print("arg1:{0},arg2:{1}".format(arg1, arg2))


def print_one(arg1):
    """Print a single argument."""
    print("arg1:{0}".format(arg1))


def print_none():
    """Print a fixed message; takes no arguments."""
    print("I got nothing")


print_two("Zed", "Shaw")
print_two_again("Zed", "Shaw")
print_one("First!")
print_none()
|
'''
Provide transmission-daemon RPC credentials
'''
# Fill these in before use; the empty strings are placeholders.
rpc_ip = ''        # address the transmission-daemon RPC interface listens on
rpc_port = ''      # RPC port number
rpc_username = ''  # RPC login user
rpc_password = ''  # RPC login password
|
''' Kattis - secretchamber
Without much execution time pressure along with nodes being characters, we opt to use python with a
dict of dicts as our adjacency matrix. This is basically just floyd warshall transitive closure.
Time: O(V^3), Mem: O(V^2)
'''
# n = number of letter-translation rules, q = number of word queries.
n, q = input().split()
n = int(n)
q = int(q)
# Read the directed translation edges and collect every letter seen.
edges = []
node_names = set()
for i in range(n):
    u, v = input().split()
    edges.append((u,v))
    node_names.add(u)
    node_names.add(v)
# Adjacency matrix over the observed letters; 0 = no translation.
adjmat = {}
for i in node_names:
    adjmat[i] = {}
    for j in node_names:
        adjmat[i][j] = 0
for u, v in edges:
    adjmat[u][v] = 1
# Floyd-Warshall transitive closure: i reaches j if i reaches k and k reaches j.
for k in node_names:
    for i in node_names:
        for j in node_names:
            adjmat[i][j] |= adjmat[i][k] & adjmat[k][j]
# Answer each query: a translates to b iff lengths match and every
# differing character pair is connected in the closure.
for _ in range(q):
    a, b = input().split()
    if len(a) != len(b):
        print("no")
        continue
    no = 0
    for i in range(len(a)):
        # Identical characters need no translation (even unseen letters).
        if (a[i] == b[i]):
            continue
        # A letter absent from all rules cannot be translated at all.
        if not(a[i] in node_names and b[i] in node_names):
            no = 1
            break
        if (adjmat[a[i]][b[i]] == 0):
            no = 1
            break
    if no:
        print("no")
    else:
        print("yes")
|
class Person:
    """Minimal record holding a person's name and age."""

    def __init__(self, name, age):
        """Remember the given name and age on the instance."""
        self.name = name
        self.age = age


maria = Person("Maria Popova", 25)
# Demonstrate the reflection built-ins: hasattr, getattr and setattr
# work on attributes by name, including ones added after construction.
print(hasattr(maria, "name"))
print(hasattr(maria, "surname"))
print(getattr(maria, "age"))
setattr(maria, "surname", "Popova")
print(getattr(maria, "surname"))
|
def spiral(steps):
    """Yield the (x, y) grid coordinates of squares 2..steps of a square
    spiral that starts at the origin and first moves right.

    Yields steps - 1 coordinate pairs; nothing is yielded for steps <= 1
    because square 1 sits at the origin itself.
    """
    dx = 1
    dy = 0
    dd = 1   # length of the current straight run before turning
    x = 0
    y = 0
    d = 0    # steps taken so far in the current run
    for _ in range(steps - 1):
        x += dx
        y += dy
        d += 1
        if d == dd:
            # End of a run: turn 90 degrees counter-clockwise.
            d = 0
            tmp = dx
            dx = -dy
            dy = tmp
            if dy == 0:
                # The run length grows each time we start a horizontal leg.
                dd += 1
        yield x, y


def aoc(data):
    """Return the Manhattan distance from square *data* of the spiral
    back to the origin (Advent of Code 2017, day 3, part 1).

    :param data: the square number, as a string or anything int() accepts.
    :return: abs(x) + abs(y) of that square's position.
    """
    steps = int(data)
    if steps <= 1:
        # Square 1 is the origin. spiral() yields nothing for it, so the
        # starred unpack below would raise ValueError; distance is 0.
        return 0
    *_, (x, y) = spiral(steps)
    return abs(x) + abs(y)
|
# Read whitespace-separated integers from 'do-plecaka.txt', one row per
# input line, then echo each parsed row.
with open('do-plecaka.txt', 'r') as f:
    dane = []
    # getting and cleaning data: parse each line into a list of ints
    for line in f:
        dane.append([int(x) for x in line.split()])
    # printing every parsed row
    for x in dane:
        print(x)
# Exercise: ask for an employee's salary and compute the raise.
# Salaries above R$1,250.00 get a 10% raise; salaries less than or
# equal to R$1,250.00 get a 15% raise.
s=float(input("\nQual o salario que voce recebe? R$ "))
a=(15*s)/100  # amount of a 15% raise
b=(10*s)/100  # amount of a 10% raise
if s<=1250:
    print("\nVoce tera de aumento 15%, acrescentando R$ {:.2f} em seu salario. O valor a ser pago sera de R$ {:.2f}.".format(a,s+a))
if s>1250:
    print("\nVoce tera de aumento 10%, acrescentando R$ {:.2f} em seu salario. O valor a ser pago sera de R$ {:.2f}.".format(b,s+b))
# Second, shorter version of the same exercise: ask again and print
# only the final salary after the raise.
s=float(input("\nQual o salario que voce recebe? R$ "))
if s<=1250:
    novo= s + (s*15/100)
else:
    novo= s + (s*10/100)
print("\nO valor a ser pago sera de R$ {:.2f}.".format(novo))
# Semi-automatic tests
def test_function(list_in):
...
# вход лист с числами и строкама
# выход лист с числами
...
list_temp = []
# i = 0
# while (type(list_in[i]) == int):
for i in range(len(list_in)):
if type(list_in[i])== int:
list_temp.append(list_in[i])
elif type(list_in[i]) == str:
if list_in[i].isdigit(): list_temp.append(int(list_in[i]))
# i += 1
return list_temp
#
# list_temp = [1,2,3,'abc']
#
# print(test_function(list_temp))
# Now write the semi-automatic test function itself
def function_test():
    """Run three hand-written checks against test_function and report
    'OK'/'FAILED' for each one on stdout."""
    cases = [
        ([1, 2, 3, 'abc'], [1, 2, 3]),
        ([1, 2, 3, 'abc', 4], [1, 2, 3, 4]),
        ([1, 2, 3, '5', 'abc', 4], [1, 2, 3, 5, 4]),
    ]
    for number, (given, expected) in enumerate(cases, start=1):
        if test_function(given) == expected:
            print('TEST {0} IS OK'.format(number))
        else:
            print('TEST {0} IS FAILED'.format(number))


function_test()
sample = [1, 2, 3, '5', 'abc', 4]
result = test_function(sample)
print(result)
|
# Large T-shirt exercise: print a shirt's size and its printed slogan.
def make_shirt(size, font='I love; Python'):
    """Print a shirt's size and slogan; the slogan has a default value."""
    for label, value in (('size', size), ('font', font)):
        print(label + ' of shirt is ' + value)


make_shirt('L')
make_shirt('M')
make_shirt('S', 'hello')
|
# measurements in inches
ball_radius = 3
goal_top = 50
goal_width = 58
goal_half = 29       # half of goal_width
angle_threshold = .1 # shared angle tolerance used by both arm classes below
class L_params(object):
    # Parameters for the left arm.
    horizontal_offset = 14.5
    vertical_offset = 18.5
    min_y = ball_radius - vertical_offset+3 # in robot coords
    max_y = goal_top - vertical_offset
    min_x = -14.5
    max_x = 14.0
    # l1/l2 presumably the two arm link lengths for IK — TODO confirm.
    l1 = 11
    l2 = 11
    shoulder_offset = -60
    elbow_offset = 0
    # RHS resolves to the module-level angle_threshold defined above;
    # the class-scope name does not exist until this assignment runs.
    angle_threshold = angle_threshold
class R_params(object):
    # Parameters for the right arm; mirrors L_params with its own offsets.
    horizontal_offset = 43.5
    vertical_offset = 18.5
    min_y = ball_radius - vertical_offset+2 # in robot coords
    max_y = goal_top - vertical_offset
    min_x = -14.0
    max_x = 14.5
    # l1/l2 presumably the two arm link lengths for IK — TODO confirm.
    l1 = 11
    l2 = 11
    shoulder_offset = 0
    elbow_offset = 0
    # Same module-level tolerance as L_params.
    angle_threshold = angle_threshold
left_arm = L_params()
right_arm = R_params()
# Serial port names per host OS.
windows_port = "COM8"
unix_port = "/dev/tty.usbserial-A4012B2H"
ubuntu_port = "/dev/ttyUSB0"
num_servos = 4
servo_speed = 500
baudrate = 400000
|
class Solution:
    def numDecodings(self, s: str) -> int:
        """Count the decodings of a digit string where 'A'..'Z' are
        encoded as "1".."26" (LeetCode 91).

        Single left-to-right pass with two rolling counts: after index
        ``idx``, ``cur`` is the number of decodings of s[:idx + 1] and
        ``pre`` is the number of decodings of s[:idx].

        :param s: string of decimal digits.
        :return: number of distinct decodings; 0 when none exist.
        """
        # Empty input, a leading zero, or any "00" pair is undecodable.
        # (The `not s` guard also prevents the IndexError the original
        # code raised on an empty string.)
        if not s or s[0] == '0' or '00' in s:
            return 0
        for idx, digit in enumerate(s):
            if idx == 0:
                pre, cur = 1, 1
            else:
                tmp = cur
                if digit != '0':
                    if s[idx - 1] == '0':
                        # The previous zero was consumed as part of a
                        # two-digit code; this digit can only stand alone.
                        cur = tmp
                        pre = tmp
                    elif 0 < int(s[idx - 1] + digit) < 27:
                        # Both the one-digit and the two-digit readings
                        # are valid, so the counts add.
                        cur = pre + tmp
                        pre = tmp
                    else:
                        # Two-digit reading exceeds 26: one-digit only.
                        cur = tmp
                        pre = tmp
                else:
                    # A zero must pair with a preceding '1' or '2'.
                    if s[idx - 1] > '2':
                        return 0
                    else:
                        cur = pre
                        pre = tmp
        return cur
|
def find_skew_value(text):
    """Compute the running G-C skew of a DNA string.

    Walks *text* once, decrementing a running total on 'C' and
    incrementing it on 'G'; the running total after every character
    is recorded.

    :param text: DNA sequence string.
    :return: tuple of (text, list with one running skew value per char).
    """
    running = 0
    history = []
    for base in text:
        if base == 'C':
            running -= 1
        elif base == 'G':
            running += 1
        history.append(running)
    return text, history
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
def do_something():
    """Print a fixed greeting; implicitly returns None."""
    print("This is a hello from do_something().")


def test1():
    """Show that a function without an explicit return yields None."""
    # A function with no `return` statement returns None, and None is
    # treated as False in an `if` test.
    print(do_something())
# 根据闰年的定义:
# 年份应该是 4 的倍数;
# 年份能被 100 整除但不能被 400 整除的,不是闰年。
# 所以,相当于要在能被 4 整除的年份中,排除那些能被 100 整除却不能被 400 整除的年份。
def is_leap(year):
leap = False
if year % 4 == 0:
leap = True
if year % 100 == 0 and year % 400 != 0:
leap = False
return leap
# cpython/Lib/datetime.py
def _is_leap(year):
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def test2():
    """Cross-check is_leap against _is_leap for years 1..99."""
    print('\ntest2')
    for i in range(1, 100):
        if is_leap(i) != _is_leap(i):
            print(i, is_leap(i), _is_leap(i))
            break
    # for/else: the else branch runs only when the loop finished
    # without hitting `break`, i.e. no mismatch was found.
    else:
        print('is_leap is the same with _is_leap')
def fib_between(start, end):
    """Return the Fibonacci numbers f with start <= f < end, ascending."""
    selected = []
    current, following = 0, 1
    while current < end:
        if current >= start:
            selected.append(current)
        current, following = following, current + following
    return selected
def test3():
    """Print the Fibonacci numbers in the range [100, 10000)."""
    print('\ntest3')
    print(fib_between(100, 10000))
def be_careful(a, b):
    """Demonstrate call semantics: rebinding the parameter *a* is local
    to this function, while mutating the list *b* is visible to the
    caller."""
    a = 2
    b[0] = "What?"
    print(f'be_careful a = {a}')
    print(f'be_careful b = {b}')
def test4():
    """Call be_careful and show which argument the caller sees changed."""
    print('\ntest4 ')
    a = 1
    b = [1, 2, 3]
    be_careful(a, b)
    # a is unchanged: the rebinding inside be_careful was local to the
    # callee. b[0] was mutated in place, so that change is visible here.
    print('a =', a)
    print('b =', b)
def main():
    """Run every demo test in order."""
    test1()
    test2()
    test3()
    test4()
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 22 10:46:35 2019
@author: SPAD-FCS
"""
class correlations:
    """Bare attribute container for autocorrelation results."""
    pass


def selectG(G, selection='average'):
    """
    Return a selection of the autocorrelations
    ========== ===============================================================
    Input      Meaning
    ---------- ---------------------------------------------------------------
    G          Object with all autocorrelations, i.e. output of e.g.
               FCS2CorrSplit
    selection  Default value 'average': keep only the autocorrelation
               attributes whose name contains "average" (i.e. the curves
               averaged over multiple time traces); any other value keeps
               every attribute.
    ========== ===============================================================
    ========== ===============================================================
    Output     Meaning
    ---------- ---------------------------------------------------------------
    G          New correlations object holding the selected attributes plus
               the pixel dwell time; everything else is left behind.
    ========== ===============================================================
    """
    # Names of all attributes currently stored on G.
    attribute_names = list(G.__dict__.keys())
    if selection == 'average':
        # Keep only the attributes averaged over multiple time traces.
        attribute_names = [name for name in attribute_names if "average" in name]
    # Copy the selected attributes onto a fresh container.
    Gout = correlations()
    for name in attribute_names:
        setattr(Gout, name, getattr(G, name))
    # Always carry the pixel dwell time over, even when filtering.
    Gout.dwellTime = G.dwellTime
    return Gout
|
#!/usr/bin/env python3
# Prints rows of consecutive integers: row `etape` contains 3 + etape
# values, then `etape` numbers are skipped before the next row starts.
# Stops once 500000 values have been printed, emitting the final value
# on its own line and exiting.
etape = 1     # current row number
compteur = 0  # how many numbers have been printed so far
n = 0         # current number
while True:
    # Row header: row index, current n, and a quadratic expression of
    # the row number (presumably the row's expected start value — TODO
    # confirm against the exercise statement).
    print(f"{etape:4d} : {n:5d} { -2+(etape)*(etape+2):6d} ; ", end="")
    for _ in range(3 + etape):
        n += 1
        print(n, end=" ")
        compteur += 1
        if compteur == 500000:
            print(n)
            exit()
    print()
    # Skip `etape` numbers before starting the next (longer) row.
    n += etape
    etape += 1
|
class EPIconst:
class FeatureName:
pseknc = "pseknc"
cksnap = "cksnap"
dpcp = "dpcp"
eiip = "eiip"
kmer = "kmer"
tpcp = "tpcp"
all = sorted([pseknc, cksnap, dpcp, eiip, kmer, tpcp])
class CellName:
K562 = "K562"
NHEK = "NHEK"
IMR90 = "IMR90"
HeLa_S3 = "HeLa-S3"
HUVEC = "HUVEC"
GM12878 = "GM12878"
all = sorted([GM12878, HeLa_S3, HUVEC, IMR90, K562, NHEK])
class MethodName:
ensemble = "meta"
xgboost = "xgboost"
svm = "svm"
deepforest = "deepforest"
lightgbm = "lightgbm"
rf = "rf"
all = sorted([lightgbm, rf, xgboost, svm, deepforest])
class ModelInitParams:
logistic = {"n_jobs": 13, }
mlp = {}
deepforest = {"n_jobs": 13, "use_predictor": False, "random_state": 1, "predictor": 'forest', "verbose": 0}
lightgbm = {"n_jobs": 13, 'max_depth': -1, 'num_leaves': 31,
'min_child_samples': 20,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0,
'reg_alpha': 0.0, 'reg_lambda': 0.0,
'min_split_gain': 0.0,
'objective': None,
'n_estimators': 100, 'learning_rate': 0.1,
'device': 'gpu', 'boosting_type': 'gbdt',
'class_weight': None, 'importance_type': 'split',
'min_child_weight': 0.001, 'random_state': None,
'subsample_for_bin': 200000, 'silent': True}
rf = {"n_jobs": 13, 'n_estimators': 100, "max_depth": None, 'min_samples_split': 2, "min_samples_leaf": 1,
'max_features': 'auto'}
svm = {"probability": True}
xgboost = {'learning_rate': 0.1, 'n_estimators': 500, 'max_depth': 5, 'min_child_weight': 1, 'seed': 0,
'subsample': 0.8, 'colsample_bytree': 0.8, 'gamma': 0, 'reg_alpha': 0, 'reg_lambda': 1,
'use_label_encoder': False, 'eval_metric': 'logloss', 'tree_method': 'gpu_hist'}
class BaseModelParams:
GM12878_cksnap_deepforest = {"max_layers": 20, "n_estimators": 5, "n_trees": 250}
GM12878_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 125, 'min_child_samples': 90,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1,
'n_estimators': 250}
GM12878_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
GM12878_cksnap_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
'learning_rate': 0.1}
GM12878_cksnap_rf = {'n_estimators': 340, 'max_depth': 114, 'min_samples_leaf': 3, 'min_samples_split': 2,
'max_features': 'sqrt'}
"----------------------------------------------"
GM12878_dpcp_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 300}
GM12878_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 331, 'max_bin': 135, 'min_child_samples': 190,
'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.9,
'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
GM12878_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'}
GM12878_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3,
'learning_rate': 0.1}
GM12878_dpcp_rf = {'n_estimators': 150, 'max_depth': 88, 'min_samples_leaf': 1, 'min_samples_split': 3,
'max_features': "sqrt"}
"----------------------------------------------"
GM12878_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 300}
GM12878_eiip_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
GM12878_eiip_rf = {'n_estimators': 280, 'max_depth': None, 'min_samples_leaf': 1, 'min_samples_split': 7,
'max_features': "sqrt"}
GM12878_eiip_svm = {'C': 1.0, 'gamma': 2048.0, 'kernel': 'rbf'}
GM12878_eiip_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
GM12878_kmer_deepforest = {'max_layers': 25, 'n_estimators': 5,
'n_trees': 400}
GM12878_kmer_lightgbm = {'max_depth': 12, 'num_leaves': 291, 'max_bin': 115, 'min_child_samples': 40,
'colsample_bytree': 1.0, 'subsample': 0.8, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
GM12878_kmer_rf = {'n_estimators': 170, 'max_depth': 41, 'min_samples_leaf': 3, 'min_samples_split': 2,
'max_features': 'sqrt'}
GM12878_kmer_svm = {'C': 2.0, 'gamma': 128.0,
'kernel': 'rbf'}
GM12878_kmer_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
GM12878_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400}
GM12878_pseknc_lightgbm = {'max_depth': 11, 'num_leaves': 291, 'max_bin': 185, 'min_child_samples': 80,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
GM12878_pseknc_rf = {'n_estimators': 250, 'max_depth': 41, 'min_samples_leaf': 2, 'min_samples_split': 6,
'max_features': 'log2'}
GM12878_pseknc_svm = {'C': 0.5, 'gamma': 1024.0, 'kernel': 'rbf'}
GM12878_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 1, 'gamma': 0.1,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01,
'learning_rate': 0.1}
"----------------------------------------------"
GM12878_tpcp_deepforest = {'max_layers': 15, 'n_estimators': 2,
'n_trees': 100}
GM12878_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 175, 'min_child_samples': 80,
'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 20, 'reg_alpha': 0.0,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
GM12878_tpcp_rf = {'n_estimators': 250, 'max_depth': 89, 'min_samples_leaf': 2, 'min_samples_split': 9,
'max_features': "log2"}
GM12878_tpcp_svm = {'C': 16.0, 'gamma': 64.0,
'kernel': 'rbf'}
GM12878_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 6, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
HeLa_S3_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 300}
HeLa_S3_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 105, 'min_child_samples': 80,
'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.1,
'reg_lambda': 0.1, 'min_split_gain': 0.4, 'learning_rate': 0.1, 'n_estimators': 150}
HeLa_S3_cksnap_svm = {'C': 128.0, 'gamma': 128.0,
'kernel': 'rbf'}
HeLa_S3_cksnap_rf = {'n_estimators': 340, 'max_depth': 44, 'min_samples_leaf': 1, 'min_samples_split': 5,
'max_features': 'sqrt'}
HeLa_S3_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 4, 'gamma': 0,
'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 3, 'reg_lambda': 0.5,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 400}
HeLa_S3_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 221, 'max_bin': 155, 'min_child_samples': 180,
'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 200}
HeLa_S3_dpcp_rf = {'n_estimators': 70, 'max_depth': 32, 'min_samples_leaf': 1, 'min_samples_split': 8,
'max_features': 'sqrt'}
HeLa_S3_dpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'}
HeLa_S3_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5,
'n_trees': 200}
HeLa_S3_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 5, 'min_child_samples': 110,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100}
HeLa_S3_eiip_rf = {'n_estimators': 180, 'max_depth': 138, 'min_samples_leaf': 6, 'min_samples_split': 10,
'max_features': 'sqrt'}
HeLa_S3_eiip_svm = {'C': 2.0, 'gamma': 1024.0,
'kernel': 'rbf'}
HeLa_S3_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 3, 'gamma': 0,
'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_kmer_deepforest = {'max_layers': 10, 'n_estimators': 5,
'n_trees': 200}
HeLa_S3_kmer_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 165, 'min_child_samples': 90,
'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 0.001,
'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125}
HeLa_S3_kmer_rf = {'n_estimators': 240, 'max_depth': 77, 'min_samples_leaf': 2, 'min_samples_split': 2,
'max_features': 'sqrt'}
HeLa_S3_kmer_svm = {'C': 8.0, 'gamma': 128.0,
'kernel': 'rbf'}
HeLa_S3_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 200}
HeLa_S3_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 261, 'max_bin': 25, 'min_child_samples': 90,
'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
HeLa_S3_pseknc_rf = {'n_estimators': 330, 'max_depth': 118, 'min_samples_leaf': 1, 'min_samples_split': 8,
'max_features': 'log2'}
HeLa_S3_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'}
HeLa_S3_pseknc_xgboost = {'n_estimators': 750, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.1, 'reg_lambda': 2,
'learning_rate': 0.1}
"----------------------------------------------"
HeLa_S3_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 250}
HeLa_S3_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 341, 'max_bin': 45, 'min_child_samples': 10,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0,
'reg_lambda': 1e-05, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 250}
HeLa_S3_tpcp_rf = {'n_estimators': 320, 'max_depth': 99, 'min_samples_leaf': 1, 'min_samples_split': 10,
'max_features': 'sqrt'}
HeLa_S3_tpcp_svm = {'C': 4.0, 'gamma': 32.0,
'kernel': 'rbf'}
HeLa_S3_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 4, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
HUVEC_cksnap_deepforest = {"max_layers": 10, "n_estimators": 2,
"n_trees": 200}
HUVEC_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 271, 'max_bin': 45, 'min_child_samples': 10,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 0.5,
'reg_lambda': 0.5, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175}
HUVEC_cksnap_rf = {'n_estimators': 270, 'max_depth': 38, 'min_samples_leaf': 2, 'min_samples_split': 2,
'max_features': "auto"}
HUVEC_cksnap_svm = {'C': 8.0, 'gamma': 64.0, 'kernel': 'rbf'}
HUVEC_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2, "n_trees": 400}
HUVEC_dpcp_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 245, 'min_child_samples': 30,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.5,
'reg_lambda': 0.3, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
HUVEC_dpcp_rf = {'n_estimators': 300, 'max_depth': 61, 'min_samples_leaf': 2, 'min_samples_split': 3,
'max_features': 'log2'}
HUVEC_dpcp_svm = {'C': 4.0, 'gamma': 16.0, 'kernel': 'rbf'}
HUVEC_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 3, 'reg_lambda': 3,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2,
'n_trees': 300}
HUVEC_eiip_lightgbm = {'max_depth': -1, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
HUVEC_eiip_rf = {'n_estimators': 310, 'max_depth': 28, 'min_samples_leaf': 1, 'min_samples_split': 2,
'max_features': 'sqrt'}
HUVEC_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'}
HUVEC_eiip_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2,
'n_trees': 300}
HUVEC_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 5, 'min_child_samples': 170,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.5,
'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 125}
HUVEC_kmer_rf = {'n_estimators': 230, 'max_depth': 59, 'min_samples_leaf': 1, 'min_samples_split': 4,
'max_features': 'auto'}
HUVEC_kmer_svm = {'C': 4.0, 'gamma': 64.0,
'kernel': 'rbf'}
HUVEC_kmer_xgboost = {'n_estimators': 600, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 400}
HUVEC_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 115, 'min_child_samples': 190,
'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 175}
HUVEC_pseknc_rf = {'n_estimators': 310, 'max_depth': 42, 'min_samples_leaf': 2, 'min_samples_split': 7,
'max_features': 'sqrt'}
HUVEC_pseknc_svm = {'C': 1.0, 'gamma': 256.0, 'kernel': 'rbf'}
HUVEC_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0,
'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"----------------------------------------------"
HUVEC_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 150}
HUVEC_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 251, 'max_bin': 35, 'min_child_samples': 190,
'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
HUVEC_tpcp_rf = {'n_estimators': 330, 'max_depth': 121, 'min_samples_leaf': 2, 'min_samples_split': 5,
'max_features': "sqrt"}
HUVEC_tpcp_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'}
HUVEC_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0,
'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
'learning_rate': 0.1}
"=============================================="
IMR90_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 250}
IMR90_cksnap_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 95, 'min_child_samples': 60,
'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
'reg_lambda': 1e-05, 'min_split_gain': 0.3, 'learning_rate': 0.1, 'n_estimators': 225}
# Tuned hyperparameter sets for the base classifiers, one constant per
# (cell line, feature encoding, classifier) triple.
# The bare "----"/"====" strings below are no-op statements used as visual
# separators between feature encodings and cell lines.
IMR90_cksnap_rf = {'n_estimators': 280, 'max_depth': 124, 'min_samples_leaf': 1, 'min_samples_split': 2,
                   'max_features': 'auto'}
IMR90_cksnap_svm = {'C': 16.0, 'gamma': 16.0, 'kernel': 'rbf'}
IMR90_cksnap_xgboost = {'n_estimators': 900, 'max_depth': 10, 'min_child_weight': 2, 'gamma': 0.4,
                        'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0.1,
                        'learning_rate': 0.1}
"----------------------------------------------"
IMR90_dpcp_deepforest = {'max_layers': 10, 'n_estimators': 2,
                         'n_trees': 200}
IMR90_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 281, 'max_bin': 115, 'min_child_samples': 20,
                       'colsample_bytree': 0.7, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0,
                       'reg_lambda': 0.0, 'min_split_gain': 0.5, 'learning_rate': 0.1, 'n_estimators': 125}
IMR90_dpcp_rf = {'n_estimators': 70, 'max_depth': 116, 'min_samples_leaf': 1, 'min_samples_split': 9,
                 'max_features': 'log2'}
IMR90_dpcp_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'}
IMR90_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 2, 'gamma': 0,
                      'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.05, 'reg_lambda': 0.1,
                      'learning_rate': 0.1}
"----------------------------------------------"
IMR90_eiip_deepforest = {'max_layers': 15, 'n_estimators': 2,
                         'n_trees': 350}
IMR90_eiip_lightgbm = {'max_depth': 13, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 50,
                       'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 80, 'reg_alpha': 0.0,
                       'reg_lambda': 0.0, 'min_split_gain': 0.4, 'learning_rate': 0.2, 'n_estimators': 200}
IMR90_eiip_rf = {'n_estimators': 240, 'max_depth': 78, 'min_samples_leaf': 1, 'min_samples_split': 2,
                 'max_features': 'auto'}
IMR90_eiip_svm = {'C': 4.0, 'gamma': 512.0, 'kernel': 'rbf'}
IMR90_eiip_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 1, 'gamma': 0,
                      'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 1,
                      'learning_rate': 0.1}
"----------------------------------------------"
IMR90_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2,
                         'n_trees': 250}
IMR90_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 271, 'max_bin': 175, 'min_child_samples': 120,
                       'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 0.7,
                       'reg_lambda': 0.9, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
IMR90_kmer_rf = {'n_estimators': 280, 'max_depth': 79, 'min_samples_leaf': 2, 'min_samples_split': 3,
                 'max_features': 'auto'}
IMR90_kmer_svm = {'C': 2.0, 'gamma': 64.0,
                  'kernel': 'rbf'}
IMR90_kmer_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 2, 'gamma': 0.2,
                      'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
                      'learning_rate': 0.1}
"----------------------------------------------"
IMR90_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300}
IMR90_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 15, 'min_child_samples': 50,
                         'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
                         'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
IMR90_pseknc_rf = {'n_estimators': 240, 'max_depth': 96, 'min_samples_leaf': 3, 'min_samples_split': 4,
                   'max_features': 'auto'}
IMR90_pseknc_svm = {'C': 4.0, 'gamma': 1024.0,
                    'kernel': 'rbf'}
IMR90_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0.2,
                        'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
                        'learning_rate': 0.1}
"----------------------------------------------"
IMR90_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2, 'n_trees': 300}
IMR90_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 35, 'min_child_samples': 60,
                       'colsample_bytree': 0.6, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0,
                       'reg_lambda': 0.5, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 100}
IMR90_tpcp_rf = {'n_estimators': 290, 'max_depth': 71, 'min_samples_leaf': 5, 'min_samples_split': 4,
                 'max_features': 'auto'}
IMR90_tpcp_svm = {'C': 1.0, 'gamma': 512.0, 'kernel': 'rbf'}
IMR90_tpcp_xgboost = {'n_estimators': 950, 'max_depth': 7, 'min_child_weight': 5, 'gamma': 0,
                      'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.5,
                      'learning_rate': 0.1}
"=============================================="
K562_cksnap_deepforest = {"max_layers": 20, "n_estimators": 2, "n_trees": 400}
K562_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 311, 'max_bin': 225, 'min_child_samples': 60,
                        'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
                        'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 250}
K562_cksnap_rf = {'n_estimators': 330, 'max_depth': 109, 'min_samples_leaf': 2, 'min_samples_split': 3,
                  'max_features': 'sqrt'}
K562_cksnap_svm = {'C': 16.0, 'gamma': 32.0, 'kernel': 'rbf'}
K562_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 10, 'min_child_weight': 6, 'gamma': 0,
                       'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 0.05,
                       'learning_rate': 0.1}
"----------------------------------------------"
K562_dpcp_deepforest = {"max_layers": 10, "n_estimators": 2,
                        "n_trees": 150}
# NOTE(review): unlike the sibling lightgbm configs, this one carries no
# max_depth/num_leaves/max_bin/min_child_samples keys — confirm intentional.
K562_dpcp_lightgbm = {'colsample_bytree': 0.7, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 1e-05,
                      'reg_lambda': 0.001, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 225}
K562_dpcp_rf = {'n_estimators': 240, 'max_depth': 127, 'min_samples_leaf': 1, 'min_samples_split': 6,
                'max_features': 'sqrt'}
K562_dpcp_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'}
K562_dpcp_xgboost = {'n_estimators': 950, 'max_depth': 10, 'min_child_weight': 4, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.05,
                     'learning_rate': 0.1}
"----------------------------------------------"
K562_eiip_deepforest = {'max_layers': 10, 'n_estimators': 5,
                        'n_trees': 150}
K562_eiip_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 225, 'min_child_samples': 110,
                      'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 0, 'reg_alpha': 1e-05,
                      'reg_lambda': 1e-05, 'min_split_gain': 0.1, 'learning_rate': 0.1, 'n_estimators': 150}
K562_eiip_rf = {'n_estimators': 120, 'max_depth': 93, 'min_samples_leaf': 3, 'min_samples_split': 3,
                'max_features': 'auto'}
K562_eiip_svm = {'C': 2.0, 'gamma': 1024.0, 'kernel': 'rbf'}
K562_eiip_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0,
                     'learning_rate': 0.1}
"----------------------------------------------"
K562_kmer_deepforest = {'max_layers': 15, 'n_estimators': 5,
                        'n_trees': 150}
K562_kmer_lightgbm = {'max_depth': 0, 'num_leaves': 321, 'max_bin': 5, 'min_child_samples': 70,
                      'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0,
                      'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
K562_kmer_rf = {'n_estimators': 290, 'max_depth': 137, 'min_samples_leaf': 10, 'min_samples_split': 7,
                'max_features': "auto"}
K562_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
K562_kmer_xgboost = {'n_estimators': 650, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.6, 'reg_alpha': 0.5, 'reg_lambda': 0,
                     'learning_rate': 0.1}
"----------------------------------------------"
K562_pseknc_deepforest = {'max_layers': 15, 'n_estimators': 2,
                          'n_trees': 300}
K562_pseknc_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 65, 'min_child_samples': 200,
                        'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 0, 'reg_alpha': 0.0,
                        'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
K562_pseknc_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 1, 'min_samples_split': 6,
                  'max_features': 'log2'}
K562_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'}
K562_pseknc_xgboost = {'n_estimators': 1000, 'max_depth': 8, 'min_child_weight': 1, 'gamma': 0,
                       'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1,
                       'learning_rate': 0.1}
"----------------------------------------------"
K562_tpcp_deepforest = {'max_layers': 20, 'n_estimators': 2,
                        'n_trees': 300}
K562_tpcp_lightgbm = {'max_depth': -1, 'num_leaves': 241, 'max_bin': 105, 'min_child_samples': 130,
                      'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 1e-05,
                      'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
K562_tpcp_rf = {'n_estimators': 280, 'max_depth': 143, 'min_samples_leaf': 5, 'min_samples_split': 2,
                'max_features': 'sqrt'}
K562_tpcp_svm = {'C': 2.0, 'gamma': 64.0, 'kernel': 'rbf'}
K562_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 12, 'min_child_weight': 4, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 2, 'reg_lambda': 1,
                     'learning_rate': 0.1}
"=============================================="
NHEK_cksnap_deepforest = {"max_layers": 20, "n_estimators": 5, "n_trees": 400}
NHEK_cksnap_lightgbm = {'max_depth': -1, 'num_leaves': 291, 'max_bin': 205, 'min_child_samples': 90,
                        'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 0, 'reg_alpha': 0.0,
                        'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 75}
NHEK_cksnap_rf = {'n_estimators': 300, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 3,
                  'max_features': 'auto'}
NHEK_cksnap_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
NHEK_cksnap_xgboost = {'n_estimators': 1000, 'max_depth': 5, 'min_child_weight': 2, 'gamma': 0,
                       'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 1,
                       'learning_rate': 0.1}
"----------------------------------------------"
NHEK_dpcp_deepforest = {"max_layers": 10, "n_estimators": 8, "n_trees": 200}
NHEK_dpcp_lightgbm = {'max_depth': 0, 'num_leaves': 301, 'max_bin': 145, 'min_child_samples': 70,
                      'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.9,
                      'reg_lambda': 1.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 150}
NHEK_dpcp_rf = {'n_estimators': 300, 'max_depth': 138, 'min_samples_leaf': 1, 'min_samples_split': 5,
                'max_features': 'auto'}
NHEK_dpcp_svm = {'C': 8.0, 'gamma': 16.0, 'kernel': 'rbf'}
NHEK_dpcp_xgboost = {'n_estimators': 1000, 'max_depth': 9, 'min_child_weight': 3, 'gamma': 0.5,
                     'colsample_bytree': 0.7, 'subsample': 0.7, 'reg_alpha': 0, 'reg_lambda': 1,
                     'learning_rate': 0.1}
"----------------------------------------------"
NHEK_eiip_deepforest = {'max_layers': 10, 'n_estimators': 2,
                        'n_trees': 100}
NHEK_eiip_lightgbm = {'max_depth': 11, 'num_leaves': 231, 'max_bin': 255, 'min_child_samples': 70,
                      'colsample_bytree': 1.0, 'subsample': 0.6, 'subsample_freq': 0, 'reg_alpha': 0.0,
                      'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 100}
NHEK_eiip_rf = {'n_estimators': 230, 'max_depth': 56, 'min_samples_leaf': 2, 'min_samples_split': 6,
                'max_features': 'log2'}
NHEK_eiip_svm = {'C': 8.0, 'gamma': 512.0, 'kernel': 'rbf'}
NHEK_eiip_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1,
                     'learning_rate': 0.1}
"----------------------------------------------"
NHEK_kmer_deepforest = {'max_layers': 10, 'n_estimators': 2,
                        'n_trees': 200}
NHEK_kmer_lightgbm = {'max_depth': 13, 'num_leaves': 261, 'max_bin': 115, 'min_child_samples': 60,
                      'colsample_bytree': 0.9, 'subsample': 0.9, 'subsample_freq': 40, 'reg_alpha': 0.0,
                      'reg_lambda': 0.001, 'min_split_gain': 1.0, 'learning_rate': 0.1, 'n_estimators': 150}
NHEK_kmer_rf = {'n_estimators': 60, 'max_depth': 117, 'min_samples_leaf': 3, 'min_samples_split': 3,
                'max_features': "auto"}
NHEK_kmer_svm = {'C': 4.0, 'gamma': 64.0, 'kernel': 'rbf'}
NHEK_kmer_xgboost = {'n_estimators': 850, 'max_depth': 9, 'min_child_weight': 1, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 1, 'reg_lambda': 0.1,
                     'learning_rate': 0.1}
"----------------------------------------------"
NHEK_pseknc_deepforest = {'max_layers': 10, 'n_estimators': 2,
                          'n_trees': 150}
NHEK_pseknc_lightgbm = {'max_depth': 12, 'num_leaves': 271, 'max_bin': 155, 'min_child_samples': 20,
                        'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 60, 'reg_alpha': 0.1,
                        'reg_lambda': 1e-05, 'min_split_gain': 0.7, 'learning_rate': 0.1, 'n_estimators': 75}
NHEK_pseknc_rf = {'n_estimators': 190, 'max_depth': 85, 'min_samples_leaf': 1, 'min_samples_split': 10,
                  'max_features': 'auto'}
NHEK_pseknc_svm = {'C': 0.5, 'gamma': 512.0, 'kernel': 'rbf'}
NHEK_pseknc_xgboost = {'n_estimators': 950, 'max_depth': 6, 'min_child_weight': 3, 'gamma': 0,
                       'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0.1, 'reg_lambda': 3,
                       'learning_rate': 0.1}
"----------------------------------------------"
NHEK_tpcp_deepforest = {'max_layers': 10, 'n_estimators': 2,
                        'n_trees': 200}
NHEK_tpcp_lightgbm = {'max_depth': 0, 'num_leaves': 241, 'max_bin': 15, 'min_child_samples': 90,
                      'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 40, 'reg_alpha': 0.001,
                      'reg_lambda': 0.001, 'min_split_gain': 0.2, 'learning_rate': 0.1, 'n_estimators': 100}
NHEK_tpcp_rf = {'n_estimators': 120, 'max_depth': 115, 'min_samples_leaf': 1, 'min_samples_split': 4,
                'max_features': 'auto'}
NHEK_tpcp_svm = {'C': 1.0, 'gamma': 128.0, 'kernel': 'rbf'}
NHEK_tpcp_xgboost = {'n_estimators': 1000, 'max_depth': 7, 'min_child_weight': 6, 'gamma': 0,
                     'colsample_bytree': 0.8, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.01,
                     'learning_rate': 0.1}
class MetaModelParams:
    """Tuned hyperparameters for the stacking meta-models, one set per cell line.

    Attributes are named ``<cell line>_<variant>_prob_<classifier>``.
    """
    # NOTE(review): the "6f5m"/"4f2m" variant tags look like feature/model
    # counts of the stacked ensemble — confirm against the training code.
    ################# GM12878 ######################
    GM12878_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs',
                             'activation': 'identity', 'hidden_layer_sizes': 32}
    GM12878_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs',
                             'activation': 'identity', 'hidden_layer_sizes': 8}
    GM12878_6f5m_prob_logistic = {'C': 2.900000000000001}
    GM12878_4f2m_prob_logistic = {'C': 0.9000000000000001}
    GM12878_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 400}
    GM12878_4f2m_prob_deepforest = {'max_layers': 20, 'n_estimators': 10, 'n_trees': 200}
    GM12878_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 331, 'max_bin': 55, 'min_child_samples': 200,
                                  'colsample_bytree': 0.7, 'subsample': 0.8, 'subsample_freq': 30, 'reg_alpha': 0.0,
                                  'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1,
                                  'n_estimators': 50}
    GM12878_4f2m_prob_lightgbm = {'max_depth': 11, 'num_leaves': 311, 'max_bin': 85, 'min_child_samples': 150,
                                  'colsample_bytree': 0.8, 'subsample': 1.0, 'subsample_freq': 50, 'reg_alpha': 0.0,
                                  'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1,
                                  'n_estimators': 75}
    GM12878_6f5m_prob_rf = {'n_estimators': 250, 'max_depth': 50, 'min_samples_leaf': 9, 'min_samples_split': 5,
                            'max_features': 'auto'}
    GM12878_4f2m_prob_rf = {'n_estimators': 140, 'max_depth': 53, 'min_samples_leaf': 6, 'min_samples_split': 7,
                            'max_features': 'log2'}
    GM12878_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'}
    GM12878_4f2m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'}
    GM12878_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0,
                                 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
                                 'learning_rate': 0.1}
    GM12878_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0,
                                 'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 0.01,
                                 'learning_rate': 0.05}
    ################# HeLa_S3 ######################
    HeLa_S3_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs',
                             'activation': 'relu', 'hidden_layer_sizes': 32}
    HeLa_S3_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd',
                             'activation': 'relu', 'hidden_layer_sizes': (16, 32)}
    HeLa_S3_6f5m_prob_logistic = {'C': 1.9000000000000004}
    HeLa_S3_4f2m_prob_logistic = {'C': 0.5000000000000001}
    HeLa_S3_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 10, 'n_trees': 400}
    HeLa_S3_4f2m_prob_deepforest = {'max_layers': 15, 'n_estimators': 13, 'n_trees': 400}
    HeLa_S3_6f5m_prob_lightgbm = {'max_depth': 5, 'num_leaves': 281, 'max_bin': 175, 'min_child_samples': 180,
                                  'colsample_bytree': 1.0, 'subsample': 0.7, 'subsample_freq': 80, 'reg_alpha': 0.0,
                                  'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2,
                                  'n_estimators': 150}
    HeLa_S3_4f2m_prob_lightgbm = {'max_depth': 3, 'num_leaves': 311, 'max_bin': 35, 'min_child_samples': 20,
                                  'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 70, 'reg_alpha': 0.0,
                                  'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 1.0,
                                  'n_estimators': 125}
    HeLa_S3_6f5m_prob_rf = {'n_estimators': 130, 'max_depth': 20, 'min_samples_leaf': 2, 'min_samples_split': 3,
                            'max_features': 'sqrt'}
    HeLa_S3_4f2m_prob_rf = {'n_estimators': 210, 'max_depth': 117, 'min_samples_leaf': 2, 'min_samples_split': 5,
                            'max_features': 'auto'}
    HeLa_S3_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'}
    HeLa_S3_4f2m_prob_svm = {'C': 0.25, 'gamma': 0.0625, 'kernel': 'rbf'}
    HeLa_S3_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
                                 'colsample_bytree': 0.7, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.05,
                                 'learning_rate': 0.1}
    HeLa_S3_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
                                 'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.05,
                                 'learning_rate': 0.1}
    ################# HUVEC ########################
    HUVEC_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd',
                           'activation': 'relu', 'hidden_layer_sizes': 8}
    HUVEC_4f2m_prob_mlp = {'batch_size': 128, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs',
                           'activation': 'tanh', 'hidden_layer_sizes': (8, 16)}
    HUVEC_6f5m_prob_logistic = {'C': 2.900000000000001}
    HUVEC_4f2m_prob_logistic = {'C': 0.9000000000000001}
    HUVEC_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 250}
    HUVEC_4f2m_prob_deepforest = {'max_layers': 15, 'n_estimators': 13, 'n_trees': 400}
    HUVEC_6f5m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 311, 'max_bin': 45, 'min_child_samples': 170,
                                'colsample_bytree': 0.7, 'subsample': 0.6, 'subsample_freq': 10, 'reg_alpha': 0.0,
                                'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.1,
                                'n_estimators': 100}
    HUVEC_4f2m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 261, 'max_bin': 45, 'min_child_samples': 180,
                                'colsample_bytree': 0.9, 'subsample': 0.8, 'subsample_freq': 10, 'reg_alpha': 0.0,
                                'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 200}
    HUVEC_6f5m_prob_rf = {'n_estimators': 290, 'max_depth': 105, 'min_samples_leaf': 5, 'min_samples_split': 2,
                          'max_features': 'log2'}
    HUVEC_4f2m_prob_rf = {'n_estimators': 140, 'max_depth': 76, 'min_samples_leaf': 3, 'min_samples_split': 2,
                          'max_features': 'log2'}
    HUVEC_6f5m_prob_svm = {'C': 0.125, 'gamma': 0.0625, 'kernel': 'rbf'}
    HUVEC_4f2m_prob_svm = {'C': 1.0, 'gamma': 64.0, 'kernel': 'rbf'}
    HUVEC_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
                               'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.01, 'reg_lambda': 0.02,
                               'learning_rate': 0.05}
    HUVEC_4f2m_prob_xgboost = {'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
                               'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0.05, 'reg_lambda': 0.02,
                               'learning_rate': 0.01}
    ################# IMR90 ########################
    IMR90_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd',
                           'activation': 'identity', 'hidden_layer_sizes': (16, 32)}
    IMR90_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 5e-06, 'max_iter': 300, 'solver': 'lbfgs',
                           'activation': 'tanh', 'hidden_layer_sizes': (8, 16)}
    IMR90_6f5m_prob_logistic = {'C': 2.5000000000000004}
    IMR90_4f2m_prob_logistic = {'C': 2.5000000000000004}
    IMR90_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 8, 'n_trees': 300}
    IMR90_4f2m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 200}
    IMR90_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 341, 'max_bin': 85, 'min_child_samples': 70,
                                'colsample_bytree': 0.9, 'subsample': 1.0, 'subsample_freq': 40, 'reg_alpha': 0.0,
                                'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 250}
    IMR90_4f2m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 321, 'max_bin': 55, 'min_child_samples': 60,
                                'colsample_bytree': 0.7, 'subsample': 0.9, 'subsample_freq': 30, 'reg_alpha': 0.0,
                                'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.2, 'n_estimators': 175}
    IMR90_6f5m_prob_rf = {'n_estimators': 340, 'max_depth': 9, 'min_samples_leaf': 7, 'min_samples_split': 3,
                          'max_features': 'log2'}
    IMR90_4f2m_prob_rf = {'n_estimators': 270, 'max_depth': 120, 'min_samples_leaf': 10, 'min_samples_split': 7,
                          'max_features': 'log2'}
    IMR90_6f5m_prob_svm = {'C': 1.0, 'gamma': 32.0, 'kernel': 'rbf'}
    IMR90_4f2m_prob_svm = {'C': 2.0, 'gamma': 32.0, 'kernel': 'rbf'}
    IMR90_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
                               'colsample_bytree': 0.6, 'subsample': 0.9, 'reg_alpha': 0, 'reg_lambda': 0,
                               'learning_rate': 0.05}
    IMR90_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0,
                               'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01,
                               'learning_rate': 0.07}
    ################# K562 #########################
    K562_6f5m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd',
                          'activation': 'logistic', 'hidden_layer_sizes': (8, 16)}
    K562_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs',
                          'activation': 'tanh', 'hidden_layer_sizes': 8}
    K562_6f5m_prob_logistic = {'C': 2.900000000000001}
    K562_4f2m_prob_logistic = {'C': 0.1}
    K562_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 400}
    K562_4f2m_prob_deepforest = {'max_layers': 10, 'n_estimators': 5, 'n_trees': 300}
    K562_6f5m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 301, 'max_bin': 65, 'min_child_samples': 80,
                               'colsample_bytree': 1.0, 'subsample': 1.0, 'subsample_freq': 30, 'reg_alpha': 1e-05,
                               'reg_lambda': 1e-05, 'min_split_gain': 0.0, 'learning_rate': 0.07,
                               'n_estimators': 75}
    K562_4f2m_prob_lightgbm = {'max_depth': 13, 'num_leaves': 281, 'max_bin': 25, 'min_child_samples': 80,
                               'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 60, 'reg_alpha': 0.0,
                               'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.75, 'n_estimators': 175}
    K562_6f5m_prob_rf = {'n_estimators': 180, 'max_depth': 35, 'min_samples_leaf': 7, 'min_samples_split': 5,
                         'max_features': 'log2'}
    K562_4f2m_prob_rf = {'n_estimators': 80, 'max_depth': 130, 'min_samples_leaf': 6, 'min_samples_split': 5,
                         'max_features': 'log2'}
    K562_6f5m_prob_svm = {'C': 0.5, 'gamma': 0.0625, 'kernel': 'rbf'}
    K562_4f2m_prob_svm = {'C': 1.0, 'gamma': 0.0625, 'kernel': 'rbf'}
    K562_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 6, 'gamma': 0,
                              'colsample_bytree': 0.6, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0.01,
                              'learning_rate': 0.1}
    K562_4f2m_prob_xgboost = {'n_estimators': 50, 'max_depth': 3, 'min_child_weight': 3, 'gamma': 0,
                              'colsample_bytree': 0.6, 'subsample': 0.6, 'reg_alpha': 0, 'reg_lambda': 0.01,
                              'learning_rate': 0.01}
    ################# NHEK #########################
    NHEK_6f5m_prob_mlp = {'batch_size': 128, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'lbfgs',
                          'activation': 'identity', 'hidden_layer_sizes': 32}
    NHEK_4f2m_prob_mlp = {'batch_size': 64, 'learning_rate_init': 0.0001, 'max_iter': 300, 'solver': 'sgd',
                          'activation': 'relu', 'hidden_layer_sizes': (16, 32)}
    NHEK_6f5m_prob_logistic = {'C': 0.9000000000000001}
    NHEK_4f2m_prob_logistic = {'C': 0.1}
    NHEK_6f5m_prob_deepforest = {'max_layers': 10, 'n_estimators': 13, 'n_trees': 50}
    NHEK_4f2m_prob_deepforest = {'max_layers': 20, 'n_estimators': 10, 'n_trees': 50}
    NHEK_6f5m_prob_lightgbm = {'max_depth': 0, 'num_leaves': 291, 'max_bin': 45, 'min_child_samples': 140,
                               'colsample_bytree': 1.0, 'subsample': 0.9, 'subsample_freq': 70, 'reg_alpha': 1.0,
                               'reg_lambda': 0.7, 'min_split_gain': 0.0, 'learning_rate': 0.1, 'n_estimators': 200}
    NHEK_4f2m_prob_lightgbm = {'max_depth': -1, 'num_leaves': 331, 'max_bin': 35, 'min_child_samples': 100,
                               'colsample_bytree': 0.8, 'subsample': 0.9, 'subsample_freq': 60, 'reg_alpha': 0.0,
                               'reg_lambda': 0.0, 'min_split_gain': 0.0, 'learning_rate': 0.07, 'n_estimators': 100}
    NHEK_6f5m_prob_rf = {'n_estimators': 70, 'max_depth': 106, 'min_samples_leaf': 10, 'min_samples_split': 9,
                         'max_features': 'log2'}
    NHEK_4f2m_prob_rf = {'n_estimators': 130, 'max_depth': 9, 'min_samples_leaf': 7, 'min_samples_split': 4,
                         'max_features': 'sqrt'}
    NHEK_6f5m_prob_svm = {'C': 0.0625, 'gamma': 0.0625, 'kernel': 'rbf'}
    NHEK_4f2m_prob_svm = {'C': 2.0, 'gamma': 16.0, 'kernel': 'rbf'}
    NHEK_6f5m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 1, 'gamma': 0,
                              'colsample_bytree': 0.9, 'subsample': 0.8, 'reg_alpha': 0, 'reg_lambda': 0,
                              'learning_rate': 0.07}
    NHEK_4f2m_prob_xgboost = {'n_estimators': 100, 'max_depth': 3, 'min_child_weight': 2, 'gamma': 0.4,
                              'colsample_bytree': 0.6, 'subsample': 0.7, 'reg_alpha': 0.05, 'reg_lambda': 1,
                              'learning_rate': 1.0}
# BUG FIX: the guard compared against '_main_' (single underscores), which can
# never equal __name__, so the debug print below was unreachable.
if __name__ == '__main__':
    # Smoke-check that the attribute lookup works.  EPIconst is expected to be
    # defined elsewhere in this module/package — TODO confirm.
    print(getattr(EPIconst.BaseModelParams, "NHEK_tpcp_deepforest"))
|
# Functions can encapsulate functionality you want to reuse:
def even_odd(x):
    """Print "even" when x is evenly divisible by 2, otherwise "odd"."""
    print("even" if x % 2 == 0 else "odd")

# reused on several sample values
for sample in (2, 4, 7, 22, 8):
    even_odd(sample)
# output should be >>> even, even, odd, even, even
|
# Let A[1..n] be an array of integers. For all i from 1 to n find a subarray with maximum sum that
# covers the position i (more formally, for every i, find the largest value A[l] + A[l + 1] + · · · + A[r]
# among all pairs of indices l and r such that 1 ≤ l ≤ i ≤ r ≤ n).
# Input
# The first line contains an integer n (1 ≤ n ≤ 100 000), the number of elements in A.
# The second line contains integers A[1],A[2],...,A[n] (−106 ≤ A[i] ≤ 106).
# Output
# Print n integers separated by spaces. The i-th of them should be equal to the maximal sum of
# subarray among all that cover the position i in A.
# NOTE: this program does NOT solve the problem stated above; what follows is
# a solution to the plain (global) maximum-sum subarray problem instead.
def main(a):
    """Return the maximum sum over all contiguous subarrays of a (Kadane's algorithm)."""
    best = running = a[0]
    for value in a[1:]:
        # Either extend the current subarray or start a new one at `value`.
        running = max(running + value, value)
        best = max(best, running)
    return best
if __name__ == "__main__":
    # Classic maximum-subarray demo input; expected output: 9 (4-1+5+1).
    a=[-2,1,-3,4,-1,5,1,-5,4]
    print(main(a))
# -*- coding: utf-8 -*-
# __author__ = xiaobao
# __date__ = 2019/12/28 15:41:18
# desc: desc
# 给定一个由 '1'(陆地)和 '0'(水)组成的的二维网格,计算岛屿的数量。一个岛被水包围,并且它是通过水平方向或垂直方向上相邻的陆地连接而成的。你可以假设网格的四个边均被水包围。
# 示例 1:
# 输入:
# 11110
# 11010
# 11000
# 00000
# 输出: 1
# 示例 2:
# 输入:
# 11000
# 11000
# 00100
# 00011
# 输出: 3
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/number-of-islands
# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
# Approach
# Start from a not-yet-visited land cell and breadth-first traverse until no
# more cells can be reached; everything reached this way is one island.
# Then move on to the next unvisited land cell.
# Code
# Breadth-first traversal
# class Solution:
# def numIslands(self, grid) -> int:
# nRow = len(grid)
# if nRow == 0:
# return 0
# nCol = len(grid[0])
# gridTaken = []
# for i in range(nRow):
# gridTaken.append([0] * nCol)
# listQueue = []
# nIslandCount = 0
# for i, RowData in enumerate(grid):
# for j, nValue in enumerate(RowData):
# if nValue == "1" and gridTaken[i][j] == 0:
# nIslandCount += 1
# gridTaken[i][j] = 1
# listQueue.append((i, j))
# while listQueue:
# nX, nY = listQueue.pop(0)
# nNextX, nNextY = nX + 1, nY + 1
# nPreX, nPreY = nX - 1, nY - 1
# if nNextX < nRow and grid[nNextX][nY] == "1" and gridTaken[nNextX][nY] == 0:
# gridTaken[nNextX][nY] = 1
# listQueue.append((nNextX, nY))
# if nNextY < nCol and grid[nX][nNextY] == "1" and gridTaken[nX][nNextY] == 0:
# gridTaken[nX][nNextY] = 1
# listQueue.append((nX, nNextY))
# if nPreX >=0 and grid[nPreX][nY] == "1" and gridTaken[nPreX][nY] == 0:
# gridTaken[nPreX][nY] = 1
# listQueue.append((nPreX, nY))
# if nPreY >= 0 and grid[nX][nPreY] == "1" and gridTaken[nX][nPreY] == 0:
# gridTaken[nX][nPreY] = 1
# listQueue.append((nX, nPreY))
# return nIslandCount
# Depth-first traversal using the call stack (pre-order)
class Solution:
    def numIslands(self, grid) -> int:
        """Count connected groups of "1" cells (4-directional adjacency)."""
        rows = len(grid)
        if rows == 0:
            return 0
        cols = len(grid[0])
        # Parallel visited matrix; 1 marks a cell already assigned to an island.
        visited = [[0] * cols for _ in range(rows)]
        islands = 0
        for r, row_data in enumerate(grid):
            for c, cell in enumerate(row_data):
                if cell == "1" and visited[r][c] == 0:
                    islands += 1
                    self.DFS(r, c, grid, visited, rows, cols)
        return islands

    def DFS(self, i, j, grid, gridTaken, nRow, nCol):
        """Mark every land cell connected to (i, j) as visited (explicit stack)."""
        gridTaken[i][j] = 1
        pending = [(i, j)]
        while pending:
            x, y = pending.pop()
            for nx, ny in ((x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)):
                if (0 <= nx < nRow and 0 <= ny < nCol
                        and grid[nx][ny] == "1" and gridTaken[nx][ny] == 0):
                    gridTaken[nx][ny] = 1
                    pending.append((nx, ny))
# Depth-first traversal based on an explicit stack (left unimplemented):
# class Solution:
# def numIslands(self, grid) -> int:
# pass
# Boundary cases
solution = Solution()
# 0X0
assert(solution.numIslands([[]]) == 0)
# 1X1
assert(solution.numIslands([["1"]]) == 1)
assert(solution.numIslands([["0"]]) == 0)
# 1X2
assert(solution.numIslands([["1", "0"]]) == 1)
assert(solution.numIslands([["1", "1"]]) == 1)
assert(solution.numIslands([["0", "1"]]) == 1)
assert(solution.numIslands([["0", "0"]]) == 0)
# 2X2
assert(solution.numIslands([["1", "1"], ["1", "1"]]) == 1)
assert(solution.numIslands([["1", "0"], ["0", "1"]]) == 2)
# Other cases (includes the two examples from the problem statement)
assert(solution.numIslands(
    [["1", "1", "1", "1", "0"], ["1", "1", "0", "1", "0"], ["1", "1", "0", "0", "0"], ["0", "0", "0", "0", "0"]]) == 1)
assert(solution.numIslands(
    [["1", "1", "0", "0", "0"], ["1", "1", "0", "0", "0"], ["0", "0", "1", "0", "0"], ["0", "0", "0", "1", "1"]]) == 3)
assert(solution.numIslands(
    [["1", "1", "1", "1", "0"], ["1", "1", "0", "1", "0"], ["1", "1", "0", "0", "0"], ["0", "0", "0", "0", "0"]]) == 1)
assert(solution.numIslands(
    [["1","1","1"],["0","1","0"],["1","1","1"]]) == 1)
|
# Release pin and install recipe for the confd binary.
# NOTE(review): the literal "VERSION" placeholders are presumably substituted
# by the surrounding install tooling before use — confirm against the caller.
version = '0.11.0'
version_cmd = 'confd -version'
download_url = 'https://github.com/kelseyhightower/confd/releases/download/vVERSION/confd-VERSION-linux-amd64'
install_script = """
chmod +x confd-VERSION-linux-amd64
mv -f confd-VERSION-linux-amd64 /usr/local/bin/confd
"""
|
class Sensors:
    """Thin wrapper around a dictionary of sensor readings."""

    def __init__(self, **kwargs):
        # Keyword arguments become the backing readings dictionary.
        self.sensor_data_dictionary = kwargs

    def update(self, **kwargs):
        # NOTE(review): despite its name, this REPLACES the whole dictionary
        # rather than merging new readings into it — confirm that is intended.
        self.sensor_data_dictionary = kwargs

    def get_value(self, key):
        """Return the reading stored under key, or None when absent."""
        return self.sensor_data_dictionary.get(key)
|
# https://www.google.com/webhp?sourceid=chrome-
# instant&ion=1&espv=2&ie=UTF-8#q=dp%20coin%20change
def coin_change_recur(coins, n, change_sum):
    """Count the ways to form change_sum using the first n coins (naive recursion)."""
    if change_sum == 0:
        # Exactly one way to make a sum of 0: take no coins.
        return 1
    if change_sum < 0 or n <= 0:
        # Overshot the target, or ran out of denominations.
        return 0
    # Ways that skip coins[n-1]  +  ways that use at least one coins[n-1].
    without_last = coin_change_recur(coins, n - 1, change_sum)
    with_last = coin_change_recur(coins, n, change_sum - coins[n - 1])
    return without_last + with_last
# To hold the results that has been already computed
memo_dict = {}
def coin_change_memo(coins,n,change_sum):
# Check if we have already computed for the current change_sum
if change_sum in memo_dict:
return memo_dict[change_sum]
# If sum is 0 there exists a solution with no coins
if change_sum == 0:
return 1
# if sum is less then 0 no solution exists
if change_sum < 0:
return 0
# if thhere are no coins left and sum is not 0 then no solution exists
if n <= 0 and change_sum > 0:
return 0
# count the solution inclusding coins[n-1] and excluding coins[n-1]
count = (coin_change_memo(coins,n-1,change_sum) +
coin_change_memo(coins,n,change_sum - coins[n-1]))
#memo_dict[change_sum] = count
return count
def coin_change_bottom_up(coins, change_sum):
    """Count the ways to form change_sum from coins via bottom-up dynamic programming."""
    num_coins = len(coins)
    # table[s][j]: ways to form sum s using only coins[0..j].
    table = [[0] * num_coins for _ in range(change_sum + 1)]
    # Base case: a sum of 0 can always be formed in exactly one way (no coins).
    for j in range(num_coins):
        table[0][j] = 1
    for s in range(1, change_sum + 1):
        for j, coin in enumerate(coins):
            # Solutions that use at least one coins[j].
            including = table[s - coin][j] if s >= coin else 0
            # Solutions that use none of coins[j].
            excluding = table[s][j - 1] if j >= 1 else 0
            table[s][j] = including + excluding
    return table[change_sum][num_coins - 1]
if __name__ == "__main__":
    # Demo: all three implementations should agree on 4 ways to make 4 from {1, 2, 3}.
    coins = [1,2,3]
    print("Number of ways to make change: ", coin_change_recur(coins,len(coins),4))
    print("Number of ways to make change: ", coin_change_memo(coins,len(coins),4))
    print("Number of ways to make change: ", coin_change_bottom_up(coins,4))
|
def calc_fuel(mass: int):
    """Fuel needed to launch a module of the given mass: mass // 3 - 2, floored at 0."""
    required = mass // 3 - 2
    return required if required > 0 else 0

def calc_fuel_rec(mass: int):
    """Total fuel for mass, counting the fuel needed to carry the fuel itself."""
    total = 0
    step = calc_fuel(mass)
    while step > 0:
        total += step
        step = calc_fuel(step)
    return total
|
# Numeric codes for the supported model backbones.
MODEL_TYPE = {
    "PointRend" : 1,
    "MobileNetV3Large" : 2,
    "MobileNetV3Small" : 3
}
# Numeric codes for the supported inference task kinds.
TASK_TYPE = {
    "Object Detection" : 1,
    "Instance Segmentation (Map)" : 2,
    "Instance Segmentation (Blend)" : 3
}
# NOTE(review): the legend below pairs task codes with specific backbones, but
# that pairing is not enforced by the dictionaries above — confirm intent.
"""
0 : No Ml model to run
1 : Object Detection : PointRend
2 : Instance Detection (Map) : MobileV3Large
3 : Instance Detection (Blend) : MobileV3Large
"""
class Cls:
    # Class-level attribute and lookup table shared by all instances.
    x = "a"
    d = {"a": "ab"}

cl = Cls()
cl.x = "b"  # creates an INSTANCE attribute; the class-level Cls.x is still "a"
# BUG FIX: the original read `d[cl.x]`, which raises NameError (`d` is a class
# attribute, not a module-level name) and, even resolved, would KeyError since
# "b" is not a key of d.  Resolve d through the instance (falls back to the
# class attribute) and index with the untouched class-level x.
cl.d[Cls.x]
|
#Copyright 2018 Infosys Ltd.
#Use of this source code is governed by Apache 2.0 license that can be found in the LICENSE file or at
#http://www.apache.org/licenses/LICENSE-2.0 .
####DATABASE QUERY STATUS CODES####
CON000 = 'CON000' # Successful database connection
CON001 = 'CON001' # Failed to connect to database
EXE000 = 'EXE000' # Successful query execution
EXE001 = 'EXE001' # Query execution failure
#a = int(input())
#b = int(input())
# Read both integers from a single space-separated line.
entrada = input()
a, b = entrada.split(" ")
a = int(a)
b = int(b)
# The numbers are multiples of each other iff one divides the other.
# BUG FIX: the original branched on a > b and computed `a % b` or `b % a`
# directly, which raised ZeroDivisionError whenever one input was 0.
# 0 is a multiple of every integer, so treat a zero input as "multiples".
if a == 0 or b == 0 or a % b == 0 or b % a == 0:
    print("Sao Multiplos")
else:
    print("Nao sao Multiplos")
|
# coding: utf-8
# Protocol-stack constants, presumably for a 6TiSCH/TSCH network
# simulator (packet types and sizes, RPL, 6LoWPAN, 6P, TSCH scheduling,
# radio states and per-slot energy charges) -- TODO confirm against the
# simulator modules that import this file.
# === admin
NUM_SUFFICIENT_TX = 10 # sufficient num. of tx to estimate pdr by ACK
WAITING_FOR_TX = u'waiting_for_tx'
WAITING_FOR_RX = u'waiting_for_rx'
# === addressing
BROADCAST_ADDRESS = u'FF-FF'
# NOTE(review): mutable module-level list ("global list" in French),
# shared by every importer of this constants module -- confirm the
# shared mutable state is intentional.
listeGlobal = []
# === packet types
PKT_TYPE_DATA = u'DATA'
PKT_TYPE_FRAG = u'FRAG'
PKT_TYPE_JOIN_REQUEST = u'JOIN_REQUEST'
PKT_TYPE_JOIN_RESPONSE = u'JOIN_RESPONSE'
PKT_TYPE_DIS = u'DIS'
PKT_TYPE_DIO = u'DIO'
PKT_TYPE_DAO = u'DAO'
PKT_TYPE_EB = u'EB'
PKT_TYPE_SIXP = u'6P'
PKT_TYPE_KEEP_ALIVE = u'KEEP_ALIVE'
# === packet lengths (in bytes, presumably -- confirm units)
PKT_LEN_DIS = 8
PKT_LEN_DIO = 76
PKT_LEN_DAO = 20
PKT_LEN_JOIN_REQUEST = 20
PKT_LEN_JOIN_RESPONSE = 20
# === rpl
RPL_MINHOPRANKINCREASE = 256
RPL_PARENT_SWITCH_RANK_THRESHOLD = 640
RPL_INFINITE_RANK = 65535
# === roupe
# NOTE(review): the ROUPE_* values mirror the RPL_* values above,
# presumably for a custom routing-protocol variant -- confirm.
ROUPE_MINHOPRANKINCREASE = 256
ROUPE_PARENT_SWITCH_RANK_THRESHOLD = 640
ROUPE_INFINITE_RANK = 65535
# === ipv6
IPV6_DEFAULT_HOP_LIMIT = 64
IPV6_DEFAULT_PREFIX = u'fd00::'
IPV6_ALL_RPL_NODES_ADDRESS = u'ff02::1a'
IPV6_ALL_ROUPE_NODES_ADDRESS = u'ff02::1a'
# === sixlowpan
SIXLOWPAN_REASSEMBLY_BUFFER_LIFETIME = 60 # in seconds
SIXLOWPAN_VRB_TABLE_ENTRY_LIFETIME = 60 # in seconds
# === sixp
SIXP_MSG_TYPE_REQUEST = u'Request'
SIXP_MSG_TYPE_RESPONSE = u'Response'
SIXP_MSG_TYPE_CONFIRMATION = u'Confirmation'
SIXP_CMD_ADD = u'ADD'
SIXP_CMD_DELETE = u'DELETE'
SIXP_CMD_RELOCATE = u'RELOCATE'
SIXP_CMD_COUNT = u'COUNT'
SIXP_CMD_LIST = u'LIST'
SIXP_CMD_SIGNAL = u'SIGNAL'
SIXP_CMD_CLEAR = u'CLEAR'
SIXP_RC_SUCCESS = u'RC_SUCCESS'
SIXP_RC_EOL = u'RC_EOL'
SIXP_RC_ERR = u'RC_ERR'
SIXP_RC_RESET = u'RC_RESET'
SIXP_RC_ERR_VERSION = u'RC_ERR_VERSION'
SIXP_RC_ERR_SFID = u'RC_ERR_SFID'
SIXP_RC_ERR_SEQNUM = u'RC_ERR_SEQNUM'
SIXP_RC_ERR_CELLLIST = u'RC_ERR_CELLLIST'
SIXP_RC_ERR_BUSY = u'RC_ERR_BUSY'
SIXP_RC_ERR_LOCKED = u'RC_ERR_LOCKED'
# NOTE(review): both numeric ("2-step") and spelled-out ("two-step")
# transaction labels are defined -- confirm which variant consumers
# compare against before removing either pair.
SIXP_TRANSACTION_TYPE_2_STEP = u'2-step transaction'
SIXP_TRANSACTION_TYPE_3_STEP = u'3-step transaction'
SIXP_TRANSACTION_TYPE_TWO_STEP = u'two-step transaction'
SIXP_TRANSACTION_TYPE_THREE_STEP = u'three-step transaction'
SIXP_CALLBACK_EVENT_PACKET_RECEPTION = u'packet-reception'
SIXP_CALLBACK_EVENT_MAC_ACK_RECEPTION = u'mac-ack-reception'
SIXP_CALLBACK_EVENT_TIMEOUT = u'timeout'
SIXP_CALLBACK_EVENT_FAILURE = u'failure'
SIXP_CALLBACK_EVENT_ABORTED = u'aborted'
# === sf (scheduling function / MSF tuning knobs)
MSF_MAX_NUMCELLS = 100
MSF_LIM_NUMCELLSUSED_HIGH = 0.75 # in [0-1]
MSF_LIM_NUMCELLSUSED_LOW = 0.25 # in [0-1]
MSF_HOUSEKEEPINGCOLLISION_PERIOD = 60 # in seconds
MSF_RELOCATE_PDRTHRES = 0.5 # in [0-1]
MSF_MIN_NUM_TX = 100 # min number for PDR to be significant
# === tsch
TSCH_MIN_BACKOFF_EXPONENT = 1
TSCH_MAX_BACKOFF_EXPONENT = 7
# Default IEEE 802.15.4e channel-hopping sequence; see
# https://gist.github.com/twatteyne/2e22ee3c1a802b685695#file-4e_tsch_default_ch-py
TSCH_HOPPING_SEQUENCE = [16, 17, 23, 18, 26, 15, 25, 22, 19, 11, 12, 13, 24, 14, 20, 21]
TSCH_MAX_EB_DELAY = 180
TSCH_NUM_NEIGHBORS_TO_WAIT = 2
TSCH_DESYNCHRONIZED_TIMEOUT_SLOTS = 1750
CELLOPTION_TX = u'TX'
CELLOPTION_RX = u'RX'
CELLOPTION_SHARED = u'SHARED'
LINKTYPE_ADVERTISING = u'ADVERTISING'
LINKTYPE_ADVERTISING_ONLY = u'ADVERTISING_ONLY'
LINKTYPE_NORMAL = u'NORMAL'
# Ordering of event phases processed within a single slot.
INTRASLOTORDER_STARTSLOT = 0
INTRASLOTORDER_PROPAGATE = 1
INTRASLOTORDER_STACKTASKS = 2
INTRASLOTORDER_ADMINTASKS = 3
# === radio
RADIO_STATE_TX = u'tx'
RADIO_STATE_RX = u'rx'
RADIO_STATE_OFF = u'off'
# === battery (per-slot charge, in microcoulombs)
# Idle: Time slot during which a node listens for data, but receives
# none
CHARGE_IdleListen_uC = 6.4
# TxDataRxAck: A timeslot during which the node sends some data frame,
# and expects an acknowledgment (ACK)
CHARGE_TxDataRxAck_uC = 54.5
# TxData: Similar to TxDataRxAck, but no ACK is expected. This is
# typically used when the data packet is broadcast
CHARGE_TxData_uC = 49.5
# RxDataTxAck: A timeslot during which the node receives some data
# frame, and sends back an ACK to indicate successful reception
CHARGE_RxDataTxAck_uC = 32.6
# RxData: Similar to the RxDataTxAck but no ACK is sent (for a
# broadcast packet)
CHARGE_RxData_uC = 22.6
# Sleep: time slot during which the node's radio stays off
CHARGE_Sleep_uC = 0.0
|
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return the number of minutes until no fresh orange (1) remains,
        or -1 if some fresh orange can never rot (LeetCode 994).

        Mutates *grid* in place (fresh cells become 2), like the original.
        Improvement: multi-source BFS over a frontier instead of re-scanning
        and copying the whole grid every minute -- O(rows*cols) total rather
        than O((rows*cols)^2) in the worst case.
        """
        rows, cols = len(grid), len(grid[0])
        frontier = []  # all cells that are currently rotten
        fresh = 0
        for i in range(rows):
            for j in range(cols):
                if grid[i][j] == 2:
                    frontier.append((i, j))
                elif grid[i][j] == 1:
                    fresh += 1
        minutes = 0
        # Each BFS level rots every fresh 4-neighbor of the current
        # frontier; one level == one simulated minute.
        while frontier and fresh:
            nxt = []
            for i, j in frontier:
                for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    ni, nj = i + di, j + dj
                    if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] == 1:
                        grid[ni][nj] = 2
                        fresh -= 1
                        nxt.append((ni, nj))
            frontier = nxt
            minutes += 1
        return minutes if fresh == 0 else -1

    def change(self, grid, i, j):
        """Rot the fresh 4-neighbors of (i, j); return True if any changed.

        Kept for backward compatibility with external callers; no longer
        used by orangesRotting.
        """
        r = False
        for ti, tj in zip([-1, 0, 0, 1], [0, -1, 1, 0]):
            if 0 <= ti + i < len(grid) and 0 <= tj + j < len(grid[0]):
                if grid[ti + i][tj + j] == 1:
                    grid[ti + i][tj + j] = 2
                    r = True
        return r
|
class Solution:
    def binaryGap(self, n: int) -> int:
        """Return the longest distance between two adjacent set bits of n
        (0 when n has a single set bit), per LeetCode 868."""
        distances = []
        since_last = 0
        # bin(n)[2:] always starts with '1' for n >= 1, so the first
        # recorded distance is 0 -- this covers the single-bit case.
        for bit in bin(n)[2:]:
            if bit == "1":
                distances.append(since_last)
                since_last = 0
            since_last += 1
        return max(distances)
|
class Suggestion:
    """One address suggestion parsed from a SmartyStreets US Autocomplete
    API response object.

    See "https://smartystreets.com/docs/cloud/us-autocomplete-api#http-response"
    """

    def __init__(self, obj=None):
        """Populate the fields from *obj* (a response dict); every field
        defaults to None when its key is absent.

        Bug fix: the original called obj.get(...) even when obj was None,
        so the advertised default Suggestion() raised AttributeError;
        None is now treated as an empty dict.
        """
        if obj is None:
            obj = {}
        self.text = obj.get('text')
        self.street_line = obj.get('street_line')
        self.city = obj.get('city')
        self.state = obj.get('state')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.