content stringlengths 7 1.05M | fixed_cases stringlengths 1 1.28M |
|---|---|
# elasticmodels/tests/test_settings.py
# author: andrew young
# email: ayoung@thewulf.org
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = ["elasticmodels.urls"]
INSTALLED_APPS = ["elasticmodels"]
| databases = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}}
root_urlconf = ['elasticmodels.urls']
installed_apps = ['elasticmodels'] |
# Python - 2.7.6
Test.describe('Basic Tests')
data = [2]
Test.assert_equals(print_array(data), '2')
data = [2, 4, 5, 2]
Test.assert_equals(print_array(data), '2,4,5,2')
data = [2, 4, 5, 2]
Test.assert_equals(print_array(data), '2,4,5,2')
data = [2.0, 4.2, 5.1, 2.2]
Test.assert_equals(print_array(data), '2.0,4.2,5.1,2.2')
data = ['2', '4', '5', '2']
Test.assert_equals(print_array(data), '2,4,5,2')
data = [True, False, False]
Test.assert_equals(print_array(data), 'True,False,False')
array1 = ['hello', 'this', 'is', 'an', 'array!']
array2 = ['a', 'b', 'c', 'd', 'e!']
data = array1 + array2
Test.assert_equals(print_array(data), 'hello,this,is,an,array!,a,b,c,d,e!')
array1 = ['hello', 'this', 'is', 'an', 'array!']
array2 = [1, 2, 3, 4, 5]
data = [array1, array2]
Test.assert_equals(print_array(data), "['hello', 'this', 'is', 'an', 'array!'],[1, 2, 3, 4, 5]")
| Test.describe('Basic Tests')
data = [2]
Test.assert_equals(print_array(data), '2')
data = [2, 4, 5, 2]
Test.assert_equals(print_array(data), '2,4,5,2')
data = [2, 4, 5, 2]
Test.assert_equals(print_array(data), '2,4,5,2')
data = [2.0, 4.2, 5.1, 2.2]
Test.assert_equals(print_array(data), '2.0,4.2,5.1,2.2')
data = ['2', '4', '5', '2']
Test.assert_equals(print_array(data), '2,4,5,2')
data = [True, False, False]
Test.assert_equals(print_array(data), 'True,False,False')
array1 = ['hello', 'this', 'is', 'an', 'array!']
array2 = ['a', 'b', 'c', 'd', 'e!']
data = array1 + array2
Test.assert_equals(print_array(data), 'hello,this,is,an,array!,a,b,c,d,e!')
array1 = ['hello', 'this', 'is', 'an', 'array!']
array2 = [1, 2, 3, 4, 5]
data = [array1, array2]
Test.assert_equals(print_array(data), "['hello', 'this', 'is', 'an', 'array!'],[1, 2, 3, 4, 5]") |
def result(score):
min = max = score[0]
min_count = max_count = 0
for i in score[1:]:
if i > max:
max_count += 1
max = i
if i < min:
min_count += 1
min = i
return max_count, min_count
n = input()
score = list(map(int, input().split()))
print(*result(score))
| def result(score):
min = max = score[0]
min_count = max_count = 0
for i in score[1:]:
if i > max:
max_count += 1
max = i
if i < min:
min_count += 1
min = i
return (max_count, min_count)
n = input()
score = list(map(int, input().split()))
print(*result(score)) |
DEFAULT_PRAGMAS = (
"akamai-x-get-request-id",
"akamai-x-get-cache-key",
"akamai-x-get-true-cache-key",
"akamai-x-get-extracted-values",
"akamai-x-cache-on",
"akamai-x-cache-remote-on",
"akamai-x-check-cacheable",
"akamai-x-get-ssl-client-session-id",
"akamai-x-serial-no",
)
| default_pragmas = ('akamai-x-get-request-id', 'akamai-x-get-cache-key', 'akamai-x-get-true-cache-key', 'akamai-x-get-extracted-values', 'akamai-x-cache-on', 'akamai-x-cache-remote-on', 'akamai-x-check-cacheable', 'akamai-x-get-ssl-client-session-id', 'akamai-x-serial-no') |
n1 = int(input("digite o valor em metros "))
n2 = int(input("digite o valor em metros "))
n3 = int(input("digite o valor em metros "))
r= (n1**2)+(n2**2)+(n3**2)
print(r) | n1 = int(input('digite o valor em metros '))
n2 = int(input('digite o valor em metros '))
n3 = int(input('digite o valor em metros '))
r = n1 ** 2 + n2 ** 2 + n3 ** 2
print(r) |
__author__ = 'ipetrash'
if __name__ == '__main__':
def getprint(str="hello world!"):
print(str)
def decor(func):
def wrapper(*args, **kwargs):
print("1 begin: " + func.__name__)
print("Args={} kwargs={}".format(args, kwargs))
f = func(*args, **kwargs)
print("2 end: " + func.__name__ + "\n")
return f
return wrapper
def predecor(w="W"):
print(w, end=': ')
getprint()
getprint("Py!")
print()
f = decor(getprint)
f()
f("Py!")
def rgb2hex(get_rgb_func):
def wrapper(*args, **kwargs):
r, g, b = get_rgb_func(*args, **kwargs)
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
return wrapper
class RGB:
def __init__(self):
self._r = 0xff
self._g = 0xff
self._b = 0xff
def getr(self):
return self._r
def setr(self, r):
self._r = r
r = property(getr, setr)
def getg(self):
return self._g
def setg(self, g):
self._g = g
g = property(getg, setg)
def getb(self):
return self._b
def setb(self, b):
self._b = b
b = property(getb, setb)
def setrgb(self, r, g, b):
self.r, self.g, self.b = r, g, b
@rgb2hex
def getrgb(self):
return (self.r, self.g, self.b)
rgb = RGB()
print('rgb.r={}'.format(rgb.r))
rgb.setrgb(0xff, 0x1, 0xff)
print("rgb.getrgb(): %s" % rgb.getrgb())
print()
@decor
def foo(a, b):
print("{} ^ {} = {}".format(a, b, (a ** b)))
foo(2, 3)
foo(b=3, a=2) | __author__ = 'ipetrash'
if __name__ == '__main__':
def getprint(str='hello world!'):
print(str)
def decor(func):
def wrapper(*args, **kwargs):
print('1 begin: ' + func.__name__)
print('Args={} kwargs={}'.format(args, kwargs))
f = func(*args, **kwargs)
print('2 end: ' + func.__name__ + '\n')
return f
return wrapper
def predecor(w='W'):
print(w, end=': ')
getprint()
getprint('Py!')
print()
f = decor(getprint)
f()
f('Py!')
def rgb2hex(get_rgb_func):
def wrapper(*args, **kwargs):
(r, g, b) = get_rgb_func(*args, **kwargs)
return '#{:02x}{:02x}{:02x}'.format(r, g, b)
return wrapper
class Rgb:
def __init__(self):
self._r = 255
self._g = 255
self._b = 255
def getr(self):
return self._r
def setr(self, r):
self._r = r
r = property(getr, setr)
def getg(self):
return self._g
def setg(self, g):
self._g = g
g = property(getg, setg)
def getb(self):
return self._b
def setb(self, b):
self._b = b
b = property(getb, setb)
def setrgb(self, r, g, b):
(self.r, self.g, self.b) = (r, g, b)
@rgb2hex
def getrgb(self):
return (self.r, self.g, self.b)
rgb = rgb()
print('rgb.r={}'.format(rgb.r))
rgb.setrgb(255, 1, 255)
print('rgb.getrgb(): %s' % rgb.getrgb())
print()
@decor
def foo(a, b):
print('{} ^ {} = {}'.format(a, b, a ** b))
foo(2, 3)
foo(b=3, a=2) |
'''
https://youtu.be/-xRKazHGtjU
Smarter Approach: https://youtu.be/J7S3CHFBZJA
Dynamic Programming: https://youtu.be/VQeFcG9pjJU
'''
| """
https://youtu.be/-xRKazHGtjU
Smarter Approach: https://youtu.be/J7S3CHFBZJA
Dynamic Programming: https://youtu.be/VQeFcG9pjJU
""" |
def fit_index(dataset, list_variables):
""" Mapping between index and category, for categorical variables
For each (categorical) variable, create 2 dictionaries:
- index_to_categorical: from the index to the category
- categorical_to_index: from the category to the index
Parameters
----------
dataset: pandas.core.frame.DataFrame
DataFrame with (partly) categorical variables
list_variables: list(str)
List of variable names to index
Returns
-------
index: dict
For each categorical column, we have the 2 mappings: idx2cat & idx2cat
"""
index = dict()
for icol in list_variables:
if icol not in dataset.columns:
raise RuntimeError(f'{icol} not found in dataframe')
idx2cat = {ii: jj for ii, jj in enumerate(dataset.loc[:, icol].unique())}
cat2idx = {jj: ii for ii, jj in idx2cat.items()}
index[icol] = {
'index_to_categorical': idx2cat,
'categorical_to_index': cat2idx
}
return index
def map_to_or_from_index(dataset, index, type_conversion):
"""Transform categorical variables to their index
Parameters
----------
dataset: pandas.core.frame.DataFrame
DataFrame with categorical variables
index: dict
For each categorical column (dict index), we have 2 mappings:
- index_to_categorical
- categorical_to_index
Returns
-------
dataset: pandas.core.frame.DataFrame
Dataframe with the mapping & missing values
"""
for icol in set(index.keys()).intersection(dataset.columns):
dataset_init = dataset.copy()
dataset[icol] = dataset[icol].map(
lambda x: index[icol][type_conversion].get(x, None)
)
missing_index = dataset[icol].isna()
if sum(missing_index) > 0:
dataset = dataset[~missing_index]
print(
"Missing {} for {} ({} rows): {}".format(
type_conversion,
icol, sum(missing_index), set(dataset_init[missing_index][icol])
)
)
del dataset_init
return dataset
| def fit_index(dataset, list_variables):
""" Mapping between index and category, for categorical variables
For each (categorical) variable, create 2 dictionaries:
- index_to_categorical: from the index to the category
- categorical_to_index: from the category to the index
Parameters
----------
dataset: pandas.core.frame.DataFrame
DataFrame with (partly) categorical variables
list_variables: list(str)
List of variable names to index
Returns
-------
index: dict
For each categorical column, we have the 2 mappings: idx2cat & idx2cat
"""
index = dict()
for icol in list_variables:
if icol not in dataset.columns:
raise runtime_error(f'{icol} not found in dataframe')
idx2cat = {ii: jj for (ii, jj) in enumerate(dataset.loc[:, icol].unique())}
cat2idx = {jj: ii for (ii, jj) in idx2cat.items()}
index[icol] = {'index_to_categorical': idx2cat, 'categorical_to_index': cat2idx}
return index
def map_to_or_from_index(dataset, index, type_conversion):
"""Transform categorical variables to their index
Parameters
----------
dataset: pandas.core.frame.DataFrame
DataFrame with categorical variables
index: dict
For each categorical column (dict index), we have 2 mappings:
- index_to_categorical
- categorical_to_index
Returns
-------
dataset: pandas.core.frame.DataFrame
Dataframe with the mapping & missing values
"""
for icol in set(index.keys()).intersection(dataset.columns):
dataset_init = dataset.copy()
dataset[icol] = dataset[icol].map(lambda x: index[icol][type_conversion].get(x, None))
missing_index = dataset[icol].isna()
if sum(missing_index) > 0:
dataset = dataset[~missing_index]
print('Missing {} for {} ({} rows): {}'.format(type_conversion, icol, sum(missing_index), set(dataset_init[missing_index][icol])))
del dataset_init
return dataset |
def model(outcome, player1, player2, game_matrix):
"""
outcome [N, 1] where N is games and extra dimension is just 1 or zero depending on whether
player 1 or player 2 wins
player1 is one-hot vector encoding of player id
player2 ""
game_matrix has entries [G,P] (use sparse multiplication COO)
Say there are P players
Say there are G games
"""
# random normal distribution with vector [P, 1]
skill = pyro.sample(...)
diff = game_matrix @ skill
# random normal distribution with means as differences
score = pyro.sample(Normal(diff, 1))
prob = sigmoid(score)
# Outcome is drawn from a probability dist with p being result of sigmoid
pyro.sample(dis.Bernoulli(prob), obs=outcome)
# For the guide do
# Look it up
guide = pyro.AutoDiagonalNormal()
| def model(outcome, player1, player2, game_matrix):
"""
outcome [N, 1] where N is games and extra dimension is just 1 or zero depending on whether
player 1 or player 2 wins
player1 is one-hot vector encoding of player id
player2 ""
game_matrix has entries [G,P] (use sparse multiplication COO)
Say there are P players
Say there are G games
"""
skill = pyro.sample(...)
diff = game_matrix @ skill
score = pyro.sample(normal(diff, 1))
prob = sigmoid(score)
pyro.sample(dis.Bernoulli(prob), obs=outcome)
guide = pyro.AutoDiagonalNormal() |
# inspired from spacy
def add_codes(err_cls):
"""Add error codes to string messages via class attribute names."""
class ErrorsWithCodes(object):
def __getattribute__(self, code):
msg = getattr(err_cls, code)
return '[{code}] {msg}'.format(code=code, msg=msg)
return ErrorsWithCodes()
@add_codes
class Errors:
""" List of identified error """
E001 = 'Error on loading data configuration file.'
| def add_codes(err_cls):
"""Add error codes to string messages via class attribute names."""
class Errorswithcodes(object):
def __getattribute__(self, code):
msg = getattr(err_cls, code)
return '[{code}] {msg}'.format(code=code, msg=msg)
return errors_with_codes()
@add_codes
class Errors:
""" List of identified error """
e001 = 'Error on loading data configuration file.' |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def Insertion_sort(_list):
list_length = len(_list)
i = 1
while i < list_length:
key = _list[i]
j = i - 1
while j >= 0 and _list[j] > key:
_list[j+1] = _list[j]
j -= 1
_list[j+1] = key
i += 1
return _list
| def insertion_sort(_list):
list_length = len(_list)
i = 1
while i < list_length:
key = _list[i]
j = i - 1
while j >= 0 and _list[j] > key:
_list[j + 1] = _list[j]
j -= 1
_list[j + 1] = key
i += 1
return _list |
duzina = 5
sirina = 2
povrsina = duzina * sirina
print('Povrsina je ', povrsina)
print('Obim je ', 2 * (duzina + sirina))
| duzina = 5
sirina = 2
povrsina = duzina * sirina
print('Povrsina je ', povrsina)
print('Obim je ', 2 * (duzina + sirina)) |
""" Constants for event handling in Eris. """
PRIO_HIGH = 0
PRIO_MEDIUM = 1
PRIO_LOW = 2
| """ Constants for event handling in Eris. """
prio_high = 0
prio_medium = 1
prio_low = 2 |
class Contact:
def __init__(self, fname=None, sname=None, lname=None, address=None, email=None, tel=None):
self.fname = fname
self.sname = sname
self.lname = lname
self.address = address
self.email = email
self.tel = tel | class Contact:
def __init__(self, fname=None, sname=None, lname=None, address=None, email=None, tel=None):
self.fname = fname
self.sname = sname
self.lname = lname
self.address = address
self.email = email
self.tel = tel |
def response(number):
if number % 4 == 0:
return "Multiple of four"
elif number % 2 == 0:
return "Even"
else:
return "Odd"
def divisible(num, check):
if check % num == 0:
return "Yes, it's evenly divisible"
return "No, it's not evenly divisible"
if __name__ == "__main__":
number = int(input("Tell me a number: "))
print(response(number))
| def response(number):
if number % 4 == 0:
return 'Multiple of four'
elif number % 2 == 0:
return 'Even'
else:
return 'Odd'
def divisible(num, check):
if check % num == 0:
return "Yes, it's evenly divisible"
return "No, it's not evenly divisible"
if __name__ == '__main__':
number = int(input('Tell me a number: '))
print(response(number)) |
class PipelineError(Exception):
pass
class PipelineParallelError(Exception):
pass
| class Pipelineerror(Exception):
pass
class Pipelineparallelerror(Exception):
pass |
# print statement, function definition
name = "Anurag"
age = 30
print(name, age, "python", 2020)
print(name, age, "python", 2020, sep=", ", end=" $$ ")
| name = 'Anurag'
age = 30
print(name, age, 'python', 2020)
print(name, age, 'python', 2020, sep=', ', end=' $$ ') |
def isPermutation(string_1, string_2):
string_1 = list(string_1)
string_2 = list(string_2)
for i in range(0, len(string_1)):
for j in range(0, len(string_2)):
if string_1[i] == string_2[j]:
del string_2[j]
break
if len(string_2) == 0:
return True
else:
return False
string_1 = str(input())
string_2 = str(input())
if isPermutation(string_1, string_2):
print('Your strings are permutations of each other.')
else:
print('Your strings are not permutations of each other.') | def is_permutation(string_1, string_2):
string_1 = list(string_1)
string_2 = list(string_2)
for i in range(0, len(string_1)):
for j in range(0, len(string_2)):
if string_1[i] == string_2[j]:
del string_2[j]
break
if len(string_2) == 0:
return True
else:
return False
string_1 = str(input())
string_2 = str(input())
if is_permutation(string_1, string_2):
print('Your strings are permutations of each other.')
else:
print('Your strings are not permutations of each other.') |
""" Regular expressions """
def match(pattern, string, flags=0):
return _compile(pattern, flags).match(string)
def _compile(pattern, flags):
p = sre_compile.compile(pattern, flags)
return p
| """ Regular expressions """
def match(pattern, string, flags=0):
return _compile(pattern, flags).match(string)
def _compile(pattern, flags):
p = sre_compile.compile(pattern, flags)
return p |
class Observer(object):
"""docstring for Observer"""
def __init__(self):
super(Observer, self).__init__()
self.signalFunc = None
def onReceive(self, signal, emitter):
if self.signalFunc != None and signal in self.signalFunc:
self.signalFunc[signal](emitter)
| class Observer(object):
"""docstring for Observer"""
def __init__(self):
super(Observer, self).__init__()
self.signalFunc = None
def on_receive(self, signal, emitter):
if self.signalFunc != None and signal in self.signalFunc:
self.signalFunc[signal](emitter) |
class _SCon:
esc : str = '\u001B'
bra : str = '['
eb : str = esc + bra
bRed : str = eb + '41m'
white : str = eb + '37m'
bold : str = eb + '1m'
right : str = 'C'
left : str = 'D'
down : str = 'B'
up : str = 'A'
reset : str = eb + '0m'
cyan : str = eb + '36m'
del_char: str = eb + 'X'
save : str = eb + 's'
restore : str = eb + 'u'
def caret_to(self, x: int, y: int) -> None: print(self.eb + f"{y};{x}H", end = "")
def caret_save(self) -> None: print(self.save, end = "")
def caret_restore(self) -> None: print(self.restore, end = "")
def del_line(self) -> None: print(self.eb + "2K", end ="")
def reset_screen_and_caret(self) -> None: print(self.eb + "2J" + self.eb + "0;0H", end = "")
def caret_x_pos(self, x: int) -> None: print(self.eb + f"{x}G", end = "")
def caret_y_pos(self, y: int) -> None: print(self.eb + f"{y}d", end = "")
SCON: _SCon = _SCon() | class _Scon:
esc: str = '\x1b'
bra: str = '['
eb: str = esc + bra
b_red: str = eb + '41m'
white: str = eb + '37m'
bold: str = eb + '1m'
right: str = 'C'
left: str = 'D'
down: str = 'B'
up: str = 'A'
reset: str = eb + '0m'
cyan: str = eb + '36m'
del_char: str = eb + 'X'
save: str = eb + 's'
restore: str = eb + 'u'
def caret_to(self, x: int, y: int) -> None:
print(self.eb + f'{y};{x}H', end='')
def caret_save(self) -> None:
print(self.save, end='')
def caret_restore(self) -> None:
print(self.restore, end='')
def del_line(self) -> None:
print(self.eb + '2K', end='')
def reset_screen_and_caret(self) -> None:
print(self.eb + '2J' + self.eb + '0;0H', end='')
def caret_x_pos(self, x: int) -> None:
print(self.eb + f'{x}G', end='')
def caret_y_pos(self, y: int) -> None:
print(self.eb + f'{y}d', end='')
scon: _SCon = _s_con() |
# tree structure in decoder side
# divide sub-node by brackets "()"
class Tree():
def __init__(self):
self.parent = None
self.num_children = 0
self.children = []
def __str__(self, level = 0):
ret = ""
for child in self.children:
if isinstance(child,type(self)):
ret += child.__str__(level+1)
else:
ret += "\t"*level + str(child) + "\n"
return ret
def add_child(self,c):
if isinstance(c,type(self)):
c.parent = self
self.children.append(c)
self.num_children = self.num_children + 1
def to_string(self):
r_list = []
for i in range(self.num_children):
if isinstance(self.children[i], Tree):
r_list.append("( " + self.children[i].to_string() + " )")
else:
r_list.append(str(self.children[i]))
return "".join(r_list)
def to_list(self, form_manager):
r_list = []
for i in range(self.num_children):
if isinstance(self.children[i], type(self)):
r_list.append(form_manager.get_symbol_idx("("))
cl = self.children[i].to_list(form_manager)
for k in range(len(cl)):
r_list.append(cl[k])
r_list.append(form_manager.get_symbol_idx(")"))
else:
r_list.append(self.children[i])
return r_list | class Tree:
def __init__(self):
self.parent = None
self.num_children = 0
self.children = []
def __str__(self, level=0):
ret = ''
for child in self.children:
if isinstance(child, type(self)):
ret += child.__str__(level + 1)
else:
ret += '\t' * level + str(child) + '\n'
return ret
def add_child(self, c):
if isinstance(c, type(self)):
c.parent = self
self.children.append(c)
self.num_children = self.num_children + 1
def to_string(self):
r_list = []
for i in range(self.num_children):
if isinstance(self.children[i], Tree):
r_list.append('( ' + self.children[i].to_string() + ' )')
else:
r_list.append(str(self.children[i]))
return ''.join(r_list)
def to_list(self, form_manager):
r_list = []
for i in range(self.num_children):
if isinstance(self.children[i], type(self)):
r_list.append(form_manager.get_symbol_idx('('))
cl = self.children[i].to_list(form_manager)
for k in range(len(cl)):
r_list.append(cl[k])
r_list.append(form_manager.get_symbol_idx(')'))
else:
r_list.append(self.children[i])
return r_list |
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
N = len(A)
l_sum = A[0]
r_sum = sum(A) - l_sum
diff = abs(l_sum - r_sum)
for i in range(1, N -1):
l_sum += A[i]
r_sum -= A[i]
c_diff = abs(l_sum - r_sum)
if diff > c_diff:
diff = c_diff
return diff
| def solution(A):
n = len(A)
l_sum = A[0]
r_sum = sum(A) - l_sum
diff = abs(l_sum - r_sum)
for i in range(1, N - 1):
l_sum += A[i]
r_sum -= A[i]
c_diff = abs(l_sum - r_sum)
if diff > c_diff:
diff = c_diff
return diff |
#!/usr/bin/python3
def uppercase(str):
for c in str:
if (ord(c) >= ord('a')) and (ord(c) <= ord('z')):
c = chr(ord(c)-ord('a')+ord('A'))
print("{}".format(c), end='')
print()
| def uppercase(str):
for c in str:
if ord(c) >= ord('a') and ord(c) <= ord('z'):
c = chr(ord(c) - ord('a') + ord('A'))
print('{}'.format(c), end='')
print() |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BUILD rules used to provide a Swift toolchain provided by Xcode on macOS.
The rules defined in this file are not intended to be used outside of the Swift
toolchain package. If you are looking for rules to build Swift code using this
toolchain, see `swift.bzl`.
"""
load(":providers.bzl", "SwiftToolchainInfo")
load("@bazel_skylib//:lib.bzl", "dicts")
def _default_linker_opts(apple_fragment, apple_toolchain, platform, target):
"""Returns options that should be passed by default to `clang` when linking.
Args:
apple_fragment: The `apple` configuration fragment.
apple_toolchain: The `apple_common.apple_toolchain()` object.
platform: The `apple_platform` value describing the target platform.
target: The target triple.
Returns:
A list of options that will be passed to any compile action created by this
toolchain.
"""
platform_framework_dir = apple_toolchain.platform_developer_framework_dir(
apple_fragment)
if _is_macos(platform):
swift_subdir = "swift_static"
static_linkopts = [
"-Xlinker",
"-force_load_swift_libs",
"-framework",
"Foundation",
"-lstdc++",
# XCTest.framework only lives in the Xcode bundle, so test binaries need
# to have that directory explicitly added to their rpaths.
# TODO(allevato): Factor this out into test-specific linkopts?
"-Wl,-rpath,{}".format(platform_framework_dir),
]
else:
swift_subdir = "swift"
static_linkopts = []
swift_lib_dir = (
"{developer_dir}/Toolchains/{toolchain}.xctoolchain" +
"/usr/lib/{swift_subdir}/{platform}"
).format(
developer_dir=apple_toolchain.developer_dir(),
platform=platform.name_in_plist.lower(),
swift_subdir=swift_subdir,
toolchain="XcodeDefault",
)
return static_linkopts + [
"-target", target,
"--sysroot", apple_toolchain.sdk_dir(),
"-F", platform_framework_dir,
"-L", swift_lib_dir,
]
def _default_swiftc_copts(apple_fragment, apple_toolchain, target):
"""Returns options that should be passed by default to `swiftc`.
Args:
apple_fragment: The `apple` configuration fragment.
apple_toolchain: The `apple_common.apple_toolchain()` object.
target: The target triple.
Returns:
A list of options that will be passed to any compile action created by this
toolchain.
"""
copts = [
"-target", target,
"-sdk", apple_toolchain.sdk_dir(),
"-F", apple_toolchain.platform_developer_framework_dir(apple_fragment),
]
bitcode_mode = str(apple_fragment.bitcode_mode)
if bitcode_mode == "embedded":
copts.append("-embed-bitcode")
elif bitcode_mode == "embedded_markers":
copts.append("-embed-bitcode-marker")
elif bitcode_mode != "none":
fail("Internal error: expected apple_fragment.bitcode_mode to be one " +
"of: ['embedded', 'embedded_markers', 'none']")
return copts
def _is_macos(platform):
"""Returns `True` if the given platform is macOS.
Args:
platform: An `apple_platform` value describing the platform for which a
target is being built.
Returns:
`True` if the given platform is macOS.
"""
return platform.platform_type == apple_common.platform_type.macos
def _swift_apple_target_triple(cpu, platform, version):
"""Returns a target triple string for an Apple platform.
Args:
cpu: The CPU of the target.
platform: The `apple_platform` value describing the target platform.
version: The target platform version as a dotted version string.
Returns:
A target triple string describing the platform.
"""
platform_string = str(platform.platform_type)
if platform_string == "macos":
platform_string = "macosx"
return "{cpu}-apple-{platform}{version}".format(
cpu=cpu,
platform=platform_string,
version=version,
)
def _xcode_env(xcode_config, platform):
"""Returns a dictionary containing Xcode-related environment variables.
Args:
xcode_config: The `XcodeVersionConfig` provider that contains information
about the current Xcode configuration.
platform: The `apple_platform` value describing the target platform being
built.
Returns:
A `dict` containing Xcode-related environment variables that should be
passed to Swift compile and link actions.
"""
return dicts.add(
apple_common.apple_host_system_env(xcode_config),
apple_common.target_apple_env(xcode_config, platform)
)
def _xcode_swift_toolchain_impl(ctx):
apple_fragment = ctx.fragments.apple
apple_toolchain = apple_common.apple_toolchain()
cpu = apple_fragment.single_arch_cpu
platform = apple_fragment.single_arch_platform
xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
target_os_version = xcode_config.minimum_os_for_platform_type(
platform.platform_type)
target = _swift_apple_target_triple(cpu, platform, target_os_version)
linker_opts = _default_linker_opts(
apple_fragment, apple_toolchain, platform, target)
swiftc_copts = _default_swiftc_copts(apple_fragment, apple_toolchain, target)
return [
SwiftToolchainInfo(
action_environment=_xcode_env(xcode_config, platform),
cc_toolchain_info=None,
cpu=cpu,
execution_requirements={"requires-darwin": ""},
implicit_deps=[],
linker_opts=linker_opts,
object_format="macho",
requires_autolink_extract=False,
requires_workspace_relative_module_maps=False,
root_dir=None,
spawn_wrapper=ctx.executable._xcrunwrapper,
stamp=ctx.attr.stamp if _is_macos(platform) else None,
supports_objc_interop=True,
swiftc_copts=swiftc_copts,
system_name="darwin",
),
]
xcode_swift_toolchain = rule(
attrs={
"stamp": attr.label(
doc="""
A `cc`-providing target that should be linked into any binaries that are built
with stamping enabled.
""",
providers=[["cc"]],
),
"_xcode_config": attr.label(
default=configuration_field(
fragment="apple",
name="xcode_config_label",
),
),
"_xcrunwrapper": attr.label(
cfg="host",
default=Label("@bazel_tools//tools/objc:xcrunwrapper"),
executable=True,
),
},
doc="Represents a Swift compiler toolchain provided by Xcode.",
fragments=["apple", "cpp"],
implementation=_xcode_swift_toolchain_impl,
)
| """BUILD rules used to provide a Swift toolchain provided by Xcode on macOS.
The rules defined in this file are not intended to be used outside of the Swift
toolchain package. If you are looking for rules to build Swift code using this
toolchain, see `swift.bzl`.
"""
load(':providers.bzl', 'SwiftToolchainInfo')
load('@bazel_skylib//:lib.bzl', 'dicts')
def _default_linker_opts(apple_fragment, apple_toolchain, platform, target):
"""Returns options that should be passed by default to `clang` when linking.
Args:
apple_fragment: The `apple` configuration fragment.
apple_toolchain: The `apple_common.apple_toolchain()` object.
platform: The `apple_platform` value describing the target platform.
target: The target triple.
Returns:
A list of options that will be passed to any compile action created by this
toolchain.
"""
platform_framework_dir = apple_toolchain.platform_developer_framework_dir(apple_fragment)
if _is_macos(platform):
swift_subdir = 'swift_static'
static_linkopts = ['-Xlinker', '-force_load_swift_libs', '-framework', 'Foundation', '-lstdc++', '-Wl,-rpath,{}'.format(platform_framework_dir)]
else:
swift_subdir = 'swift'
static_linkopts = []
swift_lib_dir = ('{developer_dir}/Toolchains/{toolchain}.xctoolchain' + '/usr/lib/{swift_subdir}/{platform}').format(developer_dir=apple_toolchain.developer_dir(), platform=platform.name_in_plist.lower(), swift_subdir=swift_subdir, toolchain='XcodeDefault')
return static_linkopts + ['-target', target, '--sysroot', apple_toolchain.sdk_dir(), '-F', platform_framework_dir, '-L', swift_lib_dir]
def _default_swiftc_copts(apple_fragment, apple_toolchain, target):
"""Returns options that should be passed by default to `swiftc`.
Args:
apple_fragment: The `apple` configuration fragment.
apple_toolchain: The `apple_common.apple_toolchain()` object.
target: The target triple.
Returns:
A list of options that will be passed to any compile action created by this
toolchain.
"""
copts = ['-target', target, '-sdk', apple_toolchain.sdk_dir(), '-F', apple_toolchain.platform_developer_framework_dir(apple_fragment)]
bitcode_mode = str(apple_fragment.bitcode_mode)
if bitcode_mode == 'embedded':
copts.append('-embed-bitcode')
elif bitcode_mode == 'embedded_markers':
copts.append('-embed-bitcode-marker')
elif bitcode_mode != 'none':
fail('Internal error: expected apple_fragment.bitcode_mode to be one ' + "of: ['embedded', 'embedded_markers', 'none']")
return copts
def _is_macos(platform):
"""Returns `True` if the given platform is macOS.
Args:
platform: An `apple_platform` value describing the platform for which a
target is being built.
Returns:
`True` if the given platform is macOS.
"""
return platform.platform_type == apple_common.platform_type.macos
def _swift_apple_target_triple(cpu, platform, version):
"""Returns a target triple string for an Apple platform.
Args:
cpu: The CPU of the target.
platform: The `apple_platform` value describing the target platform.
version: The target platform version as a dotted version string.
Returns:
A target triple string describing the platform.
"""
platform_string = str(platform.platform_type)
if platform_string == 'macos':
platform_string = 'macosx'
return '{cpu}-apple-{platform}{version}'.format(cpu=cpu, platform=platform_string, version=version)
def _xcode_env(xcode_config, platform):
"""Returns a dictionary containing Xcode-related environment variables.
Args:
xcode_config: The `XcodeVersionConfig` provider that contains information
about the current Xcode configuration.
platform: The `apple_platform` value describing the target platform being
built.
Returns:
A `dict` containing Xcode-related environment variables that should be
passed to Swift compile and link actions.
"""
return dicts.add(apple_common.apple_host_system_env(xcode_config), apple_common.target_apple_env(xcode_config, platform))
def _xcode_swift_toolchain_impl(ctx):
apple_fragment = ctx.fragments.apple
apple_toolchain = apple_common.apple_toolchain()
cpu = apple_fragment.single_arch_cpu
platform = apple_fragment.single_arch_platform
xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
target_os_version = xcode_config.minimum_os_for_platform_type(platform.platform_type)
target = _swift_apple_target_triple(cpu, platform, target_os_version)
linker_opts = _default_linker_opts(apple_fragment, apple_toolchain, platform, target)
swiftc_copts = _default_swiftc_copts(apple_fragment, apple_toolchain, target)
return [swift_toolchain_info(action_environment=_xcode_env(xcode_config, platform), cc_toolchain_info=None, cpu=cpu, execution_requirements={'requires-darwin': ''}, implicit_deps=[], linker_opts=linker_opts, object_format='macho', requires_autolink_extract=False, requires_workspace_relative_module_maps=False, root_dir=None, spawn_wrapper=ctx.executable._xcrunwrapper, stamp=ctx.attr.stamp if _is_macos(platform) else None, supports_objc_interop=True, swiftc_copts=swiftc_copts, system_name='darwin')]
xcode_swift_toolchain = rule(attrs={'stamp': attr.label(doc='\nA `cc`-providing target that should be linked into any binaries that are built\nwith stamping enabled.\n', providers=[['cc']]), '_xcode_config': attr.label(default=configuration_field(fragment='apple', name='xcode_config_label')), '_xcrunwrapper': attr.label(cfg='host', default=label('@bazel_tools//tools/objc:xcrunwrapper'), executable=True)}, doc='Represents a Swift compiler toolchain provided by Xcode.', fragments=['apple', 'cpp'], implementation=_xcode_swift_toolchain_impl) |
#In PowerShell
"""
function Get-Something {
param (
[string[]]$thing
)
foreach ($t in $things){
Write-Host $t
}
}
"""
#region functions
def powershell_python():
    """Python analogue of the PowerShell sample above: emit one fixed line."""
    print('This is a function')
#positional arguments mandatory,
def powershell_python(name, optional='yes'):
    """Print a greeting built from *name*.

    Bug fix: the original default `optional=yes` referenced an undefined
    name `yes` and raised NameError at definition time; the comparison
    below (`optional == 'yes'`) shows the intended default is the string
    'yes'.

    :param name: value interpolated into the printed message.
    :param optional: when equal to 'yes', an extra line is printed first.
    """
    if optional == 'yes':
        print('This uses an optional arguments')
    print('This is a function {var1}'.format(var1=name))
#endregion | """
function Get-Something {
param (
[string[]]$thing
)
foreach ($t in $things){
Write-Host $t
}
}
"""
def powershell_python():
    """Print a single fixed line; mirrors the PowerShell sample."""
    print('This is a function')
def powershell_python(name, optional=yes):
if optional == 'yes':
print('This uses an optional arguments')
print('This is a function {var1}'.format(var1=name))
return |
def calcula_diferenca(A: int, B: int, C: int, D: int):
if (not isinstance(A, int) or
not isinstance(B, int) or
not isinstance(C, int) or
not isinstance(D, int)):
raise(TypeError)
D = A * B - C * D
return f'DIFERENCA = {D}' | def calcula_diferenca(A: int, B: int, C: int, D: int):
if not isinstance(A, int) or not isinstance(B, int) or (not isinstance(C, int)) or (not isinstance(D, int)):
raise TypeError
d = A * B - C * D
return f'DIFERENCA = {D}' |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###################################################
#........../\./\...___......|\.|..../...\.........#
#........./..|..\/\.|.|_|._.|.\|....|.c.|.........#
#......../....../--\|.|.|.|i|..|....\.../.........#
# Mathtin (c) #
###################################################
# Author: Daniel [Mathtin] Shiko #
# Copyright (c) 2020 <wdaniil@mail.ru> #
# This file is released under the MIT license. #
###################################################
__author__ = 'Mathtin'
class InvalidConfigException(Exception):
    """Raised when a configuration variable holds an invalid value."""

    def __init__(self, msg: str, var_name: str):
        # Build the final message once and hand it to Exception.
        detail = f'{msg}, check {var_name} value'
        super().__init__(detail)
class NotCoroutineException(TypeError):
    """Raised when a plain callable is supplied where a coroutine function is required."""

    def __init__(self, func):
        message = f'{str(func)} is not a coroutine function'
        super().__init__(message)
class MissingResourceException(Exception):
def __init__(self, xml: str, path: str):
super().__init__(f'Missing resource in {xml}: {path}') | __author__ = 'Mathtin'
class Invalidconfigexception(Exception):
    """Raised when a configuration variable holds an invalid value."""

    def __init__(self, msg: str, var_name: str):
        detail = f'{msg}, check {var_name} value'
        super().__init__(detail)
class Notcoroutineexception(TypeError):
    """Raised when a plain callable is supplied where a coroutine function is required."""

    def __init__(self, func):
        message = f'{str(func)} is not a coroutine function'
        super().__init__(message)
class Missingresourceexception(Exception):
def __init__(self, xml: str, path: str):
super().__init__(f'Missing resource in {xml}: {path}') |
#
# LeetCode
# Algorithm 104 Maximum depth of binary tree
#
# See LICENSE
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 104: maximum depth of a binary tree."""

    def traverse(self, root, depth):
        """Return the depth of the deepest node at or below *root*.

        :type root: TreeNode
        :type depth: int -- depth already accumulated above *root*
        :rtype: int
        """
        # Idiom fix: identity test `is None` instead of `== None` (PEP 8);
        # the redundant `else` after `return` is dropped.
        if root is None:
            return depth
        return max(self.traverse(root.left, depth + 1),
                   self.traverse(root.right, depth + 1))

    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        return self.traverse(root, 0)
| class Solution(object):
def traverse(self, root, depth):
"""
:type root: TreeNode
:type depth: int
:rtype: int
"""
if root == None:
return depth
else:
return max(self.traverse(root.left, depth + 1), self.traverse(root.right, depth + 1))
def max_depth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.traverse(root, 0) |
#Question link
#https://practice.geeksforgeeks.org/problems/smallest-subarray-with-sum-greater-than-x/0
def window(arr, n, k):
    """Length of the smallest window of *arr* whose sum exceeds *k*.

    Two-pointer sweep over arr[0:n] (GeeksforGeeks "smallest subarray
    with sum greater than x").  Returns 1 as soon as a single element
    alone exceeds k, and falls back to n when no window qualifies.
    """
    lo = 0
    hi = 0
    best = n
    running = 0
    while lo < n and hi < n + 1:
        if running > k:
            # Current window (lo, hi) exceeds k: record it, then shrink.
            if lo == hi:
                best = 1
                break
            best = min(best, hi - lo)
            running -= arr[lo]
            lo += 1
        elif hi == n:
            # Nothing left to extend with and the sum is too small.
            break
        else:
            running += arr[hi]
            hi += 1
    return best
def main():
    """Read t test cases from stdin and print the answer for each."""
    for _ in range(int(input())):
        n, k = map(int, input().split())
        values = list(map(int, input().split()))
        print(window(values, n, k))


if __name__ == "__main__":
    main()
| def window(arr, n, k):
left = 0
right = 0
ans = n
sum1 = 0
while left < n and right < n + 1:
if sum1 > k:
if left == right:
ans = 1
break
ans = min(ans, right - left)
sum1 -= arr[left]
left += 1
elif right == n:
break
else:
sum1 += arr[right]
right += 1
return ans
def main():
t = int(input())
for _ in range(t):
(n, k) = map(int, input().split())
arr = list(map(int, input().split()))
print(window(arr, n, k))
if __name__ == '__main__':
main() |
# Validate a user-supplied name: reject it if it starts with a digit or
# contains a forbidden character, printing False/True accordingly.
s = input()
# s = ' name1'
list_stop = [' ', '@', '$', '%']
list_num = '0123456789'
# flag_true = 0
flag_false = 0
# Reject names whose first character is a digit.
for i in list_num:
    if s[0] == i:
        flag_false += 1
        break
# Scan every character for forbidden symbols.
# NOTE(review): the inner `else: break` aborts the inner loop as soon as the
# FIRST entry of list_stop does not match, so only ' ' is effectively tested
# for each character -- presumably a bug; confirm the intended validation.
for j in s:
    for k in list_stop:
        if j == k:
            flag_false += 1
            break
        else:
            break
if flag_false >= 1:
    print(False)
else:
    print(True)
| s = input()
list_stop = [' ', '@', '$', '%']
list_num = '0123456789'
flag_false = 0
for i in list_num:
if s[0] == i:
flag_false += 1
break
for j in s:
for k in list_stop:
if j == k:
flag_false += 1
break
else:
break
if flag_false >= 1:
print(False)
else:
print(True) |
#
# @lc app=leetcode id=1232 lang=python3
#
# [1232] Check If It Is a Straight Line
#
# @lc code=start
class Solution:
    """LeetCode 1232: check whether all given points are collinear."""

    def checkStraightLine(self, coordinates):
        """Return True if every point in *coordinates* lies on one line.

        Bug fix: the original computed a slope and mapped the vertical
        case (x1 == x2) to slope 0, wrongly rejecting vertical lines such
        as [[0,0],[0,1],[0,2]].  The cross-product form
        (x2-x1)*(y-y1) == (y2-y1)*(x-x1) needs no division, so it handles
        vertical lines and avoids float error with integer inputs.
        """
        if len(coordinates) <= 2:
            return True
        (x1, y1), (x2, y2) = coordinates[0], coordinates[1]
        dx, dy = x2 - x1, y2 - y1
        return all(dx * (y - y1) == dy * (x - x1) for x, y in coordinates[2:])
# @lc code=end
| class Solution:
def check_straight_line(self, coordinates):
if len(coordinates) <= 2:
return True
(x1, x2, y1, y2) = (coordinates[0][0], coordinates[1][0], coordinates[0][1], coordinates[1][1])
if x1 == x2:
k = 0
else:
k = (y1 - y2) / (x1 - x2)
b = y1 - k * x1
for item in coordinates[2:]:
if item[1] != item[0] * k + b:
return False
return True |
class YggException(Exception):
    """Base error for the Ygg client."""


class LoginFailed(Exception):
    """Raised when a login attempt is rejected."""


class TooManyFailedLogins(Exception):
    """Raised after repeated login failures."""
| class Yggexception(Exception):
pass
class Loginfailed(Exception):
pass
class Toomanyfailedlogins(Exception):
pass |
class TriggerBase:
    """Base class that forwards animation triggers onto a message queue."""

    def __init__(self, q, events):
        self.q = q  # outbound queue; anything exposing .put(item)
        self.events = events

    def trigger(self, name):
        """Enqueue a trigger_animation request for animation *name*."""
        message = {'req': 'trigger_animation', 'data': name, 'sender': 'Trigger'}
        self.q.put(message)
| class Triggerbase:
def __init__(self, q, events):
self.q = q
self.events = events
def trigger(self, name):
self.q.put({'req': 'trigger_animation', 'data': name, 'sender': 'Trigger'}) |
# Map each person to their favorite color, then report every pairing.
favcolor = {"Jacob": "Magenta", "Jason": "Red", "Anais": "Purple"}

for person, hue in favcolor.items():
    print("%s's favorite color is %s" % (person, hue))
| favcolor = {'Jacob': 'Magenta', 'Jason': 'Red', 'Anais': 'Purple'}
for (name, color) in favcolor.items():
print("%s's favorite color is %s" % (name, color)) |
"""Constants for the Vivint integration."""
DOMAIN = "vivint"
EVENT_TYPE = f"{DOMAIN}_event"
RTSP_STREAM_DIRECT = 0
RTSP_STREAM_INTERNAL = 1
RTSP_STREAM_EXTERNAL = 2
RTSP_STREAM_TYPES = {
RTSP_STREAM_DIRECT: "Direct (falls back to internal if direct access is not available)",
RTSP_STREAM_INTERNAL: "Internal",
RTSP_STREAM_EXTERNAL: "External",
}
CONF_HD_STREAM = "hd_stream"
CONF_RTSP_STREAM = "rtsp_stream"
DEFAULT_HD_STREAM = True
DEFAULT_RTSP_STREAM = RTSP_STREAM_DIRECT
| """Constants for the Vivint integration."""
domain = 'vivint'
event_type = f'{DOMAIN}_event'
rtsp_stream_direct = 0
rtsp_stream_internal = 1
rtsp_stream_external = 2
rtsp_stream_types = {RTSP_STREAM_DIRECT: 'Direct (falls back to internal if direct access is not available)', RTSP_STREAM_INTERNAL: 'Internal', RTSP_STREAM_EXTERNAL: 'External'}
conf_hd_stream = 'hd_stream'
conf_rtsp_stream = 'rtsp_stream'
default_hd_stream = True
default_rtsp_stream = RTSP_STREAM_DIRECT |
'''
5. Write a Python program to check whether a specified value is contained in a group of values.
Test Data :
3 -> [1, 5, 8, 3] : True
-1 -> [1, 5, 8, 3] : False
'''
def check_value(group_data, n):
    """Return True if *n* equals any element of *group_data*.

    Bug fix: the original had `else: return False` INSIDE the loop, so it
    returned False as soon as the first element did not match and never
    inspected the rest (check_value([1, 5, 8, 3], 3) returned False).
    The False branch now runs only after the whole sequence is scanned.
    """
    for x in group_data:
        if n == x:
            return True
    return False
print(check_value([1,5,8,3], 3))
print(check_value([1,5,8,3], -1))
| """
5. Write a Python program to check whether a specified value is contained in a group of values.
Test Data :
3 -> [1, 5, 8, 3] : True
-1 -> [1, 5, 8, 3] : False
"""
def check_value(group_data, n):
    """Return True if *n* equals any element of *group_data*.

    Bug fix: the original's `else: return False` sat inside the loop, so
    only the FIRST element was ever compared; the False return now comes
    after the loop completes without a match.
    """
    for x in group_data:
        if n == x:
            return True
    return False
print(check_value([1, 5, 8, 3], 3))
print(check_value([1, 5, 8, 3], -1)) |
# Program corresponding to flowchart in this site https://automatetheboringstuff.com/2e/images/000039.jpg
# Interactive rain/umbrella decision loop driven by stdin answers.
print('Is raining? (Y)es or (N)o')
answer = input()
if answer == 'N':
    print('Go outside.')
elif answer == 'Y':
    print('Have umbrella? (Y)es or (N)o')
    answer2 = input()
    if answer2 == 'Y':
        print('Go outside.')
    elif answer2 == 'N':
        # No umbrella: keep waiting until the user reports the rain stopped.
        print('Wait a while.')
        print('Is raining? (Y)es or (N)o')
        answer3 = input()
        while answer3 == 'Y':
            print('Wait a while.')
            print('Is raining? (Y)es or (N)o')
            answer3 = input()
        print('Go outside.')
else:
    # Any first answer other than 'Y'/'N' lands here.
    print("I can't understand you.Type 'Y' for yes and 'N' or No.")
print('===============')
print('Exiting program')
| print('Is raining? (Y)es or (N)o')
answer = input()
if answer == 'N':
print('Go outside.')
elif answer == 'Y':
print('Have umbrella? (Y)es or (N)o')
answer2 = input()
if answer2 == 'Y':
print('Go outside.')
elif answer2 == 'N':
print('Wait a while.')
print('Is raining? (Y)es or (N)o')
answer3 = input()
while answer3 == 'Y':
print('Wait a while.')
print('Is raining? (Y)es or (N)o')
answer3 = input()
print('Go outside.')
else:
print("I can't understand you.Type 'Y' for yes and 'N' or No.")
print('===============')
print('Exiting program') |
__all__ = ["TreeDict"]
class TreeDict:
"""Converts a nested dict to an object.
Items in the dict are set to object attributes.
ARTIQ python does not support dict type. Inherit this class to convert the dict to an object.
self.value_parser() can be inherited to parse non-dict values.
Args:
dict_value: dict, dictionary to convert to an object.
nested_dict_class: class for nested dicts. Default None, which represents self.__class__.
This can be a different class (usually another class inherited from TreeDict).
"""
def __init__(self, dict_value, nested_dict_class=None):
self._set_attributes(dict_value, nested_dict_class)
def value_parser(self, value):
"""Parser for non-dict values."""
return value
def _set_attributes(self, dict_value, nested_dict_class):
if nested_dict_class is None:
class SubClass(self.__class__):
"""A derived class from the current class.
ARTIQ python does not support nesting a class as an attribute of the same class,
so a derived class from self.__class__ is necessary.
"""
pass
nested_dict_class = SubClass
for item in dict_value:
if isinstance(dict_value[item], dict):
setattr(self, item, nested_dict_class(dict_value[item]))
else:
setattr(self, item, self.value_parser(dict_value[item]))
| __all__ = ['TreeDict']
class Treedict:
"""Converts a nested dict to an object.
Items in the dict are set to object attributes.
ARTIQ python does not support dict type. Inherit this class to convert the dict to an object.
self.value_parser() can be inherited to parse non-dict values.
Args:
dict_value: dict, dictionary to convert to an object.
nested_dict_class: class for nested dicts. Default None, which represents self.__class__.
This can be a different class (usually another class inherited from TreeDict).
"""
def __init__(self, dict_value, nested_dict_class=None):
self._set_attributes(dict_value, nested_dict_class)
def value_parser(self, value):
"""Parser for non-dict values."""
return value
def _set_attributes(self, dict_value, nested_dict_class):
if nested_dict_class is None:
class Subclass(self.__class__):
"""A derived class from the current class.
ARTIQ python does not support nesting a class as an attribute of the same class,
so a derived class from self.__class__ is necessary.
"""
pass
nested_dict_class = SubClass
for item in dict_value:
if isinstance(dict_value[item], dict):
setattr(self, item, nested_dict_class(dict_value[item]))
else:
setattr(self, item, self.value_parser(dict_value[item])) |
first_name = input()
second_name = input()
delimeter = input()
print(f"{first_name}{delimeter}{second_name}")
| first_name = input()
second_name = input()
delimeter = input()
print(f'{first_name}{delimeter}{second_name}') |
#
# PySNMP MIB module CISCO-ITP-RT-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-ITP-RT-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 12:03:41 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
NotificationGroup, ModuleCompliance, AgentCapabilities = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "AgentCapabilities")
Bits, Counter32, NotificationType, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Integer32, Gauge32, Unsigned32, MibIdentifier, TimeTicks, iso, IpAddress, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter32", "NotificationType", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Integer32", "Gauge32", "Unsigned32", "MibIdentifier", "TimeTicks", "iso", "IpAddress", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoItpRtCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 216))
ciscoItpRtCapability.setRevisions(('2002-01-21 00:00', '2001-10-24 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoItpRtCapability.setRevisionsDescriptions(('Updated capabilities MIB as required for new groups. cItpRtNotificationsGroup, cItpRtScalarGroupRev1', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoItpRtCapability.setLastUpdated('200201210000Z')
if mibBuilder.loadTexts: ciscoItpRtCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoItpRtCapability.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-ss7@cisco.com')
if mibBuilder.loadTexts: ciscoItpRtCapability.setDescription('Agent capabilities for the CISCO-ITP-RT-MIB.')
ciscoItpRtCapabilityV12R024MB1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 216, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoItpRtCapabilityV12R024MB1 = ciscoItpRtCapabilityV12R024MB1.setProductRelease('Cisco IOS 12.2(4)MB1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoItpRtCapabilityV12R024MB1 = ciscoItpRtCapabilityV12R024MB1.setStatus('current')
if mibBuilder.loadTexts: ciscoItpRtCapabilityV12R024MB1.setDescription('IOS 12.2(4)MB1 Cisco CISCO-ITP-RT-MIB.my User Agent MIB capabilities.')
ciscoItpRtCapabilityV12R0204MB3 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 216, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoItpRtCapabilityV12R0204MB3 = ciscoItpRtCapabilityV12R0204MB3.setProductRelease('Cisco IOS 12.2(4)MB3')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoItpRtCapabilityV12R0204MB3 = ciscoItpRtCapabilityV12R0204MB3.setStatus('current')
if mibBuilder.loadTexts: ciscoItpRtCapabilityV12R0204MB3.setDescription('IOS 12.2(4)MB3 Cisco CISCO-ITP-RT-MIB.my User Agent MIB capabilities.')
mibBuilder.exportSymbols("CISCO-ITP-RT-CAPABILITY", ciscoItpRtCapabilityV12R024MB1=ciscoItpRtCapabilityV12R024MB1, ciscoItpRtCapabilityV12R0204MB3=ciscoItpRtCapabilityV12R0204MB3, PYSNMP_MODULE_ID=ciscoItpRtCapability, ciscoItpRtCapability=ciscoItpRtCapability)
| (integer, object_identifier, octet_string) = mibBuilder.importSymbols('ASN1', 'Integer', 'ObjectIdentifier', 'OctetString')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_union, value_size_constraint, single_value_constraint, constraints_intersection, value_range_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsUnion', 'ValueSizeConstraint', 'SingleValueConstraint', 'ConstraintsIntersection', 'ValueRangeConstraint')
(cisco_agent_capability,) = mibBuilder.importSymbols('CISCO-SMI', 'ciscoAgentCapability')
(notification_group, module_compliance, agent_capabilities) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ModuleCompliance', 'AgentCapabilities')
(bits, counter32, notification_type, counter64, mib_scalar, mib_table, mib_table_row, mib_table_column, object_identity, integer32, gauge32, unsigned32, mib_identifier, time_ticks, iso, ip_address, module_identity) = mibBuilder.importSymbols('SNMPv2-SMI', 'Bits', 'Counter32', 'NotificationType', 'Counter64', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'ObjectIdentity', 'Integer32', 'Gauge32', 'Unsigned32', 'MibIdentifier', 'TimeTicks', 'iso', 'IpAddress', 'ModuleIdentity')
(display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention')
cisco_itp_rt_capability = module_identity((1, 3, 6, 1, 4, 1, 9, 7, 216))
ciscoItpRtCapability.setRevisions(('2002-01-21 00:00', '2001-10-24 00:00'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts:
ciscoItpRtCapability.setRevisionsDescriptions(('Updated capabilities MIB as required for new groups. cItpRtNotificationsGroup, cItpRtScalarGroupRev1', 'Initial version of this MIB module.'))
if mibBuilder.loadTexts:
ciscoItpRtCapability.setLastUpdated('200201210000Z')
if mibBuilder.loadTexts:
ciscoItpRtCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts:
ciscoItpRtCapability.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-ss7@cisco.com')
if mibBuilder.loadTexts:
ciscoItpRtCapability.setDescription('Agent capabilities for the CISCO-ITP-RT-MIB.')
cisco_itp_rt_capability_v12_r024_mb1 = agent_capabilities((1, 3, 6, 1, 4, 1, 9, 7, 216, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cisco_itp_rt_capability_v12_r024_mb1 = ciscoItpRtCapabilityV12R024MB1.setProductRelease('Cisco IOS 12.2(4)MB1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cisco_itp_rt_capability_v12_r024_mb1 = ciscoItpRtCapabilityV12R024MB1.setStatus('current')
if mibBuilder.loadTexts:
ciscoItpRtCapabilityV12R024MB1.setDescription('IOS 12.2(4)MB1 Cisco CISCO-ITP-RT-MIB.my User Agent MIB capabilities.')
cisco_itp_rt_capability_v12_r0204_mb3 = agent_capabilities((1, 3, 6, 1, 4, 1, 9, 7, 216, 2))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cisco_itp_rt_capability_v12_r0204_mb3 = ciscoItpRtCapabilityV12R0204MB3.setProductRelease('Cisco IOS 12.2(4)MB3')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cisco_itp_rt_capability_v12_r0204_mb3 = ciscoItpRtCapabilityV12R0204MB3.setStatus('current')
if mibBuilder.loadTexts:
ciscoItpRtCapabilityV12R0204MB3.setDescription('IOS 12.2(4)MB3 Cisco CISCO-ITP-RT-MIB.my User Agent MIB capabilities.')
mibBuilder.exportSymbols('CISCO-ITP-RT-CAPABILITY', ciscoItpRtCapabilityV12R024MB1=ciscoItpRtCapabilityV12R024MB1, ciscoItpRtCapabilityV12R0204MB3=ciscoItpRtCapabilityV12R0204MB3, PYSNMP_MODULE_ID=ciscoItpRtCapability, ciscoItpRtCapability=ciscoItpRtCapability) |
# -*- coding: utf-8 -*-
qntCaso = int(input())
for caso in range(qntCaso):
listStrTamanhoStr = list()
listStr = list(map(str, input().split()))
for indiceStr in range(len(listStr)): listStrTamanhoStr.append([listStr[indiceStr], len(listStr[indiceStr])])
strSequenciaOrdenadaTamanho = ""
for chave, valor in sorted(listStrTamanhoStr, key=lambda x: x[1],reverse=True): strSequenciaOrdenadaTamanho += "{} ".format(chave)
print(strSequenciaOrdenadaTamanho.strip()) | qnt_caso = int(input())
for caso in range(qntCaso):
list_str_tamanho_str = list()
list_str = list(map(str, input().split()))
for indice_str in range(len(listStr)):
listStrTamanhoStr.append([listStr[indiceStr], len(listStr[indiceStr])])
str_sequencia_ordenada_tamanho = ''
for (chave, valor) in sorted(listStrTamanhoStr, key=lambda x: x[1], reverse=True):
str_sequencia_ordenada_tamanho += '{} '.format(chave)
print(strSequenciaOrdenadaTamanho.strip()) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Chirstoph Reimers'
__email__ = 'creimers@byteyard.de'
__version__ = '0.1.0.b6'
| __author__ = 'Chirstoph Reimers'
__email__ = 'creimers@byteyard.de'
__version__ = '0.1.0.b6' |
#square pattern
'''
Print the following pattern for the given N number of rows.
Pattern for N = 4
4444
4444
4444
4444
'''
# Read N from stdin and print an N x N grid in which every cell is the
# digit N itself (see the pattern description in the docstring above).
rows = int(input())
for i in range(rows):
    for j in range(rows):
        # end="" keeps all N digits of a row on one line.
        print(rows, end="")
    print()
| """
Print the following pattern for the given N number of rows.
Pattern for N = 4
4444
4444
4444
4444
"""
rows = int(input())
for i in range(rows):
for j in range(rows):
print(rows, end='')
print() |
expected_output = {
"vrf": {
"default": {
"address_family": {
"ipv4": {
"instance": {
"10000": {
"summary_traffic_statistics": {
"ospf_packets_received_sent": {
"type": {
"rx_invalid": {"packets": 0, "bytes": 0},
"rx_hello": {"packets": 0, "bytes": 0},
"rx_db_des": {"packets": 0, "bytes": 0},
"rx_ls_req": {"packets": 0, "bytes": 0},
"rx_ls_upd": {"packets": 0, "bytes": 0},
"rx_ls_ack": {"packets": 0, "bytes": 0},
"rx_total": {"packets": 0, "bytes": 0},
"tx_failed": {"packets": 0, "bytes": 0},
"tx_hello": {"packets": 0, "bytes": 0},
"tx_db_des": {"packets": 0, "bytes": 0},
"tx_ls_req": {"packets": 0, "bytes": 0},
"tx_ls_upd": {"packets": 0, "bytes": 0},
"tx_ls_ack": {"packets": 0, "bytes": 0},
"tx_total": {"packets": 0, "bytes": 0},
}
},
"ospf_header_errors": {
"length": 0,
"instance_id": 0,
"checksum": 0,
"auth_type": 0,
"version": 0,
"bad_source": 0,
"no_virtual_link": 0,
"area_mismatch": 0,
"no_sham_link": 0,
"self_originated": 0,
"duplicate_id": 0,
"hello": 0,
"mtu_mismatch": 0,
"nbr_ignored": 0,
"lls": 0,
"unknown_neighbor": 0,
"authentication": 0,
"ttl_check_fail": 0,
"adjacency_throttle": 0,
"bfd": 0,
"test_discard": 0,
},
"ospf_lsa_errors": {
"type": 0,
"length": 0,
"data": 0,
"checksum": 0,
},
}
},
"888": {
"router_id": "10.19.13.14",
"ospf_queue_statistics": {
"limit": {"inputq": 0, "updateq": 200, "outputq": 0},
"drops": {"inputq": 0, "updateq": 0, "outputq": 0},
"max_delay_msec": {
"inputq": 3,
"updateq": 2,
"outputq": 1,
},
"max_size": {
"total": {"inputq": 4, "updateq": 3, "outputq": 2},
"invalid": {
"inputq": 0,
"updateq": 0,
"outputq": 0,
},
"hello": {"inputq": 4, "updateq": 0, "outputq": 1},
"db_des": {"inputq": 0, "updateq": 0, "outputq": 1},
"ls_req": {"inputq": 0, "updateq": 0, "outputq": 0},
"ls_upd": {"inputq": 0, "updateq": 3, "outputq": 0},
"ls_ack": {"inputq": 0, "updateq": 0, "outputq": 0},
},
"current_size": {
"total": {"inputq": 0, "updateq": 0, "outputq": 0},
"invalid": {
"inputq": 0,
"updateq": 0,
"outputq": 0,
},
"hello": {"inputq": 0, "updateq": 0, "outputq": 0},
"db_des": {"inputq": 0, "updateq": 0, "outputq": 0},
"ls_req": {"inputq": 0, "updateq": 0, "outputq": 0},
"ls_upd": {"inputq": 0, "updateq": 0, "outputq": 0},
"ls_ack": {"inputq": 0, "updateq": 0, "outputq": 0},
},
},
"interface_statistics": {
"interfaces": {
"Tunnel65541": {
"last_clear_traffic_counters": "never",
"ospf_packets_received_sent": {
"type": {
"rx_invalid": {
"packets": 0,
"bytes": 0,
},
"rx_hello": {"packets": 0, "bytes": 0},
"rx_db_des": {"packets": 0, "bytes": 0},
"rx_ls_req": {"packets": 0, "bytes": 0},
"rx_ls_upd": {"packets": 0, "bytes": 0},
"rx_ls_ack": {"packets": 0, "bytes": 0},
"rx_total": {"packets": 0, "bytes": 0},
"tx_failed": {"packets": 0, "bytes": 0},
"tx_hello": {
"packets": 62301,
"bytes": 5980896,
},
"tx_db_des": {"packets": 0, "bytes": 0},
"tx_ls_req": {"packets": 0, "bytes": 0},
"tx_ls_upd": {"packets": 0, "bytes": 0},
"tx_ls_ack": {"packets": 0, "bytes": 0},
"tx_total": {
"packets": 62301,
"bytes": 5980896,
},
}
},
"ospf_header_errors": {
"length": 0,
"instance_id": 0,
"checksum": 0,
"auth_type": 0,
"version": 0,
"bad_source": 0,
"no_virtual_link": 0,
"area_mismatch": 0,
"no_sham_link": 0,
"self_originated": 0,
"duplicate_id": 0,
"hello": 0,
"mtu_mismatch": 0,
"nbr_ignored": 0,
"lls": 0,
"unknown_neighbor": 0,
"authentication": 0,
"ttl_check_fail": 0,
"adjacency_throttle": 0,
"bfd": 0,
"test_discard": 0,
},
"ospf_lsa_errors": {
"type": 0,
"length": 0,
"data": 0,
"checksum": 0,
},
},
"GigabitEthernet0/1/7": {
"last_clear_traffic_counters": "never",
"ospf_packets_received_sent": {
"type": {
"rx_invalid": {
"packets": 0,
"bytes": 0,
},
"rx_hello": {
"packets": 70493,
"bytes": 3383664,
},
"rx_db_des": {
"packets": 3,
"bytes": 1676,
},
"rx_ls_req": {
"packets": 1,
"bytes": 36,
},
"rx_ls_upd": {
"packets": 14963,
"bytes": 1870388,
},
"rx_ls_ack": {
"packets": 880,
"bytes": 76140,
},
"rx_total": {
"packets": 86340,
"bytes": 5331904,
},
"tx_failed": {"packets": 0, "bytes": 0},
"tx_hello": {
"packets": 1,
"bytes": 100,
},
"tx_db_des": {
"packets": 4,
"bytes": 416,
},
"tx_ls_req": {
"packets": 1,
"bytes": 968,
},
"tx_ls_upd": {
"packets": 1,
"bytes": 108,
},
"tx_ls_ack": {
"packets": 134,
"bytes": 9456,
},
"tx_total": {
"packets": 141,
"bytes": 11048,
},
}
},
"ospf_header_errors": {
"length": 0,
"instance_id": 0,
"checksum": 0,
"auth_type": 0,
"version": 0,
"bad_source": 0,
"no_virtual_link": 0,
"area_mismatch": 0,
"no_sham_link": 0,
"self_originated": 0,
"duplicate_id": 0,
"hello": 0,
"mtu_mismatch": 0,
"nbr_ignored": 0,
"lls": 0,
"unknown_neighbor": 0,
"authentication": 0,
"ttl_check_fail": 0,
"adjacency_throttle": 0,
"bfd": 0,
"test_discard": 0,
},
"ospf_lsa_errors": {
"type": 0,
"length": 0,
"data": 0,
"checksum": 0,
},
},
"GigabitEthernet0/1/6": {
"last_clear_traffic_counters": "never",
"ospf_packets_received_sent": {
"type": {
"rx_invalid": {
"packets": 0,
"bytes": 0,
},
"rx_hello": {
"packets": 70504,
"bytes": 3384192,
},
"rx_db_des": {
"packets": 3,
"bytes": 1676,
},
"rx_ls_req": {
"packets": 1,
"bytes": 36,
},
"rx_ls_upd": {
"packets": 14809,
"bytes": 1866264,
},
"rx_ls_ack": {
"packets": 877,
"bytes": 76028,
},
"rx_total": {
"packets": 86194,
"bytes": 5328196,
},
"tx_failed": {"packets": 0, "bytes": 0},
"tx_hello": {
"packets": 1,
"bytes": 100,
},
"tx_db_des": {
"packets": 4,
"bytes": 416,
},
"tx_ls_req": {
"packets": 1,
"bytes": 968,
},
"tx_ls_upd": {
"packets": 1,
"bytes": 108,
},
"tx_ls_ack": {
"packets": 117,
"bytes": 8668,
},
"tx_total": {
"packets": 124,
"bytes": 10260,
},
}
},
"ospf_header_errors": {
"length": 0,
"instance_id": 0,
"checksum": 0,
"auth_type": 0,
"version": 0,
"bad_source": 0,
"no_virtual_link": 0,
"area_mismatch": 0,
"no_sham_link": 0,
"self_originated": 0,
"duplicate_id": 0,
"hello": 0,
"mtu_mismatch": 0,
"nbr_ignored": 0,
"lls": 0,
"unknown_neighbor": 0,
"authentication": 0,
"ttl_check_fail": 0,
"adjacency_throttle": 0,
"bfd": 0,
"test_discard": 0,
},
"ospf_lsa_errors": {
"type": 0,
"length": 0,
"data": 0,
"checksum": 0,
},
},
}
},
"summary_traffic_statistics": {
"ospf_packets_received_sent": {
"type": {
"rx_invalid": {"packets": 0, "bytes": 0},
"rx_hello": {
"packets": 159187,
"bytes": 7640968,
},
"rx_db_des": {
"packets": 10240,
"bytes": 337720,
},
"rx_ls_req": {"packets": 5, "bytes": 216},
"rx_ls_upd": {
"packets": 31899,
"bytes": 4010656,
},
"rx_ls_ack": {"packets": 2511, "bytes": 201204},
"rx_total": {
"packets": 203842,
"bytes": 12190764,
},
"tx_failed": {"packets": 0, "bytes": 0},
"tx_hello": {
"packets": 208493,
"bytes": 20592264,
},
"tx_db_des": {
"packets": 10540,
"bytes": 15808320,
},
"tx_ls_req": {"packets": 5, "bytes": 3112},
"tx_ls_upd": {
"packets": 33998,
"bytes": 5309252,
},
"tx_ls_ack": {
"packets": 17571,
"bytes": 1220144,
},
"tx_total": {
"packets": 270607,
"bytes": 42933092,
},
}
},
"ospf_header_errors": {
"length": 0,
"instance_id": 0,
"checksum": 0,
"auth_type": 0,
"version": 0,
"bad_source": 0,
"no_virtual_link": 0,
"area_mismatch": 0,
"no_sham_link": 0,
"self_originated": 0,
"duplicate_id": 0,
"hello": 0,
"mtu_mismatch": 0,
"nbr_ignored": 2682,
"lls": 0,
"unknown_neighbor": 0,
"authentication": 0,
"ttl_check_fail": 0,
"adjacency_throttle": 0,
"bfd": 0,
"test_discard": 0,
},
"ospf_lsa_errors": {
"type": 0,
"length": 0,
"data": 0,
"checksum": 0,
},
},
},
}
}
}
}
},
"ospf_statistics": {
"last_clear_traffic_counters": "never",
"rcvd": {
"total": 204136,
"checksum_errors": 0,
"hello": 159184,
"database_desc": 10240,
"link_state_req": 5,
"link_state_updates": 31899,
"link_state_acks": 2511,
},
"sent": {
"total": 281838,
"hello": 219736,
"database_desc": 10540,
"link_state_req": 5,
"link_state_updates": 33998,
"link_state_acks": 17571,
},
},
}
| expected_output = {'vrf': {'default': {'address_family': {'ipv4': {'instance': {'10000': {'summary_traffic_statistics': {'ospf_packets_received_sent': {'type': {'rx_invalid': {'packets': 0, 'bytes': 0}, 'rx_hello': {'packets': 0, 'bytes': 0}, 'rx_db_des': {'packets': 0, 'bytes': 0}, 'rx_ls_req': {'packets': 0, 'bytes': 0}, 'rx_ls_upd': {'packets': 0, 'bytes': 0}, 'rx_ls_ack': {'packets': 0, 'bytes': 0}, 'rx_total': {'packets': 0, 'bytes': 0}, 'tx_failed': {'packets': 0, 'bytes': 0}, 'tx_hello': {'packets': 0, 'bytes': 0}, 'tx_db_des': {'packets': 0, 'bytes': 0}, 'tx_ls_req': {'packets': 0, 'bytes': 0}, 'tx_ls_upd': {'packets': 0, 'bytes': 0}, 'tx_ls_ack': {'packets': 0, 'bytes': 0}, 'tx_total': {'packets': 0, 'bytes': 0}}}, 'ospf_header_errors': {'length': 0, 'instance_id': 0, 'checksum': 0, 'auth_type': 0, 'version': 0, 'bad_source': 0, 'no_virtual_link': 0, 'area_mismatch': 0, 'no_sham_link': 0, 'self_originated': 0, 'duplicate_id': 0, 'hello': 0, 'mtu_mismatch': 0, 'nbr_ignored': 0, 'lls': 0, 'unknown_neighbor': 0, 'authentication': 0, 'ttl_check_fail': 0, 'adjacency_throttle': 0, 'bfd': 0, 'test_discard': 0}, 'ospf_lsa_errors': {'type': 0, 'length': 0, 'data': 0, 'checksum': 0}}}, '888': {'router_id': '10.19.13.14', 'ospf_queue_statistics': {'limit': {'inputq': 0, 'updateq': 200, 'outputq': 0}, 'drops': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'max_delay_msec': {'inputq': 3, 'updateq': 2, 'outputq': 1}, 'max_size': {'total': {'inputq': 4, 'updateq': 3, 'outputq': 2}, 'invalid': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'hello': {'inputq': 4, 'updateq': 0, 'outputq': 1}, 'db_des': {'inputq': 0, 'updateq': 0, 'outputq': 1}, 'ls_req': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'ls_upd': {'inputq': 0, 'updateq': 3, 'outputq': 0}, 'ls_ack': {'inputq': 0, 'updateq': 0, 'outputq': 0}}, 'current_size': {'total': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'invalid': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'hello': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 
'db_des': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'ls_req': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'ls_upd': {'inputq': 0, 'updateq': 0, 'outputq': 0}, 'ls_ack': {'inputq': 0, 'updateq': 0, 'outputq': 0}}}, 'interface_statistics': {'interfaces': {'Tunnel65541': {'last_clear_traffic_counters': 'never', 'ospf_packets_received_sent': {'type': {'rx_invalid': {'packets': 0, 'bytes': 0}, 'rx_hello': {'packets': 0, 'bytes': 0}, 'rx_db_des': {'packets': 0, 'bytes': 0}, 'rx_ls_req': {'packets': 0, 'bytes': 0}, 'rx_ls_upd': {'packets': 0, 'bytes': 0}, 'rx_ls_ack': {'packets': 0, 'bytes': 0}, 'rx_total': {'packets': 0, 'bytes': 0}, 'tx_failed': {'packets': 0, 'bytes': 0}, 'tx_hello': {'packets': 62301, 'bytes': 5980896}, 'tx_db_des': {'packets': 0, 'bytes': 0}, 'tx_ls_req': {'packets': 0, 'bytes': 0}, 'tx_ls_upd': {'packets': 0, 'bytes': 0}, 'tx_ls_ack': {'packets': 0, 'bytes': 0}, 'tx_total': {'packets': 62301, 'bytes': 5980896}}}, 'ospf_header_errors': {'length': 0, 'instance_id': 0, 'checksum': 0, 'auth_type': 0, 'version': 0, 'bad_source': 0, 'no_virtual_link': 0, 'area_mismatch': 0, 'no_sham_link': 0, 'self_originated': 0, 'duplicate_id': 0, 'hello': 0, 'mtu_mismatch': 0, 'nbr_ignored': 0, 'lls': 0, 'unknown_neighbor': 0, 'authentication': 0, 'ttl_check_fail': 0, 'adjacency_throttle': 0, 'bfd': 0, 'test_discard': 0}, 'ospf_lsa_errors': {'type': 0, 'length': 0, 'data': 0, 'checksum': 0}}, 'GigabitEthernet0/1/7': {'last_clear_traffic_counters': 'never', 'ospf_packets_received_sent': {'type': {'rx_invalid': {'packets': 0, 'bytes': 0}, 'rx_hello': {'packets': 70493, 'bytes': 3383664}, 'rx_db_des': {'packets': 3, 'bytes': 1676}, 'rx_ls_req': {'packets': 1, 'bytes': 36}, 'rx_ls_upd': {'packets': 14963, 'bytes': 1870388}, 'rx_ls_ack': {'packets': 880, 'bytes': 76140}, 'rx_total': {'packets': 86340, 'bytes': 5331904}, 'tx_failed': {'packets': 0, 'bytes': 0}, 'tx_hello': {'packets': 1, 'bytes': 100}, 'tx_db_des': {'packets': 4, 'bytes': 416}, 'tx_ls_req': {'packets': 1, 
'bytes': 968}, 'tx_ls_upd': {'packets': 1, 'bytes': 108}, 'tx_ls_ack': {'packets': 134, 'bytes': 9456}, 'tx_total': {'packets': 141, 'bytes': 11048}}}, 'ospf_header_errors': {'length': 0, 'instance_id': 0, 'checksum': 0, 'auth_type': 0, 'version': 0, 'bad_source': 0, 'no_virtual_link': 0, 'area_mismatch': 0, 'no_sham_link': 0, 'self_originated': 0, 'duplicate_id': 0, 'hello': 0, 'mtu_mismatch': 0, 'nbr_ignored': 0, 'lls': 0, 'unknown_neighbor': 0, 'authentication': 0, 'ttl_check_fail': 0, 'adjacency_throttle': 0, 'bfd': 0, 'test_discard': 0}, 'ospf_lsa_errors': {'type': 0, 'length': 0, 'data': 0, 'checksum': 0}}, 'GigabitEthernet0/1/6': {'last_clear_traffic_counters': 'never', 'ospf_packets_received_sent': {'type': {'rx_invalid': {'packets': 0, 'bytes': 0}, 'rx_hello': {'packets': 70504, 'bytes': 3384192}, 'rx_db_des': {'packets': 3, 'bytes': 1676}, 'rx_ls_req': {'packets': 1, 'bytes': 36}, 'rx_ls_upd': {'packets': 14809, 'bytes': 1866264}, 'rx_ls_ack': {'packets': 877, 'bytes': 76028}, 'rx_total': {'packets': 86194, 'bytes': 5328196}, 'tx_failed': {'packets': 0, 'bytes': 0}, 'tx_hello': {'packets': 1, 'bytes': 100}, 'tx_db_des': {'packets': 4, 'bytes': 416}, 'tx_ls_req': {'packets': 1, 'bytes': 968}, 'tx_ls_upd': {'packets': 1, 'bytes': 108}, 'tx_ls_ack': {'packets': 117, 'bytes': 8668}, 'tx_total': {'packets': 124, 'bytes': 10260}}}, 'ospf_header_errors': {'length': 0, 'instance_id': 0, 'checksum': 0, 'auth_type': 0, 'version': 0, 'bad_source': 0, 'no_virtual_link': 0, 'area_mismatch': 0, 'no_sham_link': 0, 'self_originated': 0, 'duplicate_id': 0, 'hello': 0, 'mtu_mismatch': 0, 'nbr_ignored': 0, 'lls': 0, 'unknown_neighbor': 0, 'authentication': 0, 'ttl_check_fail': 0, 'adjacency_throttle': 0, 'bfd': 0, 'test_discard': 0}, 'ospf_lsa_errors': {'type': 0, 'length': 0, 'data': 0, 'checksum': 0}}}}, 'summary_traffic_statistics': {'ospf_packets_received_sent': {'type': {'rx_invalid': {'packets': 0, 'bytes': 0}, 'rx_hello': {'packets': 159187, 'bytes': 7640968}, 
'rx_db_des': {'packets': 10240, 'bytes': 337720}, 'rx_ls_req': {'packets': 5, 'bytes': 216}, 'rx_ls_upd': {'packets': 31899, 'bytes': 4010656}, 'rx_ls_ack': {'packets': 2511, 'bytes': 201204}, 'rx_total': {'packets': 203842, 'bytes': 12190764}, 'tx_failed': {'packets': 0, 'bytes': 0}, 'tx_hello': {'packets': 208493, 'bytes': 20592264}, 'tx_db_des': {'packets': 10540, 'bytes': 15808320}, 'tx_ls_req': {'packets': 5, 'bytes': 3112}, 'tx_ls_upd': {'packets': 33998, 'bytes': 5309252}, 'tx_ls_ack': {'packets': 17571, 'bytes': 1220144}, 'tx_total': {'packets': 270607, 'bytes': 42933092}}}, 'ospf_header_errors': {'length': 0, 'instance_id': 0, 'checksum': 0, 'auth_type': 0, 'version': 0, 'bad_source': 0, 'no_virtual_link': 0, 'area_mismatch': 0, 'no_sham_link': 0, 'self_originated': 0, 'duplicate_id': 0, 'hello': 0, 'mtu_mismatch': 0, 'nbr_ignored': 2682, 'lls': 0, 'unknown_neighbor': 0, 'authentication': 0, 'ttl_check_fail': 0, 'adjacency_throttle': 0, 'bfd': 0, 'test_discard': 0}, 'ospf_lsa_errors': {'type': 0, 'length': 0, 'data': 0, 'checksum': 0}}}}}}}}, 'ospf_statistics': {'last_clear_traffic_counters': 'never', 'rcvd': {'total': 204136, 'checksum_errors': 0, 'hello': 159184, 'database_desc': 10240, 'link_state_req': 5, 'link_state_updates': 31899, 'link_state_acks': 2511}, 'sent': {'total': 281838, 'hello': 219736, 'database_desc': 10540, 'link_state_req': 5, 'link_state_updates': 33998, 'link_state_acks': 17571}}} |
# Write your solutions for 1.5 here!
class superheroes:
    """A superhero with a name, a superpower, and a numeric strength."""

    def __init__(self, name, superpower, strength):
        # Fixed: the constructor was misspelled `__int__`, so it was never
        # invoked and `superheroes("tamara", "fly", 10)` raised TypeError
        # (object() takes no arguments).
        self.name = name
        self.superpower = superpower
        self.strength = strength

    def print_me(self):
        """Print the hero's name immediately followed by its strength."""
        print(self.name + str(self.strength))


superhero = superheroes("tamara", "fly", 10)
superhero.print_me()
| class Superheroes:
def __int__(self, name, superpower, strength):
self.name = name
self.superpower = superpower
self.strength = strength
def print_me(self):
print(self.name + str(self.strength))
superhero = superheroes('tamara', 'fly', 10)
superhero.print_me() |
'''
There are N children standing in a line. Each child is assigned a rating value.
You are giving candies to these children subjected to the following requirements:
Each child must have at least one candy.
Children with a higher rating get more candies than their neighbors.
What is the minimum candies you must give?
Example 1:
Input: [1,0,2]
Output: 5
Explanation: You can allocate to the first, second and third child with 2, 1, 2 candies respectively.
Example 2:
Input: [1,2,2]
Output: 4
Explanation: You can allocate to the first, second and third child with 1, 2, 1 candies respectively.
The third child gets 1 candy because it satisfies the above two conditions.
'''
class Solution(object):
    def candy(self, ratings):
        """
        :type ratings: List[int]
        :rtype: int

        Minimum candies so every child gets >= 1 and a higher-rated child
        gets more than each neighbour. Two-pass greedy, O(n): a left-to-right
        pass enforces the rule against the left neighbour, a right-to-left
        pass against the right neighbour. Replaces the heap-based version,
        which used the Python 2 `xrange` and an un-imported `heapq`.
        """
        n = len(ratings)
        if n == 0:
            return 0
        res = [1] * n
        for i in range(1, n):
            if ratings[i] > ratings[i - 1]:
                res[i] = res[i - 1] + 1
        for i in range(n - 2, -1, -1):
            if ratings[i] > ratings[i + 1]:
                res[i] = max(res[i], res[i + 1] + 1)
        return sum(res)
| """
There are N children standing in a line. Each child is assigned a rating value.
You are giving candies to these children subjected to the following requirements:
Each child must have at least one candy.
Children with a higher rating get more candies than their neighbors.
What is the minimum candies you must give?
Example 1:
Input: [1,0,2]
Output: 5
Explanation: You can allocate to the first, second and third child with 2, 1, 2 candies respectively.
Example 2:
Input: [1,2,2]
Output: 4
Explanation: You can allocate to the first, second and third child with 1, 2, 1 candies respectively.
The third child gets 1 candy because it satisfies the above two conditions.
"""
class Solution(object):
def candy(self, ratings):
"""
:type ratings: List[int]
:rtype: int
"""
res = [1 for i in xrange(len(ratings))]
h = []
for i in xrange(len(ratings)):
heapq.heappush(h, (ratings[i], i))
while h:
(v, i) = heapq.heappop(h)
if 0 <= i - 1:
if ratings[i - 1] < ratings[i]:
res[i] = max(res[i], res[i - 1] + 1)
if i + 1 < len(ratings):
if ratings[i] > ratings[i + 1]:
res[i] = max(res[i], res[i + 1] + 1)
return sum(res) |
# For each of the N positions, count how many of the three strings disagree:
# a column with k distinct characters needs (k - 1) changes.
N = int(input())
A, B, C = input(), input(), input()
ans = sum(len({A[i], B[i], C[i]}) - 1 for i in range(N))
print(ans)
| n = int(input())
(a, b, c) = (input(), input(), input())
ans = 0
for i in range(N):
abc = (A[i], B[i], C[i])
ans += len(set(abc)) - 1
print(ans) |
# Print the first nine powers of two (2**0 through 2**8).
# Fixed: the original repeated the assignment/print pair nine times by hand;
# a loop produces byte-identical output and leaves `contador` at 8, exactly
# like the original sequence.
for contador in range(9):
    print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador))
| contador = 0
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 1
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 2
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 3
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 4
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 5
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 6
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 7
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador))
contador = 8
print('2 elevado a ' + str(contador) + ' es igual a: ' + str(2 ** contador)) |
class Number:
    """Mutable wrapper around a single numeric value (starts at 0)."""

    def __init__(self):
        self.num = 0

    def setNum(self, x):
        # Java-style setter kept because external callers may use it;
        # plain attribute assignment (`obj.num = x`) is the idiomatic form.
        self.num = x
# na= Number()
# na.setNum(3)
# print(hasattr(na, 'id'))
# Fixed: these were bare identifiers, which raise NameError at runtime;
# the intent is clearly two alphabet-like strings, so they are quoted.
# NOTE(review): confirm the intended values — the originals were unquoted.
a = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b = "BLUESKYACDFGHIJMNOPQRTVWXZ"
class Point:
    """A 2-D point with x/y coordinates (default origin)."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __str__(self):
        # Fixed: __str__ must return a str; the original returned a tuple,
        # which makes str(p) / print(p) raise TypeError.
        return f"({self.x}, {self.y})"

    def __add__(self, p2):
        # Preserved as-is: the call below relies on this constant result.
        return 4


p1 = Point(1, 2)
p2 = Point(2, 3)
print(p1 + p2)
| class Number:
def __init__(self):
self.num = 0
def set_num(self, x):
self.num = x
a = ABCDEFGHIJKLMNOPQRSTUVWXYZ
b = BLUESKYACDFGHIJMNOPQRTVWXZ
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return (self.x, self.y)
def __add__(self, p2):
return 4
p1 = point(1, 2)
p2 = point(2, 3)
print(p1 + p2) |
n = int(input())
ans = 0
for i in range(n):
a, b = map(int, input().split())
ans += (a + b) * (b - a + 1) // 2
print(ans) | n = int(input())
ans = 0
for i in range(n):
(a, b) = map(int, input().split())
ans += (a + b) * (b - a + 1) // 2
print(ans) |
def is_leap(year):
    """Return True when `year` is a leap year under the Gregorian rules.

    Divisible by 400 -> leap; else divisible by 100 -> common;
    else divisible by 4 -> leap; otherwise common.
    """
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
return leap | def is_leap(year):
leap = False
if year % 400 == 0:
leap = True
elif year % 100 == 0:
leap = False
elif year % 4 == 0:
leap = True
return leap |
__version__ = '2.0.0'
# Startup banner: the version line framed by two rows of 35 asterisks.
print("*"*35)
print(f'SpotifyToVKStatus. Version: {__version__}')
print("*"*35)
| __version__ = '2.0.0'
print('*' * 35)
print(f'SpotifyToVKStatus. Version: {__version__}')
print('*' * 35) |
# Average the X-DSPAM-Confidence values found in a mail file
# (Py4E-style exercise). The final print of `average` follows below.
file_name = input('Enter file name: ')
if file_name == 'na na boo boo':
    # Easter-egg input: print the taunt and quit immediately.
    print("NA NA BOO BOO TO YOU - You have been punk'd!")
    exit()
else:
    try:
        file = open(file_name)
    except:
        # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
        # `except OSError:` would be the targeted form. Also, `file` shadows the
        # (Python 2) builtin name.
        print('File cannot be opened')
        exit()
count = 0
numbers = 0
average = 0
for line in file:
    if line.startswith('X-DSPAM-Confidence'):
        # The numeric value follows the first ':' on the line.
        colon_position = line.find(':')
        numbers = numbers + float(line[colon_position+1:])
        count = count + 1
if count != 0:
    # Guard against dividing by zero when no matching lines were found.
    average = numbers / count
print(average) | file_name = input('Enter file name: ')
if file_name == 'na na boo boo':
print("NA NA BOO BOO TO YOU - You have been punk'd!")
exit()
else:
try:
file = open(file_name)
except:
print('File cannot be opened')
exit()
count = 0
numbers = 0
average = 0
for line in file:
if line.startswith('X-DSPAM-Confidence'):
colon_position = line.find(':')
numbers = numbers + float(line[colon_position + 1:])
count = count + 1
if count != 0:
average = numbers / count
print(average) |
""" Module docstring """
def _write_file_impl(ctx):
f = ctx.actions.declare_file("out.txt")
ctx.actions.write(f, "contents")
def _source_list_rule_impl(ctx):
if len(ctx.attr.srcs) != 2:
fail("Expected two sources")
first = ctx.attr.srcs[0].short_path.replace("\\", "/")
second = ctx.attr.srcs[1].short_path.replace("\\", "/")
expected_first = "src.txt"
expected_second = "file__/out.txt"
if first != expected_first:
fail("Expected short path {}, got {}".format(expected_first, first))
if second != expected_second:
fail("Expected short path {}, got {}".format(expected_second, second))
f = ctx.actions.declare_file("out2.txt")
ctx.actions.write(f, "contents2")
write_file = rule(
attrs = {},
implementation = _write_file_impl,
)
source_list_rule = rule(
attrs = {"srcs": attr.source_list()},
implementation = _source_list_rule_impl,
)
| """ Module docstring """
def _write_file_impl(ctx):
f = ctx.actions.declare_file('out.txt')
ctx.actions.write(f, 'contents')
def _source_list_rule_impl(ctx):
if len(ctx.attr.srcs) != 2:
fail('Expected two sources')
first = ctx.attr.srcs[0].short_path.replace('\\', '/')
second = ctx.attr.srcs[1].short_path.replace('\\', '/')
expected_first = 'src.txt'
expected_second = 'file__/out.txt'
if first != expected_first:
fail('Expected short path {}, got {}'.format(expected_first, first))
if second != expected_second:
fail('Expected short path {}, got {}'.format(expected_second, second))
f = ctx.actions.declare_file('out2.txt')
ctx.actions.write(f, 'contents2')
write_file = rule(attrs={}, implementation=_write_file_impl)
source_list_rule = rule(attrs={'srcs': attr.source_list()}, implementation=_source_list_rule_impl) |
class Solution(object):
    def countNumbersWithUniqueDigits(self, n):
        """
        :type n: int
        :rtype: int

        Count numbers in [0, 10**n) whose decimal digits are all distinct.
        Accumulates 9 * 9 * 8 * ... choices per added digit; capped at 10
        digits since longer numbers must repeat a digit.
        """
        total = 1
        choices = 9
        for used in range(min(n, 10)):
            total += choices
            choices *= 9 - used
        return total
| class Solution(object):
def count_numbers_with_unique_digits(self, n):
"""
:type n: int
:rtype: int
"""
cnt = 1
prod = 9
for i in range(min(n, 10)):
cnt += prod
prod *= 9 - i
return cnt |
#
# @lc app=leetcode id=46 lang=python3
#
# [46] Permutations
#
# @lc code=start
class Solution:
    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return every permutation of `nums`.

        Fixed: `itertools.permutations` yields tuples, but the declared
        return type is List[List[int]], so each permutation is converted to
        a list. Also removed the dead `dfs` backtracking helper, which was
        defined but never called (its invocation was commented out).
        """
        return [list(p) for p in itertools.permutations(nums)]
# @lc code=end
| class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
results = []
prev_elements = []
def dfs(elements):
if len(elements) == 0:
results.append(prev_elements[:])
for e in elements:
next_elements = elements[:]
next_elements.remove(e)
prev_elements.append(e)
dfs(next_elements)
prev_elements.pop()
return list(itertools.permutations(nums)) |
def count_up(start, stop):
    """Print all numbers from start up to and including stop.
    For example:
    count_up(5, 7)
    should print:
    5
    6
    7
    """
    # range is half-open, so add 1 to include `stop` itself.
    for value in range(start, stop + 1):
        print(value)


count_up(5, 7)
count_up(3, 8)
| def count_up(start, stop):
"""Print all numbers from start up to and including stop.
For example:
count_up(5, 7)
should print:
5
6
7
"""
while start <= stop:
print(start)
start += 1
count_up(5, 7)
count_up(3, 8) |
def user(*args):
    # NOTE(review): this function is broken as written:
    #  * `blank += 1` raises TypeError (can't add an int to a list) as soon
    #    as args is non-empty — presumably `blank.append(num)` or an integer
    #    counter was intended.
    #  * `return arg` raises NameError (`arg` is never defined) — probably
    #    `args` or `blank` was meant.
    # The intent is unclear, so the code is annotated rather than changed;
    # confirm with the author before fixing.
    blank=[]
    for num in args:
        blank+=1
    return arg
user() | def user(*args):
blank = []
for num in args:
blank += 1
return arg
user() |
class basedriver(object):
    """Common base class for update drivers.

    Subclasses must override `update` and `unpack`; `cleanup` is an
    optional post-update hook.
    """

    def __init__(self, ctx, model):
        self._ctx = ctx
        self._model = model

    def check_update(self, current):
        """Return True when `current` is missing, has no version, or is at
        a different version than this driver's model."""
        if current is None or current.version is None:
            return True
        return current.version != self._model.version

    def update(self, fmgr):
        """Perform the update; must be overridden by subclasses."""
        raise NotImplementedError()

    def unpack(self, fmgr, locations):
        """Unpack fetched artifacts; must be overridden by subclasses."""
        raise NotImplementedError()

    def cleanup(self):
        """Optional cleanup hook (no-op by default)."""
        pass
| class Basedriver(object):
def __init__(self, ctx, model):
self._ctx = ctx
self._model = model
def check_update(self, current):
if current is None:
return True
if current.version is None:
return True
if current.version != self._model.version:
return True
return False
def update(self, fmgr):
raise not_implemented_error()
def unpack(self, fmgr, locations):
raise not_implemented_error()
def cleanup(self):
pass |
def get(key):
    """No-op cache backend: every lookup misses (always returns None)."""
    return None


def set(key, value):
    """No-op cache backend: silently discards the value.

    NOTE(review): the name shadows the builtin `set`; kept because callers
    importing this module rely on the name.
    """
    pass
| def get(key):
return None
def set(key, value):
pass |
"""
[2017-09-29] Challenge #333 [Hard] Build a Web API-driven Data Site
https://www.reddit.com/r/dailyprogrammer/comments/739j8c/20170929_challenge_333_hard_build_a_web_apidriven/
# Description
A common theme in present-day programming are web APIs. We've had a previous challenge where you had to _consume_ an
API, today's challenge is to _implement_ one. Today's is relatively simple: a single CSV file as input that can
probably be represented by a single database table.
Your solution may use whatever technologies you wish to build on:
* Web server software, e.g. Flask, Rails, Play!, etc
* Database software, e.g. MySQL, MongoDB, etc - or none, using a database is optional
* Database interaction layer, e.g. SQLAlchemy, ActiveRecord, Ecto, etc
This challenge focuses less on the guts of the server and more on routing requests, transforming a request into a data
extraction method, and returning those results.
Today's challenge will utilize the State of Iowa - Monthly Voter Registration Totals by County data set:
https://data.iowa.gov/Communities-People/State-of-Iowa-Monthly-Voter-Registration-Totals-by/cp55-uurs
Download the JSON, CSV or other and use that as your input. It contains 19 columns and over 20,000 rows. Now expose the
data via a web API.
Your solution **must** implement the following API behaviors:
* A "get_voters_where" endpoint that takes the following optional arguments: county, month, party affiliation,
active_status, and limit (the max number of results to return). The endpoint must return a JSON-formatted output, but
the schema is up to you.
* All APIs must be RESTful (see [The REST API in five minutes](https://developer.marklogic.com/try/rest/index) for some
background if you need it).
This challenge extends Wednesday's idea of practicality and real world scenarios. Wednesday was some basic data
science, today is some basic application development. It's open ended.
# Bonus
Ensure your API is immune to attack vectors like SQL injection.
"""
def main():
    """Entry point placeholder — the API server is not implemented yet."""
    pass


if __name__ == "__main__":
    main()
| """
[2017-09-29] Challenge #333 [Hard] Build a Web API-driven Data Site
https://www.reddit.com/r/dailyprogrammer/comments/739j8c/20170929_challenge_333_hard_build_a_web_apidriven/
# Description
A common theme in present-day programming are web APIs. We've had a previous challenge where you had to _consume_ an
API, today's challenge is to _implement_ one. Today's is relatively simple: a single CSV file as input that can
probably be represented by a single database table.
Your solution may use whatever technologies you wish to build on:
* Web server software, e.g. Flask, Rails, Play!, etc
* Database software, e.g. MySQL, MongoDB, etc - or none, using a database is optional
* Database interaction layer, e.g. SQLAlchemy, ActiveRecord, Ecto, etc
This challenge focuses less on the guts of the server and more on routing requests, transforming a request into a data
extraction method, and returning those results.
Today's challenge will utilize the State of Iowa - Monthly Voter Registration Totals by County data set:
https://data.iowa.gov/Communities-People/State-of-Iowa-Monthly-Voter-Registration-Totals-by/cp55-uurs
Download the JSON, CSV or other and use that as your input. It contains 19 columns and over 20,000 rows. Now expose the
data via a web API.
Your solution **must** implement the following API behaviors:
* A "get_voters_where" endpoint that takes the following optional arguments: county, month, party affiliation,
active_status, and limit (the max number of results to return). The endpoint must return a JSON-formatted output, but
the schema is up to you.
* All APIs must be RESTful (see [The REST API in five minutes](https://developer.marklogic.com/try/rest/index) for some
background if you need it).
This challenge extends Wednesday's idea of practicality and real world scenarios. Wednesday was some basic data
science, today is some basic application development. It's open ended.
# Bonus
Ensure your API is immune to attack vectors like SQL injection.
"""
def main():
pass
if __name__ == '__main__':
main() |
# Problem Statement: https://leetcode.com/problems/longest-increasing-subsequence/
class Solution:
    def lengthOfLIS(self, nums: List[int]) -> int:
        """Length of the longest strictly increasing subsequence.

        Classic O(n^2) DP: best[i] is the LIS length of subsequences ending
        at index i. Removed the `seqs` predecessor array and the `curr_num`
        alias — both were computed but never used.
        """
        if not nums:
            return 0
        best = [1] * len(nums)
        for i, current in enumerate(nums):
            for j in range(i):
                # Extend any shorter increasing run that ends below current.
                if nums[j] < current and best[j] + 1 > best[i]:
                    best[i] = best[j] + 1
        return max(best)
| class Solution:
def length_of_lis(self, nums: List[int]) -> int:
arr = nums
if not arr:
return 0
lens = [1 for num in arr]
seqs = [None for num in arr]
for (i, num) in enumerate(arr):
curr_num = num
for j in range(0, i):
other_num = arr[j]
if other_num < curr_num and lens[j] + 1 >= lens[i]:
lens[i] = lens[j] + 1
seqs[i] = j
return max(lens) |
print('load # extractor diagram V1 essential')
# Essential version for the final summary automation in the main notebook.
# It contains only the winning prefilter and feature extraction from the
# development process.
class extdia_v1_essential(extractor_diagram):
    """Extractor diagram 'EDiaV1': MEL + PSD features with an optional
    high-pass prefilter, optional augmentation and optional time slicing.

    NOTE(review): relies on project globals defined elsewhere
    (extractor_diagram, feature_extractor_*, simple_FIR_HP,
    memory_wave_file, create_augmenter, TimeSliceAppendActivation,
    BASE_FOLDER, np, copy) — not verifiable from this file alone.
    """

    def ini_diagram(self):  # custom
        # Build the diagram name from the enabled options.
        self.name = 'EDiaV1'
        # name extension: high-pass prefilter enabled
        if self.fHP:
            self.name += 'HP'
        # name extension: augmentation (class index being augmented)
        if self.augment>-1:
            self.name += 'aug' + str(self.augment)
        # name extension: DeviceType (time slicing or not)
        if self.DeviceType==1:
            self.name += 'TsSl'
        # pre-processing extractor objects
        self.pre['denoise'] = feature_extractor_pre_nnFilterDenoise(self.base_folder,'den')
        self.pre['denoise'].set_hyperparamter(aggregation=np.mean, channel=0)
        if self.fHP:
            self.pre['HP'] = simple_FIR_HP(self.fHP, 16000)
        else:
            # default corner frequency when no explicit HP was requested
            self.pre['HP'] = simple_FIR_HP(120, 16000)
        # feature extractor objects
        self.ext['MEL'] = feature_extractor_mel(self.base_folder,'MELv1')
        self.ext['MEL'].set_hyperparamter(n_fft=1024, n_mels=80, hop_length=512, channel=0)
        # NOTE(review): uses the global BASE_FOLDER here, unlike the other
        # extractors which use self.base_folder — confirm this is intended.
        self.ext['PSD'] = feature_extractor_welchPSD(BASE_FOLDER,'PSDv1')
        self.ext['PSD'].set_hyperparamter(nperseg=512, nfft=1024, channel=0)
        # output-port accumulator initialisation
        self.outport_akkulist['MEL_raw'] = []
        self.outport_akkulist['PSD_raw'] = []
        self.outport_akkulist['MEL_den'] = []
        pass

    def execute_diagram(self,file_path,file_class, probe=False):  # custom
        # Load the wave file and keep only the configured main channel.
        wmfs = [copy.deepcopy(memory_wave_file().read_wavfile(self.base_folder,file_path))]
        wmfs[0].channel = np.array([wmfs[0].channel[self.main_channel]])
        wmfs_class = [file_class]
        # React to the augmenting flag: append an augmented copy tagged with
        # the pseudo-class -1.
        if file_class==self.augment:
            wmfs.append(create_augmenter(wmfs[0]))
            wmfs_class.append(-1)
        for wmf_i,wmf in enumerate(wmfs):
            # record the target class for this (possibly augmented) signal
            self.target_akkulist.append(wmfs_class[wmf_i])
            # optional high-pass prefilter on the main channel
            if self.fHP:
                wmfs[wmf_i].channel[0] = self.pre['HP'].apply(wmf.channel[0])
            # optional time slicing for DeviceType 1
            if self.DeviceType == 1:
                wmfs[wmf_i].channel = TimeSliceAppendActivation(wmfs[wmf_i].channel,wmfs[wmf_i].srate)
            # denoised copy of the (possibly filtered/sliced) signal
            self.pre['denoise'].create_from_wav(wmfs[wmf_i])
            wmf_den2 = copy.deepcopy(self.pre['denoise'].get_wav_memory_file())
            # -> OUTPORTs: PSD and MEL of the raw signal, MEL of the denoised
            self.ext['PSD'].create_from_wav(wmfs[wmf_i])
            self.outport_akkulist['PSD_raw'].append(copy.deepcopy(self.ext['PSD'].get_dict()))
            self.ext['MEL'].create_from_wav(wmfs[wmf_i])
            self.outport_akkulist['MEL_raw'].append(copy.deepcopy(self.ext['MEL'].get_dict()))
            self.ext['MEL'].create_from_wav(wmf_den2)
            self.outport_akkulist['MEL_den'].append(copy.deepcopy(self.ext['MEL'].get_dict()))
        pass
class Extdia_V1_Essential(extractor_diagram):
def ini_diagram(self):
self.name = 'EDiaV1'
if self.fHP:
self.name += 'HP'
if self.augment > -1:
self.name += 'aug' + str(self.augment)
if self.DeviceType == 1:
self.name += 'TsSl'
self.pre['denoise'] = feature_extractor_pre_nn_filter_denoise(self.base_folder, 'den')
self.pre['denoise'].set_hyperparamter(aggregation=np.mean, channel=0)
if self.fHP:
self.pre['HP'] = simple_fir_hp(self.fHP, 16000)
else:
self.pre['HP'] = simple_fir_hp(120, 16000)
self.ext['MEL'] = feature_extractor_mel(self.base_folder, 'MELv1')
self.ext['MEL'].set_hyperparamter(n_fft=1024, n_mels=80, hop_length=512, channel=0)
self.ext['PSD'] = feature_extractor_welch_psd(BASE_FOLDER, 'PSDv1')
self.ext['PSD'].set_hyperparamter(nperseg=512, nfft=1024, channel=0)
self.outport_akkulist['MEL_raw'] = []
self.outport_akkulist['PSD_raw'] = []
self.outport_akkulist['MEL_den'] = []
pass
def execute_diagram(self, file_path, file_class, probe=False):
wmfs = [copy.deepcopy(memory_wave_file().read_wavfile(self.base_folder, file_path))]
wmfs[0].channel = np.array([wmfs[0].channel[self.main_channel]])
wmfs_class = [file_class]
if file_class == self.augment:
wmfs.append(create_augmenter(wmfs[0]))
wmfs_class.append(-1)
for (wmf_i, wmf) in enumerate(wmfs):
self.target_akkulist.append(wmfs_class[wmf_i])
if self.fHP:
wmfs[wmf_i].channel[0] = self.pre['HP'].apply(wmf.channel[0])
if self.DeviceType == 1:
wmfs[wmf_i].channel = time_slice_append_activation(wmfs[wmf_i].channel, wmfs[wmf_i].srate)
self.pre['denoise'].create_from_wav(wmfs[wmf_i])
wmf_den2 = copy.deepcopy(self.pre['denoise'].get_wav_memory_file())
self.ext['PSD'].create_from_wav(wmfs[wmf_i])
self.outport_akkulist['PSD_raw'].append(copy.deepcopy(self.ext['PSD'].get_dict()))
self.ext['MEL'].create_from_wav(wmfs[wmf_i])
self.outport_akkulist['MEL_raw'].append(copy.deepcopy(self.ext['MEL'].get_dict()))
self.ext['MEL'].create_from_wav(wmf_den2)
self.outport_akkulist['MEL_den'].append(copy.deepcopy(self.ext['MEL'].get_dict()))
pass |
class Solution(object):
    def kidsWithCandies(self, candies, extraCandies):
        """
        :type candies: List[int]
        :type extraCandies: int
        :rtype: List[bool]

        A kid can have the greatest number of candies iff their own count
        plus the extras reaches the current maximum. Removed the dead
        commented-out loop and the redundant `True if ... else False`
        around an already-boolean comparison.
        """
        max_candies = max(candies)
        return [candy + extraCandies >= max_candies for candy in candies]
def kids_with_candies(self, candies, extraCandies):
"""
:type candies: List[int]
:type extraCandies: int
:rtype: List[bool]
"""
max_candies = max(candies)
return [True if i + extraCandies >= max_candies else False for i in candies] |
n = int(input())
sticks = list(map(int, input().split()))
uniq = sorted(set(sticks))
for i in uniq:
print(len([x for x in sticks if x >= i])) | n = int(input())
sticks = list(map(int, input().split()))
uniq = sorted(set(sticks))
for i in uniq:
print(len([x for x in sticks if x >= i])) |
# Demonstrates comparison, boolean, and nested conditionals.
x = 1
y = 10
if(x == 1):
    print("x equals 1")
if(y != 1):
    print("y doesn't equal 1")
# Three-way comparison of x and y.
if(x < y):
    print("x is less than y")
elif(x > y):
    print("x is greater than y")
else:
    print("x equals y")
# `and` requires both conditions to hold.
if (x == 1 and y == 10):
    print("Both values true")
# Nested conditionals: the inner test runs only when the outer holds.
if(x < 10):
    if (y > 5):
print("x is less than 10, y is greater than 5") | x = 1
y = 10
if x == 1:
print('x equals 1')
if y != 1:
print("y doesn't equal 1")
if x < y:
print('x is less than y')
elif x > y:
print('x is greater than y')
else:
print('x equals y')
if x == 1 and y == 10:
print('Both values true')
if x < 10:
if y > 5:
print('x is less than 10, y is greater than 5') |
class BackgroundClip(Property):
    """CSS `background-clip` property value constants."""

    BorderBox = "border-box"
    PaddingBox = "padding-box"
    ContentBox = "content-box"
| class Backgroundclip(Property):
border_box = 'border-box'
padding_box = 'padding-box'
content_box = 'content-box' |
'''
config file
'''
# ---- feature layout -------------------------------------------------------
n_one_hot_slot = 6  # 0 - user_id, 1 - movie_id, 2 - gender, 3 - age, 4 - occ, 5 - release year
n_mul_hot_slot = 2  # 6 - title (mul-hot), 7 - genres (mul-hot)
max_len_per_slot = 5  # max num of features in one mul-hot slot
num_csv_col_warm = 17
num_csv_col_w_ngb = 17 + 160  # num of cols in the csv file (with neighbours)
layer_dim = [256, 128, 1]
# ---- neighbour (ngb) layout -----------------------------------------------
n_one_hot_slot_ngb = 6
n_mul_hot_slot_ngb = 2
max_len_per_slot_ngb = 5
max_n_ngb_ori = 10  # num of ngbs in data file
max_n_ngb = 10  # num of ngbs to use in model, <= max_n_ngb_ori
# ---- data files -----------------------------------------------------------
pre = './data/'
suf = '.tfrecord'
# a, b - used for meta learning
train_file_name_a = [pre+'train_oneshot_a_w_ngb'+suf, pre+'train_oneshot_b_w_ngb'+suf]  # , pre+'train_oneshot_c_w_ngb'+suf]
train_file_name_b = [pre+'train_oneshot_b_w_ngb'+suf, pre+'train_oneshot_c_w_ngb'+suf]  # , pre+'train_oneshot_a_w_ngb'+suf]
# warm, warm_2 - used for warm-up training
train_file_name_warm = [pre+'test_oneshot_a'+suf]
train_file_name_warm_2 = [pre+'test_oneshot_b'+suf]
# you can use 'test_oneshot_a_w_ngb' for validation
test_file_name = [pre+'test_test_w_ngb'+suf]
# The following are indices for features (excluding the label):
# 0 - user_id, 1 - movie_id, 2 - gender, 3 - age, 4 - occ, 5 - release year, 6 - title (mul-hot), 7 - genres (mul-hot)
# tar_idx - whose embedding is to be generated
# attr_idx - which are intrinsic item attributes
tar_idx = [1]
# must be from small to large
attr_idx = [5,6,7]
n_ft = 11134
input_format = 'tfrecord'  # 'csv'
time_style = '%Y-%m-%d %H:%M:%S'
rnd_seed = 123  # random seed (different seeds lead to different results)
att_dim = 10*len(attr_idx)
batch_size = 128  # used for warm-up training
# meta_mode: self - use the new ad's own attributes
#            ngb  - use ngbs' pre-trained ID embeddings
meta_mode = 'GME-A'  # 'self', 'ngb', 'GME-P', 'GME-G', 'GME-A'
meta_batch_size_range = [60]
# learning rate for getting a new adapted embedding
cold_eta_range = [1e-4]  # [0.05, 0.1]
# learning rate for meta learning
meta_eta_range = [5e-3]  # [1e-4, 5e-4, 1e-3, 5e-3, 1e-2]
# learning rate for warm-up training
eta_range = [1e-3]
n_epoch = 1  # number of times to loop over the warm-up training data set
n_epoch_meta = 1  # number of times to loop over the meta training data set
alpha = 0.1
gamma = 1.0
test_batch_size = 128
# whether to perform warm-up training
# only valid for 'gme_all_in_one_warm_up.py'
warm_up_bool = False  # True
# ---- model saving / loading -----------------------------------------------
save_model_ind = 0
# load emb and FC layer weights from a pre-trained DNN model
model_loading_addr = './tmp/dnn_1011_1705/'
output_file_name = '0801_0900'
k = 10  # embedding size / number of latent factors
opt_alg = 'Adam'  # 'Adagrad'
kp_prob = 1.0
record_step_size = 200  # record the loss and auc after xx steps
| """
config file
"""
n_one_hot_slot = 6
n_mul_hot_slot = 2
max_len_per_slot = 5
num_csv_col_warm = 17
num_csv_col_w_ngb = 17 + 160
layer_dim = [256, 128, 1]
n_one_hot_slot_ngb = 6
n_mul_hot_slot_ngb = 2
max_len_per_slot_ngb = 5
max_n_ngb_ori = 10
max_n_ngb = 10
pre = './data/'
suf = '.tfrecord'
train_file_name_a = [pre + 'train_oneshot_a_w_ngb' + suf, pre + 'train_oneshot_b_w_ngb' + suf]
train_file_name_b = [pre + 'train_oneshot_b_w_ngb' + suf, pre + 'train_oneshot_c_w_ngb' + suf]
train_file_name_warm = [pre + 'test_oneshot_a' + suf]
train_file_name_warm_2 = [pre + 'test_oneshot_b' + suf]
test_file_name = [pre + 'test_test_w_ngb' + suf]
tar_idx = [1]
attr_idx = [5, 6, 7]
n_ft = 11134
input_format = 'tfrecord'
time_style = '%Y-%m-%d %H:%M:%S'
rnd_seed = 123
att_dim = 10 * len(attr_idx)
batch_size = 128
meta_mode = 'GME-A'
meta_batch_size_range = [60]
cold_eta_range = [0.0001]
meta_eta_range = [0.005]
eta_range = [0.001]
n_epoch = 1
n_epoch_meta = 1
alpha = 0.1
gamma = 1.0
test_batch_size = 128
warm_up_bool = False
save_model_ind = 0
model_loading_addr = './tmp/dnn_1011_1705/'
output_file_name = '0801_0900'
k = 10
opt_alg = 'Adam'
kp_prob = 1.0
record_step_size = 200 |
# Read two counts, then that many lines into each set, and print the lines
# common to both (one per line).
first_num_elements, second_num_elements2 = map(int, input().split())
first_set = {input() for _ in range(first_num_elements)}
second_set = {input() for _ in range(second_num_elements2)}
print(*(first_set & second_set), sep='\n')
# 4 3
# 1
# 3
# 5
# 7
# 3
# 4
# 5 | (first_num_elements, second_num_elements2) = [int(num) for num in input().split()]
first_set = {input() for _ in range(first_num_elements)}
second_set = {input() for _ in range(second_num_elements2)}
print(*first_set.intersection(second_set), sep='\n') |
def decode_index(index: int) -> str:
    """Map a class index to its label: 0 -> "ham", 1 -> "spam"."""
    labels = {0: "ham", 1: "spam"}
    return labels[index]
def probability_to_index(prediction: list) -> int:
    """Return 0 when the first probability is strictly larger, else 1
    (ties resolve to 1, matching the strict comparison)."""
    if prediction[0] > prediction[1]:
        return 0
    return 1
| def decode_index(index: int) -> str:
return {0: 'ham', 1: 'spam'}[index]
def probability_to_index(prediction: list) -> int:
return 0 if prediction[0] > prediction[1] else 1 |
# Read integers until the user declines to continue, keeping the full list
# in `a` and splitting values into even (`par`) and odd (`impar`) lists.
a = []
impar = []
par = []
while True:
    n1 = int(input("Digite um valor: "))
    a.append(n1)
    # An integer is either even or odd, so a plain else suffices here.
    if n1 % 2 == 0:
        par.append(n1)
    else:
        impar.append(n1)
    s = str(input("Deseja continuar? [S/N]"))
    if s in 'Nn':
        break
print(f"Lista geral {a}")
print(f"Lista dos pares {par}")
print(f"Lista dos impares {impar}")
| a = []
impar = []
par = []
while True:
n1 = int(input('Digite um valor: '))
a.append(n1)
if n1 % 2 == 0:
par.append(n1)
elif n1 % 2 != 0:
impar.append(n1)
s = str(input('Deseja continuar? [S/N]'))
if s in 'Nn':
break
print(f'Lista geral {a}')
print(f'Lista dos pares {par}')
print(f'Lista dos impares {impar}') |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findTilt(self, root):
        """Return the sum of tilts over all nodes of a binary tree.

        A node's tilt is |sum(left subtree) - sum(right subtree)|.
        """
        total = [0]  # mutable cell so the nested walker can accumulate

        def walk(node):
            # Post-order: returns the value-sum of the subtree at `node`.
            if node is None:
                return 0
            left_sum = walk(node.left)
            right_sum = walk(node.right)
            total[0] += abs(left_sum - right_sum)
            return node.val + left_sum + right_sum

        walk(root)
        return total[0]
| class Solution(object):
def find_tilt(self, root):
def travel(node, tiles):
if node is None:
return 0
sum_left = travel(node.left, tiles)
sum_right = travel(node.right, tiles)
diff = abs(sum_left - sum_right)
tiles.append(diff)
sum_all = node.val + sum_left + sum_right
return sum_all
tiles = []
travel(root, tiles)
return sum(tiles) |
class Triangle:
    """A triangle given by its three side lengths."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def is_valid(self):
        """'Valid' when every side is strictly shorter than the sum of the
        other two (triangle inequality), else 'Invalid'."""
        sides = (self.a, self.b, self.c)
        perimeter = sum(sides)
        # perimeter - s is the sum of the other two sides.
        if all(perimeter - s > s for s in sides):
            return 'Valid'
        return 'Invalid'

    def Side_Classification(self):
        """Equilateral / Isosceles / Scalene by side equality; 'Invalid'
        for an invalid triangle."""
        if self.is_valid() != 'Valid':
            return 'Invalid'
        distinct = len({self.a, self.b, self.c})
        if distinct == 1:
            return 'Equilateral'
        if distinct == 2:
            return 'Isosceles'
        return 'Scalene'

    def Angle_Classification(self):
        """Acute / Right / Obtuse by comparing the two shorter sides' squared
        sum to the longest side's square; 'Invalid' for an invalid triangle."""
        if self.is_valid() != 'Valid':
            return 'Invalid'
        short1, short2, longest = sorted((self.a, self.b, self.c))
        lhs = short1 ** 2 + short2 ** 2
        rhs = longest ** 2
        if lhs > rhs:
            return 'Acute'
        if lhs == rhs:
            return 'Right'
        return 'Obtuse'

    def Area(self):
        """Heron's-formula area, or 'Invalid' for an invalid triangle."""
        if self.is_valid() != 'Valid':
            return 'Invalid'
        s = (self.a + self.b + self.c) / 2
        return (s * (s - self.a) * (s - self.b) * (s - self.c)) ** 0.5
# Read three side lengths from stdin and report validity, side class,
# angle class and area of the resulting triangle.
side_a = int(input())
side_b = int(input())
side_c = int(input())
tri = Triangle(side_a, side_b, side_c)
print(tri.is_valid())
print(tri.Side_Classification())
print(tri.Angle_Classification())
print(tri.Area())
| class Triangle:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def is_valid(self):
if self.a + self.b > self.c and self.a + self.c > self.b and (self.b + self.c > self.a):
return 'Valid'
else:
return 'Invalid'
def side__classification(self):
if self.is_valid() == 'Valid':
if self.a == self.b and self.b == self.c:
return 'Equilateral'
elif self.a == self.b or self.b == self.c or self.a == self.c:
return 'Isosceles'
else:
return 'Scalene'
else:
return 'Invalid'
def angle__classification(self):
if self.is_valid() == 'Valid':
l = sorted([self.a, self.b, self.c])
(a, b, c) = l
if a ** 2 + b ** 2 > c ** 2:
return 'Acute'
elif a ** 2 + b ** 2 == c ** 2:
return 'Right'
else:
return 'Obtuse'
else:
return 'Invalid'
def area(self):
if self.is_valid() == 'Valid':
(a, b, c) = [self.a, self.b, self.c]
s = (a + b + c) / 2
area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
return area
else:
return 'Invalid'
a = int(input())
b = int(input())
c = int(input())
t = triangle(a, b, c)
print(T.is_valid())
print(T.Side_Classification())
print(T.Angle_Classification())
print(T.Area()) |
def main(app_config=None, q1=0, q2=2):
    """Return {'dict_return': 1} when q1 exceeds 9, else {'key': 'value'}.

    app_config and q2 are accepted but unused.
    """
    if q1 > 9:
        return {"dict_return": 1}
    return {'key': 'value'}


if __name__ == "__main__":
    main()
| def main(app_config=None, q1=0, q2=2):
some_var = {'key': 'value'}
if q1 > 9:
return {'dict_return': 1}
return some_var
if __name__ == '__main__':
main() |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2018 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Structure(list):
    """List subclass tagged with a signature; iterating yields the pair
    (signature, tuple-of-fields) instead of the raw elements."""

    def __init__(self, capacity, signature):
        self.capacity = capacity
        self.signature = signature

    def _fields(self):
        # Raw list contents, bypassing the customised __iter__ below.
        return tuple(list.__iter__(self))

    def __repr__(self):
        return repr((self.signature, self._fields()))

    def __eq__(self, other):
        # Compare via the (signature, fields) iteration view.
        return list(self) == list(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __iter__(self):
        yield self.signature
        yield self._fields()
| class Structure(list):
def __init__(self, capacity, signature):
self.capacity = capacity
self.signature = signature
def __repr__(self):
return repr(tuple(iter(self)))
def __eq__(self, other):
return list(self) == list(other)
def __ne__(self, other):
return not self.__eq__(other)
def __iter__(self):
yield self.signature
yield tuple(super(Structure, self).__iter__()) |
#Implemnting queue ADT using singly linked list
class LinkedQueue:
    """FIFO queue implementation using a singly linked list for storage"""

    class Empty(Exception):
        """Error attempting to access an element from an empty container"""
        pass

    class _Node:
        """Lightweight, nonpublic class for storing singly linked node """
        __slots__ = '_element', '_next'

        def __init__(self, element, next):
            self._element = element  # fix: was misspelled '_elment', so reads failed
            self._next = next

    def __init__(self):
        """Create an empty queue."""
        self._head = None
        self._tail = None
        self._size = 0

    def __len__(self):
        """Return the number of elements in the queue."""
        return self._size

    def is_empty(self):
        """Return True if the queue is empty."""
        return self._size == 0

    def first(self):
        """Return (but do not remove) the element at the front of the queue.

        Raises LinkedQueue.Empty if the queue is empty.
        """
        if self.is_empty():
            # fix: bare `Empty` was a NameError; it is a nested class.
            raise self.Empty('Queue is empty')
        return self._head._element

    def dequeue(self):
        """Remove and return the first element of the queue (FIFO order).

        Raises LinkedQueue.Empty if the queue is empty.
        """
        if self.is_empty():
            raise self.Empty('Queue is empty')
        answer = self._head._element
        # fix: must follow the link (was `self._head._element`, which replaced
        # the head with a payload value and broke the list).
        self._head = self._head._next
        self._size -= 1
        if self.is_empty():
            # The removed node was also the tail.
            self._tail = None
        return answer

    def enqueue(self, e):
        """Add element e to the back of the queue."""
        newest = self._Node(e, None)  # this node will be the new tail
        if self.is_empty():
            self._head = newest
        else:
            self._tail._next = newest
        self._tail = newest
        self._size += 1
| class Linkedqueue:
"""FIFO queue implementation using a singly linked list for storage"""
class Empty(Exception):
"""Error attempting to access an element from an empty container"""
pass
class _Node:
"""Lightweight, nonpublic class for storing singly linked node """
__slots__ = ('_element', '_next')
def __init__(self, element, next):
self._elment = element
self._next = next
def __init__(self):
self._head = None
self._tail = None
self._size = 0
def __len__(self):
return self._size
def is_empty(self):
return self._size == 0
def first(self):
if self.is_empty():
raise empty('queue is empty')
return self._head._element
def dequeue(self):
if self.is_empty():
raise empty('Queue is empty')
answer = self._head._element
self._head = self._head._element
self._size -= 1
if self.is_empty():
self._tail = None
return answer
def enqueue(self, e):
newest = self._Node(e, None)
if self.is_empty():
self._head = newest
else:
self._tail._next = newest
self._tail = newest
self._size += 1 |
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 10:48:30 2020
@author: Tim
"""
n = 1000
# Number of strictly increasing triples (i, j, k) with 0 <= i < j < k < n,
# i.e. C(n, 3), computed in closed form.  The original triple loop performed
# n**3 = 10**9 iterations to arrive at the same value.
count = n * (n - 1) * (n - 2) // 6
print(count)
print(count) | """
Created on Fri May 29 10:48:30 2020
@author: Tim
"""
n = 1000
count = 0
for i in range(n):
for j in range(n):
for k in range(n):
if i < j and j < k:
count += 1
print(count) |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
def isSymmetric(self, root: TreeNode) -> bool:
    """
    Return True when the binary tree rooted at `root` is a mirror of itself.
    """
    def mirrors(a: TreeNode, b: TreeNode) -> bool:
        """True when trees a and b are mirror images of each other."""
        if a is None or b is None:
            # Mirrors only when both subtrees are absent.
            return a is b
        if a.val != b.val:
            return False
        # Outer children must mirror each other, and so must inner children.
        return mirrors(a.left, b.right) and mirrors(a.right, b.left)

    return mirrors(root, root)
| def is_symmetric(self, root: TreeNode) -> bool:
"""
function to determine if the provided TreeNode is the root
of a symmetric binary tree
"""
def is_mirror(left: TreeNode, right: TreeNode) -> bool:
"""
Utility function to determine if two trees mirror each other. If the root and
all subtrees searched here are mirrors, then the tree as a whole is symmetric
"""
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val != right.val:
return False
return is_mirror(left.left, right.right) and is_mirror(left.right, right.left)
return is_mirror(root, root) |
# Python3 program to find the numbers
# of non negative integral solutions
# return number of non negative
# integral solutions
def countSolutions(n, val, indent):
    """Count non-negative integral solutions of x1 + ... + xn = val.

    A trace line is printed for every call, indented by `indent`.
    """
    print(indent + "countSolutions(", n, val, ")")
    # Base case: a single remaining variable absorbs any non-negative value.
    if n == 1 and val >= 0:
        return 1
    # Assign i to the current variable and count solutions for the rest.
    return sum(countSolutions(n - 1, val - i, indent + "  ")
               for i in range(val + 1))
# driver code
n = 4
val = 2
print(countSolutions(n, val,"")) | def count_solutions(n, val, indent):
print(indent + 'countSolutions(', n, val, ')')
total = 0
if n == 1 and val >= 0:
return 1
for i in range(val + 1):
total += count_solutions(n - 1, val - i, indent + ' ')
return total
n = 4
val = 2
print(count_solutions(n, val, '')) |
# -*- coding: utf-8 -*-
def _db_endpoint(resource, methods):
    """Describe one REST endpoint of the database resource (docs left empty)."""
    return {'resource': resource, 'docs': '', 'methods': methods}


# HTTP routing table for the database resource.
DATABASE_MAPPING = {
    'database_list': _db_endpoint('database/', ['GET']),
    'database_get': _db_endpoint('database/{id}/', ['GET']),
    'database_create': _db_endpoint('database/', ['POST']),
    'database_update': _db_endpoint('database/{id}/', ['PUT']),
    'database_delete': _db_endpoint('database/{id}/', ['DELETE']),
}
| database_mapping = {'database_list': {'resource': 'database/', 'docs': '', 'methods': ['GET']}, 'database_get': {'resource': 'database/{id}/', 'docs': '', 'methods': ['GET']}, 'database_create': {'resource': 'database/', 'docs': '', 'methods': ['POST']}, 'database_update': {'resource': 'database/{id}/', 'docs': '', 'methods': ['PUT']}, 'database_delete': {'resource': 'database/{id}/', 'docs': '', 'methods': ['DELETE']}} |
def is_abundant(number):
    """Return True if `number` is abundant, i.e. the sum of its proper
    divisors exceeds the number itself.

    Runs in O(sqrt(n)) by collecting divisor pairs, instead of the original
    O(n) scan up to n/2.  Also returns False for numbers below 2 (the
    original returned True for 0 and negatives because its empty loop left
    the initial sum of 1 greater than the input).
    """
    if number < 2:
        return False
    total = 1  # 1 divides every number >= 2
    divisor = 2
    while divisor * divisor <= number:
        if number % divisor == 0:
            total += divisor
            partner = number // divisor
            if partner != divisor:  # avoid double-counting a square root
                total += partner
        divisor += 1
    return total > number
| def is_abundant(number):
mysum = 1
for divisor in range(2, int(round(number / 2 + 1))):
if number % divisor == 0:
mysum += divisor
if mysum > number:
return True
else:
return False |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Muhammad Aditya Hilmy, <mhilmy@hey.com>, 2020
class DIDNotAvailableException(BaseException):
    """Raised when a DID is accessed before it has become available."""
    # NOTE(review): derives from BaseException rather than Exception, so a
    # generic `except Exception` handler will NOT catch it -- confirm intended.
    def __init__(self):
        super().__init__("DID is not yet available.")
class MultipleItemDID(list):  # pragma: no cover
    """A list of DID items that refuses all access until the DID is available."""

    def __init__(self, items, did_available=True):
        super(MultipleItemDID, self).__init__(items)
        self.items = items
        self.did_available = did_available

    def _ensure_available(self):
        # Shared guard for every accessor below.
        if not self.did_available:
            raise DIDNotAvailableException()

    def __str__(self):
        self._ensure_available()
        return super().__str__()

    def __repr__(self):
        self._ensure_available()
        return super().__repr__()

    def __getitem__(self, key):
        self._ensure_available()
        return super().__getitem__(key)

    def __iter__(self):
        self._ensure_available()
        return super().__iter__()
class SingleItemDID(str):  # pragma: no cover
    """A string path wrapper that refuses access until the DID is available
    (available means the path is not None)."""

    def __init__(self, path):
        super(SingleItemDID, self).__init__()
        self.path = path
        self.did_available = path is not None

    def _ensure_available(self):
        # Shared guard for every accessor below.
        if not self.did_available:
            raise DIDNotAvailableException()

    def __str__(self):
        self._ensure_available()
        return self.path

    def __repr__(self):
        self._ensure_available()
        return self.path

    def __getitem__(self, key):
        self._ensure_available()
        return super().__getitem__(key)

    def __iter__(self):
        self._ensure_available()
        return super().__iter__()
| class Didnotavailableexception(BaseException):
def __init__(self):
super().__init__('DID is not yet available.')
class Multipleitemdid(list):
def __init__(self, items, did_available=True):
super(MultipleItemDID, self).__init__(items)
self.items = items
self.did_available = did_available
def __str__(self):
if not self.did_available:
raise did_not_available_exception()
return super().__str__()
def __repr__(self):
if not self.did_available:
raise did_not_available_exception()
return super().__repr__()
def __getitem__(self, key):
if not self.did_available:
raise did_not_available_exception()
return super().__getitem__(key)
def __iter__(self):
if not self.did_available:
raise did_not_available_exception()
return super().__iter__()
class Singleitemdid(str):
def __init__(self, path):
super(SingleItemDID, self).__init__()
self.path = path
self.did_available = path is not None
def __str__(self):
if not self.did_available:
raise did_not_available_exception()
return self.path
def __repr__(self):
if not self.did_available:
raise did_not_available_exception()
return self.path
def __getitem__(self, key):
if not self.did_available:
raise did_not_available_exception()
return super().__getitem__(key)
def __iter__(self):
if not self.did_available:
raise did_not_available_exception()
return super().__iter__() |
class OAuthError(Exception):
    """Base class for OAuth errors"""


class OAuthStateMismatchError(OAuthError):
    """Raised on an OAuth state mismatch."""


class OAuthCannotDisconnectError(OAuthError):
    """Raised when an OAuth account cannot be disconnected."""


class OAuthUserAlreadyExistsError(OAuthError):
    """Raised when the OAuth user already exists."""
| class Oautherror(Exception):
"""Base class for OAuth errors"""
pass
class Oauthstatemismatcherror(OAuthError):
pass
class Oauthcannotdisconnecterror(OAuthError):
pass
class Oauthuseralreadyexistserror(OAuthError):
pass |
def ispow2(n):
    '''
    True if n is a positive power of 2, False otherwise
    >>> ispow2(5)
    False
    >>> ispow2(4)
    True
    >>> ispow2(0)
    False
    '''
    # A power of two has exactly one set bit; n & (n-1) clears the lowest set
    # bit, so it is zero only for powers of two.  The n > 0 guard excludes 0
    # (and negatives), which the bit trick alone misclassified as True.
    return n > 0 and (n & (n - 1)) == 0
def nextpow2(n):
    '''
    Given n, return the nearest power of two that is >= n
    >>> nextpow2(1)
    1
    >>> nextpow2(2)
    2
    >>> nextpow2(5)
    8
    >>> nextpow2(17)
    32
    '''
    # Power-of-two test inlined: exactly one bit set.
    if (n & (n - 1)) == 0:
        return n
    # Otherwise the answer is 1 shifted left by n's bit length, which equals
    # the original shift-and-count loop.
    return 1 << n.bit_length()
class SamplingRateError(ValueError):
    '''
    Indicates that the conversion of frequency to sampling rate could not be
    performed.
    '''

    def __init__(self, fs, requested_fs):
        self.fs = fs
        self.requested_fs = requested_fs

    def __str__(self):
        template = ('The requested sampling rate, %f Hz, is greater than '
                    'the DSP clock frequency of %f Hz.')
        return template % (self.requested_fs, self.fs)
def convert(src_unit, dest_unit, value, dsp_fs):
    '''
    Converts value to desired unit give the sampling frequency of the DSP.
    Parameters specified in paradigms are typically expressed as
    frequency and time while many DSP parameters are expressed in number of
    samples (referenced to the DSP sampling frequency). This function provides
    a convenience method for converting between conventional values and the
    'digital' values used by the DSP.
    Note that for converting units of time/frequency to n/nPer, we have to
    coerce the value to a multiple of the DSP period (e.g. the number of
    'ticks' of the DSP clock).
    Appropriate strings for the unit types:
    fs
        sampling frequency
    nPer
        number of samples per period
    n
        number of samples
    s
        seconds
    ms
        milliseconds
    nPow2
        number of samples, coerced to the next greater power of 2 (used for
        ensuring efficient FFT computation)
    >>> convert('s', 'n', 0.5, 10000)
    5000
    >>> convert('fs', 'nPer', 500, 10000)
    20
    >>> convert('s', 'nPow2', 5, 97.5e3)
    524288
    Parameters
    ----------
    src_unit: string
    dest_unit: string
        Destination unit
    value: numerical (e.g. integer or float)
        Value to be converted
    Returns
    -------
    converted unit : numerical value
    '''
    def fs_to_nPer(req_fs, clock):
        # A sampling rate above the DSP clock cannot be realised.
        if clock < req_fs:
            raise SamplingRateError(clock, req_fs)
        return int(clock / req_fs)

    def nPer_to_fs(nPer, clock):
        return clock / nPer

    def n_to_s(n, clock):
        return n / clock

    def s_to_n(s, clock):
        return int(s * clock)

    def ms_to_n(ms, clock):
        return int(ms * 1e-3 * clock)

    def n_to_ms(n, clock):
        return n / clock * 1e3

    def s_to_nPow2(s, clock):
        return nextpow2(s_to_n(s, clock))

    # Explicit dispatch on the "<src>_to_<dest>" name; an unknown unit pair
    # raises KeyError, exactly like the original locals()-based lookup.
    converters = {
        'fs_to_nPer': fs_to_nPer,
        'nPer_to_fs': nPer_to_fs,
        'n_to_s': n_to_s,
        's_to_n': s_to_n,
        'ms_to_n': ms_to_n,
        'n_to_ms': n_to_ms,
        's_to_nPow2': s_to_nPow2,
    }
    return converters['%s_to_%s' % (src_unit, dest_unit)](value, dsp_fs)
| def ispow2(n):
"""
True if n is a power of 2, False otherwise
>>> ispow2(5)
False
>>> ispow2(4)
True
"""
return n & n - 1 == 0
def nextpow2(n):
"""
Given n, return the nearest power of two that is >= n
>>> nextpow2(1)
1
>>> nextpow2(2)
2
>>> nextpow2(5)
8
>>> nextpow2(17)
32
"""
if ispow2(n):
return n
count = 0
while n != 0:
n = n >> 1
count += 1
return 1 << count
class Samplingrateerror(ValueError):
"""
Indicates that the conversion of frequency to sampling rate could not be
performed.
"""
def __init__(self, fs, requested_fs):
self.fs = fs
self.requested_fs = requested_fs
def __str__(self):
mesg = 'The requested sampling rate, %f Hz, is greater than ' + 'the DSP clock frequency of %f Hz.'
return mesg % (self.requested_fs, self.fs)
def convert(src_unit, dest_unit, value, dsp_fs):
"""
Converts value to desired unit give the sampling frequency of the DSP.
Parameters specified in paradigms are typically expressed as
frequency and time while many DSP parameters are expressed in number of
samples (referenced to the DSP sampling frequency). This function provides
a convenience method for converting between conventional values and the
'digital' values used by the DSP.
Note that for converting units of time/frequency to n/nPer, we have to
coerce the value to a multiple of the DSP period (e.g. the number of
'ticks' of the DSP clock).
Appropriate strings for the unit types:
fs
sampling frequency
nPer
number of samples per period
n
number of samples
s
seconds
ms
milliseconds
nPow2
number of samples, coerced to the next greater power of 2 (used for
ensuring efficient FFT computation)
>>> convert('s', 'n', 0.5, 10000)
5000
>>> convert('fs', 'nPer', 500, 10000)
20
>>> convert('s', 'nPow2', 5, 97.5e3)
524288
Parameters
----------
src_unit: string
dest_unit: string
Destination unit
value: numerical (e.g. integer or float)
Value to be converted
Returns
-------
converted unit : numerical value
"""
def fs_to_n_per(req_fs, dsp_fs):
if dsp_fs < req_fs:
raise sampling_rate_error(dsp_fs, req_fs)
return int(dsp_fs / req_fs)
def n_per_to_fs(nPer, dsp_fs):
return dsp_fs / nPer
def n_to_s(n, dsp_fs):
return n / dsp_fs
def s_to_n(s, dsp_fs):
return int(s * dsp_fs)
def ms_to_n(ms, dsp_fs):
return int(ms * 0.001 * dsp_fs)
def n_to_ms(n, dsp_fs):
return n / dsp_fs * 1000.0
def s_to_n_pow2(s, dsp_fs):
return nextpow2(s_to_n(s, dsp_fs))
fun = '%s_to_%s' % (src_unit, dest_unit)
return locals()[fun](value, dsp_fs) |
# Complete the fibonacciModified function below.
def fibonacciModified(t1, t2, n):
    """Return the n-th term of the modified Fibonacci sequence.

    The sequence is defined by term(1) = t1, term(2) = t2 and
    term(i) = term(i-2) + term(i-1)**2.

    The original crashed with UnboundLocalError for n < 3 because the loop
    never ran; n == 1 and n == 2 now return t1 and t2, and n < 1 raises
    ValueError.
    """
    if n < 1:
        raise ValueError('n must be >= 1')
    if n == 1:
        return t1
    # Roll the (previous, current) pair forward until `t2` holds term n.
    for _ in range(3, n + 1):
        t1, t2 = t2, t1 + t2 * t2
    return t2
| def fibonacci_modified(t1, t2, n):
term = 3
while term <= n:
actual_number = t1 + t2 ** 2
t1 = t2
t2 = actual_number
term += 1
return actual_number |
# Tiny demo: sum two constants, then greet.
a = 1
b = 2
c = a + b
print("hello world")
121213 | a = 1
b = 2
c = a + b
print('hello world')
121213 |
# Read (name, age) pairs from stdin, echoing each name, until the name
# 'Anton' is entered.
name = input()
age = int(input())  # age is read but never used afterwards
while name != 'Anton':
    print(name)
    name = input()
    age = input()  # NOTE(review): re-read as str here, not int as above -- confirm intended
print(f'I am Anton') | name = input()
age = int(input())
while name != 'Anton':
print(name)
name = input()
age = input()
print(f'I am Anton') |
class Task(object):
    """A simple task record: an id, a name and an optional description."""

    def __init__(self, name, description="", task_id=None):
        self.id = task_id
        self.name = name
        self.description = description

    def serialize(self):
        """Return this task as a plain dict."""
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
        }

    @staticmethod
    def serialize_multiple(tasks):
        """Serialize an iterable of tasks into a list of dicts."""
        return [item.serialize() for item in tasks]
| class Task(object):
def __init__(self, name, description='', task_id=None):
self.id = task_id
self.name = name
self.description = description
def serialize(self):
return {'id': self.id, 'name': self.name, 'description': self.description}
@staticmethod
def serialize_multiple(tasks):
return [task.serialize() for task in tasks] |
# -*- coding: utf-8 -*-
# Example scripts shipped with the package, listed in numeric order.
_available_examples = ["ex_001_Molecule_Hamiltonian.py",
                       "ex_002_Molecule_Aggregate.py",
                       "ex_003_CorrFcnSpectDens.py",
                       "ex_004_SpectDensDatabase.py",
                       "ex_005_UnitsManagementHamiltonian.py",
                       "ex_006_Absorption_1.py",
                       "ex_010_RedfieldTheory_1.py",
                       "ex_011_LindbladForm_1.py",
                       "ex_012_Integrodiff.py",
                       "ex_013_HEOM.py",
                       "ex_014_HEOM_rates.py",
                       "ex_015_RedfieldTheory_2.py",
                       "ex_016_FoersterTheory_1.py",
                       "ex_020_EvolutionSuperOperator_1.py",
                       "ex_050_PDB_FMO1.py",
                       "ex_300_ParallelIterators.py",
                       "ex_800_DiagProblem.py",
                       "ex_853_RC.py",
                       "ex_854_2DSpectrum_DimerDisorder.py"]
# Data files accompanying the examples (PDB structures and YAML configs).
_available_data = ["data_050_3eni.pdb",
                   "data_050_3eoj.pdb",
                   "ex_853_RC.yaml",
                   "ex_854_2DSpectrum_DimerDisorder.yaml"]
| _available_examples = ['ex_001_Molecule_Hamiltonian.py', 'ex_002_Molecule_Aggregate.py', 'ex_003_CorrFcnSpectDens.py', 'ex_004_SpectDensDatabase.py', 'ex_005_UnitsManagementHamiltonian.py', 'ex_006_Absorption_1.py', 'ex_010_RedfieldTheory_1.py', 'ex_011_LindbladForm_1.py', 'ex_012_Integrodiff.py', 'ex_013_HEOM.py', 'ex_014_HEOM_rates.py', 'ex_015_RedfieldTheory_2.py', 'ex_016_FoersterTheory_1.py', 'ex_020_EvolutionSuperOperator_1.py', 'ex_050_PDB_FMO1.py', 'ex_300_ParallelIterators.py', 'ex_800_DiagProblem.py', 'ex_853_RC.py', 'ex_854_2DSpectrum_DimerDisorder.py']
_available_data = ['data_050_3eni.pdb', 'data_050_3eoj.pdb', 'ex_853_RC.yaml', 'ex_854_2DSpectrum_DimerDisorder.yaml'] |
def recursive_multiply(x, y):
    """Multiply two integers by repeated addition, recursively.

    Negative operands are handled by multiplying the absolute values and
    restoring the sign at the end; the original recursed without bound
    whenever the second operand ended up negative.
    """
    if x < 0 or y < 0:
        sign = -1 if (x < 0) != (y < 0) else 1
        return sign * recursive_multiply(abs(x), abs(y))
    if x < y:
        # Recurse with the smaller operand second: fewer additions.
        return recursive_multiply(y, x)
    if y == 0:
        return 0
    return x + recursive_multiply(x, y - 1)
x = int(input("Enter x"))
y = int(input("Enter y"))
print(recursive_multiply(x, y)) | def recursive_multiply(x, y):
if x < y:
return recursive_multiply(y, x)
elif y != 0:
return x + recursive_multiply(x, y - 1)
else:
return 0
x = int(input('Enter x'))
y = int(input('Enter y'))
print(recursive_multiply(x, y)) |
DEFAULT_STACK_SIZE = 8


class PcStack(object):
    """Fixed-size circular stack of return addresses driving a program
    counter object (anything with a mutable `address` attribute)."""

    def __init__(self, programCounter, size: int=DEFAULT_STACK_SIZE):
        self._programCounter = programCounter
        self._size = size
        self._stack = [0 for _ in range(size)]
        self._stackPointer = 0

    @property
    def programCounter(self):
        """The program-counter object whose `address` this stack manipulates."""
        return self._programCounter

    @property
    def size(self) -> int:
        """Capacity of the circular stack."""
        return self._size

    @property
    def stack(self):
        """The underlying storage list."""
        return self._stack

    @property
    def stackPointer(self) -> int:
        """Index of the next free slot."""
        return self._stackPointer

    @stackPointer.setter
    def stackPointer(self, value: int):
        self._stackPointer = value

    @property
    def current(self):
        """Value stored at the slot addressed by the stack pointer."""
        return self.stack[self.stackPointer]

    @current.setter
    def current(self, value: int):
        self.stack[self.stackPointer] = value

    def incStackPointer(self):
        """Advance the stack pointer one slot, wrapping at `size`."""
        self.stackPointer = (self.stackPointer + 1) % self.size

    def decStackPointer(self):
        """Step the stack pointer back one slot, wrapping below zero."""
        if self.stackPointer == 0:
            self.stackPointer = self.size - 1
        else:
            self.stackPointer -= 1

    def push(self, address):
        """Save the return address (pc + 1) and jump to `address`."""
        self.current = self.programCounter.address + 1
        self.incStackPointer()
        self.programCounter.address = address

    def pop(self):
        """Restore the program counter from the most recently pushed address."""
        self.decStackPointer()
        self.programCounter.address = self.current
| default_stack_size = 8
class Pcstack(object):
def __init__(self, programCounter, size: int=DEFAULT_STACK_SIZE):
self._programCounter = programCounter
self._size = size
self._stack = [0] * size
self._stackPointer = 0
@property
def program_counter(self):
return self._programCounter
@property
def size(self) -> int:
return self._size
@property
def stack(self):
return self._stack
@property
def stack_pointer(self) -> int:
return self._stackPointer
@stackPointer.setter
def stack_pointer(self, value: int):
self._stackPointer = value
@property
def current(self):
return self.stack[self.stackPointer]
@current.setter
def current(self, value: int):
self.stack[self.stackPointer] = value
def inc_stack_pointer(self):
self._stackPointer = (self.stackPointer + 1) % self.size
def dec_stack_pointer(self):
if self.stackPointer == 0:
self.stackPointer = self.size - 1
else:
self.stackPointer = self.stackPointer - 1
def push(self, address):
self.current = self.programCounter.address + 1
self.incStackPointer()
self.programCounter.address = address
def pop(self):
self.decStackPointer()
self.programCounter.address = self.current |
# -*- coding: utf-8 -*-
# Eve API resource definitions: users, todo lists and todos.

user_schema = {
    'username': {
        'type': 'string',
        'minlength': 1,
        'maxlength': 64,
        'required': True,
        'unique': True,
    },
    'email': {
        'type': 'string',
        # Raw string with an escaped dot: the original pattern used a bare
        # '.', which matches ANY character (e.g. accepted 'a@bXc').
        'regex': r'^\S+@\S+\.\S+',
        'required': True,
        'unique': True,
    },
    'password': {
        'type': 'string',
        'minlength': 1,
        'maxlength': 64,
        'required': True,
    },
}
user = {
    'item_title': 'user',
    # Allow item lookup by username in addition to the default _id.
    'additional_lookup': {
        'url': r'regex("[\w]+")',
        'field': 'username',
    },
    'cache_control': 'max-age=10,must-revalidate',
    'cache_expires': 10,
    'resource_methods': ['GET', 'POST'],
    'schema': user_schema,
}
todolist_schema = {
    'title': {
        'type': 'string',
        'minlength': 1,
        'maxlength': 128,
        'required': True,
    },
    'creator': {
        'type': 'string',
    },
    'todos': {},
}
todolist = {
    'item_title': 'todolist',
    # Lookup by title rather than _id.
    'additional_lookup': {
        'url': r'regex("[\w]+")',
        'field': 'title',
    },
    'cache_control': 'max-age=10,must-revalidate',
    'cache_expires': 10,
    'resource_methods': ['GET', 'POST', 'DELETE'],
    'schema': todolist_schema,
}
todo_schema = {
    'description': {
        'type': 'string',
        'minlength': 1,
        'maxlength': 128,
        'required': True,
    },
    'creator': {
        'type': 'string',
    },
    'todolist': {},
}
todo = {
    'item_title': 'todo',
    # Lookup by description rather than _id.
    'additional_lookup': {
        'url': r'regex("[\w]+")',
        'field': 'description',
    },
    'cache_control': 'max-age=10,must-revalidate',
    'cache_expires': 10,
    'resource_methods': ['GET', 'POST', 'DELETE'],
    'schema': todo_schema,
}
DOMAIN = {
    'users': user,
    'todolists': todolist,
    'todos': todo,
}

# MongoDB connection settings.
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_USERNAME = ''
MONGO_PASSWORD = ''
MONGO_DBNAME = 'apitest'
| user_schema = {'username': {'type': 'string', 'minlength': 1, 'maxlength': 64, 'required': True, 'unique': True}, 'email': {'type': 'string', 'regex': '^\\S+@\\S+.\\S+', 'required': True, 'unique': True}, 'password': {'type': 'string', 'minlength': 1, 'maxlength': 64, 'required': True}}
user = {'item_title': 'user', 'additional_lookup': {'url': 'regex("[\\w]+")', 'field': 'username'}, 'cache_control': 'max-age=10,must-revalidate', 'cache_expires': 10, 'resource_methods': ['GET', 'POST'], 'schema': user_schema}
todolist_schema = {'title': {'type': 'string', 'minlength': 1, 'maxlength': 128, 'required': True}, 'creator': {'type': 'string'}, 'todos': {}}
todolist = {'item_title': 'todolist', 'additional_lookup': {'url': 'regex("[\\w]+")', 'field': 'title'}, 'cache_control': 'max-age=10,must-revalidate', 'cache_expires': 10, 'resource_methods': ['GET', 'POST', 'DELETE'], 'schema': todolist_schema}
todo_schema = {'description': {'type': 'string', 'minlength': 1, 'maxlength': 128, 'required': True}, 'creator': {'type': 'string'}, 'todolist': {}}
todo = {'item_title': 'todo', 'additional_lookup': {'url': 'regex("[\\w]+")', 'field': 'description'}, 'cache_control': 'max-age=10,must-revalidate', 'cache_expires': 10, 'resource_methods': ['GET', 'POST', 'DELETE'], 'schema': todo_schema}
domain = {'users': user, 'todolists': todolist, 'todos': todo}
mongo_host = 'localhost'
mongo_port = 27017
mongo_username = ''
mongo_password = ''
mongo_dbname = 'apitest' |
'''
https://www.geeksforgeeks.org/find-count-number-given-string-present-2d-character-array/
Given a 2-Dimensional character array and a string, we need to find the given string in 2-dimensional character array such that individual characters can be present left to right, right to left, top to down or down to top.
Examples:
In case you wish to attend live classes with experts, please refer DSA Live Classes for Working Professionals and Competitive Programming Live for Students.
Input : a ={
{D,D,D,G,D,D},
{B,B,D,E,B,S},
{B,S,K,E,B,K},
{D,D,D,D,D,E},
{D,D,D,D,D,E},
{D,D,D,D,D,G}
}
str= "GEEKS"
Output :2
Input : a = {
{B,B,M,B,B,B},
{C,B,A,B,B,B},
{I,B,G,B,B,B},
{G,B,I,B,B,B},
{A,B,C,B,B,B},
{M,C,I,G,A,M}
}
str= "MAGIC"
Output :3
We have discussed simpler problem to find if a word exists or not in a matrix.
To count all occurrences, we follow simple brute force approach. Traverse through each character of the matrix and taking each character as start of the string to be found, try to search in all the possible directions. Whenever, a word is found, increase the count, and after traversing the matrix what ever will be the value of count will be number of times string exists in character matrix.
Algorithm :
1- Traverse matrix character by character and take one character as string start
2- For each character find the string in all the four directions recursively
3- If a string found, we increase the count
4- When we are done with one character as start, we repeat the same process for the next character
5- Calculate the sum of count for each character
6- Final count will be the answer'''
# Python code for finding count
# of string in a given 2D
# character array.
# utility function to search
# complete string from any
# given index of 2d array
def internalSearch(ii, needle, row, col, hay,
                   row_max, col_max):
    """Count completions of needle[ii:] starting at hay[row][col].

    Explores the four cardinal directions recursively; each visited cell is
    temporarily overwritten with 0 so it cannot be reused on the current
    path, and restored on backtrack.
    """
    in_bounds = 0 <= row <= row_max and 0 <= col <= col_max
    if not (in_bounds and needle[ii] == hay[row][col]):
        return 0
    match = hay[row][col]
    hay[row][col] = 0  # mark visited
    ii += 1
    if ii == len(needle):
        found = 1
    else:
        found = 0
        # Backtracking search in every direction: right, left, down, up.
        for d_row, d_col in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            found += internalSearch(ii, needle, row + d_row,
                                    col + d_col, hay, row_max, col_max)
    hay[row][col] = match  # restore on backtrack
    return found
# Function to search the string in 2d array
def searchString(needle, row, col, strr,
                 row_count, col_count):
    """Total occurrences of `needle` in grid `strr`, trying every start cell.

    The `row` and `col` parameters are unused; they are kept for interface
    compatibility with existing callers.
    """
    return sum(
        internalSearch(0, needle, r, c, strr,
                       row_count - 1, col_count - 1)
        for r in range(row_count)
        for c in range(col_count)
    )
# Driver code: count occurrences of "MAGIC" in the sample grid.
needle = "MAGIC"
inputt = ["BBABBM", "CBMBBA", "IBABBG",
          "GOZBBI", "ABBBBC", "MCIGAM"]
strr = [list(row) for row in inputt]
print("count: ", searchString(needle, 0, 0, strr,
                              len(strr), len(strr[0])))
| """
https://www.geeksforgeeks.org/find-count-number-given-string-present-2d-character-array/
Given a 2-Dimensional character array and a string, we need to find the given string in 2-dimensional character array such that individual characters can be present left to right, right to left, top to down or down to top.
Examples:
In case you wish to attend live classes with experts, please refer DSA Live Classes for Working Professionals and Competitive Programming Live for Students.
Input : a ={
{D,D,D,G,D,D},
{B,B,D,E,B,S},
{B,S,K,E,B,K},
{D,D,D,D,D,E},
{D,D,D,D,D,E},
{D,D,D,D,D,G}
}
str= "GEEKS"
Output :2
Input : a = {
{B,B,M,B,B,B},
{C,B,A,B,B,B},
{I,B,G,B,B,B},
{G,B,I,B,B,B},
{A,B,C,B,B,B},
{M,C,I,G,A,M}
}
str= "MAGIC"
Output :3
We have discussed simpler problem to find if a word exists or not in a matrix.
To count all occurrences, we follow simple brute force approach. Traverse through each character of the matrix and taking each character as start of the string to be found, try to search in all the possible directions. Whenever, a word is found, increase the count, and after traversing the matrix what ever will be the value of count will be number of times string exists in character matrix.
Algorithm :
1- Traverse matrix character by character and take one character as string start
2- For each character find the string in all the four directions recursively
3- If a string found, we increase the count
4- When we are done with one character as start, we repeat the same process for the next character
5- Calculate the sum of count for each character
6- Final count will be the answer"""
def internal_search(ii, needle, row, col, hay, row_max, col_max):
    """Count paths spelling needle[ii:] that start at hay[row][col].

    From a matching cell the search continues to the four cardinal
    neighbours; a cell is temporarily blanked so one path never reuses it.
    """
    matches = 0
    in_bounds = 0 <= row <= row_max and 0 <= col <= col_max
    if in_bounds and needle[ii] == hay[row][col]:
        saved = hay[row][col]
        hay[row][col] = 0  # mark visited for the duration of this path
        nxt = ii + 1
        if nxt == len(needle):
            matches = 1
        else:
            for d_row, d_col in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                matches += internal_search(nxt, needle, row + d_row,
                                           col + d_col, hay, row_max, col_max)
        hay[row][col] = saved  # restore so other starting cells see the grid intact
    return matches
def search_string(needle, row, col, strr, row_count, col_count):
    """Count every occurrence of needle in the 2-D grid strr.

    Each cell is tried as a starting point; `row` and `col` are kept for
    signature compatibility but are not used.
    """
    total = 0
    for start_row in range(row_count):
        for start_col in range(col_count):
            total += internal_search(0, needle, start_row, start_col,
                                     strr, row_count - 1, col_count - 1)
    return total
# Driver: run the grid search on the sample puzzle and report the count.
needle = 'MAGIC'
inputt = ['BBABBM', 'CBMBBA', 'IBABBG', 'GOZBBI', 'ABBBBC', 'MCIGAM']
# Convert each row string into a mutable list of characters.
strr = [list(row) for row in inputt]
print('count: ', search_string(needle, 0, 0, strr, len(strr), len(strr[0])))
#
# PHASE: jvm flags
#
# Builds the JVM flag list for the test action: enables assertions and
# sets the system properties the bazel test-discovery runner reads.
#
def phase_jvm_flags(ctx, p):
    # Jars to scan for tests: explicit `tests_from` targets take priority,
    # otherwise fall back to this phase's merged runtime output jars.
    if ctx.attr.tests_from:
        archives = _get_test_archive_jars(ctx, ctx.attr.tests_from)
    else:
        archives = p.compile.merged_provider.runtime_output_jars
    flags = _gen_test_suite_flags_based_on_prefixes_and_suffixes(
        ctx,
        _serialize_archives_short_path(archives),
    )
    return [
        "-ea",
        flags.archiveFlag,
        flags.prefixesFlag,
        flags.suffixesFlag,
        flags.printFlag,
        flags.testSuiteFlag,
    ]
def _gen_test_suite_flags_based_on_prefixes_and_suffixes(ctx, archives):
    # System properties consumed by the bazel test-discovery test runner.
    joined_prefixes = ",".join(ctx.attr.prefixes)
    joined_suffixes = ",".join(ctx.attr.suffixes)
    return struct(
        archiveFlag = "-Dbazel.discover.classes.archives.file.paths=%s" % archives,
        prefixesFlag = "-Dbazel.discover.classes.prefixes=%s" % joined_prefixes,
        printFlag = "-Dbazel.discover.classes.print.discovered=%s" % ctx.attr.print_discovered_classes,
        suffixesFlag = "-Dbazel.discover.classes.suffixes=%s" % joined_suffixes,
        testSuiteFlag = "-Dbazel.test_suite=%s" % ctx.attr.suite_class,
    )
def _serialize_archives_short_path(archives):
    # Join the archives' short paths into one comma-separated string.
    # Using join avoids the manual trailing-comma trim and returns "" for
    # an empty list, exactly as the previous slice-based version did.
    return ",".join([archive.short_path for archive in archives])
def _get_test_archive_jars(ctx, test_archives):
    # Collect the class jar of every JavaInfo output across all archives.
    jars = []
    for archive in test_archives:
        for java_output in archive[JavaInfo].outputs.jars:
            jars.append(java_output.class_jar)
    return jars
| def phase_jvm_flags(ctx, p):
    # Jars to scan for tests: explicit `tests_from` targets win, otherwise
    # the phase's merged runtime output jars are used.
    if ctx.attr.tests_from:
        archives = _get_test_archive_jars(ctx, ctx.attr.tests_from)
    else:
        archives = p.compile.merged_provider.runtime_output_jars
    suite = _gen_test_suite_flags_based_on_prefixes_and_suffixes(
        ctx,
        _serialize_archives_short_path(archives),
    )
    # "-ea" enables JVM assertions; the rest are discovery system properties.
    return [
        '-ea',
        suite.archiveFlag,
        suite.prefixesFlag,
        suite.suffixesFlag,
        suite.printFlag,
        suite.testSuiteFlag,
    ]
def _gen_test_suite_flags_based_on_prefixes_and_suffixes(ctx, archives):
    # System properties read by the bazel test-discovery test runner.
    prefixes = ','.join(ctx.attr.prefixes)
    suffixes = ','.join(ctx.attr.suffixes)
    return struct(
        archiveFlag = '-Dbazel.discover.classes.archives.file.paths=%s' % archives,
        prefixesFlag = '-Dbazel.discover.classes.prefixes=%s' % prefixes,
        printFlag = '-Dbazel.discover.classes.print.discovered=%s' % ctx.attr.print_discovered_classes,
        suffixesFlag = '-Dbazel.discover.classes.suffixes=%s' % suffixes,
        testSuiteFlag = '-Dbazel.test_suite=%s' % ctx.attr.suite_class,
    )
def _serialize_archives_short_path(archives):
    # Return the archives' short paths as one comma-separated string.
    # join is the idiomatic form of the previous concatenate-then-trim loop
    # and yields '' for an empty list, matching the old behavior.
    return ','.join([archive.short_path for archive in archives])
def _get_test_archive_jars(ctx, test_archives):
    # Flatten the class jars of every archive's JavaInfo outputs.
    jars = []
    for archive in test_archives:
        for java_output in archive[JavaInfo].outputs.jars:
            jars.append(java_output.class_jar)
    return jars
# Top-level keys for a parsed-file record.
METADATA = 'metadata'
CONTENT = 'content'
FILENAME = 'filename'
# CIF-style data names (tags) looked up in the file content.
# NOTE(review): spellings look like standard CIF / oxdiff tags except
# '_reflns_odcompleteness_completeness' — confirm against the producing software.
PARAM_CREATION_DATE = '_audit_creation_date'
# Data-quality statistics.
PARAM_R1 = '_diffrn_reflns_av_R_equivalents'
PARAM_SIGMI_NETI = '_diffrn_reflns_av_sigmaI/netI'
PARAM_COMPLETENESS = '_reflns_odcompleteness_completeness'
# Space-group identification.
PARAM_SPACEGROUP = '_space_group_name_H-M_alt'
PARAM_SPACEGROUP_NUM = '_space_group_IT_number'
# Refined unit-cell constants (lengths, angles, volume).
PARAM_CONST_CELLA = '_cell_length_a'
PARAM_CONST_CELLB = '_cell_length_b'
PARAM_CONST_CELLC = '_cell_length_c'
PARAM_CONST_AL = '_cell_angle_alpha'
PARAM_CONST_BE = '_cell_angle_beta'
PARAM_CONST_GA = '_cell_angle_gamma'
PARAM_CONST_VOL = '_cell_volume'
PARAM_REFLECTIONS = '_cell_measurement_reflns_used'
PARAM_WAVELENGTH = '_diffrn_radiation_wavelength'
# Oxdiff-specific cell parameters.
PARAM_CELLA = '_cell_oxdiff_length_a'
PARAM_CELLB = '_cell_oxdiff_length_b'
PARAM_CELLC = '_cell_oxdiff_length_c'
PARAM_AL = '_cell_oxdiff_angle_alpha'
PARAM_BE = '_cell_oxdiff_angle_beta'
PARAM_GA = '_cell_oxdiff_angle_gamma'
PARAM_VOL = '_cell_oxdiff_volume'
# Orientation (UB) matrix elements.
PARAM_UB11 = '_diffrn_orient_matrix_UB_11'
PARAM_UB12 = '_diffrn_orient_matrix_UB_12'
PARAM_UB13 = '_diffrn_orient_matrix_UB_13'
PARAM_UB21 = '_diffrn_orient_matrix_UB_21'
PARAM_UB22 = '_diffrn_orient_matrix_UB_22'
PARAM_UB23 = '_diffrn_orient_matrix_UB_23'
PARAM_UB31 = '_diffrn_orient_matrix_UB_31'
PARAM_UB32 = '_diffrn_orient_matrix_UB_32'
PARAM_UB33 = '_diffrn_orient_matrix_UB_33'
# Theta range used for the cell measurement.
PARAM_2THETA_MIN = '_cell_measurement_theta_min'
PARAM_2THETA_MAX = '_cell_measurement_theta_max'
| metadata = 'metadata'
# Top-level keys for a parsed-file record.
content = 'content'
filename = 'filename'
# CIF-style data names (tags) looked up in the file content.
# NOTE(review): lowercase names are used as module constants here; renaming
# them to UPPER_CASE would break any importer, so only comments are added.
param_creation_date = '_audit_creation_date'
# Data-quality statistics.
param_r1 = '_diffrn_reflns_av_R_equivalents'
param_sigmi_neti = '_diffrn_reflns_av_sigmaI/netI'
param_completeness = '_reflns_odcompleteness_completeness'
# Space-group identification.
param_spacegroup = '_space_group_name_H-M_alt'
param_spacegroup_num = '_space_group_IT_number'
# Refined unit-cell constants (lengths, angles, volume).
param_const_cella = '_cell_length_a'
param_const_cellb = '_cell_length_b'
param_const_cellc = '_cell_length_c'
param_const_al = '_cell_angle_alpha'
param_const_be = '_cell_angle_beta'
param_const_ga = '_cell_angle_gamma'
param_const_vol = '_cell_volume'
param_reflections = '_cell_measurement_reflns_used'
param_wavelength = '_diffrn_radiation_wavelength'
# Oxdiff-specific cell parameters.
param_cella = '_cell_oxdiff_length_a'
param_cellb = '_cell_oxdiff_length_b'
param_cellc = '_cell_oxdiff_length_c'
param_al = '_cell_oxdiff_angle_alpha'
param_be = '_cell_oxdiff_angle_beta'
param_ga = '_cell_oxdiff_angle_gamma'
param_vol = '_cell_oxdiff_volume'
# Orientation (UB) matrix elements.
param_ub11 = '_diffrn_orient_matrix_UB_11'
param_ub12 = '_diffrn_orient_matrix_UB_12'
param_ub13 = '_diffrn_orient_matrix_UB_13'
param_ub21 = '_diffrn_orient_matrix_UB_21'
param_ub22 = '_diffrn_orient_matrix_UB_22'
param_ub23 = '_diffrn_orient_matrix_UB_23'
param_ub31 = '_diffrn_orient_matrix_UB_31'
param_ub32 = '_diffrn_orient_matrix_UB_32'
param_ub33 = '_diffrn_orient_matrix_UB_33'
# Theta range used for the cell measurement.
param_2_theta_min = '_cell_measurement_theta_min'
param_2_theta_max = '_cell_measurement_theta_max'
class cves:
    """Query constants for the NVD CVE REST API (v1.0)."""

    # Base query: CVEs published from 2021-09-01, 100 results per page;
    # a keyword from `keywords` is appended after the trailing `keyword=`.
    cve_url = (
        "https://services.nvd.nist.gov/rest/json/cves/1.0"
        "?pubStartDate=2021-09-01T00:00:00:000+UTC-00:00"
        "&resultsPerPage=100&keyword="
    )
    keywords = [
        "RHCS",
        "RHEL",
        "Thales",
        "nShield",
        "Certificate+Authority&isExactMatch=true",
        "NSS",
        "tomcat",
        "TLS",
    ]
| class Cves:
cve_url = 'https://services.nvd.nist.gov/rest/json/cves/1.0?pubStartDate=2021-09-01T00:00:00:000+UTC-00:00&resultsPerPage=100&keyword='
keywords = ['RHCS', 'RHEL', 'Thales', 'nShield', 'Certificate+Authority&isExactMatch=true', 'NSS', 'tomcat', 'TLS'] |
class SingleMethods:
    """Generates single-sample LaTeX potency reports.

    Dispatches each requested sample to a unit/type-specific generator and
    collects the finished LaTeX documents in `finished_reports_dictionary`.
    """
    def __init__(self,
                 finished_reports_dictionary,
                 single_reports_dictionary,
                 sample_data,
                 latex_header_and_sample_list_dictionary,
                 loq_dictionary
                 ):
        """Store the shared report-generation state.

        Args:
            finished_reports_dictionary: sample_id -> finished LaTeX report;
                filled in by the generator methods.
            single_reports_dictionary: sample_id -> (unit, report type) pair
                (e.g. ('Percent', 'Basic')) driving report dispatch.
            sample_data: object exposing `samples_data_frame` and
                `best_recovery_qc_data_frame` read by the generators.
            latex_header_and_sample_list_dictionary: first six characters of
                a sample_id -> LaTeX header block.
            loq_dictionary: analyte index -> limit-of-quantitation value used
                in the report tables.
        """
        self.finished_reports_dictionary = finished_reports_dictionary
        self.single_reports_dictionary = single_reports_dictionary
        self.sample_data = sample_data
        self.latex_header_and_sample_list_dictionary = latex_header_and_sample_list_dictionary
        self.loq_dictionary = loq_dictionary
def generate_single_sample_reports(self):
for key, value in self.single_reports_dictionary.items():
if value[0] == 'Percent' and value[1] == 'Basic':
self.generate_single_percent_basic_report(key)
elif value[0] == 'Percent' and value[1] == 'Deluxe':
self.generate_single_percent_deluxe_report(key)
elif value[0] == 'mg/g' and value[1] == 'Basic':
self.generate_single_mg_g_basic_report(key)
elif value[0] == 'mg/g' and value[1] == 'Deluxe':
self.generate_single_mg_g_deluxe_report(key)
elif value[0] == 'mg/mL' and value[1] == 'Basic':
self.generate_single_mg_ml_basic_report(key)
elif value[0] == 'mg/mL' and value[1] == 'Deluxe':
self.generate_single_mg_ml_deluxe_report(key)
elif value[0] == 'per unit' and value[1] == 'Basic':
self.generate_single_unit_basic_report(key)
elif value[0] == 'per unit' and value[1] == 'Deluxe':
self.generate_single_unit_deluxe_report(key)
else:
self.generate_single_percent_deluxe_report(key)
return self.finished_reports_dictionary
def generate_single_percent_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'Percent',
'Basic')
temporary_table = self.create_single_basic_table(temporary_data, 'Percent')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_g_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'mg_g',
'Basic')
temporary_table = self.create_single_basic_table(temporary_data, 'mg_g')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_percent_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'Percent',
'Deluxe')
temporary_table = self.create_single_deluxe_table(temporary_data, 'Percent')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_g_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'mg_g',
'Deluxe')
temporary_table = self.create_single_deluxe_table(temporary_data, 'mg_g')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_ml_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Basic',
'density')
temporary_table = self.create_single_basic_table_unit(temporary_data, 'density')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_ml_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Deluxe',
'density')
temporary_table = self.create_single_deluxe_table_unit(temporary_data, 'density')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_unit_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Basic',
'unit')
temporary_table = self.create_single_basic_table_unit(temporary_data, 'unit')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_unit_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Deluxe',
'unit')
temporary_table = self.create_single_deluxe_table_unit(temporary_data, 'unit')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def get_standard_recovery_values(self, report_type):
temporary_data_frame = self.sample_data.best_recovery_qc_data_frame
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0,
['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0,
['percrecovery']].iloc[0]['percrecovery']
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0,
['percrecovery']].iloc[0]['percrecovery']
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0,
['percrecovery']].iloc[0]['percrecovery']
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
# cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0,
# ['percrecovery']].iloc[0]['percrecovery']
# cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0,
['percrecovery']].iloc[0]['percrecovery']
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0,
['percrecovery']].iloc[0]['percrecovery']
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0,
['percrecovery']].iloc[0]['percrecovery']
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0,
['percrecovery']].iloc[0]['percrecovery']
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0,
['percrecovery']].iloc[0]['percrecovery']
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0,
['percrecovery']].iloc[0]['percrecovery']
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0,
['percrecovery']].iloc[0]['percrecovery']
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0,
['percrecovery']].iloc[0]['percrecovery']
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0,
['percrecovery']].iloc[0]['percrecovery']
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0,
['percrecovery']].iloc[0]['percrecovery']
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0,
['percrecovery']].iloc[0]['percrecovery']
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0,
['percrecovery']].iloc[0]['percrecovery']
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0,
['percrecovery']].iloc[0]['percrecovery']
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0,
['percrecovery']].iloc[0]['percrecovery']
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
if report_type == 'Deluxe':
return [ibu_recovery_value, cbdv_value, cbdva_value, thcv_value, "N/A", cbd_value, cbg_value,
cbda_value, cbn_value, cbga_value, thcva_value, d9_thc_value, d8_thc_value, cbl_value, cbc_value,
cbna_value, thca_value, cbla_value, cbca_value]
else:
return [ibu_recovery_value, cbd_value, cbda_value, cbn_value, cbna_value, d9_thc_value, thca_value,
d8_thc_value]
def get_relevant_values_and_recoveries_for_single_reports(self, temporary_data_frame, sample_type, report_type):
if sample_type == 'Percent':
sample_column_type = 'percentage_concentration'
elif sample_type == 'mg_g':
sample_column_type = 'mg_g'
elif sample_type == 'mg_ml':
sample_column_type = 'mg_ml'
else:
sample_column_type = 'percentage_concentration'
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0,
['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0,
[sample_column_type]].iloc[0][sample_column_type]
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0,
[sample_column_type]].iloc[0][sample_column_type]
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0,
[sample_column_type]].iloc[0][sample_column_type]
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0,
[sample_column_type]].iloc[0][sample_column_type]
cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0,
[sample_column_type]].iloc[0][sample_column_type]
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0,
[sample_column_type]].iloc[0][sample_column_type]
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0,
[sample_column_type]].iloc[0][sample_column_type]
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0,
[sample_column_type]].iloc[0][sample_column_type]
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0,
[sample_column_type]].iloc[0][sample_column_type]
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0,
[sample_column_type]].iloc[0][sample_column_type]
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0,
[sample_column_type]].iloc[0][sample_column_type]
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0,
[sample_column_type]].iloc[0][sample_column_type]
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0,
[sample_column_type]].iloc[0][sample_column_type]
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0,
[sample_column_type]].iloc[0][sample_column_type]
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0,
[sample_column_type]].iloc[0][sample_column_type]
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0,
[sample_column_type]].iloc[0][sample_column_type]
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0,
[sample_column_type]].iloc[0][sample_column_type]
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0,
[sample_column_type]].iloc[0][sample_column_type]
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
if report_type == 'Deluxe':
return [ibu_recovery_value, cbdv_value, cbdva_value, thcv_value, cbgva_value, cbd_value, cbg_value,
cbda_value, cbn_value, cbga_value, thcva_value, d9_thc_value, d8_thc_value, cbl_value, cbc_value,
cbna_value, thca_value, cbla_value, cbca_value]
else:
return [ibu_recovery_value, cbd_value, cbda_value, cbn_value, cbna_value, d9_thc_value, thca_value,
d8_thc_value]
def get_relevant_values_and_recoveries_for_single_reports_unit(self, temporary_data_frame, report_type, unit_type):
if unit_type == 'unit':
column_1 = 'mg_g'
column_2 = 'mg_unit'
elif unit_type == 'density':
column_1 = 'mg_ml'
column_2 = 'percentage_concentration'
else:
column_1 = 'percentage_concentration'
column_2 = 'percentage_concentration'
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0,
['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0,
[column_1]].iloc[0][column_1]
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0,
[column_1]].iloc[0][column_1]
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0,
[column_1]].iloc[0][column_1]
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0,
[column_1]].iloc[0][column_1]
cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0,
[column_1]].iloc[0][column_1]
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0,
[column_1]].iloc[0][column_1]
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0,
[column_1]].iloc[0][column_1]
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0,
[column_1]].iloc[0][column_1]
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0,
[column_1]].iloc[0][column_1]
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0,
[column_1]].iloc[0][column_1]
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0,
[column_1]].iloc[0][column_1]
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0,
[column_1]].iloc[0][column_1]
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0,
[column_1]].iloc[0][column_1]
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0,
[column_1]].iloc[0][column_1]
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0,
[column_1]].iloc[0][column_1]
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0,
[column_1]].iloc[0][column_1]
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0,
[column_1]].iloc[0][column_1]
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0,
[column_1]].iloc[0][column_1]
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
# UNITS
cbdv_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0,
[column_2]].iloc[0][column_2]
cbdv_value_u = self.round_down_to_correct_decimal_point(cbdv_value_u)
cbdva_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0,
[column_2]].iloc[0][column_2]
cbdva_value_u = self.round_down_to_correct_decimal_point(cbdva_value_u)
thcv_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0,
[column_2]].iloc[0][column_2]
thcv_value_u = self.round_down_to_correct_decimal_point(thcv_value_u)
cbgva_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0,
[column_2]].iloc[0][column_2]
cbgva_value_u = self.round_down_to_correct_decimal_point(cbgva_value_u)
cbd_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0,
[column_2]].iloc[0][column_2]
cbd_value_u = self.round_down_to_correct_decimal_point(cbd_value_u)
cbg_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0,
[column_2]].iloc[0][column_2]
cbg_value_u = self.round_down_to_correct_decimal_point(cbg_value_u)
cbda_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0,
[column_2]].iloc[0][column_2]
cbda_value_u = self.round_down_to_correct_decimal_point(cbda_value_u)
cbn_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0,
[column_2]].iloc[0][column_2]
cbn_value_u = self.round_down_to_correct_decimal_point(cbn_value_u)
cbga_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0,
[column_2]].iloc[0][column_2]
cbga_value_u = self.round_down_to_correct_decimal_point(cbga_value_u)
thcva_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0,
[column_2]].iloc[0][column_2]
thcva_value_u = self.round_down_to_correct_decimal_point(thcva_value_u)
d9_thc_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0,
[column_2]].iloc[0][column_2]
d9_thc_value_u = self.round_down_to_correct_decimal_point(d9_thc_value_u)
d8_thc_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0,
[column_2]].iloc[0][column_2]
d8_thc_value_u = self.round_down_to_correct_decimal_point(d8_thc_value_u)
cbl_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0,
[column_2]].iloc[0][column_2]
cbl_value_u = self.round_down_to_correct_decimal_point(cbl_value_u)
cbc_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0,
[column_2]].iloc[0][column_2]
cbc_value_u = self.round_down_to_correct_decimal_point(cbc_value_u)
cbna_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0,
[column_2]].iloc[0][column_2]
cbna_value_u = self.round_down_to_correct_decimal_point(cbna_value_u)
thca_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0,
[column_2]].iloc[0][column_2]
thca_value_u = self.round_down_to_correct_decimal_point(thca_value_u)
cbla_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0,
[column_2]].iloc[0][column_2]
cbla_value_u = self.round_down_to_correct_decimal_point(cbla_value_u)
cbca_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0,
[column_2]].iloc[0][column_2]
cbca_value_u = self.round_down_to_correct_decimal_point(cbca_value_u)
if report_type == 'Deluxe':
return [ibu_recovery_value, [cbdv_value, cbdv_value_u], [cbdva_value, cbdva_value_u],
[thcv_value, thcv_value_u], [cbgva_value, cbgva_value_u], [cbd_value, cbd_value_u],
[cbg_value, cbg_value_u], [cbda_value, cbda_value_u], [cbn_value, cbn_value_u],
[cbga_value, cbga_value_u], [thcva_value, thcva_value_u], [d9_thc_value, d9_thc_value_u],
[d8_thc_value, d8_thc_value_u], [cbl_value, cbl_value_u], [cbc_value, cbc_value_u],
[cbna_value, cbna_value_u], [thca_value, thca_value_u], [cbla_value, cbla_value_u],
[cbca_value, cbca_value_u]]
else:
return [ibu_recovery_value, [cbd_value, cbd_value_u], [cbda_value, cbda_value_u], [cbn_value, cbn_value_u],
[cbna_value, cbna_value_u], [d9_thc_value, d9_thc_value_u], [thca_value, thca_value_u],
[d8_thc_value, d8_thc_value_u]]
    def create_single_deluxe_table(self, data, sample_type):
        """Render the deluxe single-sample potency table as a LaTeX string.

        Args:
            data: 19-entry analyte value list as produced by
                get_relevant_values_and_recoveries_for_single_reports
                (index 0 is the IBU recovery; indices 1-18 are analyte values).
            sample_type: 'Percent', 'mg_g' or 'mg_ml'; mapped below to the
                LaTeX unit label shown in the column header (default percent).

        Returns:
            The LaTeX tabular block for the deluxe report.
        """
        thc_total = self.create_total_line('regular', 'deluxe', 'THC', data)
        cbd_total = self.create_total_line('regular', 'deluxe', 'CBD', data)
        # Recoveries from the best QC run fill the "Recovery" column.
        recov_data = self.get_standard_recovery_values('Deluxe')
        # Map the internal unit key onto the LaTeX unit label for the header.
        if sample_type == 'Percent':
            sample_type = r'\%'
        elif sample_type == 'mg_g':
            sample_type = 'mg/g'
        elif sample_type == 'mg_ml':
            sample_type = 'mg/mL'
        else:
            sample_type = r'\%'
        # NOTE: whitespace inside the raw strings below is emitted verbatim
        # into the LaTeX document; do not re-indent these lines.
        deluxe_potency_table_string = r"""
        \newline
        \renewcommand{\arraystretch}{1.2}
        \begin{table}[h!]\centering
        \begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
                        p{\dimexpr0.490\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
                        p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
                        p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
                        p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
                        }
        \textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$\\
         & (""" + sample_type + r""") & (\%) & (\%) & (\%) \\
        \hline
        \hline
        $\Delta^{9}$-THC & """ + data[11] + r""" & ND & """ + recov_data[11] + r"""& """ + self.loq_dictionary[11] + r"""\\
        $\Delta^{9}$-THC Acid & """ + data[16] + r""" & ND & """ + recov_data[16] + r"""& """ + self.loq_dictionary[16] + r"""\\
        \hline
        \hline
        \textbf{Total THC*} & \textbf{""" + thc_total + r"""} & & &\\
        \hline
        \hline
        $\Delta^{8}$THC & """ + data[12] + r""" & ND & """ + recov_data[12] + r"""& """ + self.loq_dictionary[12] + r"""\\
        $\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\
        \hline
        Cannabichromene (CBC) & """ + data[14] + r""" & ND& """ + recov_data[14] + r"""& """ + self.loq_dictionary[14] + r"""\\
        Cannabichromene Acid & """ + data[18] + r""" & ND & """ + recov_data[18] + r"""& """ + self.loq_dictionary[18] + r"""\\
        \hline
        Cannabidiol (CBD) &""" + data[5] + r""" & ND & """ + recov_data[5] + r"""& """ + self.loq_dictionary[5] + r"""\\
        Cannabidiol Acid & """ + data[7] + r""" & ND & """ + recov_data[7] + r"""& """ + self.loq_dictionary[7] + r"""\\
        \hline
        \hline
        \textbf{Total CBD**} & \textbf{""" + cbd_total + r"""} & & &\\
        \hline
        \hline
        Cannabigerol (CBG) & """ + data[6] + r""" & ND & """ + recov_data[6] + r"""& """ + self.loq_dictionary[6] + r"""\\
        Cannabigerol Acid & """ + data[9] + r""" & ND & """ + recov_data[9] + r"""& """ + self.loq_dictionary[9] + r"""\\
        \hline
        Cannabicyclol (CBL) & """ + data[13] + r""" & ND & """ + recov_data[13] + r"""& """ + self.loq_dictionary[13] + r"""\\
        Cannabicyclol Acid & """ + data[17] + r""" & ND & """ + recov_data[17] + r"""& """ + self.loq_dictionary[17] + r"""\\
        \hline
        Cannabidivarin (CBDV) & """ + data[1] + r""" & ND & """ + recov_data[1] + r"""& """ + self.loq_dictionary[1] + r"""\\
        Cannabidivarin Acid & """ + data[2] + r""" & ND & """ + recov_data[2] + r"""&""" + self.loq_dictionary[2] + r"""\\
        \hline
        $\Delta^{9}$ THCV & """ + data[3] + r""" & ND& """ + recov_data[3] + r"""& """ + self.loq_dictionary[3] + r"""\\
        $\Delta^{9}$ THCV Acid & """ + data[10] + r""" & ND & """ + recov_data[10] + r"""& """ + self.loq_dictionary[10] + r"""\\
        \hline
        Cannabinol (CBN) & """ + data[8] + r""" & ND & """ + recov_data[8] + r"""& """ + self.loq_dictionary[8] + r"""\\
        Cannabinol Acid & """ + data[15] + r""" & ND & """ + recov_data[15] + r"""& """ + self.loq_dictionary[15] + r""" \\
        \hline
        Cannabigerivarin Acid & ND & ND & N/A & N/A \\
        \hline
        \hline
        \textbf{Moisture} & 0.00 & & &\\
        \hline
        \hline
        \end{tabular}
        \end{table}
        """
        return deluxe_potency_table_string
def create_single_deluxe_table_unit(self, data, unit_type):
    """Render the Deluxe (full cannabinoid panel) LaTeX table for one sample
    reported in two units side by side.

    data: indexable collection where each analyte entry is a two-element pair
        [value_in_unit_1, value_in_unit_2], in the index order produced by
        get_relevant_values_and_recoveries_for_single_reports_unit.
    unit_type: 'unit' -> column units mg/g and mg/unit; 'density' -> mg/mL
        and \%; anything else falls back to \% for both columns.
    Returns the complete LaTeX ``table`` environment as a string.

    NOTE(review): the 'Cannabigerivarin Acid' row emits only 5 cells for a
    6-column layout (one trailing cell short) -- confirm this is intended.
    """
    thc_total = self.create_total_line('unit', 'deluxe', 'THC', data)
    cbd_total = self.create_total_line('unit', 'deluxe', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Deluxe')
    # Pick the unit labels for the two "Sample 1" columns.
    if unit_type == 'unit':
        sample_type_1 = 'mg/g'
        sample_type_2 = 'mg/unit'
    elif unit_type == 'density':
        sample_type_1 = 'mg/mL'
        sample_type_2 = r'\%'
    else:
        sample_type_1 = r'\%'
        sample_type_2 = r'\%'
    deluxe_potency_table_string = r"""
    \newline
    \renewcommand{\arraystretch}{1.2}
    \begin{table}[h!]\centering
    \begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    }
    \textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$ \\
    & (""" + sample_type_1 + r""") & (""" + sample_type_2 + r""") & (\%) & (\%) & (\%) \\
    \hline
    \hline
    $\Delta^{9}$-THC & """ + data[11][0] + r""" & """ + data[11][1] + r""" & ND & """ + recov_data[11] + r"""&""" + self.loq_dictionary[11] + r"""\\
    $\Delta^{9}$-THC Acid & """ + data[16][0] + r""" & """ + data[16][1] + r""" & ND & """ + recov_data[16] + r"""& """ + self.loq_dictionary[16] + r"""\\
    \hline
    \hline
    \textbf{Total THC*} & \textbf{""" + thc_total[0] + r"""} & \textbf{""" + thc_total[1] + r"""} & & &\\
    \hline
    \hline
    $\Delta^{8}$THC & """ + data[12][0] + r""" & """ + data[12][1] + r""" & ND & """ + recov_data[12] + r"""& """ + self.loq_dictionary[12] + r"""\\
    $\Delta^{8}$THC Acid & ND & ND & ND & N/A & N/A\\
    \hline
    Cannabichromene (CBC) & """ + data[14][0] + r""" & """ + data[14][1] + r""" & ND & """ + recov_data[14] + r"""& """ + self.loq_dictionary[14] + r"""\\
    Cannabichromene Acid & """ + data[18][0] + r""" & """ + data[18][1] + r""" & ND & """ + recov_data[18] + r"""& """ + self.loq_dictionary[18] + r"""\\
    \hline
    Cannabidiol (CBD) &""" + data[5][0] + r""" & """ + data[5][1] + r""" & ND & """ + recov_data[5] + r"""& """ + self.loq_dictionary[5] + r"""\\
    Cannabidiol Acid & """ + data[7][0] + r""" & """ + data[7][1] + r""" & ND & """ + recov_data[7] + r"""& """ + self.loq_dictionary[7] + r"""\\
    \hline
    \hline
    \textbf{Total CBD**} & \textbf{""" + cbd_total[0] + r"""} & \textbf{""" + cbd_total[1] + r"""} & & &\\
    \hline
    \hline
    Cannabigerol (CBG) & """ + data[6][0] + r""" & """ + data[6][1] + r""" & ND & """ + recov_data[6] + r"""& """ + self.loq_dictionary[6] + r"""\\
    Cannabigerol Acid & """ + data[9][0] + r""" & """ + data[9][1] + r""" & ND & """ + recov_data[9] + r"""& """ + self.loq_dictionary[9] + r"""\\
    \hline
    Cannabicyclol (CBL) & """ + data[13][0] + r""" & """ + data[13][1] + r""" & ND & """ + recov_data[13] + r"""& """ + self.loq_dictionary[13] + r"""\\
    Cannabicyclol Acid & """ + data[17][0] + r""" & """ + data[17][1] + r""" & ND & """ + recov_data[17] + r"""& """ + self.loq_dictionary[17] + r"""\\
    \hline
    Cannabidivarin (CBDV) & """ + data[1][0] + r""" & """ + data[1][1] + r""" & ND & """ + recov_data[1] + r"""& """ + self.loq_dictionary[1] + r"""\\
    Cannabidivarin Acid & """ + data[2][0] + r""" & """ + data[2][1] + r""" & ND & """ + recov_data[2] + r"""& """ + self.loq_dictionary[2] + r"""\\
    \hline
    $\Delta^{9}$ THCV & """ + data[3][0] + r""" & """ + data[3][1] + r""" & ND & """ + recov_data[3] + r"""& """ + self.loq_dictionary[3] + r"""\\
    $\Delta^{9}$ THCV Acid & """ + data[10][0] + r""" & """ + data[10][1] + r""" & ND & """ + recov_data[10] + r"""& """ + self.loq_dictionary[10] + r"""\\
    \hline
    Cannabinol (CBN) & """ + data[8][0] + r""" & """ + data[8][1] + r""" & ND & """ + recov_data[8] + r"""& """ + self.loq_dictionary[8] + r"""\\
    Cannabinol Acid & """ + data[15][0] + r""" & """ + data[15][1] + r""" & ND & """ + recov_data[15] + r"""& """ + self.loq_dictionary[15] + r""" \\
    \hline
    Cannabigerivarin Acid & ND & ND & N/A & N/A \\
    \hline
    \hline
    \textbf{Moisture} & 0.00 & & & \\
    \hline
    \hline
    \end{tabular}
    \end{table}
    """
    return deluxe_potency_table_string
def create_single_basic_table(self, data, sample_type):
    """Render the Basic (THC, CBD and CBN only) LaTeX potency table for a
    single sample reported in one unit.

    data: list in the index order produced by
        get_relevant_values_and_recoveries_for_single_reports for the
        'Basic' report type (1=CBD, 2=CBDA, 3=CBN, 4=CBNA, 5=d9-THC,
        6=THCA, 7=d8-THC; index 0 is the recovery value, unused here).
    sample_type: 'Percent', 'mg_g' or 'mg_ml'; anything else falls back to
        a percent column header.
    Returns the complete LaTeX ``table`` environment as a string.
    """
    thc_total = self.create_total_line('regular', 'basic', 'THC', data)
    cbd_total = self.create_total_line('regular', 'basic', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Basic')
    # Translate the report's unit keyword into the LaTeX column label.
    if sample_type == 'Percent':
        sample_type = r'\%'
    elif sample_type == 'mg_g':
        sample_type = 'mg/g'
    elif sample_type == 'mg_ml':
        sample_type = 'mg/mL'
    else:
        sample_type = r'\%'
    basic_potency_table_string = r"""
    \newline
    \renewcommand{\arraystretch}{1.2}
    \begin{table}[h!]\centering
    \begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.490\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    }
    \textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$\\
    & (""" + sample_type + r""") & (\%) & (\%) & (\%) \\
    \hline
    \hline
    $\Delta^{9}$-THC & """ + data[5] + r""" & ND & """ + recov_data[5] + r"""& """ + self.loq_dictionary[5] + r"""\\
    $\Delta^{9}$-THC Acid & """ + data[6] + r""" & ND & """ + recov_data[6] + r"""& """ + self.loq_dictionary[6] + r"""\\
    \hline
    \hline
    \textbf{Total THC*} & \textbf{""" + thc_total + r"""} & & &\\
    \hline
    \hline
    $\Delta^{8}$-THC & """ + data[7] + r""" & ND & """ + recov_data[7] + r"""& """ + self.loq_dictionary[7] + r"""\\
    $\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\
    \hline
    Cannabidiol (CBD) &""" + data[1] + r""" & ND & """ + recov_data[1] + r"""& """ + self.loq_dictionary[1] + r"""\\
    Cannabidiol Acid &""" + data[2] + r""" & ND & """ + recov_data[2] + r"""& """ + self.loq_dictionary[2] + r"""\\
    \hline
    \hline
    \textbf{Total CBD**} & \textbf{""" + cbd_total + r"""} & & &\\
    \hline
    \hline
    Cannabinol (CBN) & """ + data[3] + r""" & ND & """ + recov_data[3] + r"""& """ + self.loq_dictionary[3] + r"""\\
    Cannabinol Acid & """ + data[4] + r""" & ND & """ + recov_data[4] + r"""& """ + self.loq_dictionary[4] + r"""\\
    \hline
    \hline
    \textbf{Moisture} & 0.00 & & &\\
    \hline
    \hline
    \end{tabular}
    \end{table}
    """
    return basic_potency_table_string
def create_single_basic_table_unit(self, data, unit_type):
    """Render the Basic LaTeX potency table for one sample reported in two
    units side by side.

    data: indexable collection where each analyte entry is a two-element
        pair [value_in_unit_1, value_in_unit_2], in the index order
        produced by get_relevant_values_and_recoveries_for_single_reports_unit
        for the 'Basic' report type.
    unit_type: 'unit' -> column units mg/g and mg/unit; 'density' -> mg/mL
        and \%; anything else falls back to \% for both columns.
    Returns the complete LaTeX ``table`` environment as a string.
    """
    thc_total = self.create_total_line('unit', 'basic', 'THC', data)
    cbd_total = self.create_total_line('unit', 'basic', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Basic')
    # Pick the unit labels for the two "Sample 1" columns.
    if unit_type == 'unit':
        sample_type_1 = 'mg/g'
        sample_type_2 = 'mg/unit'
    elif unit_type == 'density':
        sample_type_1 = 'mg/mL'
        sample_type_2 = r'\%'
    else:
        sample_type_1 = r'\%'
        sample_type_2 = r'\%'
    basic_potency_table_string = r"""
    \newline
    \renewcommand{\arraystretch}{1.2}
    \begin{table}[h!]\centering
    \begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
    p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
    }
    \textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$ \\
    & (""" + sample_type_1 + r""") & (""" + sample_type_2 + r""") & (\%) & (\%) & (\%) \\
    \hline
    \hline
    $\Delta^{9}$ THC & """ + data[5][0] + r""" & """ + data[5][1] + r""" & ND & """ + recov_data[5] + r"""& """ + self.loq_dictionary[5] + r"""\\
    $\Delta^{9}$ THC Acid & """ + data[6][0] + r""" & """ + data[6][1] + r""" & ND & """ + recov_data[6] + r"""& """ + self.loq_dictionary[6] + r"""\\
    \hline
    \hline
    \textbf{Total THC*} & \textbf{""" + thc_total[0] + r"""} & \textbf{""" + thc_total[1] + r"""} & & &\\
    \hline
    \hline
    $\Delta^{8}$ THC & """ + data[7][0] + r""" & """ + data[7][1] + r""" & ND & """ + recov_data[7] + r"""& """ + self.loq_dictionary[7] + r"""\\
    $\Delta^{8}$THC Acid & ND & ND & ND & N/A & N/A \\
    \hline
    Cannabidiol (CBD) &""" + data[1][0] + r""" & """ + data[1][1] + r""" & ND & """ + recov_data[1] + r"""& """ + self.loq_dictionary[1] + r"""\\
    Cannabidiol Acid &""" + data[2][0] + r""" & """ + data[2][1] + r""" & ND & """ + recov_data[2] + r"""& """ + self.loq_dictionary[2] + r"""\\
    \hline
    \hline
    \textbf{Total CBD**} & \textbf{""" + cbd_total[0] + r"""} & \textbf{""" + cbd_total[1] + r"""} & & &\\
    \hline
    \hline
    Cannabinol (CBN) & """ + data[3][0] + r""" & """ + data[3][1] + r""" & ND & """ + recov_data[3] + r"""& """ + self.loq_dictionary[3] + r"""\\
    Cannabinol Acid & """ + data[4][0] + r""" & """ + data[4][1] + r""" & ND & """ + recov_data[4] + r"""& """ + self.loq_dictionary[4] + r"""\\
    \hline
    \hline
    \textbf{Moisture} & 0.00 & & &\\
    \hline
    \hline
    \end{tabular}
    \end{table}
    """
    return basic_potency_table_string
def generate_footer(self):
    """Return the standard LaTeX report footer.

    Contains the methods blurb, the Total THC*/Total CBD** footnote
    definitions, the chemists' signature lines, the three fancy-footer
    fields (lab name/web, mailing address, phone/email) and the closing
    ``\\end{document}``.
    """
    return r"""
    Methods: solvent extraction; measured by UPLC-UV, tandem MS, P.I. 1.14 \& based on USP monograph 29 \newline
    $\si{S_{o}}$ = standard deviation at zero analyte concentration. MDL generally considered to be 3x $\si{S_{o}}$ value. \newline\newline
    ND = none detected. N/A = not applicable. THC = tetrahydrocannabinol.\newline
    \textbf{*Total THC} = $\Delta^{9}$-THC + (THCA x 0.877 ). \textbf{**Total CBD} = CBD + (CBDA x 0.877).\newline\newline
    Material will be held for up to 3 weeks unless alternative arrangements have been made. Sample holding time may vary and is dependent on MBL license restrictions.
    \newline\newline\newline
    R. Bilodeau \phantom{aaaaaaaaaaaaaaaaaaaaaaaaaxaaaaaasasssssssssssss}H. Hartmann\\ Analytical Chemist: \underline{\hspace{3cm}}{ \hspace{3.2cm} Sr. Analytical Chemist: \underline{\hspace{3cm}}
    \fancyfoot[C]{\textbf{MB Laboratories Ltd.}\\ \textbf{Web:} www.mblabs.com}
    \fancyfoot[R]{\textbf{Mail:} PO Box 2103\\ Sidney, B.C., V8L 356}
    \fancyfoot[L]{\textbf{T:} 250 656 1334\\ \textbf{E:} info@mblabs.com}
    \end{document}
    """
def round_down_to_correct_decimal_point(self, data_value):
    """Truncate a numeric result to the lab's reporting precision.

    Values are truncated (not rounded) by string slicing:
    1 <= v < 100 keeps 4 characters (e.g. '12.3'), 0 < v < 1 keeps 5
    (e.g. '0.123'), and v >= 100 keeps the whole-number part only.
    Non-positive values (and NaN, whose comparisons are all False) are
    reported as 'ND'.

    Returns the formatted value as a string, or 'ND'.
    """
    if 100 > data_value >= 1:
        data_value = str(data_value)[0:4]
    elif 1 > data_value > 0:
        data_value = str(data_value)[0:5]
    elif data_value >= 100:
        # BUG FIX: the previous str(...)[0:3] slice chopped values >= 1000
        # down to their first three digits (1234.5 -> '123'). Keeping the
        # full integer part is identical for 100-999 and correct beyond.
        data_value = str(int(data_value))
    else:
        data_value = 'ND'
    return data_value
def create_total_line(self, total_line_type, report_type, cannabinoid, data):
    """Compute the 'Total THC*' / 'Total CBD**' line for a potency table.

    Total = neutral form + (acid form * 0.877), 0.877 being the
    decarboxylation conversion factor quoted in the report footer.
    'ND' inputs count as zero, and non-positive totals are reported 'ND'.

    total_line_type: 'unit' -> each data entry is a [value, unit_value]
        pair and a two-element list of formatted totals is returned;
        'regular' -> data entries are scalar strings and a single
        formatted total string is returned. Any other value returns None
        (matching the historical fall-through).
    report_type: 'basic' selects the basic-panel indices, anything else
        the deluxe-panel indices, of the neutral/acid analytes in `data`.
    cannabinoid: 'THC' or anything else (treated as CBD).
    """

    def _as_number(value):
        # 'ND' (none detected) contributes nothing to the total.
        return 0.0 if value == 'ND' else float(value)

    def _format(total):
        # Same truncation rules as round_down_to_correct_decimal_point.
        if 100 > total >= 1:
            return str(total)[0:4]
        elif 1 > total > 0:
            return str(total)[0:5]
        elif total >= 100:
            # Keep the full integer part; the historical [0:3] slice broke
            # totals >= 1000 (identical behaviour for 100-999).
            return str(int(total))
        return 'ND'

    # Indices of the neutral / acid analytes within the report's data list.
    if cannabinoid == 'THC':
        neutral_index, acid_index = (5, 6) if report_type == 'basic' else (11, 16)
    else:
        neutral_index, acid_index = (1, 2) if report_type == 'basic' else (5, 7)

    if total_line_type == "unit":
        totals = []
        for column in (0, 1):  # one total per unit column
            neutral = _as_number(data[neutral_index][column])
            acid = _as_number(data[acid_index][column])
            totals.append(_format(neutral + acid * 0.877))
        return totals
    elif total_line_type == "regular":
        neutral = _as_number(data[neutral_index])
        acid = _as_number(data[acid_index])
        return _format(neutral + acid * 0.877)
class Singlemethods:
def __init__(self, finished_reports_dictionary, single_reports_dictionary, sample_data, latex_header_and_sample_list_dictionary, loq_dictionary):
self.finished_reports_dictionary = finished_reports_dictionary
self.single_reports_dictionary = single_reports_dictionary
self.sample_data = sample_data
self.latex_header_and_sample_list_dictionary = latex_header_and_sample_list_dictionary
self.loq_dictionary = loq_dictionary
def generate_single_sample_reports(self):
for (key, value) in self.single_reports_dictionary.items():
if value[0] == 'Percent' and value[1] == 'Basic':
self.generate_single_percent_basic_report(key)
elif value[0] == 'Percent' and value[1] == 'Deluxe':
self.generate_single_percent_deluxe_report(key)
elif value[0] == 'mg/g' and value[1] == 'Basic':
self.generate_single_mg_g_basic_report(key)
elif value[0] == 'mg/g' and value[1] == 'Deluxe':
self.generate_single_mg_g_deluxe_report(key)
elif value[0] == 'mg/mL' and value[1] == 'Basic':
self.generate_single_mg_ml_basic_report(key)
elif value[0] == 'mg/mL' and value[1] == 'Deluxe':
self.generate_single_mg_ml_deluxe_report(key)
elif value[0] == 'per unit' and value[1] == 'Basic':
self.generate_single_unit_basic_report(key)
elif value[0] == 'per unit' and value[1] == 'Deluxe':
self.generate_single_unit_deluxe_report(key)
else:
self.generate_single_percent_deluxe_report(key)
return self.finished_reports_dictionary
def generate_single_percent_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame, 'Percent', 'Basic')
temporary_table = self.create_single_basic_table(temporary_data, 'Percent')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_g_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame, 'mg_g', 'Basic')
temporary_table = self.create_single_basic_table(temporary_data, 'mg_g')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_percent_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame, 'Percent', 'Deluxe')
temporary_table = self.create_single_deluxe_table(temporary_data, 'Percent')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_g_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame, 'mg_g', 'Deluxe')
temporary_table = self.create_single_deluxe_table(temporary_data, 'mg_g')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_ml_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame, 'Basic', 'density')
temporary_table = self.create_single_basic_table_unit(temporary_data, 'density')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_ml_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame, 'Deluxe', 'density')
temporary_table = self.create_single_deluxe_table_unit(temporary_data, 'density')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_unit_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame, 'Basic', 'unit')
temporary_table = self.create_single_basic_table_unit(temporary_data, 'unit')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_unit_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid'] == sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame, 'Deluxe', 'unit')
temporary_table = self.create_single_deluxe_table_unit(temporary_data, 'unit')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def get_standard_recovery_values(self, report_type):
temporary_data_frame = self.sample_data.best_recovery_qc_data_frame
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0, ['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0, ['percrecovery']].iloc[0]['percrecovery']
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0, ['percrecovery']].iloc[0]['percrecovery']
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0, ['percrecovery']].iloc[0]['percrecovery']
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0, ['percrecovery']].iloc[0]['percrecovery']
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0, ['percrecovery']].iloc[0]['percrecovery']
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0, ['percrecovery']].iloc[0]['percrecovery']
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0, ['percrecovery']].iloc[0]['percrecovery']
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0, ['percrecovery']].iloc[0]['percrecovery']
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0, ['percrecovery']].iloc[0]['percrecovery']
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0, ['percrecovery']].iloc[0]['percrecovery']
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0, ['percrecovery']].iloc[0]['percrecovery']
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0, ['percrecovery']].iloc[0]['percrecovery']
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0, ['percrecovery']].iloc[0]['percrecovery']
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0, ['percrecovery']].iloc[0]['percrecovery']
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0, ['percrecovery']].iloc[0]['percrecovery']
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0, ['percrecovery']].iloc[0]['percrecovery']
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0, ['percrecovery']].iloc[0]['percrecovery']
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
if report_type == 'Deluxe':
return [ibu_recovery_value, cbdv_value, cbdva_value, thcv_value, 'N/A', cbd_value, cbg_value, cbda_value, cbn_value, cbga_value, thcva_value, d9_thc_value, d8_thc_value, cbl_value, cbc_value, cbna_value, thca_value, cbla_value, cbca_value]
else:
return [ibu_recovery_value, cbd_value, cbda_value, cbn_value, cbna_value, d9_thc_value, thca_value, d8_thc_value]
def get_relevant_values_and_recoveries_for_single_reports(self, temporary_data_frame, sample_type, report_type):
if sample_type == 'Percent':
sample_column_type = 'percentage_concentration'
elif sample_type == 'mg_g':
sample_column_type = 'mg_g'
elif sample_type == 'mg_ml':
sample_column_type = 'mg_ml'
else:
sample_column_type = 'percentage_concentration'
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0, ['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0, [sample_column_type]].iloc[0][sample_column_type]
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0, [sample_column_type]].iloc[0][sample_column_type]
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0, [sample_column_type]].iloc[0][sample_column_type]
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0, [sample_column_type]].iloc[0][sample_column_type]
cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0, [sample_column_type]].iloc[0][sample_column_type]
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0, [sample_column_type]].iloc[0][sample_column_type]
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0, [sample_column_type]].iloc[0][sample_column_type]
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0, [sample_column_type]].iloc[0][sample_column_type]
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0, [sample_column_type]].iloc[0][sample_column_type]
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0, [sample_column_type]].iloc[0][sample_column_type]
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0, [sample_column_type]].iloc[0][sample_column_type]
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0, [sample_column_type]].iloc[0][sample_column_type]
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0, [sample_column_type]].iloc[0][sample_column_type]
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0, [sample_column_type]].iloc[0][sample_column_type]
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0, [sample_column_type]].iloc[0][sample_column_type]
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0, [sample_column_type]].iloc[0][sample_column_type]
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0, [sample_column_type]].iloc[0][sample_column_type]
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0, [sample_column_type]].iloc[0][sample_column_type]
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
if report_type == 'Deluxe':
return [ibu_recovery_value, cbdv_value, cbdva_value, thcv_value, cbgva_value, cbd_value, cbg_value, cbda_value, cbn_value, cbga_value, thcva_value, d9_thc_value, d8_thc_value, cbl_value, cbc_value, cbna_value, thca_value, cbla_value, cbca_value]
else:
return [ibu_recovery_value, cbd_value, cbda_value, cbn_value, cbna_value, d9_thc_value, thca_value, d8_thc_value]
def get_relevant_values_and_recoveries_for_single_reports_unit(self, temporary_data_frame, report_type, unit_type):
if unit_type == 'unit':
column_1 = 'mg_g'
column_2 = 'mg_unit'
elif unit_type == 'density':
column_1 = 'mg_ml'
column_2 = 'percentage_concentration'
else:
column_1 = 'percentage_concentration'
column_2 = 'percentage_concentration'
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0, ['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0, [column_1]].iloc[0][column_1]
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0, [column_1]].iloc[0][column_1]
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0, [column_1]].iloc[0][column_1]
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0, [column_1]].iloc[0][column_1]
cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0, [column_1]].iloc[0][column_1]
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0, [column_1]].iloc[0][column_1]
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0, [column_1]].iloc[0][column_1]
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0, [column_1]].iloc[0][column_1]
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0, [column_1]].iloc[0][column_1]
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0, [column_1]].iloc[0][column_1]
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0, [column_1]].iloc[0][column_1]
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0, [column_1]].iloc[0][column_1]
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0, [column_1]].iloc[0][column_1]
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0, [column_1]].iloc[0][column_1]
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0, [column_1]].iloc[0][column_1]
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0, [column_1]].iloc[0][column_1]
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0, [column_1]].iloc[0][column_1]
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0, [column_1]].iloc[0][column_1]
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
cbdv_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0, [column_2]].iloc[0][column_2]
cbdv_value_u = self.round_down_to_correct_decimal_point(cbdv_value_u)
cbdva_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0, [column_2]].iloc[0][column_2]
cbdva_value_u = self.round_down_to_correct_decimal_point(cbdva_value_u)
thcv_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0, [column_2]].iloc[0][column_2]
thcv_value_u = self.round_down_to_correct_decimal_point(thcv_value_u)
cbgva_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0, [column_2]].iloc[0][column_2]
cbgva_value_u = self.round_down_to_correct_decimal_point(cbgva_value_u)
cbd_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0, [column_2]].iloc[0][column_2]
cbd_value_u = self.round_down_to_correct_decimal_point(cbd_value_u)
cbg_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0, [column_2]].iloc[0][column_2]
cbg_value_u = self.round_down_to_correct_decimal_point(cbg_value_u)
cbda_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0, [column_2]].iloc[0][column_2]
cbda_value_u = self.round_down_to_correct_decimal_point(cbda_value_u)
cbn_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0, [column_2]].iloc[0][column_2]
cbn_value_u = self.round_down_to_correct_decimal_point(cbn_value_u)
cbga_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0, [column_2]].iloc[0][column_2]
cbga_value_u = self.round_down_to_correct_decimal_point(cbga_value_u)
thcva_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0, [column_2]].iloc[0][column_2]
thcva_value_u = self.round_down_to_correct_decimal_point(thcva_value_u)
d9_thc_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0, [column_2]].iloc[0][column_2]
d9_thc_value_u = self.round_down_to_correct_decimal_point(d9_thc_value_u)
d8_thc_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0, [column_2]].iloc[0][column_2]
d8_thc_value_u = self.round_down_to_correct_decimal_point(d8_thc_value_u)
cbl_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0, [column_2]].iloc[0][column_2]
cbl_value_u = self.round_down_to_correct_decimal_point(cbl_value_u)
cbc_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0, [column_2]].iloc[0][column_2]
cbc_value_u = self.round_down_to_correct_decimal_point(cbc_value_u)
cbna_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0, [column_2]].iloc[0][column_2]
cbna_value_u = self.round_down_to_correct_decimal_point(cbna_value_u)
thca_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0, [column_2]].iloc[0][column_2]
thca_value_u = self.round_down_to_correct_decimal_point(thca_value_u)
cbla_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0, [column_2]].iloc[0][column_2]
cbla_value_u = self.round_down_to_correct_decimal_point(cbla_value_u)
cbca_value_u = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0, [column_2]].iloc[0][column_2]
cbca_value_u = self.round_down_to_correct_decimal_point(cbca_value_u)
if report_type == 'Deluxe':
return [ibu_recovery_value, [cbdv_value, cbdv_value_u], [cbdva_value, cbdva_value_u], [thcv_value, thcv_value_u], [cbgva_value, cbgva_value_u], [cbd_value, cbd_value_u], [cbg_value, cbg_value_u], [cbda_value, cbda_value_u], [cbn_value, cbn_value_u], [cbga_value, cbga_value_u], [thcva_value, thcva_value_u], [d9_thc_value, d9_thc_value_u], [d8_thc_value, d8_thc_value_u], [cbl_value, cbl_value_u], [cbc_value, cbc_value_u], [cbna_value, cbna_value_u], [thca_value, thca_value_u], [cbla_value, cbla_value_u], [cbca_value, cbca_value_u]]
else:
return [ibu_recovery_value, [cbd_value, cbd_value_u], [cbda_value, cbda_value_u], [cbn_value, cbn_value_u], [cbna_value, cbna_value_u], [d9_thc_value, d9_thc_value_u], [thca_value, thca_value_u], [d8_thc_value, d8_thc_value_u]]
def create_single_deluxe_table(self, data, sample_type):
    """Render the LaTeX results table for a single-sample deluxe potency report.

    Args:
        data: list of formatted result strings indexed by analyte id
            (indices 1-18 are cannabinoids; the row layout in the table
            below shows which index maps to which analyte, e.g. data[11]
            is delta-9 THC and data[16] its acid).
        sample_type: unit keyword for the "Sample 1" column — 'Percent',
            'mg_g' or 'mg_ml'; any other value falls back to percent.

    Returns:
        str: a LaTeX fragment containing the deluxe cannabinoid table.
    """
    # Total THC / Total CBD are derived rows (acid form weighted by 0.877
    # inside create_total_line).
    thc_total = self.create_total_line('regular', 'deluxe', 'THC', data)
    cbd_total = self.create_total_line('regular', 'deluxe', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Deluxe')
    # Translate the unit keyword into its LaTeX rendering for the header row.
    if sample_type == 'Percent':
        sample_type = '\\%'
    elif sample_type == 'mg_g':
        sample_type = 'mg/g'
    elif sample_type == 'mg_ml':
        sample_type = 'mg/mL'
    else:
        sample_type = '\\%'
    deluxe_potency_table_string = '\n \\newline\n \\renewcommand{\\arraystretch}{1.2}\n \\begin{table}[h!]\\centering\n \\begin{tabular}{p{\\dimexpr0.270\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.490\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.1\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n }\n \\textbf{Cannabinoids} & \\textbf{Sample 1} & \\textbf{\\small Blank} & \\textbf{\\small Recovery} & $\\mathbf{\\small S_{0}}$\\\\\n & (' + sample_type + ') & (\\%) & (\\%) & (\\%) \\\\\n \\hline\n \\hline\n $\\Delta^{9}$-THC & ' + data[11] + ' & ND & ' + recov_data[11] + '& ' + self.loq_dictionary[11] + '\\\\\n $\\Delta^{9}$-THC Acid & ' + data[16] + ' & ND & ' + recov_data[16] + '& ' + self.loq_dictionary[16] + '\\\\\n \\hline\n \\hline\n \\textbf{Total THC*} & \\textbf{' + thc_total + '} & & &\\\\\n \\hline\n \\hline\n $\\Delta^{8}$THC & ' + data[12] + ' & ND & ' + recov_data[12] + '& ' + self.loq_dictionary[12] + '\\\\\n $\\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\\\\n \\hline\n Cannabichromene (CBC) & ' + data[14] + ' & ND& ' + recov_data[14] + '& ' + self.loq_dictionary[14] + '\\\\\n Cannabichromene Acid & ' + data[18] + ' & ND & ' + recov_data[18] + '& ' + self.loq_dictionary[18] + '\\\\\n \\hline\n Cannabidiol (CBD) &' + data[5] + ' & ND & ' + recov_data[5] + '& ' + self.loq_dictionary[5] + '\\\\\n Cannabidiol Acid & ' + data[7] + ' & ND & ' + recov_data[7] + '& ' + self.loq_dictionary[7] + '\\\\\n \\hline\n \\hline\n \\textbf{Total CBD**} & \\textbf{' + cbd_total + '} & & &\\\\\n \\hline\n \\hline\n Cannabigerol (CBG) & ' + data[6] + ' & ND & ' + recov_data[6] + '& ' + self.loq_dictionary[6] + '\\\\\n Cannabigerol Acid & ' + data[9] + ' & ND & ' + recov_data[9] + '& ' + self.loq_dictionary[9] + '\\\\\n \\hline\n Cannabicyclol (CBL) & ' + data[13] + ' & ND & ' + recov_data[13] + '& ' + self.loq_dictionary[13] + '\\\\\n Cannabicyclol Acid & ' + data[17] + ' & ND & ' + recov_data[17] + '& ' + self.loq_dictionary[17] + '\\\\\n \\hline\n Cannabidivarin (CBDV) & ' + data[1] + ' & ND & ' + recov_data[1] + '& ' + self.loq_dictionary[1] + '\\\\\n Cannabidivarin Acid & ' + data[2] + ' & ND & ' + recov_data[2] + '&' + self.loq_dictionary[2] + '\\\\\n \\hline\n $\\Delta^{9}$ THCV & ' + data[3] + ' & ND& ' + recov_data[3] + '& ' + self.loq_dictionary[3] + '\\\\\n $\\Delta^{9}$ THCV Acid & ' + data[10] + ' & ND & ' + recov_data[10] + '& ' + self.loq_dictionary[10] + '\\\\\n \\hline\n Cannabinol (CBN) & ' + data[8] + ' & ND & ' + recov_data[8] + '& ' + self.loq_dictionary[8] + '\\\\\n Cannabinol Acid & ' + data[15] + ' & ND & ' + recov_data[15] + ' \\\\\n \\hline\n Cannabigerivarin Acid & ND & ND & N/A & N/A \\\\\n \\hline\n \\hline\n \\textbf{Moisture} & 0.00 & & &\\\\\n \\hline\n \\hline\n \\end{tabular}\n \\end{table}\n ' if False else '\n \\newline\n \\renewcommand{\\arraystretch}{1.2}\n \\begin{table}[h!]\\centering\n \\begin{tabular}{p{\\dimexpr0.270\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.490\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.1\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n }\n \\textbf{Cannabinoids} & \\textbf{Sample 1} & \\textbf{\\small Blank} & \\textbf{\\small Recovery} & $\\mathbf{\\small S_{0}}$\\\\\n & (' + sample_type + ') & (\\%) & (\\%) & (\\%) \\\\\n \\hline\n \\hline\n $\\Delta^{9}$-THC & ' + data[11] + ' & ND & ' + recov_data[11] + '& ' + self.loq_dictionary[11] + '\\\\\n $\\Delta^{9}$-THC Acid & ' + data[16] + ' & ND & ' + recov_data[16] + '& ' + self.loq_dictionary[16] + '\\\\\n \\hline\n \\hline\n \\textbf{Total THC*} & \\textbf{' + thc_total + '} & & &\\\\\n \\hline\n \\hline\n $\\Delta^{8}$THC & ' + data[12] + ' & ND & ' + recov_data[12] + '& ' + self.loq_dictionary[12] + '\\\\\n $\\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\\\\n \\hline\n Cannabichromene (CBC) & ' + data[14] + ' & ND& ' + recov_data[14] + '& ' + self.loq_dictionary[14] + '\\\\\n Cannabichromene Acid & ' + data[18] + ' & ND & ' + recov_data[18] + '& ' + self.loq_dictionary[18] + '\\\\\n \\hline\n Cannabidiol (CBD) &' + data[5] + ' & ND & ' + recov_data[5] + '& ' + self.loq_dictionary[5] + '\\\\\n Cannabidiol Acid & ' + data[7] + ' & ND & ' + recov_data[7] + '& ' + self.loq_dictionary[7] + '\\\\\n \\hline\n \\hline\n \\textbf{Total CBD**} & \\textbf{' + cbd_total + '} & & &\\\\\n \\hline\n \\hline\n Cannabigerol (CBG) & ' + data[6] + ' & ND & ' + recov_data[6] + '& ' + self.loq_dictionary[6] + '\\\\\n Cannabigerol Acid & ' + data[9] + ' & ND & ' + recov_data[9] + '& ' + self.loq_dictionary[9] + '\\\\\n \\hline\n Cannabicyclol (CBL) & ' + data[13] + ' & ND & ' + recov_data[13] + '& ' + self.loq_dictionary[13] + '\\\\\n Cannabicyclol Acid & ' + data[17] + ' & ND & ' + recov_data[17] + '& ' + self.loq_dictionary[17] + '\\\\\n \\hline\n Cannabidivarin (CBDV) & ' + data[1] + ' & ND & ' + recov_data[1] + '& ' + self.loq_dictionary[1] + '\\\\\n Cannabidivarin Acid & ' + data[2] + ' & ND & ' + recov_data[2] + '&' + self.loq_dictionary[2] + '\\\\\n \\hline\n $\\Delta^{9}$ THCV & ' + data[3] + ' & ND& ' + recov_data[3] + '& ' + self.loq_dictionary[3] + '\\\\\n $\\Delta^{9}$ THCV Acid & ' + data[10] + ' & ND & ' + recov_data[10] + '& ' + self.loq_dictionary[10] + '\\\\\n \\hline\n Cannabinol (CBN) & ' + data[8] + ' & ND & ' + recov_data[8] + '& ' + self.loq_dictionary[8] + '\\\\\n Cannabinol Acid & ' + data[15] + ' & ND & ' + recov_data[15] + '& ' + self.loq_dictionary[15] + ' \\\\\n \\hline\n Cannabigerivarin Acid & ND & ND & N/A & N/A \\\\\n \\hline\n \\hline\n \\textbf{Moisture} & 0.00 & & &\\\\\n \\hline\n \\hline\n \\end{tabular}\n \\end{table}\n '
    return deluxe_potency_table_string
def create_single_deluxe_table_unit(self, data, unit_type):
    """Render the two-column (per-mass and per-unit) deluxe potency table.

    Args:
        data: list indexed by analyte id where each entry is a pair —
            data[i][0] is the per-mass result string and data[i][1] the
            per-unit result string (see the row layout below for the
            index-to-analyte mapping).
        unit_type: 'unit' -> columns are mg/g and mg/unit;
            'density' -> mg/mL and percent; anything else -> percent/percent.

    Returns:
        str: a LaTeX fragment containing the deluxe cannabinoid table
        with both sample columns.
    """
    # Derived total rows; create_total_line returns [per-mass, per-unit].
    thc_total = self.create_total_line('unit', 'deluxe', 'THC', data)
    cbd_total = self.create_total_line('unit', 'deluxe', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Deluxe')
    # Choose the LaTeX unit labels for the two sample columns.
    if unit_type == 'unit':
        sample_type_1 = 'mg/g'
        sample_type_2 = 'mg/unit'
    elif unit_type == 'density':
        sample_type_1 = 'mg/mL'
        sample_type_2 = '\\%'
    else:
        sample_type_1 = '\\%'
        sample_type_2 = '\\%'
    deluxe_potency_table_string = '\n \\newline\n \\renewcommand{\\arraystretch}{1.2}\n \\begin{table}[h!]\\centering\n \\begin{tabular}{p{\\dimexpr0.270\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.245\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.245\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.1\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n }\n \\textbf{Cannabinoids} & \\textbf{Sample 1} & \\textbf{Sample 1} & \\textbf{\\small Blank} & \\textbf{\\small Recovery} & $\\mathbf{\\small S_{0}}$ \\\\\n & (' + sample_type_1 + ') & (' + sample_type_2 + ') & (\\%) & (\\%) & (\\%) \\\\\n \\hline\n \\hline\n $\\Delta^{9}$-THC & ' + data[11][0] + ' & ' + data[11][1] + ' & ND & ' + recov_data[11] + '&' + self.loq_dictionary[11] + '\\\\\n $\\Delta^{9}$-THC Acid & ' + data[16][0] + ' & ' + data[16][1] + ' & ND & ' + recov_data[16] + '& ' + self.loq_dictionary[16] + '\\\\\n \\hline\n \\hline\n \\textbf{Total THC*} & \\textbf{' + thc_total[0] + '} & \\textbf{' + thc_total[1] + '} & & &\\\\\n \\hline\n \\hline\n $\\Delta^{8}$THC & ' + data[12][0] + ' & ' + data[12][1] + ' & ND & ' + recov_data[12] + '& ' + self.loq_dictionary[12] + '\\\\\n $\\Delta^{8}$THC Acid & ND & ND & ND & N/A & N/A\\\\\n \\hline\n Cannabichromene (CBC) & ' + data[14][0] + ' & ' + data[14][1] + ' & ND & ' + recov_data[14] + '& ' + self.loq_dictionary[14] + '\\\\\n Cannabichromene Acid & ' + data[18][0] + ' & ' + data[18][1] + ' & ND & ' + recov_data[18] + '& ' + self.loq_dictionary[18] + '\\\\\n \\hline\n Cannabidiol (CBD) &' + data[5][0] + ' & ' + data[5][1] + ' & ND & ' + recov_data[5] + '& ' + self.loq_dictionary[5] + '\\\\\n Cannabidiol Acid & ' + data[7][0] + ' & ' + data[7][1] + ' & ND & ' + recov_data[7] + '& ' + self.loq_dictionary[7] + '\\\\\n \\hline\n \\hline\n \\textbf{Total CBD**} & \\textbf{' + cbd_total[0] + '} & \\textbf{' + cbd_total[1] + '} & & &\\\\\n \\hline\n \\hline\n Cannabigerol (CBG) & ' + data[6][0] + ' & ' + data[6][1] + ' & ND & ' + recov_data[6] + '& ' + self.loq_dictionary[6] + '\\\\\n Cannabigerol Acid & ' + data[9][0] + ' & ' + data[9][1] + ' & ND & ' + recov_data[9] + '& ' + self.loq_dictionary[9] + '\\\\\n \\hline\n Cannabicyclol (CBL) & ' + data[13][0] + ' & ' + data[13][1] + ' & ND & ' + recov_data[13] + '& ' + self.loq_dictionary[13] + '\\\\\n Cannabicyclol Acid & ' + data[17][0] + ' & ' + data[17][1] + ' & ND & ' + recov_data[17] + '& ' + self.loq_dictionary[17] + '\\\\\n \\hline\n Cannabidivarin (CBDV) & ' + data[1][0] + ' & ' + data[1][1] + ' & ND & ' + recov_data[1] + '& ' + self.loq_dictionary[1] + '\\\\\n Cannabidivarin Acid & ' + data[2][0] + ' & ' + data[2][1] + ' & ND & ' + recov_data[2] + '& ' + self.loq_dictionary[2] + '\\\\\n \\hline\n $\\Delta^{9}$ THCV & ' + data[3][0] + ' & ' + data[3][1] + ' & ND & ' + recov_data[3] + '& ' + self.loq_dictionary[3] + '\\\\\n $\\Delta^{9}$ THCV Acid & ' + data[10][0] + ' & ' + data[10][1] + ' & ND & ' + recov_data[10] + '& ' + self.loq_dictionary[10] + '\\\\\n \\hline\n Cannabinol (CBN) & ' + data[8][0] + ' & ' + data[8][1] + ' & ND & ' + recov_data[8] + '& ' + self.loq_dictionary[8] + '\\\\\n Cannabinol Acid & ' + data[15][0] + ' & ' + data[15][1] + ' & ND & ' + recov_data[15] + ' \\\\\n \\hline\n Cannabigerivarin Acid & ND & ND & N/A & N/A \\\\\n \\hline\n \\hline\n \\textbf{Moisture} & 0.00 & & & \\\\\n \\hline\n \\hline\n \\end{tabular}\n \\end{table}\n '
    return deluxe_potency_table_string
def create_single_basic_table(self, data, sample_type):
    """Render the LaTeX results table for a single-sample basic potency report.

    Args:
        data: list of formatted result strings indexed by analyte id
            (basic layout: 1=CBD, 2=CBDA, 3=CBN, 4=CBNA, 5=d9-THC,
            6=THCA, 7=d8-THC — as laid out in the table rows below).
        sample_type: unit keyword for the "Sample 1" column — 'Percent',
            'mg_g' or 'mg_ml'; any other value falls back to percent.

    Returns:
        str: a LaTeX fragment containing the basic cannabinoid table.
    """
    # Derived total rows (acid form weighted by 0.877 in create_total_line).
    thc_total = self.create_total_line('regular', 'basic', 'THC', data)
    cbd_total = self.create_total_line('regular', 'basic', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Basic')
    # Translate the unit keyword into its LaTeX rendering for the header row.
    if sample_type == 'Percent':
        sample_type = '\\%'
    elif sample_type == 'mg_g':
        sample_type = 'mg/g'
    elif sample_type == 'mg_ml':
        sample_type = 'mg/mL'
    else:
        sample_type = '\\%'
    basic_potency_table_string = '\n \\newline\n \\renewcommand{\\arraystretch}{1.2}\n \\begin{table}[h!]\\centering\n \\begin{tabular}{p{\\dimexpr0.270\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.490\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.1\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n }\n \\textbf{Cannabinoids} & \\textbf{Sample 1} & \\textbf{\\small Blank} & \\textbf{\\small Recovery} & $\\mathbf{\\small S_{0}}$\\\\\n & (' + sample_type + ') & (\\%) & (\\%) & (\\%) \\\\\n \\hline\n \\hline\n $\\Delta^{9}$-THC & ' + data[5] + ' & ND & ' + recov_data[5] + '& ' + self.loq_dictionary[5] + '\\\\\n $\\Delta^{9}$-THC Acid & ' + data[6] + ' & ND & ' + recov_data[6] + '& ' + self.loq_dictionary[6] + '\\\\\n \\hline\n \\hline\n \\textbf{Total THC*} & \\textbf{' + thc_total + '} & & &\\\\\n \\hline\n \\hline\n $\\Delta^{8}$-THC & ' + data[7] + ' & ND & ' + recov_data[7] + '& ' + self.loq_dictionary[7] + '\\\\\n $\\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\\\\n \\hline\n Cannabidiol (CBD) &' + data[1] + ' & ND & ' + recov_data[1] + '& ' + self.loq_dictionary[1] + '\\\\\n Cannabidiol Acid &' + data[2] + ' & ND & ' + recov_data[2] + '& ' + self.loq_dictionary[2] + '\\\\\n \\hline\n \\hline\n \\textbf{Total CBD**} & \\textbf{' + cbd_total + '} & & &\\\\\n \\hline\n \\hline\n Cannabinol (CBN) & ' + data[3] + ' & ND & ' + recov_data[3] + '& ' + self.loq_dictionary[3] + '\\\\\n Cannabinol Acid & ' + data[4] + ' & ND & ' + recov_data[4] + '& ' + self.loq_dictionary[4] + '\\\\\n \\hline\n \\hline\n \\textbf{Moisture} & 0.00 & & &\\\\\n \\hline\n \\hline\n \\end{tabular}\n \\end{table}\n '
    return basic_potency_table_string
def create_single_basic_table_unit(self, data, unit_type):
    """Render the two-column (per-mass and per-unit) basic potency table.

    Args:
        data: list indexed by analyte id where each entry is a pair —
            data[i][0] is the per-mass result string and data[i][1] the
            per-unit result string (basic layout: 1=CBD, 2=CBDA, 3=CBN,
            4=CBNA, 5=d9-THC, 6=THCA, 7=d8-THC).
        unit_type: 'unit' -> columns are mg/g and mg/unit;
            'density' -> mg/mL and percent; anything else -> percent/percent.

    Returns:
        str: a LaTeX fragment containing the basic cannabinoid table
        with both sample columns.
    """
    # Derived total rows; create_total_line returns [per-mass, per-unit].
    thc_total = self.create_total_line('unit', 'basic', 'THC', data)
    cbd_total = self.create_total_line('unit', 'basic', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Basic')
    # Choose the LaTeX unit labels for the two sample columns.
    if unit_type == 'unit':
        sample_type_1 = 'mg/g'
        sample_type_2 = 'mg/unit'
    elif unit_type == 'density':
        sample_type_1 = 'mg/mL'
        sample_type_2 = '\\%'
    else:
        sample_type_1 = '\\%'
        sample_type_2 = '\\%'
    basic_potency_table_string = '\n \\newline\n \\renewcommand{\\arraystretch}{1.2}\n \\begin{table}[h!]\\centering\n \\begin{tabular}{p{\\dimexpr0.270\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.245\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.245\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}|\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.1\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n p{\\dimexpr0.07\\textwidth-2\\tabcolsep-\\arrayrulewidth\\relax}\n }\n \\textbf{Cannabinoids} & \\textbf{Sample 1} & \\textbf{Sample 1} & \\textbf{\\small Blank} & \\textbf{\\small Recovery} & $\\mathbf{\\small S_{0}}$ \\\\\n & (' + sample_type_1 + ') & (' + sample_type_2 + ') & (\\%) & (\\%) & (\\%) \\\\\n \\hline\n \\hline\n $\\Delta^{9}$ THC & ' + data[5][0] + ' & ' + data[5][1] + ' & ND & ' + recov_data[5] + '& ' + self.loq_dictionary[5] + '\\\\\n $\\Delta^{9}$ THC Acid & ' + data[6][0] + ' & ' + data[6][1] + ' & ND & ' + recov_data[6] + '& ' + self.loq_dictionary[6] + '\\\\\n \\hline\n \\hline\n \\textbf{Total THC*} & \\textbf{' + thc_total[0] + '} & \\textbf{' + thc_total[1] + '} & & &\\\\\n \\hline\n \\hline\n $\\Delta^{8}$ THC & ' + data[7][0] + ' & ' + data[7][1] + ' & ND & ' + recov_data[7] + '& ' + self.loq_dictionary[7] + '\\\\\n $\\Delta^{8}$THC Acid & ND & ND & ND & N/A & N/A \\\\\n \\hline\n Cannabidiol (CBD) &' + data[1][0] + ' & ' + data[1][1] + ' & ND & ' + recov_data[1] + '& ' + self.loq_dictionary[1] + '\\\\\n Cannabidiol Acid &' + data[2][0] + ' & ' + data[2][1] + ' & ND & ' + recov_data[2] + '& ' + self.loq_dictionary[2] + '\\\\\n \\hline\n \\hline\n \\textbf{Total CBD**} & \\textbf{' + cbd_total[0] + '} & \\textbf{' + cbd_total[1] + '} & & &\\\\\n \\hline\n \\hline\n Cannabinol (CBN) & ' + data[3][0] + ' & ' + data[3][1] + ' & ND & ' + recov_data[3] + '& ' + self.loq_dictionary[3] + '\\\\\n Cannabinol Acid & ' + data[4][0] + ' & ' + data[4][1] + ' & ND & ' + recov_data[4] + '& ' + self.loq_dictionary[4] + '\\\\\n \\hline\n \\hline\n \\textbf{Moisture} & 0.00 & & &\\\\\n \\hline\n \\hline\n \\end{tabular}\n \\end{table}\n '
    return basic_potency_table_string
def generate_footer(self):
    """Return the report's closing LaTeX: methods note, legend (ND / S0 /
    total-THC and total-CBD formulas), signature lines and the MB
    Laboratories contact footer, ending with \\end{document}.

    NOTE(review): the postal code "V8L 356" in the footer looks like it
    should be "V8L 3S6" — confirm before changing the literal.
    """
    footer_string = '\n Methods: solvent extraction; measured by UPLC-UV, tandem MS, P.I. 1.14 \\& based on USP monograph 29 \\newline\n $\\si{S_{o}}$ = standard deviation at zero analyte concentration. MDL generally considered to be 3x $\\si{S_{o}}$ value. \\newline\\newline\n ND = none detected. N/A = not applicable. THC = tetrahydrocannabinol.\\newline \n \\textbf{*Total THC} = $\\Delta^{9}$-THC + (THCA x 0.877 ). \\textbf{**Total CBD} = CBD + (CBDA x 0.877).\\newline\\newline\n Material will be held for up to 3 weeks unless alternative arrangements have been made. Sample holding time may vary and is dependent on MBL license restrictions.\n \\newline\\newline\\newline\n R. Bilodeau \\phantom{aaaaaaaaaaaaaaaaaaaaaaaaaxaaaaaasasssssssssssss}H. Hartmann\\\\ Analytical Chemist: \\underline{\\hspace{3cm}}{ \\hspace{3.2cm} Sr. Analytical Chemist: \\underline{\\hspace{3cm}} \n \\fancyfoot[C]{\\textbf{MB Laboratories Ltd.}\\\\ \\textbf{Web:} www.mblabs.com}\n \\fancyfoot[R]{\\textbf{Mail:} PO Box 2103\\\\ Sidney, B.C., V8L 356}\n \\fancyfoot[L]{\\textbf{T:} 250 656 1334\\\\ \\textbf{E:} info@mblabs.com}\n \\end{document}\n '
    return footer_string
def round_down_to_correct_decimal_point(self, data_value):
    """Truncate a numeric result to the report's fixed display width.

    Values are truncated (not rounded) by slicing their string form:
    [1, 100) keeps 4 characters, (0, 1) keeps 5, and >= 100 keeps 3.
    Zero and negative values are reported as 'ND' (none detected).
    """
    if 1 <= data_value < 100:
        return str(data_value)[:4]
    if 0 < data_value < 1:
        return str(data_value)[:5]
    if data_value >= 100:
        return str(data_value)[:3]
    return 'ND'
def create_total_line(self, total_line_type, report_type, cannabinoid, data):
    """Compute the formatted "Total THC"/"Total CBD" value(s) for a report.

    Total = neutral form + acid form * 0.877 (the decarboxylation factor
    also quoted in the report footer). The original implementation
    duplicated the combine/truncate logic four times; the helpers below
    preserve its behavior exactly.

    Args:
        total_line_type: 'regular' (entries of *data* are scalar strings)
            or 'unit' (each entry is a [per-mass, per-unit] pair).
        report_type: 'basic' selects the basic layout's indices; any other
            value selects the deluxe layout's indices.
        cannabinoid: 'THC'; any other value is treated as CBD.
        data: result list; entries are numeric strings or 'ND'.

    Returns:
        'regular' -> a single truncated string (or 'ND' when the total is
        not positive); 'unit' -> [per-mass total, per-unit total]; any
        other total_line_type -> None (preserves historical behavior).
    """
    def _combine(neutral, acid):
        # 'ND' (none detected) contributes zero to the total.
        neutral = 0 if neutral == 'ND' else neutral
        acid = 0 if acid == 'ND' else acid
        return float(neutral) + float(acid) * 0.877

    def _truncate(total):
        # Same fixed-width string truncation used by
        # round_down_to_correct_decimal_point.
        if 100 > total >= 1:
            return str(total)[0:4]
        elif 1 > total > 0:
            return str(total)[0:5]
        elif total >= 100:
            return str(total)[0:3]
        return 'ND'

    # Indices of the (neutral, acid) entries depend on the report layout.
    if cannabinoid == 'THC':
        neutral_idx, acid_idx = (5, 6) if report_type == 'basic' else (11, 16)
    else:
        neutral_idx, acid_idx = (1, 2) if report_type == 'basic' else (5, 7)

    if total_line_type == 'unit':
        per_mass = _truncate(_combine(data[neutral_idx][0], data[acid_idx][0]))
        per_unit = _truncate(_combine(data[neutral_idx][1], data[acid_idx][1]))
        return [per_mass, per_unit]
    elif total_line_type == 'regular':
        return _truncate(_combine(data[neutral_idx], data[acid_idx]))
#!/usr/local/bin/python3
class Object(object):
    """A node in the orbit tree: a name plus the bodies orbiting it directly."""

    def __init__(self, id):
        # Parameter name `id` kept (despite shadowing the builtin) to
        # preserve the existing call signature.
        self.id, self.children = id, []
root = Object("COM")  # "COM" (universal centre of mass) is the tree root
objects = {"COM": root}  # registry: object id -> its Object node
def get_object(id):
    """Return the registry node for *id*, creating it on first sight."""
    node = objects.get(id)
    if node is None:
        # Registry never stores None, so .get() is a safe membership test.
        node = Object(id)
        objects[id] = node
    return node
# Build the orbit tree: each input line "A)B" means B directly orbits A.
with open("input.txt") as f:
    for line in f.readlines():
        parent, child = map(get_object, line.strip().split(")"))
        parent.children.append(child)
checksum = 0  # running total of direct + indirect orbits
def traverse(node, distance):
    """Depth-first walk: add each node's depth (its orbit count) to the
    global checksum, then recurse into its children one level deeper."""
    global checksum
    checksum += distance
    for child in node.children:
        traverse(child, distance + 1)
traverse(root, 0)  # the root is at depth 0 and contributes nothing itself
print("checksum: %d" % checksum)
| class Object(object):
def __init__(self, id):
self.id = id
self.children = []
root = Object('COM')
objects = {'COM': root}
def get_object(id):
if id not in objects:
objects[id] = Object(id)
return objects[id]
with open('input.txt') as f:
for line in f.readlines():
(parent, child) = map(get_object, line.strip().split(')'))
parent.children.append(child)
checksum = 0
def traverse(node, distance):
global checksum
checksum += distance
for child in node.children:
traverse(child, distance + 1)
traverse(root, 0)
print('checksum: %d' % checksum) |
begin_unit
comment|'# Copyright (c) 2013 Intel, Inc.'
nl|'\n'
comment|'# Copyright (c) 2012 OpenStack Foundation'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
nl|'\n'
name|'import'
name|'glob'
newline|'\n'
name|'import'
name|'os'
newline|'\n'
name|'import'
name|'re'
newline|'\n'
nl|'\n'
name|'from'
name|'oslo_log'
name|'import'
name|'log'
name|'as'
name|'logging'
newline|'\n'
name|'import'
name|'six'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
name|'import'
name|'exception'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_LW'
newline|'\n'
nl|'\n'
DECL|variable|LOG
name|'LOG'
op|'='
name|'logging'
op|'.'
name|'getLogger'
op|'('
name|'__name__'
op|')'
newline|'\n'
nl|'\n'
DECL|variable|PCI_VENDOR_PATTERN
name|'PCI_VENDOR_PATTERN'
op|'='
string|'"^(hex{4})$"'
op|'.'
name|'replace'
op|'('
string|'"hex"'
op|','
string|'"[\\da-fA-F]"'
op|')'
newline|'\n'
DECL|variable|_PCI_ADDRESS_PATTERN
name|'_PCI_ADDRESS_PATTERN'
op|'='
op|'('
string|'"^(hex{4}):(hex{2}):(hex{2}).(oct{1})$"'
op|'.'
nl|'\n'
name|'replace'
op|'('
string|'"hex"'
op|','
string|'"[\\da-fA-F]"'
op|')'
op|'.'
nl|'\n'
name|'replace'
op|'('
string|'"oct"'
op|','
string|'"[0-7]"'
op|')'
op|')'
newline|'\n'
DECL|variable|_PCI_ADDRESS_REGEX
name|'_PCI_ADDRESS_REGEX'
op|'='
name|'re'
op|'.'
name|'compile'
op|'('
name|'_PCI_ADDRESS_PATTERN'
op|')'
newline|'\n'
nl|'\n'
DECL|variable|_SRIOV_TOTALVFS
name|'_SRIOV_TOTALVFS'
op|'='
string|'"sriov_totalvfs"'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|pci_device_prop_match
name|'def'
name|'pci_device_prop_match'
op|'('
name|'pci_dev'
op|','
name|'specs'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Check if the pci_dev meet spec requirement\n\n Specs is a list of PCI device property requirements.\n An example of device requirement that the PCI should be either:\n a) Device with vendor_id as 0x8086 and product_id as 0x8259, or\n b) Device with vendor_id as 0x10de and product_id as 0x10d8:\n\n [{"vendor_id":"8086", "product_id":"8259"},\n {"vendor_id":"10de", "product_id":"10d8"}]\n\n """'
newline|'\n'
DECL|function|_matching_devices
name|'def'
name|'_matching_devices'
op|'('
name|'spec'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'all'
op|'('
name|'pci_dev'
op|'.'
name|'get'
op|'('
name|'k'
op|')'
op|'=='
name|'v'
name|'for'
name|'k'
op|','
name|'v'
name|'in'
name|'six'
op|'.'
name|'iteritems'
op|'('
name|'spec'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
name|'any'
op|'('
name|'_matching_devices'
op|'('
name|'spec'
op|')'
name|'for'
name|'spec'
name|'in'
name|'specs'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|parse_address
dedent|''
name|'def'
name|'parse_address'
op|'('
name|'address'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Returns (domain, bus, slot, function) from PCI address that is stored in\n PciDevice DB table.\n """'
newline|'\n'
name|'m'
op|'='
name|'_PCI_ADDRESS_REGEX'
op|'.'
name|'match'
op|'('
name|'address'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'m'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDeviceWrongAddressFormat'
op|'('
name|'address'
op|'='
name|'address'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'m'
op|'.'
name|'groups'
op|'('
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|get_pci_address_fields
dedent|''
name|'def'
name|'get_pci_address_fields'
op|'('
name|'pci_addr'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dbs'
op|','
name|'sep'
op|','
name|'func'
op|'='
name|'pci_addr'
op|'.'
name|'partition'
op|'('
string|"'.'"
op|')'
newline|'\n'
name|'domain'
op|','
name|'bus'
op|','
name|'slot'
op|'='
name|'dbs'
op|'.'
name|'split'
op|'('
string|"':'"
op|')'
newline|'\n'
name|'return'
op|'('
name|'domain'
op|','
name|'bus'
op|','
name|'slot'
op|','
name|'func'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|get_pci_address
dedent|''
name|'def'
name|'get_pci_address'
op|'('
name|'domain'
op|','
name|'bus'
op|','
name|'slot'
op|','
name|'func'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
string|"'%s:%s:%s.%s'"
op|'%'
op|'('
name|'domain'
op|','
name|'bus'
op|','
name|'slot'
op|','
name|'func'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|get_function_by_ifname
dedent|''
name|'def'
name|'get_function_by_ifname'
op|'('
name|'ifname'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Given the device name, returns the PCI address of a device\n and returns True if the address in a physical function.\n """'
newline|'\n'
name|'dev_path'
op|'='
string|'"/sys/class/net/%s/device"'
op|'%'
name|'ifname'
newline|'\n'
name|'sriov_totalvfs'
op|'='
number|'0'
newline|'\n'
name|'if'
name|'os'
op|'.'
name|'path'
op|'.'
name|'isdir'
op|'('
name|'dev_path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'try'
op|':'
newline|'\n'
comment|'# sriov_totalvfs contains the maximum possible VFs for this PF'
nl|'\n'
indent|' '
name|'with'
name|'open'
op|'('
name|'dev_path'
op|'+'
name|'_SRIOV_TOTALVFS'
op|')'
name|'as'
name|'fd'
op|':'
newline|'\n'
indent|' '
name|'sriov_totalvfs'
op|'='
name|'int'
op|'('
name|'fd'
op|'.'
name|'read'
op|'('
op|')'
op|')'
newline|'\n'
name|'return'
op|'('
name|'os'
op|'.'
name|'readlink'
op|'('
name|'dev_path'
op|')'
op|'.'
name|'strip'
op|'('
string|'"./"'
op|')'
op|','
nl|'\n'
name|'sriov_totalvfs'
op|'>'
number|'0'
op|')'
newline|'\n'
dedent|''
dedent|''
name|'except'
op|'('
name|'IOError'
op|','
name|'ValueError'
op|')'
op|':'
newline|'\n'
indent|' '
name|'return'
name|'os'
op|'.'
name|'readlink'
op|'('
name|'dev_path'
op|')'
op|'.'
name|'strip'
op|'('
string|'"./"'
op|')'
op|','
name|'False'
newline|'\n'
dedent|''
dedent|''
name|'return'
name|'None'
op|','
name|'False'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|is_physical_function
dedent|''
name|'def'
name|'is_physical_function'
op|'('
name|'domain'
op|','
name|'bus'
op|','
name|'slot'
op|','
name|'function'
op|')'
op|':'
newline|'\n'
indent|' '
name|'dev_path'
op|'='
string|'"/sys/bus/pci/devices/%(d)s:%(b)s:%(s)s.%(f)s/"'
op|'%'
op|'{'
nl|'\n'
string|'"d"'
op|':'
name|'domain'
op|','
string|'"b"'
op|':'
name|'bus'
op|','
string|'"s"'
op|':'
name|'slot'
op|','
string|'"f"'
op|':'
name|'function'
op|'}'
newline|'\n'
name|'if'
name|'os'
op|'.'
name|'path'
op|'.'
name|'isdir'
op|'('
name|'dev_path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'sriov_totalvfs'
op|'='
number|'0'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'open'
op|'('
name|'dev_path'
op|'+'
name|'_SRIOV_TOTALVFS'
op|')'
name|'as'
name|'fd'
op|':'
newline|'\n'
indent|' '
name|'sriov_totalvfs'
op|'='
name|'int'
op|'('
name|'fd'
op|'.'
name|'read'
op|'('
op|')'
op|')'
newline|'\n'
name|'return'
name|'sriov_totalvfs'
op|'>'
number|'0'
newline|'\n'
dedent|''
dedent|''
name|'except'
op|'('
name|'IOError'
op|','
name|'ValueError'
op|')'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
dedent|''
dedent|''
name|'return'
name|'False'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|_get_sysfs_netdev_path
dedent|''
name|'def'
name|'_get_sysfs_netdev_path'
op|'('
name|'pci_addr'
op|','
name|'pf_interface'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Get the sysfs path based on the PCI address of the device.\n\n Assumes a networking device - will not check for the existence of the path.\n """'
newline|'\n'
name|'if'
name|'pf_interface'
op|':'
newline|'\n'
indent|' '
name|'return'
string|'"/sys/bus/pci/devices/%s/physfn/net"'
op|'%'
op|'('
name|'pci_addr'
op|')'
newline|'\n'
dedent|''
name|'return'
string|'"/sys/bus/pci/devices/%s/net"'
op|'%'
op|'('
name|'pci_addr'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|get_ifname_by_pci_address
dedent|''
name|'def'
name|'get_ifname_by_pci_address'
op|'('
name|'pci_addr'
op|','
name|'pf_interface'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Get the interface name based on a VF\'s pci address\n\n The returned interface name is either the parent PF\'s or that of the VF\n itself based on the argument of pf_interface.\n """'
newline|'\n'
name|'dev_path'
op|'='
name|'_get_sysfs_netdev_path'
op|'('
name|'pci_addr'
op|','
name|'pf_interface'
op|')'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'dev_info'
op|'='
name|'os'
op|'.'
name|'listdir'
op|'('
name|'dev_path'
op|')'
newline|'\n'
name|'return'
name|'dev_info'
op|'.'
name|'pop'
op|'('
op|')'
newline|'\n'
dedent|''
name|'except'
name|'Exception'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDeviceNotFoundById'
op|'('
name|'id'
op|'='
name|'pci_addr'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|get_mac_by_pci_address
dedent|''
dedent|''
name|'def'
name|'get_mac_by_pci_address'
op|'('
name|'pci_addr'
op|','
name|'pf_interface'
op|'='
name|'False'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Get the MAC address of the nic based on it\'s PCI address\n\n Raises PciDeviceNotFoundById in case the pci device is not a NIC\n """'
newline|'\n'
name|'dev_path'
op|'='
name|'_get_sysfs_netdev_path'
op|'('
name|'pci_addr'
op|','
name|'pf_interface'
op|')'
newline|'\n'
name|'if_name'
op|'='
name|'get_ifname_by_pci_address'
op|'('
name|'pci_addr'
op|','
name|'pf_interface'
op|')'
newline|'\n'
name|'addr_file'
op|'='
name|'os'
op|'.'
name|'path'
op|'.'
name|'join'
op|'('
name|'dev_path'
op|','
name|'if_name'
op|','
string|"'address'"
op|')'
newline|'\n'
nl|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'with'
name|'open'
op|'('
name|'addr_file'
op|')'
name|'as'
name|'f'
op|':'
newline|'\n'
indent|' '
name|'mac'
op|'='
name|'next'
op|'('
name|'f'
op|')'
op|'.'
name|'strip'
op|'('
op|')'
newline|'\n'
name|'return'
name|'mac'
newline|'\n'
dedent|''
dedent|''
name|'except'
op|'('
name|'IOError'
op|','
name|'StopIteration'
op|')'
name|'as'
name|'e'
op|':'
newline|'\n'
indent|' '
name|'LOG'
op|'.'
name|'warning'
op|'('
name|'_LW'
op|'('
string|'"Could not find the expected sysfs file for "'
nl|'\n'
string|'"determining the MAC address of the PCI device "'
nl|'\n'
string|'"%(addr)s. May not be a NIC. Error: %(e)s"'
op|')'
op|','
nl|'\n'
op|'{'
string|"'addr'"
op|':'
name|'pci_addr'
op|','
string|"'e'"
op|':'
name|'e'
op|'}'
op|')'
newline|'\n'
name|'raise'
name|'exception'
op|'.'
name|'PciDeviceNotFoundById'
op|'('
name|'id'
op|'='
name|'pci_addr'
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|function|get_vf_num_by_pci_address
dedent|''
dedent|''
name|'def'
name|'get_vf_num_by_pci_address'
op|'('
name|'pci_addr'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Get the VF number based on a VF\'s pci address\n\n A VF is associated with an VF number, which ip link command uses to\n configure it. This number can be obtained from the PCI device filesystem.\n """'
newline|'\n'
name|'VIRTFN_RE'
op|'='
name|'re'
op|'.'
name|'compile'
op|'('
string|'"virtfn(\\d+)"'
op|')'
newline|'\n'
name|'virtfns_path'
op|'='
string|'"/sys/bus/pci/devices/%s/physfn/virtfn*"'
op|'%'
op|'('
name|'pci_addr'
op|')'
newline|'\n'
name|'vf_num'
op|'='
name|'None'
newline|'\n'
name|'try'
op|':'
newline|'\n'
indent|' '
name|'for'
name|'vf_path'
name|'in'
name|'glob'
op|'.'
name|'iglob'
op|'('
name|'virtfns_path'
op|')'
op|':'
newline|'\n'
indent|' '
name|'if'
name|'re'
op|'.'
name|'search'
op|'('
name|'pci_addr'
op|','
name|'os'
op|'.'
name|'readlink'
op|'('
name|'vf_path'
op|')'
op|')'
op|':'
newline|'\n'
indent|' '
name|'t'
op|'='
name|'VIRTFN_RE'
op|'.'
name|'search'
op|'('
name|'vf_path'
op|')'
newline|'\n'
name|'vf_num'
op|'='
name|'t'
op|'.'
name|'group'
op|'('
number|'1'
op|')'
newline|'\n'
name|'break'
newline|'\n'
dedent|''
dedent|''
dedent|''
name|'except'
name|'Exception'
op|':'
newline|'\n'
indent|' '
name|'pass'
newline|'\n'
dedent|''
name|'if'
name|'vf_num'
name|'is'
name|'None'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'exception'
op|'.'
name|'PciDeviceNotFoundById'
op|'('
name|'id'
op|'='
name|'pci_addr'
op|')'
newline|'\n'
dedent|''
name|'return'
name|'vf_num'
newline|'\n'
dedent|''
endmarker|''
end_unit
| begin_unit
comment | '# Copyright (c) 2013 Intel, Inc.'
nl | '\n'
comment | '# Copyright (c) 2012 OpenStack Foundation'
nl | '\n'
comment | '# All Rights Reserved.'
nl | '\n'
comment | '#'
nl | '\n'
comment | '# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl | '\n'
comment | '# not use this file except in compliance with the License. You may obtain'
nl | '\n'
comment | '# a copy of the License at'
nl | '\n'
comment | '#'
nl | '\n'
comment | '# http://www.apache.org/licenses/LICENSE-2.0'
nl | '\n'
comment | '#'
nl | '\n'
comment | '# Unless required by applicable law or agreed to in writing, software'
nl | '\n'
comment | '# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl | '\n'
comment | '# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl | '\n'
comment | '# License for the specific language governing permissions and limitations'
nl | '\n'
comment | '# under the License.'
nl | '\n'
nl | '\n'
nl | '\n'
name | 'import'
name | 'glob'
newline | '\n'
name | 'import'
name | 'os'
newline | '\n'
name | 'import'
name | 're'
newline | '\n'
nl | '\n'
name | 'from'
name | 'oslo_log'
name | 'import'
name | 'log'
name | 'as'
name | 'logging'
newline | '\n'
name | 'import'
name | 'six'
newline | '\n'
nl | '\n'
name | 'from'
name | 'nova'
name | 'import'
name | 'exception'
newline | '\n'
name | 'from'
name | 'nova'
op | '.'
name | 'i18n'
name | 'import'
name | '_LW'
newline | '\n'
nl | '\n'
DECL | variable | LOG
name | 'LOG'
op | '='
name | 'logging'
op | '.'
name | 'getLogger'
op | '('
name | '__name__'
op | ')'
newline | '\n'
nl | '\n'
DECL | variable | PCI_VENDOR_PATTERN
name | 'PCI_VENDOR_PATTERN'
op | '='
string | '"^(hex{4})$"'
op | '.'
name | 'replace'
op | '('
string | '"hex"'
op | ','
string | '"[\\da-fA-F]"'
op | ')'
newline | '\n'
DECL | variable | _PCI_ADDRESS_PATTERN
name | '_PCI_ADDRESS_PATTERN'
op | '='
op | '('
string | '"^(hex{4}):(hex{2}):(hex{2}).(oct{1})$"'
op | '.'
nl | '\n'
name | 'replace'
op | '('
string | '"hex"'
op | ','
string | '"[\\da-fA-F]"'
op | ')'
op | '.'
nl | '\n'
name | 'replace'
op | '('
string | '"oct"'
op | ','
string | '"[0-7]"'
op | ')'
op | ')'
newline | '\n'
DECL | variable | _PCI_ADDRESS_REGEX
name | '_PCI_ADDRESS_REGEX'
op | '='
name | 're'
op | '.'
name | 'compile'
op | '('
name | '_PCI_ADDRESS_PATTERN'
op | ')'
newline | '\n'
nl | '\n'
DECL | variable | _SRIOV_TOTALVFS
name | '_SRIOV_TOTALVFS'
op | '='
string | '"sriov_totalvfs"'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | pci_device_prop_match
name | 'def'
name | 'pci_device_prop_match'
op | '('
name | 'pci_dev'
op | ','
name | 'specs'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Check if the pci_dev meet spec requirement\n\n Specs is a list of PCI device property requirements.\n An example of device requirement that the PCI should be either:\n a) Device with vendor_id as 0x8086 and product_id as 0x8259, or\n b) Device with vendor_id as 0x10de and product_id as 0x10d8:\n\n [{"vendor_id":"8086", "product_id":"8259"},\n {"vendor_id":"10de", "product_id":"10d8"}]\n\n """'
newline | '\n'
DECL | function | _matching_devices
name | 'def'
name | '_matching_devices'
op | '('
name | 'spec'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'return'
name | 'all'
op | '('
name | 'pci_dev'
op | '.'
name | 'get'
op | '('
name | 'k'
op | ')'
op | '=='
name | 'v'
name | 'for'
name | 'k'
op | ','
name | 'v'
name | 'in'
name | 'six'
op | '.'
name | 'iteritems'
op | '('
name | 'spec'
op | ')'
op | ')'
newline | '\n'
nl | '\n'
dedent | ''
name | 'return'
name | 'any'
op | '('
name | '_matching_devices'
op | '('
name | 'spec'
op | ')'
name | 'for'
name | 'spec'
name | 'in'
name | 'specs'
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | parse_address
dedent | ''
name | 'def'
name | 'parse_address'
op | '('
name | 'address'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Returns (domain, bus, slot, function) from PCI address that is stored in\n PciDevice DB table.\n """'
newline | '\n'
name | 'm'
op | '='
name | '_PCI_ADDRESS_REGEX'
op | '.'
name | 'match'
op | '('
name | 'address'
op | ')'
newline | '\n'
name | 'if'
name | 'not'
name | 'm'
op | ':'
newline | '\n'
indent | ' '
name | 'raise'
name | 'exception'
op | '.'
name | 'PciDeviceWrongAddressFormat'
op | '('
name | 'address'
op | '='
name | 'address'
op | ')'
newline | '\n'
dedent | ''
name | 'return'
name | 'm'
op | '.'
name | 'groups'
op | '('
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | get_pci_address_fields
dedent | ''
name | 'def'
name | 'get_pci_address_fields'
op | '('
name | 'pci_addr'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'dbs'
op | ','
name | 'sep'
op | ','
name | 'func'
op | '='
name | 'pci_addr'
op | '.'
name | 'partition'
op | '('
string | "'.'"
op | ')'
newline | '\n'
name | 'domain'
op | ','
name | 'bus'
op | ','
name | 'slot'
op | '='
name | 'dbs'
op | '.'
name | 'split'
op | '('
string | "':'"
op | ')'
newline | '\n'
name | 'return'
op | '('
name | 'domain'
op | ','
name | 'bus'
op | ','
name | 'slot'
op | ','
name | 'func'
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | get_pci_address
dedent | ''
name | 'def'
name | 'get_pci_address'
op | '('
name | 'domain'
op | ','
name | 'bus'
op | ','
name | 'slot'
op | ','
name | 'func'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'return'
string | "'%s:%s:%s.%s'"
op | '%'
op | '('
name | 'domain'
op | ','
name | 'bus'
op | ','
name | 'slot'
op | ','
name | 'func'
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | get_function_by_ifname
dedent | ''
name | 'def'
name | 'get_function_by_ifname'
op | '('
name | 'ifname'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Given the device name, returns the PCI address of a device\n and returns True if the address in a physical function.\n """'
newline | '\n'
name | 'dev_path'
op | '='
string | '"/sys/class/net/%s/device"'
op | '%'
name | 'ifname'
newline | '\n'
name | 'sriov_totalvfs'
op | '='
number | '0'
newline | '\n'
name | 'if'
name | 'os'
op | '.'
name | 'path'
op | '.'
name | 'isdir'
op | '('
name | 'dev_path'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'try'
op | ':'
newline | '\n'
comment | '# sriov_totalvfs contains the maximum possible VFs for this PF'
nl | '\n'
indent | ' '
name | 'with'
name | 'open'
op | '('
name | 'dev_path'
op | '+'
name | '_SRIOV_TOTALVFS'
op | ')'
name | 'as'
name | 'fd'
op | ':'
newline | '\n'
indent | ' '
name | 'sriov_totalvfs'
op | '='
name | 'int'
op | '('
name | 'fd'
op | '.'
name | 'read'
op | '('
op | ')'
op | ')'
newline | '\n'
name | 'return'
op | '('
name | 'os'
op | '.'
name | 'readlink'
op | '('
name | 'dev_path'
op | ')'
op | '.'
name | 'strip'
op | '('
string | '"./"'
op | ')'
op | ','
nl | '\n'
name | 'sriov_totalvfs'
op | '>'
number | '0'
op | ')'
newline | '\n'
dedent | ''
dedent | ''
name | 'except'
op | '('
name | 'IOError'
op | ','
name | 'ValueError'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'return'
name | 'os'
op | '.'
name | 'readlink'
op | '('
name | 'dev_path'
op | ')'
op | '.'
name | 'strip'
op | '('
string | '"./"'
op | ')'
op | ','
name | 'False'
newline | '\n'
dedent | ''
dedent | ''
name | 'return'
name | 'None'
op | ','
name | 'False'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | is_physical_function
dedent | ''
name | 'def'
name | 'is_physical_function'
op | '('
name | 'domain'
op | ','
name | 'bus'
op | ','
name | 'slot'
op | ','
name | 'function'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'dev_path'
op | '='
string | '"/sys/bus/pci/devices/%(d)s:%(b)s:%(s)s.%(f)s/"'
op | '%'
op | '{'
nl | '\n'
string | '"d"'
op | ':'
name | 'domain'
op | ','
string | '"b"'
op | ':'
name | 'bus'
op | ','
string | '"s"'
op | ':'
name | 'slot'
op | ','
string | '"f"'
op | ':'
name | 'function'
op | '}'
newline | '\n'
name | 'if'
name | 'os'
op | '.'
name | 'path'
op | '.'
name | 'isdir'
op | '('
name | 'dev_path'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'sriov_totalvfs'
op | '='
number | '0'
newline | '\n'
name | 'try'
op | ':'
newline | '\n'
indent | ' '
name | 'with'
name | 'open'
op | '('
name | 'dev_path'
op | '+'
name | '_SRIOV_TOTALVFS'
op | ')'
name | 'as'
name | 'fd'
op | ':'
newline | '\n'
indent | ' '
name | 'sriov_totalvfs'
op | '='
name | 'int'
op | '('
name | 'fd'
op | '.'
name | 'read'
op | '('
op | ')'
op | ')'
newline | '\n'
name | 'return'
name | 'sriov_totalvfs'
op | '>'
number | '0'
newline | '\n'
dedent | ''
dedent | ''
name | 'except'
op | '('
name | 'IOError'
op | ','
name | 'ValueError'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'pass'
newline | '\n'
dedent | ''
dedent | ''
name | 'return'
name | 'False'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | _get_sysfs_netdev_path
dedent | ''
name | 'def'
name | '_get_sysfs_netdev_path'
op | '('
name | 'pci_addr'
op | ','
name | 'pf_interface'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Get the sysfs path based on the PCI address of the device.\n\n Assumes a networking device - will not check for the existence of the path.\n """'
newline | '\n'
name | 'if'
name | 'pf_interface'
op | ':'
newline | '\n'
indent | ' '
name | 'return'
string | '"/sys/bus/pci/devices/%s/physfn/net"'
op | '%'
op | '('
name | 'pci_addr'
op | ')'
newline | '\n'
dedent | ''
name | 'return'
string | '"/sys/bus/pci/devices/%s/net"'
op | '%'
op | '('
name | 'pci_addr'
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | get_ifname_by_pci_address
dedent | ''
name | 'def'
name | 'get_ifname_by_pci_address'
op | '('
name | 'pci_addr'
op | ','
name | 'pf_interface'
op | '='
name | 'False'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Get the interface name based on a VF\'s pci address\n\n The returned interface name is either the parent PF\'s or that of the VF\n itself based on the argument of pf_interface.\n """'
newline | '\n'
name | 'dev_path'
op | '='
name | '_get_sysfs_netdev_path'
op | '('
name | 'pci_addr'
op | ','
name | 'pf_interface'
op | ')'
newline | '\n'
name | 'try'
op | ':'
newline | '\n'
indent | ' '
name | 'dev_info'
op | '='
name | 'os'
op | '.'
name | 'listdir'
op | '('
name | 'dev_path'
op | ')'
newline | '\n'
name | 'return'
name | 'dev_info'
op | '.'
name | 'pop'
op | '('
op | ')'
newline | '\n'
dedent | ''
name | 'except'
name | 'Exception'
op | ':'
newline | '\n'
indent | ' '
name | 'raise'
name | 'exception'
op | '.'
name | 'PciDeviceNotFoundById'
op | '('
name | 'id'
op | '='
name | 'pci_addr'
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | get_mac_by_pci_address
dedent | ''
dedent | ''
name | 'def'
name | 'get_mac_by_pci_address'
op | '('
name | 'pci_addr'
op | ','
name | 'pf_interface'
op | '='
name | 'False'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Get the MAC address of the nic based on it\'s PCI address\n\n Raises PciDeviceNotFoundById in case the pci device is not a NIC\n """'
newline | '\n'
name | 'dev_path'
op | '='
name | '_get_sysfs_netdev_path'
op | '('
name | 'pci_addr'
op | ','
name | 'pf_interface'
op | ')'
newline | '\n'
name | 'if_name'
op | '='
name | 'get_ifname_by_pci_address'
op | '('
name | 'pci_addr'
op | ','
name | 'pf_interface'
op | ')'
newline | '\n'
name | 'addr_file'
op | '='
name | 'os'
op | '.'
name | 'path'
op | '.'
name | 'join'
op | '('
name | 'dev_path'
op | ','
name | 'if_name'
op | ','
string | "'address'"
op | ')'
newline | '\n'
nl | '\n'
name | 'try'
op | ':'
newline | '\n'
indent | ' '
name | 'with'
name | 'open'
op | '('
name | 'addr_file'
op | ')'
name | 'as'
name | 'f'
op | ':'
newline | '\n'
indent | ' '
name | 'mac'
op | '='
name | 'next'
op | '('
name | 'f'
op | ')'
op | '.'
name | 'strip'
op | '('
op | ')'
newline | '\n'
name | 'return'
name | 'mac'
newline | '\n'
dedent | ''
dedent | ''
name | 'except'
op | '('
name | 'IOError'
op | ','
name | 'StopIteration'
op | ')'
name | 'as'
name | 'e'
op | ':'
newline | '\n'
indent | ' '
name | 'LOG'
op | '.'
name | 'warning'
op | '('
name | '_LW'
op | '('
string | '"Could not find the expected sysfs file for "'
nl | '\n'
string | '"determining the MAC address of the PCI device "'
nl | '\n'
string | '"%(addr)s. May not be a NIC. Error: %(e)s"'
op | ')'
op | ','
nl | '\n'
op | '{'
string | "'addr'"
op | ':'
name | 'pci_addr'
op | ','
string | "'e'"
op | ':'
name | 'e'
op | '}'
op | ')'
newline | '\n'
name | 'raise'
name | 'exception'
op | '.'
name | 'PciDeviceNotFoundById'
op | '('
name | 'id'
op | '='
name | 'pci_addr'
op | ')'
newline | '\n'
nl | '\n'
nl | '\n'
DECL | function | get_vf_num_by_pci_address
dedent | ''
dedent | ''
name | 'def'
name | 'get_vf_num_by_pci_address'
op | '('
name | 'pci_addr'
op | ')'
op | ':'
newline | '\n'
indent | ' '
string | '"""Get the VF number based on a VF\'s pci address\n\n A VF is associated with an VF number, which ip link command uses to\n configure it. This number can be obtained from the PCI device filesystem.\n """'
newline | '\n'
name | 'VIRTFN_RE'
op | '='
name | 're'
op | '.'
name | 'compile'
op | '('
string | '"virtfn(\\d+)"'
op | ')'
newline | '\n'
name | 'virtfns_path'
op | '='
string | '"/sys/bus/pci/devices/%s/physfn/virtfn*"'
op | '%'
op | '('
name | 'pci_addr'
op | ')'
newline | '\n'
name | 'vf_num'
op | '='
name | 'None'
newline | '\n'
name | 'try'
op | ':'
newline | '\n'
indent | ' '
name | 'for'
name | 'vf_path'
name | 'in'
name | 'glob'
op | '.'
name | 'iglob'
op | '('
name | 'virtfns_path'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 'if'
name | 're'
op | '.'
name | 'search'
op | '('
name | 'pci_addr'
op | ','
name | 'os'
op | '.'
name | 'readlink'
op | '('
name | 'vf_path'
op | ')'
op | ')'
op | ':'
newline | '\n'
indent | ' '
name | 't'
op | '='
name | 'VIRTFN_RE'
op | '.'
name | 'search'
op | '('
name | 'vf_path'
op | ')'
newline | '\n'
name | 'vf_num'
op | '='
name | 't'
op | '.'
name | 'group'
op | '('
number | '1'
op | ')'
newline | '\n'
name | 'break'
newline | '\n'
dedent | ''
dedent | ''
dedent | ''
name | 'except'
name | 'Exception'
op | ':'
newline | '\n'
indent | ' '
name | 'pass'
newline | '\n'
dedent | ''
name | 'if'
name | 'vf_num'
name | 'is'
name | 'None'
op | ':'
newline | '\n'
indent | ' '
name | 'raise'
name | 'exception'
op | '.'
name | 'PciDeviceNotFoundById'
op | '('
name | 'id'
op | '='
name | 'pci_addr'
op | ')'
newline | '\n'
dedent | ''
name | 'return'
name | 'vf_num'
newline | '\n'
dedent | ''
endmarker | ''
end_unit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.