content stringlengths 7 1.05M |
|---|
def create(size, memory_buffer, temporary_directory, destination_directory, threads, buckets, bitfield,
           chia_location='chia', temporary2_directory=None, farmer_public_key=None, pool_public_key=None,
           exclude_final_directory=False):
    """Build the ``chia plots create`` command line as an argv list.

    Required flags (-k/-b/-t/-d/-r/-u) are always emitted; the optional ones
    are added only when their argument is supplied.  A flag whose value is the
    empty string (-e, -x) is emitted bare, with no value token.
    """
    options = {
        'k': size,
        'b': memory_buffer,
        't': temporary_directory,
        'd': destination_directory,
        'r': threads,
        'u': buckets,
    }
    if temporary2_directory is not None:
        options['2'] = temporary2_directory
    if farmer_public_key is not None:
        options['f'] = farmer_public_key
    if pool_public_key is not None:
        options['p'] = pool_public_key
    if bitfield is False:
        options['e'] = ''
    if exclude_final_directory:
        options['x'] = ''
    command = [chia_location, 'plots', 'create']
    for name, value in options.items():
        command.append('-' + name)
        if value != '':
            command.append(str(value))
    return command
|
'''
3. Design an ATM
Interviewers would want to see you discuss things like:
Overdrawing: What would you do when the ATM doesn’t have any cash left?
Pin Verification: What if a user enters a wrong PIN multiple times?
Card Reading: How would you detect if the card has been correctly inserted or not?
'''
|
#! /usr/bin/env python
"""
Warnings informing that something went differently than intended. Does not
include messages before exiting with code 1
"""
# Template: the named config file could not be found/opened; defaults apply.
CONFIG_OPEN_FAIL = """
Failed to find or open {} config file. Using default.
"""
# Emitted when no usable tax.summary file is present.
NO_TAX_FILE_FOUND = """
WARNING!!! No proper tax.summary file found. The analysis will be incomplete.
"""
# Template: the listed groups are too small and may distort the analysis.
JUNK_GROUPS_DETECTED = """
{} can distort the analysis due to size too small
"""
# Template: the listed junk groups are about to be removed.
JUNK_REMOVED = """
{} will be removed
"""
|
class StreamerDoesNotExistException(Exception):
    """Raised for a streamer that does not exist."""
    pass

class StreamerIsOfflineException(Exception):
    """Raised for a streamer that is currently offline."""
    pass

class WrongCookiesException(Exception):
    """Raised when the supplied cookies are wrong."""
    pass
|
def solution(A):
    """
    Codility NumberOfDiscIntersections - count pairs of intersecting discs.

    The J-th disc is drawn with its center at (J, 0) and radius A[J].  Each
    disc becomes a (start, True) / (end, False) event pair; events are sorted
    so that at equal coordinates starts come before ends.  Walking the events,
    a disc that starts while `active_circles` discs are open intersects each
    of them.

    Fix over the original: the debug `print` calls were removed so the
    function no longer pollutes stdout on every call.

    Reference -
    https://www.youtube.com/watch?v=HV8tzIiidSw
    http://www.lucainvernizzi.net/blog/2014/11/21/codility-beta-challenge-number-of-disc-intersections/
    :param A: list of disc radii
    :return: total intersections, or -1 if the count exceeds 10 ** 7
    """
    # Start and end points of every circle.
    circle_points = []
    for i, a in enumerate(A):
        # Example: radius 1 at center 0 -> events (-1, start) and (1, end).
        circle_points += [(i - a, True), (i + a, False)]
    # Starts sort before ends at the same coordinate (not True < not False).
    circle_points.sort(key=lambda x: (x[0], not x[1]))
    intersections, active_circles = 0, 0
    for _, is_beginning in circle_points:
        if is_beginning:
            # Every already-open disc intersects the disc starting here;
            # sorting guarantees those discs all overlap this start point.
            intersections += active_circles
            active_circles += 1
        else:
            active_circles -= 1
    # Codility caps the answer at 10 ** 7.
    if intersections > 10 ** 7:
        return -1
    return intersections
# Demo run of the disc-intersection solution; the sample answer is 11.
result = solution([1, 5, 2, 1, 4, 0])
print("")
print("Solution " + str(result))
"""
solution_(A - steps run---
Original disc positions [(-1, True), (1, False), (-4, True), (6, False), (0, True), (4, False), (2, True), (4, False), (0, True), (8, False), (5, True), (5, False)]
Sorted disc positions [(-4, True), (-1, True), (0, True), (0, True), (1, False), (2, True), (4, False), (4, False), (5, True), (5, False), (6, False), (8, False)]
This is start disc -> intersections 0 active_circles 1
This is start disc -> intersections 1 active_circles 2
This is start disc -> intersections 3 active_circles 3
This is start disc -> intersections 6 active_circles 4
Closing disc found.....
Reduce active circle 4
This is start disc -> intersections 9 active_circles 4
Closing disc found.....
Reduce active circle 4
Closing disc found.....
Reduce active circle 3
This is start disc -> intersections 11 active_circles 3
Closing disc found.....
Reduce active circle 3
Closing disc found.....
Reduce active circle 2
Closing disc found.....
Reduce active circle 1
Solution 11
"""
|
class CollectionParams:
    """Parameter bundle for collection/dataset/system counting.

    Pure data holder: every constructor argument is stored unchanged on an
    attribute of the same name.
    """
    def __init__(self, dataset_count_map, system_count_map, dataset_collection_count, dataset_collection_count_map,
                 system_collection_count, system_collection_count_map, collection_count):
        self.dataset_count_map = dataset_count_map
        self.system_count_map = system_count_map
        self.dataset_collection_count = dataset_collection_count
        self.dataset_collection_count_map = dataset_collection_count_map
        self.system_collection_count = system_collection_count
        self.system_collection_count_map = system_collection_count_map
        self.collection_count = collection_count
|
class Links(object):
    """Maintains a list of link maps (dicts) with query and merge operations.

    Fixes over the original version:
    - ``[x].extend(y)`` returns ``None`` (``list.extend`` mutates in place),
      so merging a list into a single-valued attribute silently replaced the
      attribute with ``None``; the merged list is now built by concatenation.
    - ``== None`` / ``!= None`` comparisons replaced with ``is``/``is not``.
    - loop temporaries are plain locals instead of instance attributes.
    """

    def __init__(self, links=None):
        """Initialize with a link map, a list of link maps, or nothing."""
        self._links = []
        if links is not None:
            self.add(links)

    def add(self, links):
        """links contains a map or array of maps in link-format"""
        if isinstance(links, list):
            self._links.extend(links)
        else:
            self._links.append(links)

    def get(self, selectMap=None):
        """Return all links, or only those matching selectMap."""
        if selectMap is None or len(selectMap) == 0:
            return self._links
        result = []
        for index in self.select(selectMap):
            result.append(self._links[index])
        return result

    def selectMerge(self, selectMap, mergeMap):
        """patch contains a selection map and merge map in JSON merge-patch format"""
        matches = self.select(selectMap)
        if len(matches) == 0:
            self.add(mergeMap)
            return
        # Reverse so multiple deletions don't shift earlier indices.
        matches.reverse()
        for index in matches:
            if mergeMap == {}:
                # Empty patch means: delete the selected link.
                del self._links[index]
                continue
            link = self._links[index]
            for attribute in mergeMap:
                merge_value = mergeMap[attribute]
                if merge_value is None:
                    # JSON merge-patch: null deletes the attribute.
                    del link[attribute]
                elif attribute not in link:
                    link[attribute] = merge_value
                elif isinstance(link[attribute], list):
                    if isinstance(merge_value, list):
                        # Adding a list to a list, factor out duplicates.
                        link[attribute].extend(
                            [attr for attr in merge_value
                             if attr not in link[attribute]])
                    elif merge_value not in link[attribute]:
                        link[attribute].append(merge_value)
                elif isinstance(merge_value, list):
                    # Adding a list to a single value results in a list.
                    if link[attribute] in merge_value:
                        link[attribute] = merge_value
                    else:
                        # BUG FIX: the original assigned the result of
                        # list.extend (always None); concatenate instead.
                        link[attribute] = [link[attribute]] + merge_value
                elif merge_value != link[attribute]:
                    # Adding a value to a different value results in a list.
                    link[attribute] = [link[attribute], merge_value]

    def select(self, selectMap):
        """selectMap contains a selection map in query filter format.

        Returns a list of indices into the link array that match the filter.
        """
        selection = []
        for link_index in range(len(self._links)):
            link = self._links[link_index]
            selected = True
            # Test each attribute in the selectMap.
            for attribute in selectMap:
                if attribute not in link:
                    selected = False
                    break
                wanted = selectMap[attribute]
                actual = link[attribute]
                if isinstance(wanted, list):
                    # Multi-value selector: all values must be present.
                    for attr_val in wanted:
                        if isinstance(actual, list):
                            if attr_val not in actual:
                                selected = False
                                break
                        elif attr_val != actual:
                            selected = False
                            break
                elif isinstance(actual, list):
                    # Single-value selector against a multi-value link.
                    if wanted not in actual:
                        selected = False
                        break
                elif wanted != actual:
                    selected = False
                    break
            if selected:
                selection.append(link_index)
        return selection
|
#https://atcoder.jp/contests/abc072/tasks/arc082_b
# Stdin/stdout script: reads N and a sequence p, then repeatedly swaps
# adjacent elements wherever p[now] == now + 1 holds (a fixed point),
# counting the swaps; presumably the goal is to remove all fixed points
# with the minimum number of adjacent swaps.
N = int(input())
p = list(map(int,input().split()))
now = count = 0
while(now<N):
    if p[now]==now+1:
        if now+1 != N:
            # Fixed point found: swap with the next element.
            tmp = p[now]
            p[now] = p[now+1]
            p[now+1] = tmp
            now -= 1
            count += 1
            continue
        else:
            # Last position: swap with the previous element instead.
            tmp = p[now]
            p[now] = p[now-1]
            p[now-1] = tmp
            now -= 1
            count += 1
            continue
    now += 1
print(count)
|
"""
In this chapter, we used the dictionary value
{'1h': 'bking', '6c': 'wqueen', '2g': 'bbishop', '5h': 'bqueen', '3e': 'wking'} to represent a chess board.
Write a function named isValidChessBoard() that takes a dictionary argument
and returns True or False depending on if the board is valid.
A valid board will have exactly one black king and exactly one white king.
Each player can only have at most 16 pieces, at most 8 pawns,
and all pieces must be on a valid space from '1a' to '8h';
that is, a piece can’t be on space '9z'.
The piece names begin with either a 'w' or 'b' to represent white or black,
followed by 'pawn', 'knight', 'bishop', 'rook', 'queen', or 'king'.
This function should detect when a bug has resulted in an improper chess board.
"""
def is_valid_chess_board(board):
    """Validate a chess-board dict of {space: piece}.

    Rules checked: exactly one 'bking' and one 'wking', at most 16 pieces and
    at most 8 pawns per side, every space within '1a'..'8h', and every piece
    name drawn from the known piece lists.

    Fixes over the original version:
    - the hand-written space table repeated the row-6 squares three times and
      omitted rows 7 and 8; the table is now generated for ranks 1-8.
    - an invalid board now returns False instead of implicitly returning None.
    """
    # All 64 legal squares: '1a' .. '8h'.
    valid_spaces = [str(rank) + file for rank in range(1, 9) for file in "abcdefgh"]
    # NOTE(review): '' and ' ' were accepted as "pieces" by the original lists
    # (and count toward the piece totals); kept for backward compatibility.
    black_valid_pieces = ['bpawn', 'bknight', 'bbishop', 'brook', 'bqueen', 'bking', "", " "]
    white_valid_pieces = ['wpawn', 'wknight', 'wbishop', 'wrook', 'wqueen', 'wking', "", " "]
    board_values = list(board.values())
    board_keys = list(board.keys())
    black_king = board_values.count("bking")
    white_king = board_values.count("wking")
    black_pawn_numbers = board_values.count("bpawn")
    white_pawn_numbers = board_values.count("wpawn")
    total_black = 0
    total_white = 0
    # Sum up the number of pieces on both sides; unknown names are invalid.
    for piece in board_values:
        if piece in black_valid_pieces:
            total_black += 1
        elif piece in white_valid_pieces:
            total_white += 1
        else:
            return False
    # One king per colour, at most 16 pieces and at most 8 pawns per side.
    if total_black <= 16 and total_white <= 16 and black_king == 1 and white_king == 1 and black_pawn_numbers <= 8 and white_pawn_numbers <= 8:
        # Every occupied space must be a real square.
        for space in board_keys:
            if space not in valid_spaces:
                return False
        return True
    return False
print(is_valid_chess_board({'1h': 'bking', '6c': 'wqueen', '2g': 'bbishop', '5h': 'bqueen', '3e': 'wking'}))
|
#!/bin/python3
# For each of T test cases (first input line), print the characters at even
# indices and the characters at odd indices of the string, space-separated.
for j in range(int(input())):
    st = input()
    print(st[::2], st[1::2])
class Solution(object):
    def strStr(self, haystack, needle):
        """
        Return the index of the first occurrence of needle in haystack,
        or -1 if needle is not part of haystack (empty needle -> 0).

        Fix over the original: the condition ``i + len(needle) < len(haystack)``
        wrongly rejected matches ending exactly at the end of the haystack
        (e.g. strStr("ab", "b") returned -1 instead of 1).

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        if not needle:
            return 0
        n = len(needle)
        # Only offsets where the needle still fits need to be checked.
        for i in range(len(haystack) - n + 1):
            if haystack[i:i + n] == needle:
                return i
        return -1
# Demo: 'll' first occurs in 'hello' at index 2.
haystack = "hello"
needle = "ll"
p = Solution()
print(p.strStr(haystack, needle))
# Puzzle input: the grid serial number.
serial = 9221

def get_power(position):
    """Power level of the fuel cell at (x, y) == (position[0], position[1])."""
    rack_id = position[0] + 10
    level = (rack_id * position[1] + serial) * rack_id
    # Keep only the hundreds digit, then shift it down by 5.
    return (abs(level) % 1000) // 100 - 5
def get_square(tlpos, size=3):
    """Total power of the size x size square whose top-left corner is tlpos."""
    xs = range(tlpos[0], tlpos[0] + size)
    ys = range(tlpos[1], tlpos[1] + size)
    return sum(grid[x][y] for x in xs for y in ys)
# Build the 300x300 power grid (1-based coordinates).
grid = {}
for row in range(1, 301):
    grid[row] = {}
    for col in range(1, 301):
        grid[row][col] = get_power((row, col))
# Search every top-left corner and square size for the maximum total power.
# NOTE(review): get_square recomputes each square from scratch, so this scan
# is roughly O(n^4) and slow; a summed-area table would make it fast.
top_sqr = (None, float("-inf"))
for row in range(1, 299):
    print("Row {r}".format(r=row))
    for col in range(1, 299):
        for size in range(1, 301-max((row, col))):
            c_squ = get_square((row, col), size)
            if top_sqr[1] < c_squ:
                top_sqr = ((row, col, size), c_squ)
print(",".join(map(str, top_sqr[0])))
|
BASE91_ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!#$%&()*+,./:;<=>?@[]^_`{|}~"'
MASK1 = 2**13 - 1
MASK2 = 2**14 - 1
MASK3 = 2**8 - 1
def b91encode(num):
encoded = ""
n = 0
b = 0
for digit in num.encode('latin-1'):
b |= (digit << n)
n += 8
if n > 13:
v = b & MASK1
if v > 88:
b >>= 13
n -= 13
else:
v = b & MASK2
b >>= 14
n -= 14
encoded += BASE91_ALPHA[v % 91] + BASE91_ALPHA[v // 91]
if n:
encoded += BASE91_ALPHA[b % 91]
if n > 7 or b > 90:
encoded += BASE91_ALPHA[b // 91]
return encoded
def b91decode(num):
    """Decode a base91 string back to text (inverse of b91encode)."""
    decoded = ""
    n = 0   # pending bit count
    b = 0   # bit accumulator
    v = -1  # first symbol of the current pair, or -1 when none pending
    for digit in num:
        c = BASE91_ALPHA.index(digit)
        if v < 0:
            # First symbol of a pair: remember it.
            v = c
        else:
            # Second symbol: combine the pair into a 13/14-bit chunk.
            v += c * 91
            b |= (v << n)
            if (v & MASK1) > 88:
                n += 13
            else:
                n += 14
            # Emit every complete byte accumulated so far.
            while n > 7:
                decoded += chr(b & MASK3)
                b >>= 8
                n -= 8
            v = -1
    if v+1:
        # A trailing unpaired symbol carries the final byte's bits.
        decoded += chr((b | v << n) & MASK3)
    return decoded
def b91check(num):
    """Return True when every character of `num` is a valid base91 symbol."""
    return all(ch in BASE91_ALPHA for ch in num)

assert b91decode(b91encode(BASE91_ALPHA)) == BASE91_ALPHA
# Integration domain and shared-data keys.
DOMAIN = "modernforms"
DEVICES = "devices"
COORDINATORS = "coordinators"
# Configuration option keys.
CONF_FAN_HOST = "fan_host"
CONF_FAN_NAME = "fan_name"
CONF_ENABLE_LIGHT = "enable_fan_light"
# Service name.
SERVICE_REBOOT = "reboot"
|
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A, B):
    """Return [fib(A[i] + 1) mod 2**B[i] for each i].

    Fibonacci numbers are computed once modulo 2**max(B); masking each answer
    with the smaller per-query modulus is then exact, because every 2**B[i]
    divides 2**max(B).

    Fix over the original: empty input is guarded, since max() of an empty
    sequence raises ValueError.
    """
    L = len(A)
    if L == 0:
        return []
    P_MOD = (1 << max(B)) - 1  # 2 ** max(B) - 1
    fiboSeq = [0] * (L + 2)
    fiboSeq[1] = 1
    for i in range(2, L + 2):
        fiboSeq[i] = (fiboSeq[i - 1] + fiboSeq[i - 2]) & P_MOD
    # Answer each query by masking down to its own modulus.
    return [fiboSeq[N + 1] & ((1 << b) - 1) for N, b in zip(A, B)]
|
'''
Desafio CPF
CPF = 168.995.350-09
--------------------
1 * 10 = 10 # 1 * 11 = 11
6 * 9 = 12 # 6 * 10 = 60
8 * 8 = 24 # 8 * 9 = 72
9 * 7 = 63 # 9 * 8 = 72
9 * 6 = 54 # 9 * 7 = 63
5 * 5 = 25 # 5 * 6 = 30
3 * 4 = 12 # 3 * 5 = 15
5 * 3 = 15 # 5 * 4 = 20
0 * 2 = 0 # 0 * 3 = 0
# 0 * 2 = 0
total = 297 total = 343
11 - (297 % 11) = 11 11 - ( 343 % 11) = 9
11 > 9 = 0
digito 1 = 0 Digito 2 = 9
'''
# Stdin script: validates a Brazilian CPF number by recomputing its two
# check digits (see the worked example in the docstring above).
# cpf = '16899535009'
cpf = input('Digite o numero do CPF: ')
# Keep the 9 base digits; the last 2 are the check digits to verify.
cpf_parc = cpf[:-2]
if len(cpf) != 11:
    print('Tamanho do numero de CPF incorreto!')
else:
    # Compute the first check digit: weighted sum with weights 10..2.
    s = 0
    for c, v in enumerate(range(10, 1, -1)):
        s += (int(cpf_parc[c]) * v )
    r = 11 - (s % 11)
    # A remainder above 9 maps to digit 0.
    if r > 9:
        cpf_parc += '0'
    else:
        cpf_parc += str(r)
    # print( cpf_parc )
    # Compute the second check digit: weights 11..2 over the 10 digits so far.
    s = 0
    for c, v in enumerate(range(11, 1, -1)):
        s += (int(cpf_parc[c]) * v )
    r = 11 - (s % 11)
    if r > 9:
        cpf_parc += '0'
    else:
        cpf_parc += str(r)
    # print( cpf_parc )
    fcpf = cpf[:3]+'.'+cpf[3:6]+'.'+cpf[6:9]+'-'+cpf[9:11] # formatted CPF
    if cpf == cpf_parc:
        print(f'Numero de CPF:{fcpf} esta CORRETO.')
    else:
        print(f'Numero de CPF:{fcpf} INCORRETO, favor verifique !')
|
# Word/fragment -> "n00b speak" replacements.
# NOTE(review): the name `dict` shadows the builtin; left unchanged because
# the function body looks it up by this name.
dict={"be":"b","before":"b4","are":"r","you":"u","please":"plz","people":"ppl","really":"rly","have":"haz","know":"no","fore":"4","for":"4","to":"2","too":"2"}
def n00bify(text):
    """Translate `text` into 'n00b' internet slang.

    Applies fragment substitutions of lengths 2, 3, 4 and 6 (preserving the
    capitalization of the first replaced character), converts 'oo' -> '00' and
    s/S -> z/Z, adds LOL/OMG prefixes, upper-cases every second word, and
    expands '?' and '!' runs based on the word count.
    """
    # Strip punctuation that would break fragment matching.
    text=text.replace("'","").replace(",","").replace(".","")
    res=text.split()
    for i,j in enumerate(res):
        # Substitute 2-character fragments (and 'oo' -> '00').
        for k in range(len(j)-1):
            if j[k:k+2].lower() in dict:
                res[i]=j[:k]+dict[j[k:k+2].lower()]+j[k+2:] if j[k].islower() else j[:k]+dict[j[k:k+2].lower()].capitalize()+j[k+2:]
            elif j[k:k+2].lower()=="oo":
                res[i]=res[i].replace(j[k:k+2], "00")
        # Substitute 3-character fragments.
        for k in range(len(j)-2):
            if j[k:k+3].lower() in dict:
                res[i]=j[:k]+dict[j[k:k+3].lower()]+j[k+3:] if j[k].islower() else j[:k]+dict[j[k:k+3].lower()].capitalize()+j[k+3:]
        # Substitute 4-character fragments.
        for k in range(len(j)-3):
            if j[k:k+4].lower() in dict:
                res[i]=j[:k]+dict[j[k:k+4].lower()]+j[k+4:] if j[k].islower() else j[:k]+dict[j[k:k+4].lower()].capitalize()+j[k+4:]
        # Substitute 6-character fragments ("before" / "people" / "really").
        for k in range(len(j)-5):
            if j[k:k+6].lower() in dict:
                res[i]=j[:k]+dict[j[k:k+6].lower()]+j[k+6:] if j[k].islower() else j[:k]+dict[j[k:k+6].lower()].capitalize()+j[k+6:]
        res[i]=res[i].replace("S", "Z").replace("s", "z")
    # Text starting with 'h' is shouted; text starting with 'w' gets "LOL".
    if res[0][0].lower() =="h":
        for i,j in enumerate(res):
            res[i]=j.upper()
    elif res[0][0].lower()=="w":
        res.insert(0,"LOL")
    temp=" ".join(res)
    # Long messages (32+ non-?/! characters) get an "OMG" as well.
    if (len(temp)-temp.count("?")-temp.count("!"))>=32:
        res.insert(1,"OMG") if res[0]=="LOL" else res.insert(0,"OMG")
    for i,j in enumerate(res):
        # Every second word is upper-cased.
        if (i+1)%2==0:
            res[i]=j.upper()
        # '?' multiplies by the word count; '!' becomes an "!1!1..." run.
        if "?" in j:
            res[i]=res[i].replace("?", "?"*len(res))
        if "!" in j:
            exclamation=""
            for k in range(len(res)):
                exclamation+="!" if k%2==0 else "1"
            res[i]=res[i].replace("!", exclamation)
    return " ".join(res)
# Stdin script: read two integers and report which one is larger
# (prompts and messages in Portuguese).
n1 = int(input('Digite o primeiro valor: '))
n2 = int(input('Digite o segundo valor: '))
if n1 > n2:
    print('O primeiro valor é o maior!')
elif n2 > n1:
    print('O segundo valor é o maior!')
else:
    print('Os dois valores são iguais!')
|
# Checks if the provided value is at least 5 characters long.
def validateMinCharLength(field):
    """True when `field` has at least 5 characters."""
    return len(field) >= 5

# Validates that the value provided by the user is a float above 0.
def validateValue(value):
    """True when `value` is a float strictly greater than 0."""
    return isinstance(value, float) and value > 0.0

# Validates that the number of units provided by the user is an int above 0.
def validateUnits(units):
    """True when `units` is an int strictly greater than 0."""
    return isinstance(units, int) and units > 0
def sortByBinaryOnes(numList):
    """Sort descending by number of 1-bits; ties broken by ascending value.

    (For equal 1-bit counts a smaller number also has the shorter binary
    string, so ascending value covers both tie-break rules.)
    """
    return sorted(numList, key=lambda num: (-bin(num).count("1"), num))

sortByBinaryOnes([1,15,5,7,3])
"""
http://www.codewars.com/kata/sort-by-binary-ones/train/python
In this example you need to implement a function that sort a list of integers based on it's binary representation.
The rules are simple:
sort the list based on the amount of 1's in the binary representation of each number.
if two numbers have the same amount of 1's, the shorter string goes first. (ex: "11" goes before "101" when sorting 3 and 5 respectively)
if the amount of 1's is same, lower decimal number goes first. (ex: 21 = "10101" and 25 = "11001", then 21 goes first as is lower)
Examples:
Input: [1,15,5,7,3]
( in binary strings is: ["1", "1111", "101", "111", "11"])
Output: [15, 7, 3, 5, 1]
(and after sortByBinaryOnes is: ["1111", "111", "11", "101", "1"])
def sortByBinaryOnes(numList):
    def key(n):
        s = format(n, 'b')
        return -s.count('1'), len(s), n
    return sorted(numList, key=key)
"""
|
# 44. Wildcard Matching
# ---------------------
#
# Given an input string (`s`) and a pattern (`p`), implement wildcard pattern matching with support for `'?'` and `'*'`
# where:
#
# * `'?'` Matches any single character.
# * `'*'` Matches any sequence of characters (including the empty sequence).
#
# The matching should cover the **entire** input string (not partial).
#
# ### Constraints:
#
# * `0 <= s.length, p.length <= 2000`
# * `s` contains only lowercase English letters.
# * `p` contains only lowercase English letters, `'?'` or `'*'`.
#
# Source: https://leetcode.com/problems/wildcard-matching/
# ### Author's remark:
#
# This naive solution demonstrates surprisingly decent results:
#
# > Runtime: 48 ms, faster than 91.54% of Python3 online submissions for Wildcard Matching.
# > Memory Usage: 14.2 MB, less than 96.90% of Python3 online submissions for Wildcard Matching.
def substr(text, pat, offset=0):
    """True when `pat` matches `text` at `offset` ('?' matches any char)."""
    limit = min(len(pat), len(text) - offset)
    matched = 0
    # Count matching characters until a mismatch or the window runs out.
    while matched < limit and (pat[matched] == '?' or pat[matched] == text[offset + matched]):
        matched += 1
    # Only a full-pattern match counts.
    return matched == len(pat)
def find(text, pat, offset=0):
    """Leftmost position >= offset where `pat` matches `text`, or -1."""
    for start in range(offset, len(text) - len(pat) + 1):
        if substr(text, pat, start):
            return start
    return -1
def findall(text, pats):
    """Yield the match position of each pattern in `pats`, located
    left-to-right without overlap; stops at the first pattern that cannot
    be found (fewer yielded positions than patterns signals failure)."""
    m = 0
    for pat in pats:
        loc = find(text, pat, m)
        if loc < 0:
            break
        yield loc
        # The next search resumes just past this match.
        m = loc + len(pat)
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        """Wildcard match of `s` against `p` ('?' = one char, '*' = any run).

        Splits `p` on '*' and greedily locates each literal segment
        left-to-right via findall; the first and last segments are then
        additionally anchored to the start and end of `s`.
        """
        pats = p.split('*')
        if len(pats) == 1:
            # No '*' at all: must be an exact same-length match.
            return len(s) == len(p) and substr(s, p)
        else:
            locs = list(findall(s, pats))
            prefix, suffix = pats[0], pats[-1]
            # All segments found, prefix anchored at 0, suffix anchored at end.
            return len(locs) == len(pats) and substr(s[:len(prefix)], prefix) and substr(s[-len(suffix):], suffix)
if __name__ == '__main__':
    s = Solution()
    # Example 1:
    #
    # Input: s = "aa", p = "a"
    # Output: false
    # Explanation: "a" does not match the entire string "aa".
    print(f"{s.isMatch(s='aa', p='a')} == false")
    # Example 2:
    #
    # Input: s = "aa", p = "*"
    # Output: true
    # Explanation: '*' matches any sequence.
    print(f"{s.isMatch(s='aa', p='*')} == true")
    # Example 3:
    #
    # Input: s = "cb", p = "?a"
    # Output: false
    # Explanation: '?' matches 'c', but the second letter is 'a', which does not match 'b'.
    print(f"{s.isMatch(s='cb', p='?a')} == false")
    # Example 4:
    #
    # Input: s = "adceb", p = "*a*b"
    # Output: true
    # Explanation: The first '*' matches the empty sequence, while the second '*' matches the substring "dce".
    print(f"{s.isMatch(s='adceb', p='*a*b')} == true")
    # Example 5:
    #
    # Input: s = "acdcb", p = "a*c?b"
    # Output: false
    print(f"{s.isMatch(s='acdcb', p='a*c?b')} == false")
    # Example 6:
    #
    # Input: s = "ab", p = "?*"
    # Output: true
    print(f"{s.isMatch(s='ab', p='?*')} == true")
    # Example 7:
    #
    # Input: s = "", p = "ab*"
    # Output: false  (corrected: this comment previously said "true", but
    # "ab*" cannot match the empty string, matching the assertion below)
    print(f"{s.isMatch(s='', p='ab*')} == false")
def grid_traveller_basic(n, m):
    """Count down/right paths across an n x m grid (naive recursion)."""
    if 0 in (n, m):
        return 0
    if n < 2 and m < 2:
        # 1x1 grid: a single (empty) path.
        return min(n, m)
    return grid_traveller_basic(n, m - 1) + grid_traveller_basic(n - 1, m)
def grid_traveller_memo(n, m, memo=None):
    """Count down/right paths across an n x m grid, with memoization.

    Fixes over the original:
    - the memo key was f"{n}{m}", so e.g. (12, 3) and (1, 23) both mapped to
      "123" and could return each other's answers; the key is now the tuple
      (n, m).
    - the default memo was a shared mutable dict(); it is now created fresh
      per top-level call (passing an explicit memo still works).
    """
    if memo is None:
        memo = {}
    if n == 0 or m == 0:
        return 0
    if n < 2 and m < 2:
        return min(n, m)
    key = (n, m)
    if key in memo:
        return memo[key]
    memo[key] = grid_traveller_memo(n - 1, m, memo) + grid_traveller_memo(
        n, m - 1, memo
    )
    return memo[key]
def grid_traveller_table(n, m):
    """Count down/right paths across an n x m grid (bottom-up DP table).

    Fix over the original: n == 0 or m == 0 used to raise IndexError when
    seeding table[1][1]; an empty grid now returns 0.
    """
    if n == 0 or m == 0:
        return 0
    table = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
    table[1][1] = 1
    # Propagate each cell's count to the cell below and to the right.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cur = table[i][j]
            if i < m:
                table[i + 1][j] += cur
            if j < n:
                table[i][j + 1] += cur
    return table[m][n]
|
"""
@title: Winter is coming
@author: DSAghicha (Darshaan Aghicha)
@access: public
"""
def mandatory_courses(course: list[list[int]],answer: list[list[int]]) -> list[bool]:
    """For each pair in `answer`, report whether it is a direct course pair
    or the remembered one-step indirect dependency implied by `course`.

    NOTE(review): only ADJACENT rows of `course` are chained
    (course[i][1] == course[i+1][0]), and each later chain overwrites
    `indirect_dependency`, so only the last such chain is remembered --
    confirm this is the intended behavior.
    """
    result: list[bool] = []
    indirect_dependency: list[int] = []
    for i in range(len(course)):
        if i + 1 < len(course) and course[i][1] == course[i+1][0]:
            # Chain two adjacent edges (a->b, b->c) into the implied pair a->c,
            # unless that pair is already a direct course edge.
            pr: list[int] = [course[i][0], course[i+1][1]]
            indirect_dependency = pr if pr not in course else []
    for answers in answer:
        if indirect_dependency == answers:
            result.append(True)
        else:
            result.append(answers in course)
    return result
def main() -> None:
    """Interactive driver: read the course edges and two query pairs from
    stdin, then report which queries are mandatory prerequisites."""
    try:
        num: int = int(input("Enter number of courses: "))
        courses: list[list[int]] = []
        # Reads num - 1 course pairs, each as two space-separated integers.
        for _ in range(num - 1):
            print("Enter course: ", end='')
            course = [int(x) for x in input().split(" ")]
            if len(course) != 2:
                raise ValueError
            courses.append(course)
        answers: list[list[int]] = []
        # Exactly two query pairs are read.
        for _ in range(2):
            print("Enter answer: ", end='')
            answer = [int(x) for x in input().split(" ")]
            if len(answer) != 2:
                raise ValueError
            answers.append(answer)
        output = mandatory_courses(courses, answers)
        print(f"Output: {output}")
        print(f"\n{'-' * 10}\n")
        for i in range(len(answers)):
            if output[i] is True:
                print(f"Course {answers[i][0]} is mandatory to take up course {answers[i][1]}.")
            elif output[i] is False:
                print(f"Course {answers[i][0]} is not mandatory to take up course {answers[i][1]}.")
    except ValueError:
        # NOTE(review): restarts the whole dialogue recursively on bad input;
        # repeated bad input grows the call stack.
        print("\nInvalid Input!\n")
        main()

if __name__ == "__main__":
    main()
|
# Literal list of holiday records for locale 'nl-BE' in 2019; each record
# holds date, description, locale, notes, region and a type code.
[
    {
        'date': '2019-01-01',
        'description': 'Nieuwjaar',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2019-04-21',
        'description': 'Pasen',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2019-04-22',
        'description': 'Paasmaandag',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2019-05-01',
        'description': 'Dag van de arbeid',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2019-05-30',
        'description': 'Onze Lieve Heer hemelvaart',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2019-06-09',
        'description': 'Pinksteren',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2019-06-10',
        'description': 'Pinkstermaandag',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRV'
    },
    {
        'date': '2019-07-21',
        'description': 'Nationale feestdag',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2019-08-15',
        'description': 'Onze Lieve Vrouw hemelvaart',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRF'
    },
    {
        'date': '2019-11-01',
        'description': 'Allerheiligen',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRF'
    },
    {
        'date': '2019-11-11',
        'description': 'Wapenstilstand',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NF'
    },
    {
        'date': '2019-12-25',
        'description': 'Kerstmis',
        'locale': 'nl-BE',
        'notes': '',
        'region': '',
        'type': 'NRF'
    }
]
class NotEnoughSpace(Exception):
    """Raised when there is not enough space to proceed."""
    pass
class UnknownNodeType(Exception):
    """Raised for an expression node whose type is not handled.

    The message dumps the node's type, dir() listing and public attribute
    values to help diagnose which node type lacks support.

    Fix over the original: corrected the 'Unkown' typo in the error message.
    """
    def __init__(self, expr):
        self.expr = expr

    def __str__(self):
        # Render every public attribute as name=value.
        attrs = ', '.join('%s=%s' % (a, getattr(self.expr, a))
                          for a in dir(self.expr)
                          if not a.startswith('_'))
        return (('Unknown expression type: %s;\n\n'
                 'dir(expr) = %s\n\n'
                 'attrs: %s') % (type(self.expr),
                                 dir(self.expr), attrs))
|
# Default token range bounds (full signed 64-bit integer range).
default_min_token = -9223372036854775808
default_max_token = 9223372036854775807
# Bounded capacities for the processing/reporting queues.
split_q_size = 20000
worker_q_size = 20000
mapper_q_size = 20000
reducer_q_size = 20000
results_q_size = 20000
stats_q_size = 20000
# you can also specify your database credentials here
# when specified here, they will take precedence over same
# settings specified on the CLI
#
#db_user = "testuser"
#db_password = "testpass"
#ssl_cert = "test.cer.pem"
#ssl_key = "test.key.pem"
#ssl_cacert = "ca.crt"
class Solution():
    def missingNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        note: the max num in nums is len(nums)
        """
        # The full range 0..n sums to n*(n+1)/2; the shortfall is the
        # missing value (Gauss's formula).
        expected_total = len(nums) * (len(nums) + 1) // 2
        return expected_total - sum(nums)
|
# Maps an image query name to the badge file served and its media type.
image_queries = {
    "pixel": {"filename": "pixel.gif", "media_type": "image/gif"},
    "gif": {"filename": "badge.gif", "media_type": "image/gif"},
    "flat": {"filename": "badge-flat.svg", "media_type": "image/svg+xml"},
    "flat-gif": {"filename": "badge-flat.gif", "media_type": "image/gif"},
    "other": {"filename": "badge.svg", "media_type": "image/svg+xml"},
}
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# @lint-ignore-every BUCKRESTRICTEDSYNTAX
"""
shape.bzl provides a convenient strongly-typed bridge from Buck bzl parse
time to Python runtime.
## Shape objects
Shape objects are immutable instances of a shape type, that have been
validated to match the shape type spec as described below.
## Shape Types
Shape types are a collection of strongly typed fields that can be validated
at Buck parse time (by `shape.new`) and at Python runtime (by `shape.loader`
implementations).
## Field Types
A shape field is a named member of a shape type. There are a variety of field
types available:
primitive types (bool, int, float, str)
other shapes
homogeneous lists of a single `field` element type
dicts with homogeneous key `field` types and a homogeneous `field` value type
heterogeneous tuples with `field` element types
enums with string values
unions via shape.union(type1, type2, ...)
If using a union, use the most specific type first as Pydantic will attempt to
coerce to the types in the order listed
(see https://pydantic-docs.helpmanual.io/usage/types/#unions) for more info.
## Optional and Defaulted Fields
By default, fields are required to be set at instantiation time
(`shape.new`).
Fields declared with `shape.field(..., default='val')` do not have to be
instantiated explicitly.
Additionally, fields can be marked optional by using the `optional` kwarg in
`shape.field` (or any of the collection field types: `shape.list`,
`shape.tuple`, or `shape.dict`).
For example, `shape.field(int, optional=True)` denotes an integer field that
may or may not be set in a shape object.
Obviously, optional fields are still subject to the same type validation as
non-optional fields, but only if they have a non-None value.
## Loaders
`shape.loader` codegens a type-hinted Python library that is capable of
parsing and validating a shape object at runtime.
The return value of shape.loader is the fully-qualified name of the
`python_library` rule that contains the implementation of this loader.
## Serialization formats
shape.bzl provides two mechanisms to pass shape objects to Python runtime code.
`shape.json_file` dumps a shape object to an output file. This can be read
from a file or resource, using `read_resource` or `read_file` of the
generated loader class.
`shape.python_data` dumps a shape object to a raw python source file. This
is useful for some cases where a python_binary is expected to be fully
self-contained, but still require some build-time information. It is also
useful in cases when shapes are being dynamically generated based on inputs
to a macro. See the docblock of the function for an example.
## Naming Conventions
Shape types should be named with a suffix of '_t' to denote that it is a
shape type.
Shape instances should conform to whatever convention is used where they are
declared (usually snake_case variables).
## Example usage
Inspired by `image_actions/mount.bzl`:
```
mount_t = shape.shape(
mount_config=shape.shape(
build_source=shape.shape(
source=str,
type=str,
),
default_mountpoint=str,
is_directory=bool,
),
mountpoint = shape.field(str, optional=True),
target = shape.field(str, optional=True),
)
mount = shape.new(
mount_t,
mount_config=shape.new(
mount.mount_config,
build_source=shape.new(
mount.mount_config.build_source,
source="/etc/fbwhoami",
type="host",
),
default_mountpoint="/etc/fbwhoami",
is_directory=False,
),
)
```
See tests/shape_test.bzl for full example usage and selftests.
"""
load("@bazel_skylib//lib:shell.bzl", "shell")
load("@bazel_skylib//lib:types.bzl", "types")
load(":oss_shim.bzl", "buck_genrule", "python_library", "target_utils", "third_party")
load(":sha256.bzl", "sha256_b64")
load(":structs.bzl", "structs")
load(":target_helpers.bzl", "antlir_dep", "normalize_target")
# Error text shown when a shape containing layer/target fields is about to be
# serialized into cacheable buck output; it suggests the safe alternatives.
_SERIALIZING_LOCATION_MSG = (
    "shapes with layer/target fields cannot safely be serialized in the" +
    " output of a buck target.\n" +
    "For buck_genrule uses, consider passing an argument with the (shell quoted)" +
    " result of 'shape.do_not_cache_me_json'\n" +
    "For unit tests, consider setting an environment variable with the same" +
    " JSON string"
)
# Sentinel marking "no default value was supplied for this field".
_NO_DEFAULT = struct(no_default = True)
def _python_type(t):
    """Map a shape type/field `t` to the Python type annotation string used
    by the codegen below (e.g. `Optional[int]`, `Mapping[str, str]`)."""
    if t == int:
        return "int"
    if t == bool:
        return "bool"
    if t == str:
        return "str"
    if _is_collection(t):
        if t.collection == dict:
            k, v = t.item_type
            return "Mapping[{}, {}]".format(_python_type(k), _python_type(v))
        if t.collection == list:
            # list input is codegened as a homogenous tuple so that the
            # resulting field in the python class reflects the readonly nature
            # of the source
            return "Tuple[{}, ...]".format(_python_type(t.item_type))
        if t.collection == tuple:
            return "Tuple[{}]".format(", ".join([_python_type(x) for x in t.item_type]))
    if _is_enum(t):
        return "_".join([str(v.capitalize()) for v in t.enum])
    if _is_field(t):
        python_type = _python_type(t.type)
        if t.optional:
            python_type = "Optional[{}]".format(python_type)
        return python_type
    if _is_shape(t):
        # deterministically name the class based on the shape field names and types
        # to allow for buck caching and proper starlark runtime compatibility
        return "_" + sha256_b64(
            str({key: _python_type(field) for key, field in t.fields.items()}),
        ).replace("-", "_")
    if _is_union(t):
        type_names = [_python_type(union_t) for union_t in t.union_types]
        return "Union[{}]".format(", ".join(type_names))
    # If t is a string, then it should be the name of a type that will exist in
    # the Shape generated code context
    if types.is_string(t):
        return t
    fail("unknown type {}".format(t))  # pragma: no cover
# Poor man's debug pretty-printing. Better version coming on a stack.
def _pretty(x):
    # Structs render as plain dicts; everything else is returned unchanged.
    return structs.to_dict(x) if structs.is_struct(x) else x
def _get_is_instance_error(val, t):
    # Returns an error string if `val` is not an instance of shape type `t`,
    # otherwise None.
    if not _is_instance(val, t):
        return (
            (
                "{} is not an instance of {} -- note that structs & dicts " +
                "are NOT currently automatically promoted to shape"
            ).format(
                _pretty(val),
                _pretty(t),
            )
        )
    return None
def _check_type(x, t):
    """Check that x is an instance of t.
    This is a little more complicated than `isinstance(x, t)`, and supports
    more use cases. _check_type handles primitive types (bool, int, str),
    shapes and collections (dict, list, tuple).
    Return: None if successful, otherwise a str to be passed to `fail` at a
    site that has more context for the user
    """
    if t == int:
        if types.is_int(x):
            return None
        return "expected int, got {}".format(x)
    if t == bool:
        if types.is_bool(x):
            return None
        return "expected bool, got {}".format(x)
    if t == str:
        if types.is_string(x):
            return None
        return "expected str, got {}".format(x)
    if _is_enum(t):
        if x in t.enum:
            return None
        return "expected one of {}, got {}".format(t.enum, x)
    if t == "Path":
        # Paths are plain strings at the Starlark level.
        return _check_type(x, str)
    if t == "Target":
        type_error = _check_type(x, str)
        if not type_error:
            # If parsing the target works, we don't have an error
            if target_utils.parse_target(x):
                return None
        else:
            return type_error
        # NOTE(review): if `parse_target(x)` returns falsy, control falls
        # through to the generic "unsupported type" error below -- confirm
        # parse_target fails loudly on malformed targets.
    if _is_field(t):
        if t.optional and x == None:
            return None
        return _check_type(x, t.type)
    if _is_shape(t):
        # Don't need type-check the internals of `x` because we trust it to
        # have been type-checked at the time of construction.
        return _get_is_instance_error(x, t)
    if _is_collection(t):
        return _check_collection_type(x, t)
    if _is_union(t):
        _matched_type, error = _find_union_type(x, t)
        return error
    return "unsupported type {}".format(t)  # pragma: no cover
# Returns a mutually exclusive tuple:
# ("matched type" or None, "error if no type matched" or None)
def _find_union_type(x, t):
    # Tries each member type in declaration order; first match wins.
    type_errors = []
    for union_t in t.union_types:
        type_error = _check_type(x, union_t)
        if type_error == None:
            return union_t, None
        type_errors.append(type_error)
    return None, "{} not matched in union {}: {}".format(
        x,
        t.union_types,
        "; ".join(type_errors),
    )
# Returns a mutually exclusive tuple:
# ([tuple type, tuple element] or None, "type error" or None)
def _values_and_types_for_tuple(x, t):
    # Pairs each tuple element with its declared item type, type-checking as
    # it goes; lengths must match exactly.
    if not _is_collection(t) or t.collection != tuple:  # pragma: no cover
        # This is an assertion, not a user error.
        fail("{} is not a tuple type (value {})".format(_pretty(t), _pretty(x)))
    if not types.is_list(x) and not types.is_tuple(x):
        return None, "{} is not tuple".format(x)
    if len(x) != len(t.item_type):
        return None, "length of {} does not match {}".format(
            _pretty(x),
            _pretty(t.item_type),
        )
    # Explicit `list` since the tests run as Python, where `zip` is a generator
    values_and_types = list(zip(x, t.item_type))
    for i, (val, item_type) in enumerate(values_and_types):
        type_error = _check_type(val, item_type)
        if type_error:
            return None, "item {}: {}".format(i, type_error)
    return values_and_types, None
def _check_collection_type(x, t):
    # Type-check a dict/list/tuple collection value; returns an error string
    # or None on success (same contract as `_check_type`).
    if t.collection == dict:
        if not types.is_dict(x):
            return "{} is not dict".format(x)
        key_type, val_type = t.item_type
        for key, val in x.items():
            key_type_error = _check_type(key, key_type)
            if key_type_error:
                return "key: " + key_type_error
            val_type_error = _check_type(val, val_type)
            if val_type_error:
                return "val: " + val_type_error
        return None
    if t.collection == list:
        # Lists accept either a Starlark list or tuple value.
        if not types.is_list(x) and not types.is_tuple(x):
            return "{} is not list".format(x)
        for i, val in enumerate(x):
            type_error = _check_type(val, t.item_type)
            if type_error:
                return "item {}: {}".format(i, type_error)
        return None
    if t.collection == tuple:
        _values_and_types, error = _values_and_types_for_tuple(x, t)
        return error
    return "unsupported collection type {}".format(t.collection)  # pragma: no cover
def _shapes_for_field(field_or_type):
    # recursively codegen classes for every shape that is contained in this
    # field, or any level of nesting beneath
    # Returns a list of python source lines.
    src = []
    if _is_field(field_or_type):
        field = field_or_type
        if _is_shape(field.type):
            src.extend(_codegen_shape(field.type))
        if _is_collection(field.type):
            item_types = []
            # some collections have multiple types and some have only one
            if types.is_list(field.type.item_type) or types.is_tuple(field.type.item_type):
                item_types = list(field.type.item_type)
            else:
                item_types = [field.type.item_type]
            for t in item_types:
                src.extend(_shapes_for_field(t))
        if _is_enum(field.type):
            src.extend(_codegen_enum(field.type))
    elif _is_shape(field_or_type):
        src.extend(_codegen_shape(field_or_type))
    return src
def _codegen_field(name, field):
    # for nested shapes, the class definitions must be listed in the body
    # before the fields, so that forward references are avoided
    # Returns python source lines declaring `name: type [= default]`.
    src = []
    python_type = _python_type(field)
    src.extend(_shapes_for_field(field))
    if field.default == _NO_DEFAULT:
        src.append("{}: {}".format(name, python_type))
    else:
        default_repr = repr(field.default)
        if structs.is_struct(field.default):
            # Struct defaults are reconstructed via the generated class.
            default_repr = "{}(**{})".format(python_type, repr(
                _as_serializable_dict(field.default),
            ))
        src.append("{}: {} = {}".format(name, python_type, default_repr))
    return src
def _codegen_shape(shape, classname = None):
    # Emit python source lines defining a `Shape` subclass for `shape`.
    if classname == None:
        classname = _python_type(shape)
    src = [
        "class {}(Shape):".format(classname),
        "    __GENERATED_SHAPE__ = True",
    ]
    for name, field in shape.fields.items():
        src.extend(["    " + line for line in _codegen_field(name, field)])
    return src
def _codegen_enum(enum):
    # Emit python source lines defining an `Enum` subclass for `enum`.
    classname = _python_type(enum)
    src = [
        "class {}(Enum):".format(classname),
    ]
    src.extend(["    {} = {}".format(value.upper(), repr(value)) for value in enum.enum])
    return src
def _field(type, optional = False, default = _NO_DEFAULT):
    # Wrap a raw type into the rich field record; optional fields with no
    # explicit default get None.
    if optional and default == _NO_DEFAULT:
        default = None
    return struct(
        type = type,
        optional = optional,
        default = default,
    )
def _is_field(x):
    # A field is a struct with exactly the keys produced by `_field`.
    return structs.is_struct(x) and sorted(structs.to_dict(x).keys()) == sorted(["type", "optional", "default"])
def _dict(key_type, val_type, **field_kwargs):
    # Field holding a homogeneous dict of key_type -> val_type.
    return _field(
        type = struct(
            collection = dict,
            item_type = (key_type, val_type),
        ),
        **field_kwargs
    )
def _list(item_type, **field_kwargs):
    # Field holding a homogeneous list of item_type.
    return _field(
        type = struct(
            collection = list,
            item_type = item_type,
        ),
        **field_kwargs
    )
def _tuple(*item_types, **field_kwargs):
    # Field holding a fixed-length, heterogeneously-typed tuple.
    return _field(
        type = struct(
            collection = tuple,
            item_type = item_types,
        ),
        **field_kwargs
    )
def _is_collection(x):
    return structs.is_struct(x) and sorted(structs.to_dict(x).keys()) == sorted(["collection", "item_type"])
def _is_union(x):
    return structs.is_struct(x) and sorted(structs.to_dict(x).keys()) == sorted(["union_types"])
def _union_type(*union_types):
    """
    Define a new union type that can be used when defining a field. Most
    useful when a union type is meant to be typedef'd and reused. To define
    a shape field directly, see shape.union.
    Example usage:
    ```
    mode_t = shape.union_t(int, str)  # could be 0o644 or "a+rw"
    type_a = shape.shape(mode=mode_t)
    type_b = shape.shape(mode=shape.field(mode_t, optional=True))
    ```
    """
    if len(union_types) == 0:
        # Fixed error message (previously the garbled "at one type").
        fail("union must specify at least one type")
    return struct(
        union_types = union_types,
    )
def _union(*union_types, **field_kwargs):
    # Field whose value may be any one of `union_types`.
    return _field(
        type = _union_type(*union_types),
        **field_kwargs
    )
def _enum(*values, **field_kwargs):
    # since enum values go into class member names, they must be strings
    for val in values:
        if not types.is_string(val):
            fail("all enum values must be strings, got {}".format(_pretty(val)))
    return _field(
        type = struct(
            enum = tuple(values),
        ),
        **field_kwargs
    )
def _is_enum(t):
    return structs.is_struct(t) and sorted(structs.to_dict(t).keys()) == sorted(["enum"])
def _path(**field_kwargs):
    # A filesystem path, represented as a plain string at the bzl level.
    return _field(type = "Path", **field_kwargs)
# A target is special kind of Path in that it will be resolved to an on-disk location
# when the shape is rendered to json. But when the shape instance is being
# used in bzl macros, the field will be a valid buck target.
def _target(**field_kwargs):
    return _field(type = "Target", **field_kwargs)
def _shape(**fields):
    """
    Define a new shape type with the fields as given by the kwargs.
    Example usage:
    ```
    shape.shape(hello=str)
    ```
    """
    for name, f in fields.items():
        # Avoid colliding with `__shape__`. Also, in Python, `_name` is private.
        if name.startswith("_"):
            fail("Shape field name {} must not start with _: {}".format(
                name,
                _pretty(fields),
            ))
        # transparently convert fields that are just a type have no options to
        # the rich field type for internal use
        if not hasattr(f, "type") or _is_union(f):
            fields[name] = _field(f)
    return struct(
        fields = fields,
        # for external usage, make the fields top-level attributes
        **{key: f.type for key, f in fields.items()}
    )
def _is_shape(x):
    # A shape type is a struct with a `fields` dict plus one attribute per field.
    if not structs.is_struct(x):
        return False
    if not hasattr(x, "fields"):
        return False
    return sorted(structs.to_dict(x).keys()) == sorted(["fields"] + list(x.fields.keys()))
def _shape_defaults_dict(shape):
    # Collect {field name: default} for every field declaring a default.
    defaults = {}
    for key, field in shape.fields.items():
        if field.default != _NO_DEFAULT:
            defaults[key] = field.default
    return defaults
def _new_shape(shape, **fields):
    """
    Type check and instantiate a struct of the given shape type using the
    values from the **fields kwargs.
    Example usage:
    ```
    example_t = shape.shape(hello=str)
    example = shape.new(example_t, hello="world")
    ```
    """
    with_defaults = _shape_defaults_dict(shape)
    with_defaults.update(fields)
    for field, value in fields.items():
        if field not in shape.fields:
            fail("field `{}` is not defined in the shape".format(field))
        error = _check_type(value, shape.fields[field])
        if error:
            fail(error)
    # NOTE(review): only explicitly-passed fields are type-checked here; a
    # required (no-default) field that is simply omitted is not caught at
    # construction time -- confirm this is intentional.
    return struct(__shape__ = shape, **with_defaults)
def _loader(name, shape, classname = "shape", **kwargs):  # pragma: no cover
    """codegen a fully type-hinted python source file to load the given shape"""
    if not _is_shape(shape):
        fail("expected shape type, got {}".format(shape))
    python_src = "from typing import *\nfrom antlir.shape import *\n"
    python_src += "\n".join(_codegen_shape(shape, classname))
    # Generated source is materialized via a genrule, then exposed as a lib.
    buck_genrule(
        name = "{}.py".format(name),
        cmd = "echo {} > $OUT".format(shell.quote(python_src)),
        # Antlir users should not directly use `shape`, but we do use it
        # as an implementation detail of "builder" / "publisher" targets.
        antlir_rule = "user-internal",
    )
    python_library(
        name = name,
        srcs = {":{}.py".format(name): "{}.py".format(name)},
        deps = [antlir_dep(":shape")],
        # Antlir users should not directly use `shape`, but we do use it
        # as an implementation detail of "builder" / "publisher" targets.
        antlir_rule = "user-internal",
        **kwargs
    )
    return normalize_target(":" + name)
# Does a recursive (deep) copy of `val` which is expected to be of type
# `t` (in the `shape` sense of type compatibility).
#
# `opts` changes the output as follows:
#
# - Set `opts.include_dunder_shape == False` to strip `__shape__` from the
#   resulting instance structs. This is desirable when serializing,
#   because that field will e.g. fail with `struct.to_json()`.
#
# - `opts.on_target_fields` has 3 possible values:
#
#   * "preserve": Leave the field as a `//target:path` string.
#
#   * "fail": Fails at Buck parse-time. Used for scenarios that cannot
#     reasonably support target -> buck output path resolution, like
#     `shape.json_file()`. But, in the future, we should be able to
#     migrate these to a `target_tagger.bzl`-style approach.
#
#   * "uncacheable_location_macro"`, this will replace fields of
#     type `Target` with a struct that has the target name and its on-disk
#     path generated via a `$(location )` macro. This MUST NOT be
#     included in cacheable Buck outputs.
def _recursive_copy_transform(val, t, opts):
    if _is_shape(t):
        error = _get_is_instance_error(val, t)
        if error:  # pragma: no cover -- an internal invariant, not a user error
            fail(error)
        new = {}
        for name, field in t.fields.items():
            new[name] = _recursive_copy_transform(
                # The `_is_instance` above will ensure that `getattr` succeeds
                getattr(val, name),
                field,
                opts,
            )
        if opts.include_dunder_shape:
            if val.__shape__ != t:  # pragma: no cover
                fail("__shape__ {} didn't match type {}".format(
                    _pretty(val.__shape__),
                    _pretty(t),
                ))
            new["__shape__"] = t
        return struct(**new)
    elif _is_field(t):
        if t.optional and val == None:
            return None
        return _recursive_copy_transform(val, t.type, opts)
    elif _is_collection(t):
        if t.collection == dict:
            # NOTE: only dict *values* are transformed; keys are assumed to be
            # primitives per the shape.dict field constructor.
            return {
                k: _recursive_copy_transform(v, t.item_type[1], opts)
                for k, v in val.items()
            }
        elif t.collection == list:
            return [
                _recursive_copy_transform(v, t.item_type, opts)
                for v in val
            ]
        elif t.collection == tuple:
            values_and_types, error = _values_and_types_for_tuple(val, t)
            if error:  # pragma: no cover
                fail(error)
            return [
                _recursive_copy_transform(item_val, item_t, opts)
                for (item_val, item_t) in values_and_types
            ]
        # fall through to fail
    elif _is_union(t):
        matched_type, error = _find_union_type(val, t)
        if error:  # pragma: no cover
            fail(error)
        return _recursive_copy_transform(val, matched_type, opts)
    elif t == "Target":
        if opts.on_target_fields == "fail":
            fail(_SERIALIZING_LOCATION_MSG)
        elif opts.on_target_fields == "uncacheable_location_macro":
            return struct(
                name = val,
                path = "$(location {})".format(val),
            )
        elif opts.on_target_fields == "preserve":
            return val
        fail(
            # pragma: no cover
            "Unknown on_target_fields: {}".format(opts.on_target_fields),
        )
    elif t == int or t == bool or t == str or t == "Path" or _is_enum(t):
        return val
    fail(
        # pragma: no cover
        "Unknown type {} for {}".format(_pretty(t), _pretty(val)),
    )
# Strips `__shape__` and fails on Target fields, so the result is safe to
# embed in cacheable Buck output.
def _safe_to_serialize_instance(instance):
    return _recursive_copy_transform(
        instance,
        instance.__shape__,
        struct(include_dunder_shape = False, on_target_fields = "fail"),
    )
def _python_data(
        name,
        instance,
        module = None,
        classname = "shape",
        **python_library_kwargs):  # pragma: no cover
    """
    Codegen a static shape data structure that can be directly 'import'ed by
    Python. The object is available under the name "data". A common use case
    is to call shape.python_data inline in a target's `deps`, with `module`
    (defaults to `name`) then representing the name of the module that can be
    imported in the underlying file.
    Example usage:
    ```
    python_binary(
        name = provided_name,
        deps = [
            shape.python_data(
                name = "bin_bzl_args",
                instance = shape.new(
                    some_shape_t,
                    var = input_var,
                ),
            ),
        ],
        ...
    )
    ```
    can then be imported as:
        from .bin_bzl_args import data
    """
    shape = instance.__shape__
    instance = _safe_to_serialize_instance(instance)
    # Generated module: the class definition plus a `data` constant parsed
    # from the instance's JSON form.
    python_src = "from typing import *\nfrom antlir.shape import *\n"
    python_src += "\n".join(_codegen_shape(shape, classname))
    python_src += "\ndata = {classname}.parse_raw({shape_json})".format(
        classname = classname,
        shape_json = repr(instance.to_json()),
    )
    if not module:
        module = name
    buck_genrule(
        name = "{}.py".format(name),
        cmd = "echo {} >> $OUT".format(shell.quote(python_src)),
        # Antlir users should not directly use `shape`, but we do use it
        # as an implementation detail of "builder" / "publisher" targets.
        antlir_rule = "user-internal",
    )
    python_library(
        name = name,
        srcs = {":{}.py".format(name): "{}.py".format(module)},
        deps = [
            antlir_dep(":shape"),
            third_party.library("pydantic", platform = "python"),
        ],
        # Antlir users should not directly use `shape`, but we do use it
        # as an implementation detail of "builder" / "publisher" targets.
        antlir_rule = "user-internal",
        **python_library_kwargs
    )
    return normalize_target(":" + name)
def _json_file(name, instance, visibility = None):  # pragma: no cover
    """
    Serialize the given shape instance to a JSON file that can be used in the
    `resources` section of a `python_binary` or a `$(location)` macro in a
    `buck_genrule`.
    Warning: this will fail to serialize any shape type that contains a
    reference to a target location, as that cannot be safely cached by buck.
    """
    instance = _safe_to_serialize_instance(instance).to_json()
    # The JSON string is shell-quoted into the genrule command line.
    buck_genrule(
        name = name,
        cmd = "echo {} > $OUT".format(shell.quote(instance)),
        # Antlir users should not directly use `shape`, but we do use it
        # as an implementation detail of "builder" / "publisher" targets.
        antlir_rule = "user-internal",
        visibility = visibility,
    )
    return normalize_target(":" + name)
def _do_not_cache_me_json(instance):
    """
    Serialize the given shape instance to a JSON string, which is the only
    way to safely refer to other Buck targets' locations in the case where
    the binary being invoked with a certain shape instance is cached.
    Warning: Do not ever put this into a target that can be cached, it should
    only be used in cmdline args or environment variables.
    """
    # Target fields become {name, "$(location ...)"} structs, which is what
    # makes the output uncacheable.
    return _recursive_copy_transform(
        instance,
        instance.__shape__,
        struct(
            include_dunder_shape = False,
            on_target_fields = "uncacheable_location_macro",
        ),
    ).to_json()
def _render_template(name, instance, template):  # pragma: no cover
    """
    Render the given Jinja2 template with the shape instance data to a file.
    Warning: this will fail to serialize any shape type that contains a
    reference to a target location, as that cannot be safely cached by buck.
    """
    # Materialize the shape as JSON, then pipe it through the template's
    # companion `<template>-render` binary.
    _json_file(name + "--data.json", instance)
    buck_genrule(
        name = name,
        cmd = "$(exe {}-render) <$(location :{}--data.json) > $OUT".format(template, name),
        antlir_rule = "user-internal",
    )
    return normalize_target(":" + name)
# Asserts that there are no "Buck target" in the shape. Contrast with
# `do_not_cache_me_json`.
#
# Converts a shape to a dict, as you would expect (field names are keys,
# values are scalars & collections as in the shape -- and nested shapes are
# also dicts).
def _as_serializable_dict(instance):
    return structs.to_dict(_safe_to_serialize_instance(instance))
# Do not use this outside of `target_tagger.bzl`. Eventually, target tagger
# should be replaced by shape, so this is meant as a temporary shim.
#
# Unlike `as_serializable_dict`, does not fail on "Buck target" fields. Instead,
# these get represented as the target path (avoiding cacheability issues).
#
# target_tagger.bzl is the original form of matching target paths with their
# corresponding `$(location)`. Ideally, we should fold this functionality
# into shape. In the current implementation, it just needs to get the raw
# target path out of the shape, and nothing else.
def _as_dict_for_target_tagger(instance):
    return structs.to_dict(_recursive_copy_transform(
        instance,
        instance.__shape__,
        struct(
            include_dunder_shape = False,
            on_target_fields = "preserve",
        ),
    ))
# Returns True iff `instance` is a shape instance of any type.
def _is_any_instance(instance):
    return structs.is_struct(instance) and hasattr(instance, "__shape__")
# Returns True iff `instance` is a `shape.new(shape, ...)`.
def _is_instance(instance, shape):
    # Guard: the second argument must itself be a shape *type*.
    if not _is_shape(shape):
        fail("Checking if {} is a shape instance, but {} is not a shape".format(
            _pretty(instance),
            _pretty(shape),
        ))
    return (
        structs.is_struct(instance) and
        getattr(instance, "__shape__", None) == shape
    )
# Converts `shape.new(foo_t, x='a', y=shape.new(bar_t, z=3))` to
# `{'x': 'a', 'y': shape.new(bar_t, z=3)}`.
#
# The primary use-case is unpacking a shape in order to construct a modified
# variant. E.g.
#
#   def new_foo(a, b=3):
#       if (a + b) % 1:
#           fail("a + b must be even, got {} + {}".format(a, b))
#       return shape.new(_foo_t, a=a, b=b, c=a+b)
#
#   def modify_foo(foo, ... some overrides ...):
#       d = shape.as_dict_shallow(foo)
#       d.update(... some overrides ...)
#       d.pop('c')
#       return new_foo(**d)
#
# Notes:
# - This dict is NOT intended for serialization, since nested shape remain
#   as shapes, and are not converted to `dict`.
# - There is no special treatment for `shape.target` fields, they remain as
#   `//target:path` strings.
# - `shape.new` is the mathematical inverse of `_as_dict_shallow`. On the
#   other hand, we do not yet provide `_as_dict_deep`. The latter would
#   NOT be invertible, since `shape` does not yet have a way of
#   recursively converting nested dicts into nested shapes.
def _as_dict_shallow(instance):
    return {
        field: getattr(instance, field)
        for field in instance.__shape__.fields
    }
# Public API bundle: type constructors, instance helpers, and Buck codegen
# entry points for the shape library.
shape = struct(
    shape = _shape,
    new = _new_shape,
    field = _field,
    dict = _dict,
    list = _list,
    tuple = _tuple,
    union = _union,
    union_t = _union_type,
    enum = _enum,
    path = _path,
    target = _target,
    loader = _loader,
    json_file = _json_file,
    python_data = _python_data,
    do_not_cache_me_json = _do_not_cache_me_json,
    render_template = _render_template,
    struct = struct,
    # There is no vanilla "as_dict" because:
    #
    # (a) There are many different possible use-cases, and one size does
    #     not fit all. The variants below handle the existing uses, but
    #     there can be more. For example, if you want to mutate an
    #     existing shape, you currently cannot do that correctly without
    #     recursively constructing a new one. We would need to provide a
    #     proper recursive "new from dict" to allow that to happen.
    #
    # (b) It's usually the wrong tool for the job / a sign of tech debt.
    #     For example, it should be possible to convert all features to
    #     shape, and make target_tagger a first-class feature of shape.
    #     At that point, both of the below uses disappear.
    as_dict_shallow = _as_dict_shallow,
    as_dict_for_target_tagger = _as_dict_for_target_tagger,
    as_serializable_dict = _as_serializable_dict,
    is_instance = _is_instance,
    is_any_instance = _is_any_instance,
    pretty = _pretty,
)
|
# Adam number check: n^2 must be the digit-reversal of (reverse(n))^2.
raw = input()
forward_square = int(raw) ** 2
reversed_square = int(raw[::-1]) ** 2
if str(forward_square) == str(reversed_square)[::-1]:
    print("Adam number")
else:
    print("Yeet")
# Copyright (c) 2017, Softbank Robotics Europe
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# List of known Softbank Robotics device models
# (NAO torso/full-body variants by hardware version, plus Pepper).
device_model_list=[
    "SOFTBANK_ROBOTICS__NAO_T2_V32",
    "SOFTBANK_ROBOTICS__NAO_T2_V33",
    "SOFTBANK_ROBOTICS__NAO_T2_V40",
    "SOFTBANK_ROBOTICS__NAO_T2_V50",
    "SOFTBANK_ROBOTICS__NAO_T14_V32",
    "SOFTBANK_ROBOTICS__NAO_T14_V33",
    "SOFTBANK_ROBOTICS__NAO_T14_V40",
    "SOFTBANK_ROBOTICS__NAO_T14_V50",
    "SOFTBANK_ROBOTICS__NAO_H21_V32",
    "SOFTBANK_ROBOTICS__NAO_H21_V33",
    "SOFTBANK_ROBOTICS__NAO_H21_V40",
    "SOFTBANK_ROBOTICS__NAO_H21_V50",
    "SOFTBANK_ROBOTICS__NAO_H25_V32",
    "SOFTBANK_ROBOTICS__NAO_H25_V33",
    "SOFTBANK_ROBOTICS__NAO_H25_V40",
    "SOFTBANK_ROBOTICS__NAO_H25_V50",
    "SOFTBANK_ROBOTICS__PEPPER_V17",
    "SOFTBANK_ROBOTICS__PEPPER_V18",
]
class PenHandler:
    """Tracks a collection of pens and draws with the currently active one."""

    # Compass headings in degrees.
    EAST_DEGREES = 0
    NORTH_DEGREES = 90
    WEST_DEGREES = 180
    SOUTH_DEGREES = 270
    # Scale factor applied to every requested draw distance.
    DRAW_MULTIPLIER = 50

    def __init__(self):
        # No pens registered yet; nothing selected or active.
        self.pens = {}
        self.pen_current = None
        self.pen_active = False

    def draw(self, distance, degrees):
        """Point the active pen at `degrees` and move it forward by
        `distance * DRAW_MULTIPLIER`; no-op when no pen is active."""
        if self.pen_active:
            self.pen_current.setheading(degrees)
            self.pen_current.forward(distance * self.DRAW_MULTIPLIER)
|
load("@scala_things//:dependencies/dependencies.bzl", "java_dependency", "scala_dependency", "scala_fullver_dependency", "make_scala_versions", "apply_scala_version", "apply_scala_fullver_version")
load("@rules_jvm_external//:defs.bzl", "maven_install")
# Scala toolchain version: epoch "2", major "13", minor "6" -> 2.13.6.
scala_versions = make_scala_versions(
    "2",
    "13",
    "6",
)
# Single source of truth for all io.grpc artifact versions below.
grpc_version = "1.42.1"
project_deps = [
    # gen
    scala_dependency("com.thesamet.scalapb", "compilerplugin", "0.11.6"),
    scala_dependency("com.thesamet.scalapb", "protoc-gen", "0.9.3"),
    java_dependency("io.grpc", "grpc-stub", grpc_version),
    java_dependency("io.grpc", "grpc-protobuf", grpc_version),
    java_dependency("io.grpc", "grpc-netty", grpc_version),
    java_dependency("io.grpc", "grpc-netty-shaded", grpc_version),
    java_dependency("io.grpc", "grpc-services", grpc_version),
    java_dependency("io.grpc", "protoc-gen-grpc-java", grpc_version),
    scala_dependency("com.thesamet.scalapb", "scalapb-runtime", "0.11.6"),
    scala_dependency("com.thesamet.scalapb", "scalapb-runtime-grpc", "0.11.6"),
    # usage
    scala_dependency("org.typelevel", "cats-effect", "3.2.9"),
    # NOTE(review): grpc-netty-shaded is already listed in the "gen" section
    # above -- confirm the duplicate entry is intentional.
    java_dependency("io.grpc", "grpc-netty-shaded", grpc_version),
    scala_dependency("co.fs2", "fs2-core", "3.1.2"),
    scala_dependency("org.typelevel", "fs2-grpc-runtime", "2.3.0"),
]
def add_scala_fullver(s):
    # Applies the full Scala version to `s` -- presumably appending the
    # `_2.13` style suffix; confirm against dependencies.bzl.
    return apply_scala_fullver_version(scala_versions, s)
# Pre-resolved @maven labels (Scala 2.13 suffix) used by the proto library.
proto_lib_deps = [
    "@maven//:org_typelevel_cats_core_2_13",
    "@maven//:org_typelevel_cats_effect_2_13",
    "@maven//:org_typelevel_cats_effect_kernel_2_13",
    "@maven//:org_typelevel_cats_effect_std_2_13",
    "@maven//:org_typelevel_cats_kernel_2_13",
    "@maven//:co_fs2_fs2_core_2_13",
]
|
def same_structure_as(original, other):
    """Return True iff `other` nests sub-lists at exactly the same positions
    as `original` (element values are ignored).

    The two arguments must have the same type and the same length;
    otherwise the result is False.
    """
    if type(original) != type(other):
        return False
    if len(original) != len(other):
        return False

    def get_structure(lis, struct=None):
        # Fix: the original used a mutable default argument (`struct=[]`),
        # which is shared across calls; default to None and allocate fresh.
        if struct is None:
            struct = []
        for i, item in enumerate(lis):
            if type(item) == list:
                # Mark a nested list at index i, then flatten its structure.
                struct.append(str(i) + ': yes')
                get_structure(item, struct)
            else:
                struct.append(i)
        return struct

    return get_structure(original) == get_structure(other)
if __name__ == '__main__':
    # Demo cases from the kata description; one boolean printed per pair.
    demo_cases = [
        ([1, 1, 1], [2, 2, 2]),
        ([1, [1, 1]], [2, [2, 2]]),
        ([1, [1, 1]], [[2, 2], 2]),
        ([1, [1, 1]], [2, [2]]),
        ([[[], []]], [[[], []]]),
        ([[[], []]], [[1, 1]]),
        ([1, [[[1]]]], [2, [[[2]]]]),
        ([], 1),
        ([], {}),
        ([1, '[', ']'], ['[', ']', 1]),
    ]
    for left, right in demo_cases:
        print(same_structure_as(left, right))
|
# REST endpoint path templates keyed by resource name; the {auction_id},
# {contract_id} and {item_id} placeholders are filled in by callers
# (e.g. via str.format).
ENDPOINTS = {
    'auction': '/auctions/{auction_id}',
    'contract': '/auctions/{auction_id}/contracts/{contract_id}',
    'contracts': '/auctions/{auction_id}/contracts',
    'item': '/auctions/{auction_id}/items/{item_id}',
    'items': '/auctions/{auction_id}/items',
}
|
# Package metadata: version, author, and the submodules exported by
# `from <package> import *`.
__version__ = '1.0.0'
__author__ = 'Steven Huang'
__all__ = ["file", "basic", "geo_math", "geo_transformation"]
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.common.hardware
Cluster configuration
You can specify here what hardware will be emulated on this cluster and all
supported disk types/formats. Some variables have python-dictionary format.
Each element is sub-dictionary: <id>: {'param_name': 'param_value', ...}.
Usually the required parameters are name and enabled. Any additional
required parameters are noted in a comment.
"""
## Network devices available for Virtual Machines. Add own if necessary
# (virtualizator has to support it!)
network_devices = {
    'rtl8139': 0,
    'virtio': 1,
    'ne2k_pci': 2,
    'e1000': 3
}
## Emulated video devices for Virtual Machines. Add own if necessary
# (virtualizator has to support it!)
video_devices = {
    'cirrus': 0,
    'vga': 1
}
## Disk controllers for virtual machines.
disk_controllers = {
    'scsi': 0,
    'virtio': 1,
    'ide': 2,
    'sata': 3,
    'usb': 4
}
# Controllers that support live (hot) attaching of disks.
live_attach_disk_controllers = ['virtio', 'usb']
# Disk filesystems (must be supported by ResourceManager for this CM!)
# Required parameters:
# - name
# - command - formatting program. %%s will be replaced with formatting filename
# - enabled
# NOTE(review): the entries below are plain name -> id ints, not dicts with
# name/command/enabled keys as the comment above describes -- the comment
# looks stale; commands live in disk_format_commands instead.
disk_filesystems = {
    'raw': 0,
    'fat32': 2,
    'ext4': 5,
    'reiserfs': 6,
    'xfs': 7,
    'ntfs': 8
}
# Filesystem name -> mkfs command used to format a volume ('' = no format).
disk_format_commands = {
    'raw': '',
    'ntfs-full': '/sbin/mkfs.ntfs -F',
    'fat32': '/sbin/mkfs.vfat',
    'ext2': '/sbin/mkfs.ext2 -F',
    'ext3': '/sbin/mkfs.ext3 -F',
    'ext4': '/sbin/mkfs.ext4 -F',
    'reiserfs': '/sbin/mkfs.reiserfs -f -q',
    'xfs': '/sbin/mkfs.xfs -f',
    'ntfs': '/sbin/mkfs.ntfs -Q -F',
}
# Reverse lookup tables (numeric id -> name).  Fixed to use `dict.items()`,
# which works on both Python 2 and 3; the previous `iteritems()` is
# Python-2-only and raises AttributeError on Python 3.
video_devices_reversed = dict((v, k) for k, v in video_devices.items())
disk_controllers_reversed = dict((v, k) for k, v in disk_controllers.items())
network_devices_reversed = dict((v, k) for k, v in network_devices.items())
disk_filesystems_reversed = dict((v, k) for k, v in disk_filesystems.items())
#!/usr/bin/env python
# coding: utf-8

# Read n and print the square of every even number in 1..n (inclusive).
limit = int(input())
for even in range(2, limit + 1, 2):
    print('{}^2 = {}'.format(even, even ** 2))
|
# Runtime: 48 ms
# Memory: 13.4 MB
# Approach: if n is odd, emit n 'a's; if n is even, emit n-1 'a's plus one 'b'
class Solution:
    def generateTheString(self, n: int) -> str:
        """Build a length-n lowercase string in which every character
        occurs an odd number of times."""
        if n % 2 == 1:
            # Odd n: n copies of 'a' already satisfies the constraint.
            return "a" * n
        # Even n: n-1 'a's (odd count) plus a single 'b' (odd count).
        return "a" * (n - 1) + "b"
|
class Solution:
    def subdomainVisits(self, cpdomains):
        """
        :type cpdomains: List[str]
        :rtype: List[str]

        Build a trie of domain labels (inserted right-to-left, so "com" is
        the top level), accumulating visit counts at every suffix level,
        then emit "count domain" strings via a pre-order traversal.
        """
        class Node:
            # One trie node per domain label; `visits` aggregates every
            # count that passes through this suffix.
            def __init__(self):
                self.visits = 0
                self.children = {}
        def traverse(name, node, output):
            # Pre-order walk; `name` is the domain suffix accumulated so far
            # (empty only for the synthetic root).
            if not node:
                return
            output.append(str(node.visits) + " " + name)
            for child_name, child in node.children.items():
                if name:
                    traverse(child_name + '.' + name, child, output)
                else:
                    traverse(child_name, child, output)
        root = Node()
        for entry in cpdomains:
            visits, domains = entry.split()
            node = root
            # Walk labels from TLD inward, adding this entry's count to
            # every suffix level (e.g. com, mail.com, google.mail.com).
            for path in reversed(domains.split('.')):
                if path not in node.children:
                    node.children[path] = Node()
                node.children[path].visits += int(visits)
                node = node.children[path]
        output = []
        traverse("", root, output)
        # Drop the synthetic root's own "0 " entry.
        return output[1:]
|
# Accept an integer n and compute n + nn + nnn, where nn and nnn are the
# decimal digits of n concatenated two and three times respectively.
num = int(input("Input an integer : "))
digits = str(num)
total = int(digits) + int(digits * 2) + int(digits * 3)
print(total)
|
'''
Example of models and the GPU distribution sampler.
To run the models demo, download the data files 'mnist_gray.mat' & 'TREC.pkl'
and put them under pydpm.example.data.
data url: https://1drv.ms/u/s!AlkDawhaUUBWtHRWuNESEdOsDz7V?e=LQlGLW
'''
|
class Solution(object):
    def pivotIndex(self, nums):
        """
        Return the leftmost index whose left-side sum equals its
        right-side sum, or -1 when no pivot exists.
        :type nums: List[int]
        :rtype: int
        """
        total = sum(nums)
        left = 0
        for idx, value in enumerate(nums):
            # Pivot condition: left == total - left - value.
            if 2 * left + value == total:
                return idx
            left += value
        return -1
|
class DefaultConfig(object):
    """Base configuration with conservative defaults (debug and testing
    both off). NOTE(review): presumably consumed as a Flask-style config
    object -- confirm against the caller."""
    DEBUG = False
    TESTING = False
class TestConfig(DefaultConfig):
    """Configuration used when running the test suite."""
    TESTING = True
class DebugConfig(DefaultConfig):
    """Configuration for development with debugging enabled."""
    DEBUG = True
|
def mac2ipv6(mac):
    """Convert a colon-separated MAC address into its EUI-64 link-local
    IPv6 address string, e.g. '80:4a:14:69:b1:5d' ->
    'fe80::824a:14ff:fe69:b15d/64'.
    """
    # only accept MACs separated by a colon
    parts = mac.split(":")
    # modify parts to match IPv6 value: splice in the EUI-64 ff:fe marker
    parts.insert(3, "ff")
    parts.insert(4, "fe")
    # Flip the universal/local bit of the first byte.
    # BUG FIX: use %02x (was %x) so a first byte below 0x10 keeps its
    # leading zero; previously e.g. 0x04 ^ 2 rendered as '6', making the
    # first IPv6 group one hex digit short. Matches ipv62mac's formatting.
    parts[0] = "%02x" % (int(parts[0], 16) ^ 2)
    # format output: join byte pairs into 16-bit IPv6 groups
    ipv6Parts = []
    for i in range(0, len(parts), 2):
        ipv6Parts.append("".join(parts[i:i+2]))
    ipv6 = "fe80::%s/64" % (":".join(ipv6Parts))
    return ipv6
def ipv62mac(ipv6):
    """Recover the MAC address embedded in an EUI-64 link-local IPv6
    address (the inverse of mac2ipv6)."""
    # Drop a trailing "/prefix" suffix if one is present.
    addr = ipv6.split("/", 1)[0]
    groups = addr.split(":")
    # The MAC lives in the last four 16-bit groups; zero-pad each to four
    # hex digits and split it into its two bytes.
    macParts = []
    for group in groups[-4:]:
        padded = group.zfill(4)
        macParts.append(padded[:2])
        macParts.append(padded[2:])
    # Flip the universal/local bit back and drop the ff:fe marker bytes.
    macParts[0] = "%02x" % (int(macParts[0], 16) ^ 2)
    del macParts[4]
    del macParts[3]
    return ":".join(macParts)
# print(mac2ipv6('80:4a:14:69:b1:5d'))
# Demo: recover the MAC embedded in an IPv6 address.
print(ipv62mac('fe80::417:2066:cc4f:eff9'))
# NOTE(review): the expected outputs below do not correspond to the input
# above -- presumably leftovers from earlier runs; verify before relying
# on them.
# 1e:4c:54:a4:5a:77
# 80:4a:14:69:b1:5d
# Module identifier used by the control-test framework.
name = "sample_module"
# NOTE(review): because the assignment above comes first, the string below
# is a plain expression statement, not the module docstring; move it to the
# first line of the file if a real docstring is wanted.
"""sample_module - Sample control test module demonstrating required functions
"""
def match(request):
    """match(request) - Match conditions in order for module to be run.

    Returns True only for PUT requests.
    """
    return request.method == "PUT"
def run(request):
    """run(request) - Execute module and return a status string."""
    result = 'sample module complete'
    return result
|
############################
# Patch to implement pcNN for VASP ver. 5.4.4. written by Ryo Nagai
# email: r-nag@issp.u-tokyo.ac.jp
#
# This add-on is only available to owners of a valid VASP license.
# This add-on is distributed under an agreement from VASP Software GmbH.
# - This add-on comes without any warranty.
# - Commercial use and patent use are prohibited.
# - If the add-on is modified by inclusion of additional source code lines of the Add-on to the VASP software code,
# the developer must send the revised version of the add-on to VASP Software GmbH for further review.
# 1. Backup vasp/src/metagga.F.
# 2. Run this patch. You are asked the path to metagga.F.
# 3. metagga_patched.F is generated. Replace metagga.F with it.
# 4. Compile VASP.
# ## Citing this XC functional
# @misc{nagai2021machinelearningbased,
# title={Machine-Learning-Based Exchange-Correlation Functional with Physical Asymptotic Constraints},
# author={Ryo Nagai and Ryosuke Akashi and Osamu Sugino},
# year={2021},
# eprint={2111.15593},
# archivePrefix={arXiv},
# primaryClass={cond-mat.mtrl-sci}
# }
############################
def patch():
    """Interactively patch VASP's metagga.F for pcNN support.

    Prompts for the path to metagga.F, inserts the Fortran fragments
    p1-p6 at their marker lines (sequentially, so later searches see the
    already-patched text, exactly as the original hand-written loops did),
    and writes the result to metagga_patched.F.
    """
    print("input path to metagga.F:")
    target_file = input()
    output = "metagga_patched.F"
    with open(target_file, "r") as f:
        lines = [s.strip("\n") for s in f.readlines()]
    # (marker substring, line offset from the first matching line, fragment).
    # The offsets reproduce the original insert positions exactly.
    insertions = [
        ("MODULE setxcmeta", 0, p1),
        ("=='PBE') THEN", 4, p2),
        ("! PBE, for testing mainly", 3, p3),
        ("END MODULE metalib", -3, p4),
        ("SUBROUTINE XC_META_", 1, p5),
        ("SUBROUTINE METAGGASPIN(&", 22, p6),
    ]
    for marker, offset, fragment in insertions:
        lines = _insert_at_marker(lines, marker, offset, fragment)
    with open(output, "w") as fo:
        fo.write('\n'.join(lines))


def _insert_at_marker(lines, marker, offset, fragment):
    """Return `lines` with `fragment` inserted as one element at
    (index of first line containing `marker`) + offset.

    Like the original per-marker loops, the list is returned unchanged
    when no line matches.
    """
    for i, line in enumerate(lines):
        if marker in line:
            pos = i + offset
            return lines[:pos] + [fragment] + lines[pos:]
    return lines
p1 = """ module readNN
integer, PARAMETER :: hidden=100
real*8,save, dimension(:,:) :: w1(hidden,2), w2(hidden,hidden), w3(hidden,hidden), w4(1,hidden)
real*8,save, dimension(:,:) :: w1c(hidden,2), w2c(hidden,hidden), w3c(hidden,hidden*2), w4c(1,hidden)
real*8,save, dimension(:):: b1(hidden), b2(hidden), b3(hidden),b4(1)
real*8,save, dimension(:):: b1c(hidden), b2c(hidden), b3c(hidden),b4c(1)
contains
subroutine loadNN(w1, w2, w3, w4, b1, b2, b3, b4, &
& w1c, w2c, w3c, w4c, b1c, b2c, b3c, b4c)
real*8, dimension(:,:) :: w1, w2, w3, w4
real*8, dimension(:,:) :: w1c, w2c, w3c, w4c
real*8 :: b1(:), b2(:), b3(:), b4(:)
real*8 :: b1c(:), b2c(:), b3c(:), b4c(:)
integer :: n, m, i
n=hidden
m=2
open(80, file='nnparams/w1.txt')
do i =1,n
read(80,*) (w1(i,j), j=1,m)
end do
close(80)
n=hidden
m=hidden
open(81, file='nnparams/w2.txt')
do i =1,n
read(81,*) (w2(i,j), j=1,m)
end do
close(81)
n=hidden
m=hidden
open(82, file='nnparams/w3.txt')
do i =1,n
read(82,*) (w3(i,j), j=1,m)
end do
close(82)
n=1
m=hidden
open(83, file='nnparams/w4.txt')
do i =1,n
read(83,*) (w4(i,j), j=1,m)
end do
close(83)
n=hidden
open(84, file='nnparams/b1.txt')
do i =1,n
read(84,*) b1(i)
end do
close(84)
n=hidden
open(85, file='nnparams/b2.txt')
do i =1,n
read(85,*) b2(i)
end do
close(85)
n=hidden
open(86, file='nnparams/b3.txt')
do i =1,n
read(86,*) b3(i)
end do
close(86)
n=1
open(87, file='nnparams/b4.txt')
do i =1,n
read(87,*) b4(i)
end do
close(87)
n=hidden
m=2
open(88, file='nnparams/w1c.txt')
do i =1,n
read(88,*) (w1c(i,j), j=1,m)
end do
close(88)
n=hidden
m=hidden
open(89, file='nnparams/w2c.txt')
do i =1,n
read(89,*) (w2c(i,j), j=1,m)
end do
close(89)
n=hidden
m=hidden*2
open(90, file='nnparams/w3c.txt')
do i =1,n
read(90,*) (w3c(i,j), j=1,m)
end do
close(90)
n=1
m=hidden
open(91, file='nnparams/w4c.txt')
do i =1,n
read(91,*) (w4c(i,j), j=1,m)
end do
close(91)
n=hidden
open(92, file='nnparams/b1c.txt')
do i =1,n
read(92,*) b1c(i)
end do
close(92)
n=hidden
open(93, file='nnparams/b2c.txt')
do i =1,n
read(93,*) b2c(i)
end do
close(93)
n=hidden
open(94, file='nnparams/b3c.txt')
do i =1,n
read(94,*) b3c(i)
end do
close(94)
n=1
open(95, file='nnparams/b4c.txt')
do i =1,n
read(95,*) b4c(i)
end do
close(95)
end subroutine loadNN
end module readNN
"""
p2 = """ ELSEIF (SZNAM(1:6)=='NNMGGA') THEN
ID_METAGGA=777
LMETA_NEEDS_POT=.TRUE.
LMETA_NEEDS_MU=.TRUE.
call loadNN(w1, w2, w3, w4, b1, b2, b3, b4, &
&w1c, w2c, w3c, w4c, b1c, b2c, b3c, b4c)
"""
p3 = """ ELSEIF (ID_METAGGA==777) THEN
! NN-metaGGA
CALL NNMGGA_XC(&
RHOUP,RHODW,ABSNABUP,ABSNABDW,ABSNAB,TAUUP,TAUDW,&
Exc_NNM,VXD1,VXDD1,VXD2,VXDD2,AMUXD1,AMUXD2)
! Sum everything
EXC=Exc_NNM/(RHOUP+RHODW)
dEXCdRHOup=VXD1
dEXCdRHOdw=VXD2
dEXCdABSNABup=VXDD1
dEXCdABSNABdw=VXDD2
dEXCdTAUup=AMUXD1
dEXCdTAUdw=AMUXD2
! write(*,*) RHOUP,RHODW,ABSNABUP,ABSNABDW,ABSNAB,TAUUP,TAUDW, &
!& EXC,dEXCdRHOup,dEXCdRHOdw,dEXCdABSNABup,dEXCdABSNABdw,dEXCdTAUup,dEXCdTAUdw
! from Hartree to Rydberg
EXC=EXC*2
dEXCdRHOup=dEXCdRHOup*2
dEXCdRHOdw=dEXCdRHOdw*2
dEXCdABSNABup=dEXCdABSNABup*2
dEXCdABSNABdw=dEXCdABSNABdw*2
dEXCdTAUup=dEXCdTAUup*2
dEXCdTAUdw=dEXCdTAUdw*2
"""
p4 = """
subroutine NNMGGA_XC(&
RU,RD,DRU,DRD,DRT,TAUU,TAUD,&
Exc_NNM,VXCD1,VXCDD1,VXCD2,VXCDD2,AMUXCD1,AMUXCD2)
use :: readNN
IMPLICIT None
integer :: n,m,i,j
REAL(q) Exc_NNM, RU,RD,DRU,DRD,DRT,TAUU,TAUD,RHO
REAL(q) exc, fxc, fx_u, fx_d, fc, DRUDcos
REAL(q) VXCD1,VXCD2,VXCDD1,VXCDD2,AMUXCD1,AMUXCD2
REAL(q) Ex_SCAN1,VXD11,VXDD11,VXD12,VXDD12,AMUXD11,AMUXD12
REAL(q) Ex_SCAN2,VXD21,VXDD21,VXD22,VXDD22,AMUXD21,AMUXD22
REAL(q) Ec_SCAN,VCD1,VCDD1,VCD2,VCDD2,AMUCD1,AMUCD2
real*8, dimension(:) :: x(7)
real*8, dimension(:,:) :: dfx_dnu(1,4), dfx_dnd(1,4), dfc_dn(1,4)
real*8, dimension(:) :: t(4), logt(4), g1(hidden), h1(hidden), g2(hidden), h2(hidden), g3(hidden), h3(hidden), g4(1), h4(1), eunif_x(7)
real*8, dimension(:) :: fxc_g4(1), fxc_g3(hidden), fxc_g2(hidden), fxc_g1(hidden), fxc_h3(hidden),fxc_h2(hidden), fxc_h1(hidden)
real*8, dimension(:) :: fxc_logt(4), fxc_t(4), fxc_logt_t(4), t0_x(7), t1_x(7), t2_x(7), t3_x(7), fxc_t_x(7), fxc_x(7)
real*8, dimension(:,:):: t_x(4,7)
real*8, PARAMETER :: THRD=1./3.,THRD4=4./3.,THRD5=5./3.
real*8, PARAMETER :: a = 4.0, sles=0.2
real*8, dimension(:) :: backlogt(4)
! SCAN
! Exchange
CALL VSCANx(&
& RU,RU,DRU,DRU,DRU*2,TAUU,TAUU, &
& Ex_SCAN1,VXD11,VXDD11,VXD12,VXDD12,AMUXD11,AMUXD12)
Ex_SCAN1=Ex_SCAN1/(RU+RU)
CALL VSCANx(&
& RD,RD,DRD,DRD,DRD*2,TAUD,TAUD, &
& Ex_SCAN2,VXD21,VXDD21,VXD22,VXDD22,AMUXD21,AMUXD22)
Ex_SCAN2=Ex_SCAN2/(RD+RD)
! Correlation
CALL VSCANc(&
& RU,RD,DRU,DRD,DRT,TAUU,TAUD, &
& Ec_SCAN,VCD1,VCDD1,VCD2,VCDD2,AMUCD1,AMUCD2)
Ec_SCAN=Ec_SCAN/(RU+RD)
! Sum everything
! EXC_SCAN=(Ex_SCAN+Ec_SCAN)/(RU+RD)
! dEXCdRHOup_SCAN=VXD1+VCD1
! dEXCdRHOdw_SCAN=VXD2+VCD2
! dEXCdABSNABup_SCAN=VXDD1+VCDD1
! dEXCdABSNABdw_SCAN=VXDD2+VCDD2
! dEXCdTAUup_SCAN=AMUXD1+AMUCD1
! dEXCdTAUdw_SCAN=AMUXD2+AMUCD2
!write(*,*) RU,RD,DRU,DRD,DRT,TAUU,TAUD, &
!&Exc_SCAN,dEXCdRHOup_SCAN,dEXCdRHOdw_SCAN,dEXCdABSNABup_SCAN,dEXCdABSNABdw_SCAN,dEXCdTAUup_SCAN,dEXCdTAUdw_SCAN
call calc_x(RU*2,DRU*2,TAUU*2, fx_u, dfx_dnu)
call calc_x(RD*2,DRD*2,TAUD*2, fx_d, dfx_dnd)
call calc_c(RU, RD, DRT, TAUU+TAUD, fc, dfc_dn)
exc=0.5*(2.*RU*Ex_SCAN1*fx_u+2.*RD*Ex_SCAN2*fx_d)/(RU+RD)
exc=exc+fc*Ec_SCAN
Exc_NNM=exc*(RU+RD)
VXCD1=0.5*(VXD11+VXD12)*fx_u+2*RU*Ex_SCAN1*dfx_dnu(1,1)
VXCD1=VXCD1+VCD1*fc+(RU+RD)*Ec_SCAN*dfc_dn(1,1)
VXCD2=0.5*(VXD21+VXD22)*fx_d+2*RD*Ex_SCAN2*dfx_dnd(1,1)
VXCD2=VXCD2+VCD2*fc+(RU+RD)*Ec_SCAN*dfc_dn(1,2)
DRUDcos=0.5*(DRT**2.-DRU**2.-DRD**2.)
!TODO 下の式で正しいがなぜ合うのかわからない(exchange)
VXCDD1=0.5*(VXDD11*2.)*fx_u+RU*Ex_SCAN1*(dfx_dnu(1,3)*2.)
VXCDD1=VXCDD1+VCDD1*fc+(RU+RD)*Ec_SCAN*(dfc_dn(1,3)*0.5/DRT*(2.*DRU+2*DRUDcos/DRU))
VXCDD2=0.5*(VXDD21*2.)*fx_d+RD*Ex_SCAN2*(dfx_dnd(1,3)*2.)
VXCDD2=VXCDD2+VCDD2*fc+(RU+RD)*Ec_SCAN*(dfc_dn(1,3)*0.5/DRT*(2.*DRD+2*DRUDcos/DRD))
AMUXCD1=0.5*(AMUXD11+AMUXD12)*fx_u+2*RU*Ex_SCAN1*dfx_dnu(1,4)
AMUXCD1=AMUXCD1+AMUCD1*fc+(RU+RD)*Ec_SCAN*dfc_dn(1,4)
AMUXCD2=0.5*(AMUXD21+AMUXD22)*fx_d+2*RD*Ex_SCAN2*dfx_dnd(1,4)
AMUXCD2=AMUXCD2+AMUCD2*fc+(RU+RD)*Ec_SCAN*dfc_dn(1,4)
end subroutine NNMGGA_XC
subroutine calc_x(n, dn, tau, f, df_dn)
use :: readNN
implicit none
real*8, PARAMETER :: PI =3.141592653589793238
integer i,j,k,l
integer, PARAMETER :: nconds=2, nt=2, nsize=4
real*8, PARAMETER :: THRD=1./3.,THRD2=2./3.,THRD4=4./3.,THRD5=5./3., delta_x=1.
real*8 :: n, dn, tau
real*8 :: s, tmpval, csum, unif, iunif, tauunif, itauunif, dunif_dn, ft
real*8, dimension(:) :: t(nt), f0(nconds), cs(nconds)
real*8, dimension(:,:):: t0(nconds, nt), f_t0(nconds)
real*8, dimension(:,:):: dc_dt(nconds,nt)
real*8, dimension(:,:):: dft0_dt0(nconds,nt), dft_dt(1,nt)
real*8, dimension(:):: df0_dt(nconds,nt)
real*8, dimension(:)::dt_dn(nt, nsize)
real*8, dimension(:,:,:):: dc_dt0(nconds, nconds, nt), dt0_dt(nconds, nt, nt)
real*8:: f
real*8, dimension(:,:):: df_dn(1, nsize)
df0_dt=0.
unif=(n+1.e-7)**THRD
iunif=1.0/unif
s=(dn**2.0+0.1**(56.0/3.0))**0.5/unif**4.
t(1)=tanh(s/1.0)
tauunif=2.871234000188191*unif**5.
itauunif=1.0/tauunif
t(2)=tanh((tau*itauunif-1.0)/1.0)
dunif_dn=THRD*(n+1.e-7)**(-THRD2)!correct
tmpval=(1.-t(1))*(1.+t(1))
dt_dn(1,1)=tmpval*(-4.)*s*iunif*dunif_dn
dt_dn(1,2)=0
dt_dn(1,3)=tmpval*0.5*s/(dn**2.0+0.1**(56.0/3.0))*2.0*dn
dt_dn(1,4)=0
tmpval=(1.-t(2))*(1.+t(2))
dt_dn(2,1)=tmpval*(-5.)*tau*itauunif*iunif*dunif_dn
dt_dn(2,2)=0
dt_dn(2,3)=0
dt_dn(2,4)=tmpval*1.0/tauunif
call NN_x(t, ft, dft_dt)
!uniform electron gas
t0(1,1)=0
t0(1,2)=0
f0(1)=1
!f vanish like s^(-1/2) as s goes to inf
t0(2,1)=1
t0(2,2)=t(2)
f0(2)=1
dt0_dt=0.
dt0_dt(2,2,2)=1.
do i =1,2
call NN_x(t0(i,:), f_t0(i), dft0_dt0(i,:))
end do
call construct_c(t, t0, cs, dc_dt, dc_dt0, delta_x)
csum=0.
f=0.
do i =1,2
f=f+(ft-f_t0(i)+f0(i))*cs(i)
csum=csum+cs(i)
end do
f=f/csum
!!!!!!backprop!!!!!!
call backpropagation(nconds, nt, f, ft, f_t0, f0, cs, csum, dft_dt, dft0_dt0, &
&df0_dt, dc_dt, dc_dt0, dt0_dt, dt_dn, df_dn)
call shifted_softplus1(f, df_dn)
!correct: dft_dn, dt0_dn
! write(*,*) f
! write(*,*) df_dn(1,1),df_dn(1,2),df_dn(1,3),df_dn(1,4) !TODO 値が合わないので修正
!memo: df_dt0_dt と df_dft_dt がほぼ同じ値なため桁落ちが起きる
! write(*,*) (df_dt0_dt(1,i), i=1,2)
! write(*,*) (dft0_dt(2,i), i=1,2)#これはあう
! write(*,*) dft_dt(1,1)*csum/cs(2),dft_dn(1,2)*csum/cs(2),dft_dn(1,3)*csum/cs(2),dft_dn(1,4)*csum/cs(2)
!write(*,*) dft0_dt0(1,1),dft0_dt0(1,2),dft0_dt0(2,1),dft0_dt0(2,2)!合格
! write(*,*) f_t0(1,1)
!write(*,*) dt0_dt(2,2,2)*dt_dn(2,1), dt0_dt(2,2,2)*dt_dn(2,2), dt0_dt(2,2,2)*dt_dn(2,3), dt0_dt(2,2,2)*dt_dn(2,4)
!write(*,*) dt_dn(1,1),dt_dn(1,2),dt_dn(1,3),dt_dn(1,4)
! write(*,*) dt_dn(2,1),dt_dn(2,2),dt_dn(2,3),dt_dn(2,4)
end subroutine calc_x
subroutine calc_c(nup, ndw, dn, tau, fc, dfc_dn)
use :: readNN
implicit none
real*8, PARAMETER :: PI =3.141592653589793238
integer i,j,k,l
integer, PARAMETER :: nconds=3, nt=4, nsize=4
real*8, PARAMETER :: THRD=1./3.,THRD2=2./3.,THRD4=4./3.,THRD5=5./3., delta_c=1.
real*8 :: nup, ndw, n, dn, tau, div, ft
real*8 :: s, tmpval, tmpval2, csum, unif, iunif, tauunif, itauunif, dunif_dn, tmp1, tmp2
real*8, dimension(:) :: t(nt), f0(nconds), cs(nconds), t_low(nt), t_low0(nt), tmp3(1,nt), tmp4(1,nt)
real*8 :: f_low, f_low0
real*8, dimension(:,:):: t0(nconds, nt), f_t0(nconds)
real*8, dimension(:,:):: dc_dt(nconds,nt), df_dft(1,1)
real*8, dimension(:,:):: dft0_dt0(nconds,nt), dft_dt(1,nt)
real*8, dimension(:):: df0_dt(nconds,nt)
real*8, dimension(:):: dfdt_low(1,nt), dfdt_low0(1,nt)
real*8, dimension(:)::dt_dn(nt, nsize), dt_low_dt(nt, nt), dt_low0_dt(nt, nt)
real*8, dimension(:,:,:):: dc_dt0(nconds, nconds, nt), dt0_dt(nconds, nt, nt)
real*8:: fc
real*8, dimension(:,:):: dfc_dn(1, nsize)
real*8, dimension(:,:):: dc_dn(nconds,nt)
df0_dt=0.
dt_low_dt=0.
dt_low0_dt=0.
dt0_dt=0.
n=nup+ndw
unif=(n+1.e-7)**THRD
iunif=1.0/unif
t(1)=tanh(unif/1.0)
div=1./(n+1.e-7)
t(2)=(1+(nup-ndw)*div)**THRD4+(1-(nup-ndw)*div)**THRD4
t(2)=tanh(t(2)*0.5/1.0)
s=(dn**2.0+0.1**(56.0/3.0))**0.5/unif**4.
t(3)=tanh(s/1.0)
tauunif=5.921762640653615*unif**5.
itauunif=1.0/tauunif
t(4)=tanh((tau*itauunif-1.0)/1.0)
dunif_dn=THRD*(n+1.e-7)**(-THRD2)!correct
tmpval=(1.-t(1))*(1.+t(1))
dt_dn(1,1)=tmpval*dunif_dn
dt_dn(1,2)=tmpval*dunif_dn
dt_dn(1,3)=0
dt_dn(1,4)=0
tmpval=(1.-t(2))*(1.+t(2))
tmpval2=div*div
tmp1=THRD4*(1+(nup-ndw)*div)**THRD
tmp2=THRD4*(1-(nup-ndw)*div)**THRD
dt_dn(2,1)=tmp1*((n+1.e-7)*1.-1.*(nup-ndw))*tmpval2+tmp2*((n+1.e-7)*(-1.)-1.*(ndw-nup))*tmpval2
dt_dn(2,1)=tmpval*dt_dn(2,1)*0.5
dt_dn(2,2)=tmp1*((n+1.e-7)*(-1.)-1.*(nup-ndw))*tmpval2+tmp2*((n+1.e-7)*1.-1.*(ndw-nup))*tmpval2
dt_dn(2,2)=tmpval*dt_dn(2,2)*0.5
dt_dn(2,3)=0
dt_dn(2,4)=0
tmpval=(1.-t(3))*(1.+t(3))
tmpval2=tmpval*(-4.)*s*iunif*dunif_dn
dt_dn(3,1)=tmpval2
dt_dn(3,2)=tmpval2
dt_dn(3,3)=tmpval*0.5*s/(dn**2.0+0.1**(56.0/3.0))*2.0*dn
dt_dn(3,4)=0
tmpval=(1.-t(4))*(1.+t(4))
tmpval2=tmpval*(-5.)*tau*itauunif*iunif*dunif_dn
dt_dn(4,1)=tmpval2
dt_dn(4,2)=tmpval2
dt_dn(4,3)=0
dt_dn(4,4)=tmpval*1.0*itauunif
call NN_c(t, ft, dft_dt)
!uniform electron gas
t0(1,1)=t(1)
t0(1,2)=t(2)
t0(1,3)=0
t0(1,4)=0
f0(1)=1.
!f vanish like s^(-1/2) as s goes to inf
t0(2,1)=0
t0(2,2)=t(2)
t0(2,3)=t(3)
t0(2,4)=t(4)
t_low(1)=t(1)
t_low(2)=0.
t_low(3)=t(3)
t_low(4)=t(4)
dt_low_dt(1,1)=1.
dt_low_dt(3,3)=1.
dt_low_dt(4,4)=1.
t_low0(1)=0.
t_low0(2)=0.
t_low0(3)=t(3)
t_low0(4)=t(4)
dt_low0_dt(3,3)=1.
dt_low0_dt(4,4)=1.
call NN_c(t_low, f_low, dfdt_low(1,:))
call NN_c(t_low0, f_low0, dfdt_low0(1,:))
f0(2)=f_low-f_low0+1.
tmp3=matmul(dfdt_low, dt_low_dt)
tmp4=matmul(dfdt_low0, dt_low0_dt)
do i=1,4
df0_dt(2,i)=tmp3(1,i)-tmp4(1,i)
end do
t0(3,1)=1.
t0(3,2)=t(2)
t0(3,3)=t(3)
t0(3,4)=t(4)
f0(3)=1.
dt0_dt(1,1,1)=1.
dt0_dt(1,2,2)=1.
dt0_dt(2,2,2)=1.
dt0_dt(2,3,3)=1.
dt0_dt(2,4,4)=1.
dt0_dt(3,2,2)=1.
dt0_dt(3,3,3)=1.
dt0_dt(3,4,4)=1.
do i =1,nconds
call NN_c(t0(i,:), f_t0(i), dft0_dt0(i,:))
end do
call construct_c(t, t0, cs, dc_dt, dc_dt0, delta_c)
fc=0.
csum=0.
do i =1,nconds
fc=fc+(ft-f_t0(i)+f0(i))*cs(i)
csum=csum+cs(i)
end do
fc=fc/csum
call backpropagation(nconds, nt, fc, ft, f_t0, f0, cs, csum, dft_dt, dft0_dt0, &
&df0_dt, dc_dt, dc_dt0, dt0_dt, dt_dn, dfc_dn)
call shifted_softplus1(fc, dfc_dn)
end subroutine calc_c
function vmul(x,y)
real*8 :: x(:),y(:)
real*8 :: vmul(size(x))
do i=1, size(y)
vmul(i)=x(i)*y(i)
end do
end function vmul
function vadd(x,y)
real*8 :: x(:),y(:)
real*8 :: vadd(size(x))
do i=1, size(y)
vadd(i)=x(i)+y(i)
end do
end function vadd
function shifted_softplus0(x)
real*8, PARAMETER :: l2 =0.6931471805599453
real*8 :: x(:)
real*8 :: shifted_softplus0(size(x))
do i=1, size(x)
shifted_softplus0(i)=1.0/l2*log(1.0+exp(2.0*l2*x(i)))
end do
end function shifted_softplus0
function back_shifted_softplus0(x)
real*8, PARAMETER :: l2 =0.6931471805599453
real*8 :: x(:), tmp
real*8 :: back_shifted_softplus0(size(x))
do i=1, size(x)
tmp=exp(2.0*l2*x(i))
back_shifted_softplus0(i)=2.0*tmp/(1.0+tmp)
end do
end function back_shifted_softplus0
subroutine shifted_softplus1(f, df_dn)
implicit none
real*8, PARAMETER :: l2 =0.6931471805599453
real*8 :: f, df_dn(:,:), tmp, tmp2
integer :: i
tmp=exp(2.0*l2*(f-1.0))
tmp2=2.0*tmp/(1.0+tmp)
f=1.0/l2*log(1.0+tmp)
do i=1, size(df_dn(1,:))
df_dn(1,i)=tmp2*df_dn(1,i)
end do
end subroutine shifted_softplus1
subroutine NN_x(t, ft, dft_dt)
use :: readNN
implicit none
real*8, dimension(:) :: t(2)
real*8, dimension(:,:) :: dft_dt(1,2)
real*8 ::ft
real*8, dimension(:) :: g1(hidden), h1(hidden), g2(hidden), h2(hidden), g3(hidden), h3(hidden), g4(1)
real*8, dimension(:):: dft_dh3(hidden), dft_dg3(hidden), dft_dh2(hidden),dft_dg2(hidden),dft_dh1(hidden),dft_dg1(hidden)
g1=vadd(matmul(t, transpose(w1)),b1)
h1=shifted_softplus0(g1)
g2=vadd(matmul(h1, transpose(w2)),b2)
h2=shifted_softplus0(g2)
g3=vadd(matmul(h2, transpose(w3)),b3)
h3=shifted_softplus0(g3)
g4=vadd(matmul(h3, transpose(w4)),b4)
ft=g4(1)
dft_dh3=w4(1,:) !matmul(dft_dg4,w4)
dft_dg3=vmul(back_shifted_softplus0(g3),dft_dh3)
dft_dh2=matmul(dft_dg3,w3)
dft_dg2=vmul(back_shifted_softplus0(g2),dft_dh2)
dft_dh1=matmul(dft_dg2,w2)
dft_dg1=vmul(back_shifted_softplus0(g1),dft_dh1)
dft_dt(1,:)=matmul(dft_dg1,w1)
end subroutine NN_x
subroutine NN_c(t, ft, dft_dt)
use :: readNN
implicit none
real*8, dimension(:) :: t(4)
real*8, dimension(:,:) :: dft_dt(1,4)
real*8 ::ft
real*8, dimension(:) :: g11(hidden), h11(hidden), g21(hidden), g12(hidden), h12(hidden)
real*8, dimension(:) :: g22(hidden), h21(hidden), h22(hidden), h2(hidden*2), g3(hidden), h3(hidden), g4(1)
real*8, dimension(:):: dft_dh3(hidden), dft_dg3(hidden), dft_dh2(hidden*2),dft_dg2(hidden),dft_dh1(hidden),dft_dg1(hidden)
g11=vadd(matmul(t(1:2), transpose(w1c)),b1c)
h11=shifted_softplus0(g11)
g21=vadd(matmul(h11, transpose(w2c)),b2c)
h21=shifted_softplus0(g21)
g12=vadd(matmul(t(3:4), transpose(w1)),b1)
h12=shifted_softplus0(g12)
g22=vadd(matmul(h12, transpose(w2)),b2)
h22=shifted_softplus0(g22)
h2(1:100)=h21
h2(101:200)=h22
g3=vadd(matmul(h2, transpose(w3c)),b3c)
h3=shifted_softplus0(g3)
g4=vadd(matmul(h3, transpose(w4c)),b4c)
ft=g4(1)
! if (back==1) then
dft_dh3=w4c(1,:) !matmul(dft_dg4,w4)
dft_dg3=vmul(back_shifted_softplus0(g3),dft_dh3)
dft_dh2=matmul(dft_dg3,w3c)
dft_dg2=vmul(back_shifted_softplus0(g21),dft_dh2(1:100))
dft_dh1=matmul(dft_dg2,w2c)
dft_dg1=vmul(back_shifted_softplus0(g11),dft_dh1)
dft_dt(1,1:2)=matmul(dft_dg1,w1c)
dft_dg2=vmul(back_shifted_softplus0(g22),dft_dh2(101:200))
dft_dh1=matmul(dft_dg2,w2)
dft_dg1=vmul(back_shifted_softplus0(g12),dft_dh1)
dft_dt(1,3:4)=matmul(dft_dg1,w1)
! end if
end subroutine NN_c
subroutine construct_c(t, t0, cs, dc_dt, dc_dt0, delta)
implicit none
real*8 :: t(:), t0(:,:), cs(:), dc_dt(:,:), dc_dt0(:,:,:), delta
real*8 :: delta2, tmpval, ci_dj, denomi2
integer :: i,j,k,l, st, st0, i2, j2
real*8, dimension(:) :: dis(size(t0(:,1))), numer(size(t0(:,1))), denomi(size(t0(:,1)))
real*8, dimension(:,:) :: dis_ij(size(t0(:,1)),size(t0(:,1)))
st=size(t)
st0=size(t0(:,1))
delta2=delta**2.
do i =1,st0
dis(i)=0.
do j =1,st
dis(i)=dis(i)+(t(j)-t0(i,j))**2.
end do
dis(i)=tanh(dis(i)/delta2)
end do
do i =1,st0
do j =i+1,st0
dis_ij(i,j)=0.
do k =1,st
dis_ij(i,j)=dis_ij(i,j)+(t0(i,k)-t0(j,k))**2.
end do
dis_ij(i,j)=tanh(dis_ij(i,j)/delta2)
dis_ij(j,i)=dis_ij(i,j)
end do
end do
do i =1,st0
denomi(i)=1.
numer(i)=1.
do j =1,st0
if (j==i) cycle
denomi(i)=denomi(i)*dis(j)
numer(i)=numer(i)*dis_ij(i,j)
end do
cs(i)=denomi(i)/numer(i)
end do
do i =1,st0
do j =1,st0
if (i==j) then
do k=1,st
tmpval=0.0
do l=1,st0
if (l==i) cycle
tmpval=tmpval-2.*(t0(i,k)-t0(l,k))*(1.-dis_ij(i,l))*(1.+dis_ij(i,l))/dis_ij(i,l)
end do
dc_dt0(i,j,k)=cs(i)*tmpval/delta2
end do
else
do k=1,st
denomi2=1.
do j2 =1,st0
if (j2==i .or. j2==j) cycle
denomi2=denomi2*dis(j2)
end do
ci_dj=denomi2/numer(i)
!ci_dj=c(i)/dis(j) implemented to avoid zero division
tmpval=-2.*(t0(j,k)-t0(i,k))*(1.-dis_ij(i,j))*(1.+dis_ij(i,j))*cs(i)/dis_ij(i,j)&
&+2.*(t0(j,k)-t(k))*(1.-dis(j))*(1.+dis(j))*ci_dj
dc_dt0(i,j,k)=tmpval/delta2
end do
end if
end do
end do
do i =1,st0
do j =1,st
tmpval=0
do k=1,st0
if (i==k) cycle
denomi2=1.
do j2 =1,st0
if (j2==i .or. j2==k) cycle
denomi2=denomi2*dis(j2)
end do
ci_dj=denomi2/numer(i)
!ci_dj=c(i)/dis(k) implemented to avoid zero division
tmpval=tmpval+2.*ci_dj*(1.-dis(k))*&
&(1.+dis(k))*(t(j)-t0(k,j))
end do
dc_dt(i,j)=tmpval/delta2
end do
end do
end subroutine construct_c
subroutine backpropagation(nconds, nt, f, ft, f_t0, f0, cs, csum, dft_dt, dft0_dt0, &
&df0_dt, dc_dt, dc_dt0, dt0_dt, dt_dn, df_dn)
implicit none
integer i,j,k,l
integer:: nconds, nt
real*8 :: f, ft, f_t0(:), f0(:), csum, cs(:), dft_dt(:,:), dft0_dt0(:,:), df0_dt(:,:), dc_dt(:,:), dc_dt0(:,:,:)
real*8 :: dt_dn(:,:), df_dn(:,:), dt0_dt(:,:,:)
real*8, PARAMETER :: THRD=1./3.,THRD2=2./3.,THRD4=4./3.,THRD5=5./3., delta_c=1.
real*8 :: df_dft(1,1), df_dft0(1,nconds),df_df0(1,nconds),df_dc(1,nconds)
real*8 :: df_df0_dt(1,nt), df_dt0(1,nconds,nt), df_dft_dt(1,nt), df_dc_dt(1,nt)
real*8 :: df_dt0_dt(1,nt), df_dt(1,nt)
df_dft(1,1)=1.
do i =1,nconds
df_dft0(1,i)=-cs(i)/csum
df_df0(1,i)=cs(i)/csum
df_dc(1,i)=(ft-f_t0(i)+f0(i))/csum-f/csum
end do
df_df0_dt=matmul(df_df0, df0_dt)
do j=1,nt
do i=1,nconds
df_dt0(1,i,j)=df_dft0(1,i)*dft0_dt0(i,j)
end do
end do
df_dft_dt=matmul(df_dft, dft_dt)
do i=1,nconds
do k=1,nconds
do j=1,nt
do l=1,nt
dc_dt(i,j)=dc_dt(i,j)+dc_dt0(i,k,l)*dt0_dt(k,l,j)
end do
end do
end do
end do
df_dc_dt=matmul(df_dc,dc_dt)
df_dt0_dt=0.
do i=1,nt
do k=1,nt
do j=1,nconds
df_dt0_dt(1,i)=df_dt0_dt(1,i)+df_dt0(1,j,k)*dt0_dt(j,k,i)
end do
end do
end do
do i=1,nt
df_dt(1,i)=df_dt0_dt(1,i)+df_dft_dt(1,i)+df_dc_dt(1,i)+ df_df0_dt(1,i)
end do
df_dn=matmul(df_dt,dt_dn)
end subroutine backpropagation
"""
# Fortran fragment for patch(): pulls the NN weight module into XC_META_.
p5 = """ USE readNN"""
# Fortran fragment for patch(): declares the NNMGGA energy variable in METAGGASPIN.
p6 = """ REAL(q) Exc_NNM"""
if __name__ == '__main__':
    # Run the interactive patcher only when executed as a script.
    patch()
|
class Error(Exception):
    """Root of this package's exception hierarchy."""
    pass


class WrappedError(Error):
    """Error that can wrap an underlying exception.

    The wrapped exception (if any) is stored on ``_origin_error`` and its
    text is appended to ``BASE_MESSAGE`` to form the final message.
    """
    BASE_MESSAGE = 'WrappedError'

    def __init__(self, *args, origin_error=None, **kwargs):
        self._origin_error = origin_error
        message = self.BASE_MESSAGE
        if self._origin_error:
            message = '{}: {}'.format(message, self._origin_error)
        super().__init__(message, *args, **kwargs)


class ConnectError(WrappedError):
    """Errors raised while connecting to the database."""
    BASE_MESSAGE = 'ConnectError'


class UnexpectedError(WrappedError):
    """Uncategorized errors."""
    BASE_MESSAGE = 'UnexpectedError'


class OperationFailure(WrappedError):
    """Retriable failures such as timeouts or "MySQL server has gone away"."""
    BASE_MESSAGE = 'OperationFailure'


class ProgrammingError(WrappedError):
    """Errors reported with the 'ProgrammingError' message prefix."""
    BASE_MESSAGE = 'ProgrammingError'


class DuplicateKeyError(Error):
    """Duplicate-key errors; carries no wrapped-message formatting."""
    pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 09:12:46 2019
@author: Brandon
"""
class nhcOutlooks():
    """Look up NHC outlook metadata for a named tropical cyclone.

    Inputs are validated on construction: ValueError is raised for a year
    outside 2008-2019 or for a cyclone name not in the (currently
    hardcoded 2019) table.
    """
    def __init__(self, name, year, advisory_num):
        self.name = name                 # cyclone name, e.g. "Dorian"
        self.yr = int(year)              # season year
        self.adv_n = str(advisory_num)   # advisory number, kept as a string
        self._check_inputs()
    # BUG FIX: this was declared without `self` but called as
    # self._import_hurricane_info(), which raised
    # "TypeError: takes 0 positional arguments but 1 was given" on every
    # construction. @staticmethod keeps the no-argument body and makes the
    # instance-style call valid.
    @staticmethod
    def _import_hurricane_info():
        """Return the mapping of cyclone name -> [ATCF id, basin]."""
        # return hurrs[self.yr]
        # for now, assume year is 2019.
        return {
                "Andrea" : ["al01", "Atlantic"],
                "Barry" : ["al02", "Atlantic"],
                "Three" : ["al03", "Atlantic"],
                "Chantal" : ["al04", "Atlantic"],
                "Dorian" : ["al05", "Atlantic"],
                "Erin" : ["al06", "Atlantic"],
                "Fernand" : ["al07", "Atlantic"],
                "Gabrielle" : ["al08", "Atlantic"],
                "Humberto" : ["al09", "Atlantic"],
                "Jerry" : ["al10", "Atlantic"],
                "Imelda" : ["al11", "Atlantic"],
                "Karen" : ["al12", "Atlantic"],
                "Lorenzo" : ["al13", "Atlantic"],
                "Alvin" : ["ep01", "East Pacific"],
                "Barbara" : ["ep02", "East Pacific"],
                "Cosme" : ["ep03", "East Pacific"],
                "Four-e" : ["ep04", "East Pacific"],
                "Dalila" : ["ep05", "East Pacific"],
                "Erick" : ["ep06", "East Pacific"],
                "Flossie" : ["ep07", "East Pacific"],
                "Gil" : ["ep08", "East Pacific"],
                "Henriette" : ["ep09", "East Pacific"],
                "Ivo" : ["ep10", "East Pacific"],
                "Juliette" : ["ep11", "East Pacific"],
                "Akoni" : ["ep12", "East Pacific"],
                "Kiko" : ["ep13", "East Pacific"],
                "Mario" : ["ep14", "East Pacific"],
                "Lorena" : ["ep15", "East Pacific"],
                "Narda" : ["ep16", "East Pacific"]
                }
    # import the hurricane info; for now, hardcoded
    def _check_inputs(self):
        """Validate year and cyclone name; raise ValueError on failure."""
        if not 2008 <= self.yr <= 2019:
            raise ValueError("Year must be between 2008 and current year")
        if self.name not in self._import_hurricane_info():
            raise ValueError("Cyclone name not in the specified year. Double check spelling and year.")
|
def convert_iso_to_json(dateString):
    """Split an ISO 'YYYY-MM-DD' date string into a dict with 'year',
    'month' and 'day' string fields (zero padding preserved)."""
    pieces = dateString.split("-")
    return {"year": pieces[0], "month": pieces[1], "day": pieces[2]}
def convert_json_to_iso(json):
    """Format a {'year','month','day'} mapping as an ISO 'YYYY-MM-DD' string.

    BUG FIX: the previous version interpolated bare ints, so e.g. May 3rd
    came out as '2020-5-3', which is not valid ISO 8601 despite the
    function's name; zero-pad each field to the ISO widths instead.
    """
    return "{0:04d}-{1:02d}-{2:02d}".format(int(json['year']), int(json['month']), int(json['day']))
#
# Copyright © 2022 Christos Pavlatos, George Rassias, Christos Andrikos,
# Evangelos Makris, Aggelos Kolaitis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the “Software”), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
def string_align(str1, str2):
    """Given two strings it aligns them in
    a custom way to minimize holes and
    hole lengths
    """
    # NOTE(review): this function looks unfinished and cannot run as written:
    #   - common_length() is not defined anywhere visible in this file.
    #   - the loop unpacks three names from enumerate(zip(...)), which yields
    #     (index, (char1, char2)) pairs, so it raises ValueError immediately.
    #     Presumably `for k, (i, j) in enumerate(zip(str1, str2))` was meant.
    #   - only candidate [0] is used (no scoring of the three alternatives),
    #     and its element [1] is appended to BOTH returned strings.
    #   - when the strings match to the end, the loop finishes without a
    #     return statement, so the function implicitly returns None.
    #   - the trailing "split the string into segments" comment suggests
    #     unimplemented work. Treat this as a stub pending a rewrite.
    # length align the two strings
    common_length()
    align1 = []
    align2 = []
    for i, j, k in enumerate(zip(str1, str2)):
        if i == j:
            align1.append(i)
            align2.append(j)
        else:
            next_candidates = [
                (str1[k:], str2[k + 1 :]),
                (str1[k + 1 :], str2[k:]),
                (str1[k + 1 :], str2[k + 1 :]),
            ]
            partial_alignments = [
                string_align(*substrings) for substrings in next_candidates
            ]
            return (
                "".join(align1) + partial_alignments[0][1],
                "".join(align2) + partial_alignments[0][1],
            )
    # split the string into segments
def define_env(env):
    """Register documentation variables and macros on the environment.

    NOTE(review): assumes `env` follows the mkdocs-macros plugin hook
    interface (a `variables` mapping plus a `macro` registration
    decorator) -- confirm against the plugin configuration.
    """
    # Markdown link shortcuts for the linting/formatting tools.
    env.variables["SHELLCHECK"] = "[ShellCheck](https://github.com/koalaman/shellcheck)"
    env.variables["BLACK"] = "[black](https://black.readthedocs.io/)"
    env.variables["ISORT"] = "[isort](https://pycqa.github.io/isort/)"
    env.variables["FLAKE8"] = "[flake8](https://flake8.pycqa.org/)"
    env.variables["PYLINT"] = "[pylint](https://pylint.pycqa.org/)"
    env.variables["MYPY"] = "[mypy](https://mypy.readthedocs.io/)"
    # Page-location shortcuts used by cross-references in the docs.
    env.variables["USE_COMMON_LOC"] = "10_common.md"
    env.variables["REF_COMMON_LOC"] = "common_makefile.md"
    env.variables["REF_SHELL_LOC"] = "shell_makefile.md"
    env.variables["REF_PYTHON_LOC"] = "python_makefile.md"
    # Emits a French "how to override this process?" admonition for an
    # environment target (before_/before_remove_/custom_remove_ hooks and
    # the <TARGET>_PREREQ variable).
    @env.macro
    def override_env_macro(target):
        return f"""
??? question "Comment overrider ce processus ?"
    Vous avez plusieurs points d'extension :
    - `before_{target}::` : permet d'exécuter des commandes **AVANT** l'instanciation du `{target}`
    - `before_remove_{target}::` : permet d'exécuter des commandes **AVANT** la suppression du `{target}`
    - `custom_remove_{target}::` : permet d'exécuter des commandes **APRES** la suppression du `{target}`
    Vous pouvez également ajouter des pré-requis via la variable `{target.upper()}_PREREQ` (utilisez `+=` pour ajouter des pré-requis).
    """
    # Same admonition for phony targets (before_/custom_ hooks only).
    @env.macro
    def override_phony_macro(target, what):
        return f"""
??? question "Comment overrider ce processus ?"
    Vous avez plusieurs points d'extension :
    - `before_{target}::` : permet d'exécuter des commandes **AVANT** l'appel réel des {what}.
    - `custom_{target}::` : permet de définir et exécuter vos propres {what} (qui seront exécutées automatiquement **APRES** les {what} standards).
    """
|
#
# PySNMP MIB module WWP-LEOS-USER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WWP-LEOS-USER-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:38:33 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: pysmi-generated code (see header above) — regenerate from the ASN.1
# source rather than hand-editing these definitions.
# Resolve base SMI/TC symbols through the active MIB builder.
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Counter64, iso, ModuleIdentity, Bits, TimeTicks, IpAddress, MibIdentifier, Integer32, Unsigned32, Gauge32, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Counter64", "iso", "ModuleIdentity", "Bits", "TimeTicks", "IpAddress", "MibIdentifier", "Integer32", "Unsigned32", "Gauge32", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
TextualConvention, TruthValue, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "TruthValue", "RowStatus", "DisplayString")
wwpModulesLeos, = mibBuilder.importSymbols("WWP-SMI", "wwpModulesLeos")
# Module identity and revision history.
wwpLeosUserMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39))
wwpLeosUserMIB.setRevisions(('2012-07-11 00:00', '2012-06-27 00:00', '2011-07-06 00:00', '2007-03-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: wwpLeosUserMIB.setRevisionsDescriptions(('Changed the definitions of the wwpLeosUserPrivLevel values to match those used internally and at the CLI.', 'Corrected string lengths.', ' Added a new object wwpLeosUserAuthProviderScope.', 'Initial creation.',))
if mibBuilder.loadTexts: wwpLeosUserMIB.setLastUpdated('201207110000Z')
if mibBuilder.loadTexts: wwpLeosUserMIB.setOrganization('Ciena, Inc')
if mibBuilder.loadTexts: wwpLeosUserMIB.setContactInfo(' Mib Meister 115 North Sullivan Road Spokane Valley, WA 99037 USA Phone: +1 509 242 9000 Email: support@ciena.com')
if mibBuilder.loadTexts: wwpLeosUserMIB.setDescription('This MIB module defines the generic managed objects for User Information on WWP devices.')
# Top-level OID layout under enterprises.6141.2.60.39.
wwpLeosUserMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1))
wwpLeosUser = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1))
wwpLeosUserMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 2))
wwpLeosUserMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 2, 0))
wwpLeosUserMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 3))
wwpLeosUserMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 3, 1))
wwpLeosUserMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 3, 2))
# --- wwpLeosUserAuthProviderTable: prioritized list of auth providers ---
wwpLeosUserAuthProviderTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1), )
if mibBuilder.loadTexts: wwpLeosUserAuthProviderTable.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderTable.setDescription('Table of UserAuth Providers.')
wwpLeosUserAuthProviderEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1), ).setIndexNames((0, "WWP-LEOS-USER-MIB", "wwpLeosUserAuthProviderPriority"))
if mibBuilder.loadTexts: wwpLeosUserAuthProviderEntry.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderEntry.setDescription('An entry for each User Authorization Provider.')
wwpLeosUserAuthProviderPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2)))
if mibBuilder.loadTexts: wwpLeosUserAuthProviderPriority.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderPriority.setDescription('The priority of this user authentication provider.')
wwpLeosUserAuthProviderType = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("local", 2), ("radius", 3), ("tacacs", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserAuthProviderType.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderType.setDescription("The type/method of this user authentication provider. At least one entry must be a provider other than 'none' and any given provider may not be used twice. When a provider is changed to 'none', lower priority providers will have their priority increased to close the gap.")
wwpLeosUserAuthProviderCalled = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserAuthProviderCalled.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderCalled.setDescription('The number of calls to this user authentication provider. The counter is cleared automatically when AuthProviderType is changed or may be cleared manually.')
wwpLeosUserAuthProviderSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserAuthProviderSuccess.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderSuccess.setDescription('The number of times this user authentication provider returned a Success response. The counter is cleared automatically when AuthProviderType is changed or may be cleared manually.')
wwpLeosUserAuthProviderFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserAuthProviderFailure.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderFailure.setDescription('The number of times this user authentication provider returned a Failure response. The counter is cleared automatically when AuthProviderType is changed or may be cleared manually.')
wwpLeosUserAuthProviderSkipped = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 6), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserAuthProviderSkipped.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderSkipped.setDescription('The number of times this user authentication provider returned a Skip Me response. The counter is cleared automatically when AuthProviderType is changed or may be cleared manually.')
wwpLeosUserAuthProviderScope = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("none", 0), ("serial", 1), ("remote", 2), ("all", 3))).clone('all')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserAuthProviderScope.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserAuthProviderScope.setDescription('The scope to be used for each authentication method.')
# --- wwpLeosUserWhoTable: currently logged-in users (indexed by shell pid) ---
wwpLeosUserWhoTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2), )
if mibBuilder.loadTexts: wwpLeosUserWhoTable.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoTable.setDescription('Table of logged in users.')
wwpLeosUserWhoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2, 1), ).setIndexNames((0, "WWP-LEOS-USER-MIB", "wwpLeosUserWhoPid"))
if mibBuilder.loadTexts: wwpLeosUserWhoEntry.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoEntry.setDescription('An entry for each logged in user.')
wwpLeosUserWhoPid = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: wwpLeosUserWhoPid.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoPid.setDescription('The pid of the users shell process.')
wwpLeosUserWhoUser = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosUserWhoUser.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoUser.setDescription('The username used during login authentication.')
wwpLeosUserWhoTerminal = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosUserWhoTerminal.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoTerminal.setDescription('The terminal the user logged in from.')
wwpLeosUserWhoIdleTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosUserWhoIdleTime.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoIdleTime.setDescription('The users idle time in minutes. This counter is reset to zero when ever the shell process detects input from the user.')
wwpLeosUserWhoStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 2, 1, 5), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosUserWhoStatus.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserWhoStatus.setDescription("Status of the users shell process. To kill a users shell, set this object to 'Destroy'.")
# --- wwpLeosUserTable: locally configured user accounts (indexed by uid) ---
wwpLeosUserTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3), )
if mibBuilder.loadTexts: wwpLeosUserTable.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserTable.setDescription('Table of locally configured users.')
wwpLeosUserEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1), ).setIndexNames((0, "WWP-LEOS-USER-MIB", "wwpLeosUserUid"))
if mibBuilder.loadTexts: wwpLeosUserEntry.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserEntry.setDescription('An entry for each user in the local password file.')
wwpLeosUserUid = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 1), Unsigned32())
if mibBuilder.loadTexts: wwpLeosUserUid.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserUid.setDescription('The numeric userid of the user. These numbers are generated by the device in order to making indexing the table easy, but they are not tied to specific user names during a reboot. When a new user is created, the userid must be an unused value.')
wwpLeosUserName = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosUserName.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserName.setDescription('The name of the user.')
wwpLeosUserPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 34))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosUserPassword.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserPassword.setDescription('The users password in encrypted form. When setting this object you must set wwpLeosUserIsEncrypted at the same time in order to specify whether the password you are setting needs to be encrypted by the device or whether you have already encrypted it.')
wwpLeosUserPrivLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 0), ("limited", 1), ("admin", 2), ("super", 3), ("diag", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosUserPrivLevel.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserPrivLevel.setDescription('The privilege level of the user.')
wwpLeosUserIsDefault = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosUserIsDefault.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserIsDefault.setDescription('When this is set to True, the user is one of the default users created in the device at boot time.')
wwpLeosUserIsEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 6), TruthValue()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosUserIsEncrypted.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserIsEncrypted.setDescription('This will always be True on a Get as the password is always stored locally on the device in encrypted form. During a Set, it is False if you are sending wwpLeosUserPassword in the clear so the device can encrypt it, or True if wwpLeosUserPassword is already in encrypted MD5 form.')
wwpLeosUserIsModified = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosUserIsModified.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserIsModified.setDescription('When this is set to True, the user is one of the default users created in the device, but one or more properties of the user account has been altered from the default values.')
wwpLeosUserStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 39, 1, 1, 3, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosUserStatus.setStatus('current')
if mibBuilder.loadTexts: wwpLeosUserStatus.setDescription('Use CreateAndGo to create a new user, Destroy to remove a user.')
# Export all symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("WWP-LEOS-USER-MIB", wwpLeosUserWhoEntry=wwpLeosUserWhoEntry, wwpLeosUserPrivLevel=wwpLeosUserPrivLevel, wwpLeosUserMIBCompliances=wwpLeosUserMIBCompliances, wwpLeosUserTable=wwpLeosUserTable, wwpLeosUserIsModified=wwpLeosUserIsModified, wwpLeosUserAuthProviderEntry=wwpLeosUserAuthProviderEntry, wwpLeosUserMIBGroups=wwpLeosUserMIBGroups, wwpLeosUserMIBConformance=wwpLeosUserMIBConformance, wwpLeosUserMIBObjects=wwpLeosUserMIBObjects, wwpLeosUserWhoPid=wwpLeosUserWhoPid, wwpLeosUserUid=wwpLeosUserUid, wwpLeosUserStatus=wwpLeosUserStatus, wwpLeosUserWhoUser=wwpLeosUserWhoUser, wwpLeosUserAuthProviderTable=wwpLeosUserAuthProviderTable, wwpLeosUserMIB=wwpLeosUserMIB, wwpLeosUserName=wwpLeosUserName, wwpLeosUserAuthProviderSuccess=wwpLeosUserAuthProviderSuccess, wwpLeosUserAuthProviderCalled=wwpLeosUserAuthProviderCalled, wwpLeosUserAuthProviderPriority=wwpLeosUserAuthProviderPriority, wwpLeosUserIsDefault=wwpLeosUserIsDefault, wwpLeosUser=wwpLeosUser, wwpLeosUserWhoIdleTime=wwpLeosUserWhoIdleTime, wwpLeosUserPassword=wwpLeosUserPassword, wwpLeosUserWhoTerminal=wwpLeosUserWhoTerminal, wwpLeosUserMIBNotifications=wwpLeosUserMIBNotifications, wwpLeosUserWhoStatus=wwpLeosUserWhoStatus, wwpLeosUserAuthProviderScope=wwpLeosUserAuthProviderScope, wwpLeosUserEntry=wwpLeosUserEntry, wwpLeosUserAuthProviderFailure=wwpLeosUserAuthProviderFailure, wwpLeosUserAuthProviderType=wwpLeosUserAuthProviderType, wwpLeosUserAuthProviderSkipped=wwpLeosUserAuthProviderSkipped, PYSNMP_MODULE_ID=wwpLeosUserMIB, wwpLeosUserIsEncrypted=wwpLeosUserIsEncrypted, wwpLeosUserMIBNotificationPrefix=wwpLeosUserMIBNotificationPrefix, wwpLeosUserWhoTable=wwpLeosUserWhoTable)
|
# Celsius -> Fahrenheit converter: F = 1.8 * C + 32.
print('{:-^30}'.format(' Conversor de Temperatura '))
celsius = float(input('Digite a temperatura em Celsius: '))
fahrenheit = 1.8 * celsius + 32
print(f'\nA temperatura de {celsius:.2f}°C equivale a {fahrenheit:.2f}°F')
|
# Global registry mapping each platform's BASENAME to its class object.
REGISTERED_PLATFORMS = {}


def register_class(cls):
    """Class decorator: record ``cls`` in REGISTERED_PLATFORMS under its BASENAME."""
    key = cls.BASENAME
    REGISTERED_PLATFORMS[key] = cls
    return cls
|
# 064_Tratando_varios_valores_v1.py
# Read integers until the sentinel 9999; count them and sum the values read.
total = 0
count = 0
while True:
    num = int(input("Entre com um número: "))
    if num == 9999:
        break
    total += num
    count += 1
print("Fim")
print(f"Você digitou {count} e a soma foi {total}")
# 7. Swimming world record
# Given the record to beat (seconds), the distance (metres) and the time
# needed to swim one metre, decide whether the swimmer beats the record.
# Water resistance adds 12.5 s for every full 15 m stretch (count rounded
# down to the nearest whole number).
record = float(input())
distance_m = float(input())
seconds_per_meter = float(input())

# Base swim time plus the resistance penalty.
swim_time = distance_m * seconds_per_meter
swim_time += (distance_m // 15) * 12.5

if swim_time < record:
    print(f'Yes, he succeeded! The new world record is {swim_time:.2f} seconds.')
else:
    print(f'No, he failed! He was {swim_time - record:.2f} seconds slower.')
|
class Pessoa:
    """A person with an optional name/age and a list of children."""

    # Class attribute shared by all instances until shadowed on an instance.
    olhos = 2

    def __init__(self, *filhos, nome=None, idade=49):
        self.nome = nome
        self.idade = idade
        self.filhos = list(filhos)

    def cumprimentar(self):
        """Return a greeting tagged with this instance's id()."""
        return f'Olá {id(self)}'
if __name__ == '__main__':
    # Build two Pessoa instances; Gerarda has Henrique as her only child.
    henrique = Pessoa(nome='Henrique')
    gerarda = Pessoa(henrique, nome='Gerarda')
    # Calling the method through the class requires passing the instance.
    print(Pessoa.cumprimentar(gerarda))
    print(id(gerarda))
    print(gerarda.cumprimentar())
    print(gerarda.nome)
    print(gerarda.idade)
    for filho in gerarda.filhos:
        print(filho.nome)
    # Instance attributes can be added and removed at runtime.
    gerarda.sobrenome = 'Correia'
    del gerarda.filhos
    # Shadow the class attribute on the instance, then delete the shadow.
    gerarda.olhos = 1
    del gerarda.olhos
    print(gerarda.__dict__)
    print(henrique.__dict__)
    # Rebinding the class attribute is seen by every non-shadowing instance.
    Pessoa.olhos = 3
    print(gerarda.olhos)
    print(henrique.olhos)
    print(Pessoa.olhos)
    print(id(Pessoa.olhos), id(gerarda.olhos), id(henrique.olhos))
class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        """Return the k-th smallest value (1-indexed) in a BST.

        In-order traversal visits BST nodes in ascending order, so the
        k-th node visited holds the answer.  Traversal state lives in the
        enclosing scope via ``nonlocal`` instead of the module-level
        globals the original used, which leaked names into the module
        namespace and broke reentrancy; the traversal also stops early
        once the answer is found.  Returns 0 when the tree has fewer
        than k nodes (same as the original).
        """
        count = 0
        result = 0

        def inorder(node) -> None:
            nonlocal count, result
            # Stop descending once the answer is fixed.
            if node is None or count >= k:
                return
            inorder(node.left)
            count += 1
            if count == k:
                result = node.val
                return
            inorder(node.right)

        inorder(root)
        return result
|
# Sieve of Eratosthenes
def sito(num):
    """Return a list ``tab`` of length num+1 where tab[i] is True iff i is prime.

    Fixes of the original: the result now has length num+1 for every
    num >= 0 (the old version always prepended two entries, so sito(0)
    had length 2), and the duplicated ``x += 1`` in both branches of the
    loop is collapsed into one increment.
    """
    tab = [True] * (num + 1)
    # 0 and 1 are not prime (guard against num < 1).
    for i in range(min(2, num + 1)):
        tab[i] = False
    x = 2
    while x * x <= num:
        if tab[x]:
            # Multiples below x*x were already crossed out by smaller primes.
            for i in range(x * x, num + 1, x):
                tab[i] = False
        x += 1
    return tab
if __name__ == '__main__':
    # Ask for a limit and print the primality flag for every value up to it.
    limit = int(input('podaj liczbe: '))
    for value, is_prime in enumerate(sito(limit)):
        print(value, is_prime)
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"@bazel_tools//tools/build_defs/repo:git.bzl",
"git_repository",
)
load(
"@bazel_gazelle//internal:go_repository.bzl",
_go_repository = "go_repository",
)
load(
"@bazel_gazelle//internal:go_repository_cache.bzl",
"go_repository_cache",
)
load(
"@bazel_gazelle//internal:go_repository_tools.bzl",
"go_repository_tools",
)
load(
"@bazel_gazelle//internal:go_repository_config.bzl",
"go_repository_config",
)
# Re-export the internal go_repository rule; users should load it from this
# file rather than from //internal.
go_repository = _go_repository
def gazelle_dependencies(
        go_sdk = "",
        go_repository_default_config = "@//:WORKSPACE",
        go_env = {}):
    """Declares the repositories Gazelle depends on.

    Sets up bazel_skylib, the go_repository cache/tools/config helper
    repositories and a pinned set of Go module repositories.  Every
    declaration goes through _maybe(), so any repository the caller has
    already declared in its WORKSPACE takes precedence.

    Args:
        go_sdk: name of the Go SDK repository to use for the repository
            cache; when empty, existing SDK rules are discovered by
            scanning native.existing_rules().
        go_repository_default_config: label recorded by
            go_repository_config as the default config for go_repository.
        go_env: extra Go environment variables passed to the cache.
    """
    _maybe(
        git_repository,
        name = "bazel_skylib",
        commit = "df3c9e2735f02a7fe8cd80db4db00fec8e13d25f", # `master` as of 2021-08-19
        remote = "https://github.com/bazelbuild/bazel-skylib",
    )
    # Repository cache: either use the explicitly named SDK, or scan the
    # already-declared rules for Go SDK repositories and record their
    # platforms.
    if go_sdk:
        go_repository_cache(
            name = "bazel_gazelle_go_repository_cache",
            go_sdk_name = go_sdk,
            go_env = go_env,
        )
    else:
        go_sdk_info = {}
        for name, r in native.existing_rules().items():
            # match internal rule names but don't reference them directly.
            # New rules may be added in the future, and they might be
            # renamed (_go_download_sdk => go_download_sdk).
            if name != "go_sdk" and ("go_" not in r["kind"] or "_sdk" not in r["kind"]):
                continue
            if r.get("goos", "") and r.get("goarch", ""):
                platform = r["goos"] + "_" + r["goarch"]
            else:
                platform = "host"
            go_sdk_info[name] = platform
        go_repository_cache(
            name = "bazel_gazelle_go_repository_cache",
            go_sdk_info = go_sdk_info,
            go_env = go_env,
        )
    # Helper repos consumed by the go_repository rule itself.
    go_repository_tools(
        name = "bazel_gazelle_go_repository_tools",
        go_cache = "@bazel_gazelle_go_repository_cache//:go.env",
    )
    go_repository_config(
        name = "bazel_gazelle_go_repository_config",
        config = go_repository_default_config,
    )
    # Pinned Go module dependencies (sums/versions presumably generated
    # from go.mod — regenerate rather than editing by hand).  Declared
    # through _maybe so user-supplied versions win.
    _maybe(
        go_repository,
        name = "co_honnef_go_tools",
        importpath = "honnef.co/go/tools",
        sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=",
        version = "v0.0.0-20190523083050-ea95bdfd59fc",
    )
    _maybe(
        go_repository,
        name = "com_github_bazelbuild_buildtools",
        importpath = "github.com/bazelbuild/buildtools",
        sum = "h1:VMFMISXa1RypQNG0j4KVCbsUcrxFudkY/IvWzEJCyO8=",
        version = "v0.0.0-20211007154642-8dd79e56e98e",
        build_naming_convention = "go_default_library",
    )
    _maybe(
        go_repository,
        name = "com_github_bazelbuild_rules_go",
        importpath = "github.com/bazelbuild/rules_go",
        sum = "h1:SfxjyO/V68rVnzOHop92fB0gv/Aa75KNLAN0PMqXbIw=",
        version = "v0.29.0",
    )
    _maybe(
        go_repository,
        name = "com_github_bmatcuk_doublestar",
        importpath = "github.com/bmatcuk/doublestar",
        sum = "h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=",
        version = "v1.3.4",
    )
    _maybe(
        go_repository,
        name = "com_github_burntsushi_toml",
        importpath = "github.com/BurntSushi/toml",
        sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
        version = "v0.3.1",
    )
    _maybe(
        go_repository,
        name = "com_github_census_instrumentation_opencensus_proto",
        importpath = "github.com/census-instrumentation/opencensus-proto",
        sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
        version = "v0.2.1",
    )
    _maybe(
        go_repository,
        name = "com_github_chzyer_logex",
        importpath = "github.com/chzyer/logex",
        sum = "h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=",
        version = "v1.1.10",
    )
    _maybe(
        go_repository,
        name = "com_github_chzyer_readline",
        importpath = "github.com/chzyer/readline",
        sum = "h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=",
        version = "v0.0.0-20180603132655-2972be24d48e",
    )
    _maybe(
        go_repository,
        name = "com_github_chzyer_test",
        importpath = "github.com/chzyer/test",
        sum = "h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=",
        version = "v0.0.0-20180213035817-a1ea475d72b1",
    )
    _maybe(
        go_repository,
        name = "com_github_client9_misspell",
        importpath = "github.com/client9/misspell",
        sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
        version = "v0.3.4",
    )
    _maybe(
        go_repository,
        name = "com_github_davecgh_go_spew",
        importpath = "github.com/davecgh/go-spew",
        sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=",
        version = "v1.1.1",
    )
    _maybe(
        go_repository,
        name = "com_github_envoyproxy_go_control_plane",
        importpath = "github.com/envoyproxy/go-control-plane",
        sum = "h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=",
        version = "v0.9.1-0.20191026205805-5f8ba28d4473",
    )
    _maybe(
        go_repository,
        name = "com_github_envoyproxy_protoc_gen_validate",
        importpath = "github.com/envoyproxy/protoc-gen-validate",
        sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
        version = "v0.1.0",
    )
    _maybe(
        go_repository,
        name = "com_github_fsnotify_fsnotify",
        importpath = "github.com/fsnotify/fsnotify",
        sum = "h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=",
        version = "v1.5.1",
    )
    _maybe(
        go_repository,
        name = "com_github_golang_glog",
        importpath = "github.com/golang/glog",
        sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
        version = "v0.0.0-20160126235308-23def4e6c14b",
    )
    _maybe(
        go_repository,
        name = "com_github_golang_mock",
        importpath = "github.com/golang/mock",
        sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=",
        version = "v1.1.1",
    )
    _maybe(
        go_repository,
        name = "com_github_golang_protobuf",
        importpath = "github.com/golang/protobuf",
        sum = "h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=",
        version = "v1.4.3",
    )
    _maybe(
        go_repository,
        name = "com_github_google_go_cmp",
        importpath = "github.com/google/go-cmp",
        sum = "h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=",
        version = "v0.5.6",
    )
    _maybe(
        go_repository,
        name = "com_github_pelletier_go_toml",
        importpath = "github.com/pelletier/go-toml",
        sum = "h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=",
        version = "v1.9.4",
    )
    _maybe(
        go_repository,
        name = "com_github_pmezard_go_difflib",
        importpath = "github.com/pmezard/go-difflib",
        sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
        version = "v1.0.0",
    )
    _maybe(
        go_repository,
        name = "com_github_prometheus_client_model",
        importpath = "github.com/prometheus/client_model",
        sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
        version = "v0.0.0-20190812154241-14fe0d1b01d4",
    )
    _maybe(
        go_repository,
        name = "com_github_yuin_goldmark",
        importpath = "github.com/yuin/goldmark",
        sum = "h1:OtISOGfH6sOWa1/qXqqAiOIAO6Z5J3AEAE18WAq6BiQ=",
        version = "v1.4.0",
    )
    _maybe(
        go_repository,
        name = "com_google_cloud_go",
        importpath = "cloud.google.com/go",
        sum = "h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=",
        version = "v0.26.0",
    )
    _maybe(
        go_repository,
        name = "in_gopkg_check_v1",
        importpath = "gopkg.in/check.v1",
        sum = "h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=",
        version = "v0.0.0-20161208181325-20d25e280405",
    )
    _maybe(
        go_repository,
        name = "in_gopkg_yaml_v2",
        importpath = "gopkg.in/yaml.v2",
        sum = "h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=",
        version = "v2.2.2",
    )
    _maybe(
        go_repository,
        name = "net_starlark_go",
        importpath = "go.starlark.net",
        sum = "h1:xwwDQW5We85NaTk2APgoN9202w/l0DVGp+GZMfsrh7s=",
        version = "v0.0.0-20210223155950-e043a3d3c984",
    )
    _maybe(
        go_repository,
        name = "org_golang_google_appengine",
        importpath = "google.golang.org/appengine",
        sum = "h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=",
        version = "v1.4.0",
    )
    _maybe(
        go_repository,
        name = "org_golang_google_genproto",
        importpath = "google.golang.org/genproto",
        sum = "h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=",
        version = "v0.0.0-20200526211855-cb27e3aa2013",
    )
    _maybe(
        go_repository,
        name = "org_golang_google_grpc",
        importpath = "google.golang.org/grpc",
        sum = "h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=",
        version = "v1.27.0",
    )
    _maybe(
        go_repository,
        name = "org_golang_google_protobuf",
        importpath = "google.golang.org/protobuf",
        sum = "h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=",
        version = "v1.25.0",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_crypto",
        importpath = "golang.org/x/crypto",
        sum = "h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=",
        version = "v0.0.0-20191011191535-87dc89f01550",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_exp",
        importpath = "golang.org/x/exp",
        sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=",
        version = "v0.0.0-20190121172915-509febef88a4",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_lint",
        importpath = "golang.org/x/lint",
        sum = "h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=",
        version = "v0.0.0-20190313153728-d0100b6bd8b3",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_mod",
        importpath = "golang.org/x/mod",
        sum = "h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=",
        version = "v0.5.1",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_net",
        importpath = "golang.org/x/net",
        sum = "h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=",
        version = "v0.0.0-20210805182204-aaa1db679c0d",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_oauth2",
        importpath = "golang.org/x/oauth2",
        sum = "h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=",
        version = "v0.0.0-20180821212333-d2e6202438be",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_sync",
        importpath = "golang.org/x/sync",
        sum = "h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=",
        version = "v0.0.0-20210220032951-036812b2e83c",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_sys",
        importpath = "golang.org/x/sys",
        sum = "h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=",
        version = "v0.0.0-20211007075335-d3039528d8ac",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_text",
        importpath = "golang.org/x/text",
        sum = "h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=",
        version = "v0.3.6",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_tools",
        importpath = "golang.org/x/tools",
        sum = "h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=",
        version = "v0.1.7",
    )
    _maybe(
        go_repository,
        name = "org_golang_x_xerrors",
        importpath = "golang.org/x/xerrors",
        sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=",
        version = "v0.0.0-20200804184101-5ec99f83aff1",
    )
def _maybe(repo_rule, name, **kwargs):
    """Declare repo_rule(name, ...) unless a repository of that name exists.

    This lets callers override any dependency simply by declaring their own
    version of it before invoking gazelle_dependencies().
    """
    if name in native.existing_rules():
        return
    repo_rule(name = name, **kwargs)
|
def user_information_helper(data) -> dict:
    """Shape a raw user-information document into a plain serializable dict."""
    info = {}
    info["id"] = str(data["_id"])
    # NOTE(review): "name" is filled from data["timePredict"], which looks
    # like a copy/paste slip — confirm against the stored document schema.
    info["name"] = data["timePredict"]
    info["email"] = data["email"]
    info["phoneNumber"] = data["phoneNumber"]
    info["weight"] = data["weight"]
    info["createAt"] = str(data["createAt"])
    info["updateAt"] = str(data["updateAt"])
    return info
def user_helper(data) -> dict:
    """Shape a raw user account document into a plain serializable dict."""
    account = {}
    account["id"] = str(data["_id"])
    account["username"] = data["username"]
    # NOTE(review): both "password" and "hashed_password" expose the same
    # stored data["password"] value — confirm this duplication is intended.
    account["password"] = data["password"]
    account["hashed_password"] = data["password"]
    account["role"] = data["role"]
    account["createAt"] = str(data["createAt"])
    account["updateAt"] = str(data["updateAt"])
    return account
|
def odd_and_even_sums(numbers):
    """Sum the odd and even digits of a digit string separately.

    Args:
        numbers: iterable of digit characters, e.g. "12345".

    Returns:
        A summary string "Odd sum = X, Even sum = Y".
    """
    even_sum = 0
    odd_sum = 0
    for digit in numbers:
        value = int(digit)  # convert once instead of twice per digit
        if value % 2 == 0:
            even_sum += value
        else:
            odd_sum += value
    return f"Odd sum = {odd_sum}, Even sum = {even_sum}"
# Read a digit string and report its odd/even digit sums.
digits = input()
print(odd_and_even_sums(digits))
def selection(x):
    """Sort ``x`` in place by pairwise exchange, printing each swap.

    After every swap the indices involved and the current list state are
    printed.  Returns the (now sorted) list.
    """
    for i in range(len(x) - 1):
        for j in range(i + 1, len(x)):
            if x[i] > x[j]:
                x[i], x[j] = x[j], x[i]
                print('{} - {} - {}'.format(i, j, x))
    return x
# Demo run on a fixed sample list.
x = [2, 7, 8, 1, 3, 6]
print(selection(x))
"""
spider.supervised_learning sub-package
__init__.py
@author: zeyu sun
Supervised Learning primitives
difines the model index
"""
__all__ = [
"OWLRegression",
]
|
class Solution:
    def countUnivalSubtrees(self, root: Optional[TreeNode]) -> int:
        """Count the subtrees of ``root`` whose nodes all share one value."""
        ans = 0

        def check(node: Optional[TreeNode], parent_val: int) -> bool:
            """Return True iff the subtree at ``node`` is unival AND matches parent_val."""
            nonlocal ans
            if node is None:
                return True
            # Evaluate both sides unconditionally (no short-circuit) so every
            # unival subtree in BOTH children is counted even when one fails.
            left_ok = check(node.left, node.val)
            right_ok = check(node.right, node.val)
            if left_ok and right_ok:
                ans += 1
                return node.val == parent_val
            return False

        # math.inf can never equal a node value, so the root comparison
        # only affects the boolean returned, not the count.
        check(root, math.inf)
        return ans
|
# NOTE(review): the name is misspelled ("subsitute") but kept for caller
# compatibility.
def subsitute_object(file, obj) -> str:
    """Substitute dictionary fields into a copy of a file's contents.

    Every occurrence of ``{key}`` in the file is replaced with ``obj[key]``;
    placeholders with no matching key are left untouched.  Returns the
    substituted text as a string; no file is created or modified.

    Fix: the file is now opened with a ``with`` block so the handle is
    closed even if reading raises; the redundant ``find()`` pre-check is
    dropped (``str.replace`` is already a no-op when the placeholder is
    absent).
    """
    with open(file) as template_file:
        template = template_file.read()
    for field, value in obj.items():
        # str.replace returns a new string (strings are immutable).
        template = template.replace('{' + field + '}', value)
    return template
# Stored identifier values for image placement.
POSITION_IMAGE_LEFT = 'image-left'
POSITION_IMAGE_RIGHT = 'image-right'
# (value, human-readable label) pairs — presumably for a Django-style
# "choices" field; verify against the model that uses it.
POSITION_CHOICES = (
    (POSITION_IMAGE_LEFT, 'Image Left'),
    (POSITION_IMAGE_RIGHT, 'Image Right'),
)
|
"""
14928. 큰 수 (BIG)
작성자: xCrypt0r
언어: Python 3
사용 메모리: 31,336 KB
소요 시간: 6,280 ms
해결 날짜: 2020년 9월 13일
"""
def main():
print(int(input()) % 20000303)
if __name__ == '__main__':
main()
|
# coding: utf-8
class Item:
    """Simple value object: an item with a display name and a price."""

    def __init__(self, name, price):
        # Keep attribute insertion order name -> price: the demo code in
        # this file prints the instance __dict__.
        self.name = name
        self.price = price
im = Item('鼠标', 28.9)
print(im.__dict__) # ① prints the instance attribute dict
# Read the name attribute through __dict__
print(im.__dict__['name'])
# Read the price attribute through __dict__
print(im.__dict__['price'])
# Writing through __dict__ rebinds the real instance attributes too.
im.__dict__['name'] = '键盘'
im.__dict__['price'] = 32.8
print(im.name) # 键盘
print(im.price) # 32.8
|
def setup():
    # NOTE(review): size/smooth/strokeWeight/stroke are Processing
    # (Python mode) built-ins — this sketch only runs inside Processing.
    size(1000, 500)
    smooth()
    strokeWeight(30)
    stroke(100)
def draw():
    background(0)
    # Two crossing segments that slide right one pixel per frame
    # (frameCount is a Processing built-in incremented every draw call).
    line(frameCount, 300,100 + frameCount,400)
    line(100 + frameCount, 300, frameCount, 400)
|
# -*- encoding=utf-8 -*-
# Copyright 2016 David Cary; licensed under the Apache License, Version 2.0
"""
Unit tests
"""
|
class Customer:
    """A shop customer who can pay through several payment channels."""

    def __init__(self, name, age, phone_no):
        self.name = name
        self.age = age
        self.phone_no = phone_no

    def purchase(self, payment):
        """Print the channel implied by ``payment.type``; unknown -> cash."""
        kind = payment.type
        if kind == "card":
            print("Paying by card")
        elif kind == "e-wallet":
            print("Paying by wallet")
        else:
            print("Paying by cash")
class Payment:
    """Value object holding the payment channel name, e.g. ``"card"``."""

    def __init__(self, type):
        # ``type`` shadows the builtin, but renaming it would break callers.
        self.type = type
payment1 = Payment("card")
c = Customer("Jack", 23, 1234)
c.purchase(payment1)
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Event beans for Pelix.
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
class BundleEvent(object):
    """Represents a lifecycle event of a bundle."""

    __slots__ = ("__bundle", "__kind")

    # Lifecycle event kinds (bit-flag style values)
    INSTALLED = 1             # the bundle has been installed
    STARTED = 2               # the bundle has been started
    STARTING = 128            # the bundle is about to be activated
    STOPPED = 4               # stopped; all of its services unregistered
    STOPPING = 256            # the bundle is about to be deactivated
    STOPPING_PRECLEAN = 512   # deactivated, but some services may remain
    UNINSTALLED = 16          # the bundle has been uninstalled
    UPDATED = 8               # the bundle has been updated (after STARTED)
    UPDATE_BEGIN = 32         # the bundle will be updated (before STOPPING)
    UPDATE_FAILED = 64        # update failed; bundle may be in RESOLVED state

    def __init__(self, kind, bundle):
        """
        :param kind: Kind of event (one of the class constants)
        :param bundle: The bundle concerned by the event
        """
        self.__kind = kind
        self.__bundle = bundle

    def __str__(self):
        """Human-readable representation of the event."""
        return "BundleEvent({0}, {1})".format(self.__kind, self.__bundle)

    def get_bundle(self):
        """Returns the bundle concerned by this event."""
        return self.__bundle

    def get_kind(self):
        """Returns the kind of event (one of the class constants)."""
        return self.__kind
# ------------------------------------------------------------------------------
class ServiceEvent(object):
    """Represents an event in a service's registration lifecycle."""

    __slots__ = ("__kind", "__reference", "__previous_properties")

    # Event kinds
    REGISTERED = 1           # the service has been registered
    MODIFIED = 2             # properties of a registered service changed
    UNREGISTERING = 4        # the service is being unregistered
    MODIFIED_ENDMATCH = 8    # properties changed and no longer match the
                             # listener's filter

    def __init__(self, kind, reference, previous_properties=None):
        """
        :param kind: Kind of event
        :param reference: Reference to the modified service
        :param previous_properties: Previous service properties (for MODIFIED
                                    and MODIFIED_ENDMATCH events)
        """
        self.__kind = kind
        self.__reference = reference
        # Anything that is neither None nor a dict is normalized to an
        # empty dict — only those two shapes are accepted.
        if not (previous_properties is None
                or isinstance(previous_properties, dict)):
            previous_properties = {}
        self.__previous_properties = previous_properties

    def __str__(self):
        """Human-readable representation of the event."""
        return "ServiceEvent({0}, {1})".format(self.__kind, self.__reference)

    def get_previous_properties(self):
        """
        Returns the previous values of the service properties; meaningless
        unless the event is MODIFIED or MODIFIED_ENDMATCH.

        :return: The previous properties of the service
        """
        return self.__previous_properties

    def get_service_reference(self):
        """
        Returns the reference to the service associated to this event.

        :return: A ServiceReference object
        """
        return self.__reference

    def get_kind(self):
        """
        Returns the kind of service event (see the class constants).

        :return: the kind of service event
        """
        return self.__kind
|
'''
YTV.su Playlist Downloader Plugin configuration file
'''
# Channels url — endpoint listing all available TV channels.
url = 'http://ytv.su/tv/channels'
|
""" constant values for the pygaro module """
ENDPOINT_RFID = "rest/chargebox/rfid"
DEFAULT_PORT = 2222
# api methods currently supported
METHOD_GET = "GET"
METHOD_POST = "POST"
METHOD_DELETE = "DELETE"
# status code
HTTP_OK = 200
|
# NOTE(review): this appears to be a pasted REPL transcript illustrating
# currying — ``addN`` presumably returns a function that adds N, and the
# bare ``add2`` / ``9`` lines are echoed REPL output. It is not runnable
# as a module because ``addN`` is undefined here.
add2 = addN(2)
add2
add2(7)
9
|
#!/usr/bin/env python
def times(x, y):
    """Return the product of the two operands."""
    product = x * y
    return product


z = times(2, 3)
print("2 times 3 is " + str(z))
|
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Actions for supporting exported symbols in link actions."""
load(
"@build_bazel_rules_apple//apple/internal:linking_support.bzl",
"linking_support",
)
def _exported_symbols_lists_impl(ctx):
    # Wrap the listed symbol files in the Obj-C provider that the Apple
    # linking support consumes when assembling link actions.
    return [
        linking_support.exported_symbols_list_objc_provider(ctx.files.lists),
    ]
# Rule exposing exported-symbols list files to Apple link actions.
exported_symbols_lists = rule(
    implementation = _exported_symbols_lists_impl,
    attrs = {
        "lists": attr.label_list(
            allow_empty = False,
            allow_files = True,
            mandatory = True,
            doc = "The list of files that contain exported symbols.",
        ),
    },
    # Needs both fragments so linking_support can read Apple/Obj-C config.
    fragments = ["apple", "objc"],
)
|
# Scrapy settings for chuansong project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = "chuansong"
SPIDER_MODULES = ['chuansong.spiders']
NEWSPIDER_MODULE = 'chuansong.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = [u'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0']
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'chuansong.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'chuansong.middlewares.MyCustomDownloaderMiddleware': 543,
#}
#DOWNLOADER_MIDDLEWARES = {
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware':None,
# 'chuansong.middlewares.useragent_middleware.RotateUserAgentMiddleware':400,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # Priority 0: this pipeline runs before any other item pipeline.
    'chuansong.pipelines.JsonWriterPipeline': 0,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# DEPTH_LIMIT = 2
# Item field names consumed/produced by the images pipeline.
IMAGES_URLS_FIELD = 'image_urls'
IMAGES_RESULT_FIELD = 'images'
IMAGES_STORE = "images"  # local directory for downloaded images
# Custom project flag, not a built-in Scrapy setting — presumably read by a
# project pipeline; verify against the pipelines module.
COUNT_DATA = True
# mongo pipeline settings
MONGODB_HOST = '127.0.0.1'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'scrapy'
MONGODB_DOCNAME = 'chuansong'
# elasticsearch pipeline settings
ELASTICSEARCH_SERVER = 'http://127.0.0.1'
ELASTICSEARCH_PORT = 9200
ELASTICSEARCH_INDEX = 'scrapy'
ELASTICSEARCH_TYPE = 'items'
ELASTICSEARCH_UNIQ_KEY = 'url'
|
# Memoization cache: n -> fib(n).
a = {}


def fib(n):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=1), memoized."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    if n in a:
        return a[n]
    ans = fib(n - 1) + fib(n - 2)
    # BUG FIX: the original wrote ``d[n] = ans`` where ``d`` was never
    # defined, raising NameError on the first cache store (any n >= 2).
    a[n] = ans
    return ans
# Interactive entry point: prompt for n and print fib(n).
print(fib(int(input('Enter n : '))))
# Programming I R
# File operations: writing
# To write to a file we must open it in write (w), append (a),
# create (x) or update (r+) mode.
# Be careful with write mode (w): if the file already exists, all of its
# content is erased the moment the file is opened.
# Way I: write() ****************************************************************
# write() stores a text string (in text mode, t) or a sequence of binary
# data (in binary mode, b) in the file.
with open("plik.txt", "w", encoding = "utf-8") as f:
    f.write("Pierwsza linia.\n")
    f.write("Druga ")
    f.write("linia.\n")
    f.write("Trzecia linia.")
# write() returns the number of characters or bytes written to the file.
# Note that write() does not append a newline character to the string
# (unlike, for example, the familiar print() function).
# When writing to a text file we must therefore place the newline
# characters ("\n") ourselves in the right spots.
# Way II: writelines() **********************************************************
# writelines() takes a list as its argument and writes its consecutive
# elements to the file, one after another. Despite its name, this function
# does not append a newline to each list element.
txt = ["Pierwsza linia.\n", "Druga linia.\n", "Trzecia linia."]
with open("plik.txt", "w", encoding = "utf-8") as f:
    f.writelines(txt)
# Way III: print() (mode t) *****************************************************
# The output of print() can be redirected so that it writes the given
# string to a file. Use the function's ``file`` argument for this.
with open("plik.txt", "w", encoding = "utf-8") as f:
    print("Pierwsza linia.", file = f)
    print("Druga linia.", file = f)
    print("Trzecia linia.", end = "", file = f)
# This is a placeholder version of this file; in an actual installation, it
# is generated from scratch by the installer. The constants defined here
# are exposed by constants.py
# All values are patched in at install time; None means "unconfigured".
DEFAULT_DATABASE_HOST = None # XXX
DEFAULT_PROXYCACHE_HOST = None # XXX
MASTER_MANAGER_HOST = None # XXX - used to define BUNDLES_SRC_HOST for worker_manager below
DEVPAYMENTS_API_KEY = None
|
"""
출처: https://www.acmicpc.net/problem/2565
"""
size = int(input())
nums = [list(map(int, input().split())) for _ in range(size)]
nums.sort(key=lambda x: x[0])
__to = [nums[i][1] for i in range(size)]
dp = [1] * size
for i in range(size):
for j in range(i):
if __to[i] > __to[j]:
dp[i] = max(dp[i], dp[j] + 1)
# b 전봇대에서 가장 긴 증가하는 부분수열을 빼주면 된다.
print(size - max(dp))
|
class Solution:
    def maxChunksToSorted(self, arr: [int]) -> int:
        """Return the max number of chunks that, each sorted independently
        and concatenated, yield the fully sorted array (LeetCode 768)."""
        # Monotonic stack of per-chunk maxima: when the current value is
        # below an earlier chunk's maximum, those chunks must merge.
        chunk_maxima = []
        for value in arr:
            if chunk_maxima and value < chunk_maxima[-1]:
                current_max = chunk_maxima.pop()
                while chunk_maxima and value < chunk_maxima[-1]:
                    chunk_maxima.pop()
                chunk_maxima.append(current_max)
            else:
                chunk_maxima.append(value)
        return len(chunk_maxima)
# Demo: prints 6 for this input.
s = Solution()
print(s.maxChunksToSorted([1,1,2,1,1,3,4,5,3,6]))
|
""" Asked by: Stripe.
Given an array of integers, find the first missing positive integer in linear time and constant space.
In other words, find the lowest positive integer that does not exist in the array.
The array can contain duplicates and negative numbers as well.
For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
You can modify the input array in-place.
"""
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class DescribeDevicePageVo(object):
    """Auto-generated value object: one device row of a paged device-list
    response in the JDCloud SDK. All fields are optional pass-throughs."""
    def __init__(self, uuid=None, instanceId=None, deviceId=None, displayName=None, deviceType=None, deviceState=None, omId=None, deviceFilePath=None, omName=None, createTime=None, userPin=None, parentUuid=None, parentName=None, lastConnectTime=None):
        """
        :param uuid: (Optional)
        :param instanceId: (Optional)
        :param deviceId: (Optional)
        :param displayName: (Optional)
        :param deviceType: (Optional)
        :param deviceState: (Optional)
        :param omId: (Optional)
        :param deviceFilePath: (Optional)
        :param omName: (Optional)
        :param createTime: (Optional)
        :param userPin: (Optional)
        :param parentUuid: (Optional)
        :param parentName: (Optional)
        :param lastConnectTime: (Optional)
        """
        # Straight assignment of every response field; no validation is done
        # here (consistent with the code generator's conventions).
        self.uuid = uuid
        self.instanceId = instanceId
        self.deviceId = deviceId
        self.displayName = displayName
        self.deviceType = deviceType
        self.deviceState = deviceState
        self.omId = omId
        self.deviceFilePath = deviceFilePath
        self.omName = omName
        self.createTime = createTime
        self.userPin = userPin
        self.parentUuid = parentUuid
        self.parentName = parentName
        self.lastConnectTime = lastConnectTime
|
def insertion_sort(arr):
    """Sort ``arr`` in place using insertion sort and return it.

    Stable, O(n^2) worst case; returns the same list object it was given.
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # BUG FIX: test ``j >= 0`` BEFORE indexing. The original evaluated
        # ``key < arr[j]`` first, so at j == -1 it read arr[-1] (the last
        # element) and only terminated by accident of Python's negative
        # indexing; the corrected order never performs that stray read.
        while j >= 0 and key < arr[j]:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr
def swap(i, j, arr):
    """Exchange the elements of ``arr`` at positions ``i`` and ``j`` in place."""
    tmp = arr[i]
    arr[i] = arr[j]
    arr[j] = tmp
if __name__ == '__main__':
    # Demo run: prints the header and the sorted list.
    print('### Insertion Sort ###')
    answer = insertion_sort([5, 2, 3, 1, 6])
    print(answer)
def water_depth_press(wd: "h_l", rho_water, g=9.81) -> "p_e":
p_e = rho_water * g * abs(wd)
return p_e
|
def aumentar(au=0, taxa=0, show=False):
    """Increase ``au`` by ``taxa`` percent; format as currency when ``show``."""
    au = au + ((au * taxa) / 100)
    # Idiom fix: test truthiness instead of ``== True``.
    return moeda(au) if show else au


def diminuir(di=0, taxa=0, show=False):
    """Decrease ``di`` by ``taxa`` percent; format as currency when ``show``."""
    di = di - ((di * taxa) / 100)
    return moeda(di) if show else di


def dobro(do=0, show=False):
    """Double ``do``; format as currency when ``show``."""
    do *= 2
    return moeda(do) if show else do


def metade(me=0, show=False):
    """Halve ``me``; format as currency when ``show``."""
    me /= 2
    return moeda(me) if show else me


def moeda(preco=0, moeda='R$'):
    """Format ``preco`` as Brazilian-style currency text, e.g. ``3,50R$``."""
    return f'{preco:.2f}{moeda}'.replace('.', ',')
|
"""
Sponge Knowledge Base
Test - KB from an archive file
"""
class Action2FromArchive(Action):
def onCall(self, arg):
return arg.lower()
|
class Card(object):
    """A 5x5 bingo card holding a grid of Field cells."""

    def __init__(self):
        # Build the 5x5 grid of (initially empty, unmarked) fields.
        self.fields = [[Field() for _ in range(5)] for _ in range(5)]


class Field(object):
    """A single bingo cell: a number and whether it has been called."""

    def __init__(self):
        self.number = -1     # -1 means "not filled in yet"
        self.marked = False


def check_for_bingo(card):
    """Return True when any full row or column of ``card`` is marked.

    Falls through without an explicit return (i.e. yields None) when
    there is no bingo, matching the original truthiness-based contract.
    """
    for idx in range(5):
        if all(card.fields[idx][j].marked for j in range(5)):
            return True
        if all(card.fields[i][idx].marked for i in range(5)):
            return True
# --- Input parsing and game simulation (bingo puzzle, part 1) ---
cards = []
calls = []
# NOTE(review): ``cards`` is initialized twice; the second assignment makes
# the first redundant.
cards = []
with open('input.txt', 'r+') as file:
    lines = file.readlines()
    # First line: comma-separated numbers drawn, in order.
    calls = list(map(lambda i: int(i), str.split(lines[0], ",")))
    c = 2
    while c < len(lines):
        card = Card()
        for i in range(0, 5):
            for j in range(0, 5):
                # Each board number occupies a fixed 3-character column.
                card.fields[i][j].number = int(lines[c+i][0+j*3:3+j*3])
        cards.append(card)
        c += 6
# Play the draws; mark matching cells on every card.
for call in calls:
    for card in cards:
        for row in card.fields:
            for field in row:
                if field.number == call:
                    field.marked = True
        if check_for_bingo(card):
            # Score = sum of unmarked numbers times the winning call.
            s = 0
            for row in card.fields:
                for field in row:
                    if not field.marked:
                        s += field.number
            print(s * call)
            # Stop at the first winning card.
            quit()
class Solution:
    def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:
        """Return the start index from which the full circuit is drivable,
        or -1 when total fuel cannot cover total cost (LeetCode 134)."""
        # Infeasible outright when the overall fuel balance is negative.
        if sum(gas) - sum(cost) < 0:
            return -1
        tank = start = total = 0
        for i, (fuel, spent) in enumerate(zip(gas, cost)):
            tank += fuel - spent
            if tank < 0:
                # Station i is unreachable from ``start``: any viable start
                # must lie after i, so restart there with an empty tank.
                start = i + 1
                total += tank
                tank = 0
        return start if tank + total >= 0 else -1
|
# -*- coding: utf-8 -*-
'''
Created on 1983. 08. 09.
@author: Hye-Churn Jang, CMBU Specialist in Korea, VMware [jangh@vmware.com]
'''
# Custom-resource definition consumed by the surrounding provisioning
# framework — presumably dispatched by resource ``name`` and ``sdk``;
# confirm against the framework's loader.
name = 'VirtualPrivateZone' # custom resource name
sdk = 'vra' # imported SDK at common directory
# Inputs needed per lifecycle operation; 'constant' marks a fixed,
# framework-supplied binding.
inputs = {
    'create': {
        'VraManager': 'constant'
    },
    'read': {
    },
    'update': {
        'VraManager': 'constant'
    },
    'delete': {
        'VraManager': 'constant'
    }
}
# JSON-schema-style description of the resource's configurable properties.
properties = {
    'name': {
        'type': 'string',
        'title': 'Name',
        'description': 'Name of virtual private zone'
    },
    'computes': {
        'type': 'array',
        'title': 'Computes',
        'items': {
            'type': 'string'
        },
        'description': 'Compute name list of placement hosts, clusters or resource pools'
    },
    'networks': {
        'type': 'array',
        'title': 'Networks',
        'items': {
            'type': 'string'
        },
        'description': 'Network id list'
    },
    'storage': {
        'type': 'string',
        'title': 'Storage',
        'description': 'Datastore name to deploy',
    },
    'folder': {
        'type': 'string',
        'title': 'Folder',
        'default': '',
        'description': 'Folder name to deploy',
    },
    'placementPolicy': {
        'type': 'string',
        'title': 'Placement Policy',
        'default': 'default',
        'enum': ['default', 'binpack', 'spread'],
        'description': 'Placement policy with "default", "binpack" or "spread"'
    },
    'loadBalancers': {
        'type': 'array',
        'title': 'Load Balancer',
        'default': [],
        'items': {
            'type': 'string'
        },
        'description': 'Load balancer id list'
    },
    'edgeCluster': {
        'type': 'string',
        'title': 'Edge Cluster',
        'default': '',
        'description': 'Edge cluster name to use deployment',
    },
    'storageType': {
        'type': 'string',
        'title': 'Storage Type',
        'default': 'thin',
        'enum': ['thin', 'thick', 'eagerZeroedThick'],
        'description': 'Storage type with "thin", "thick" or "eagerZeroedThick"'
    },
}
def to_dict_tools(target):
    """Export an object's whitelisted public attributes as a str-valued dict.

    Only attribute names listed in the object's ``public_info`` attribute
    (and not starting with an underscore) are included; every value is
    converted with str(). Objects without ``public_info`` yield {}.
    """
    data = dict()
    # BUG FIX: the original ran the filter loop even when ``public_info``
    # was absent, raising NameError on the unbound name; it also computed
    # dir(target) twice.
    if hasattr(target, 'public_info'):
        public_info = getattr(target, 'public_info')
        for item in dir(target):
            item = str(item)
            if not item.startswith('_') and item in public_info:
                data[item] = str(getattr(target, item))
    return data
|
class Person:
    """Instance-counting class: every construction bumps ``population``."""

    # Class-wide count of instances ever created.
    population: int = 0

    def __new__(cls):
        # Count BEFORE allocation so the debug line shows the new total.
        cls.population += 1
        print(f'DBG: new Person created, {cls.population=}')
        return super().__new__(cls)

    def __repr__(self) -> str:
        # The ``=`` specifiers render the expression text alongside the
        # value, so these exact expressions are part of the output format.
        return f'{self.__class__.__name__} {id(self)=} {self.population=}'
def main():
    """Create five Person instances and print the first and last reprs."""
    people = [
        Person()
        for _ in range(5)
    ]
    print(people[0], people[-1], sep='\n')
if __name__ == '__main__':
    main()
|
# Root directory of the detection evaluation data (images + COCO-style
# instance annotation files). Presumably an MMOCR dataset config — the
# ``pipeline`` slots are filled in by the consuming config.
root = 'data/eval_det'
train = dict(
    type='IcdarDataset',
    ann_file=f'{root}/instances_training.json',
    img_prefix=f'{root}/imgs',
    pipeline=None)
test = dict(
    type='IcdarDataset',
    img_prefix=f'{root}/imgs',
    ann_file=f'{root}/instances_test.json',
    pipeline=None,
    test_mode=True)
# Lists allow concatenating several datasets; each split has one here.
train_list = [train]
test_list = [test]
|
P = ("Rafael","Beto","Carlos")
for k in range(int(input())):
x, y = tuple(map(int,input().split()))
players = (9*x*x + y*y, 2*x*x + 25*y*y, -100*x + y*y*y)
winner = players.index(max(players))
print("{0} ganhou".format(P[winner]))
|
"""
Author: Huaze Shen
Date: 2019-07-09
"""
def remove_duplicates(nums):
    """Deduplicate a sorted list in place and return the unique count.

    The first k slots of ``nums`` end up holding the unique values in
    order; the tail is left unspecified (LeetCode 26 contract). None or
    an empty list yields 0.
    """
    if not nums:
        return 0
    write = 1
    for read in range(1, len(nums)):
        if nums[read] != nums[read - 1]:
            nums[write] = nums[read]
            write += 1
    return write
if __name__ == '__main__':
    # Demo: dedupe a sorted list and show only the meaningful prefix.
    nums_ = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    length_ = remove_duplicates(nums_)
    print(nums_[:length_])
|
# Otter-grader test specification (auto-generated) for question q3_1_2:
# checks that ``event_result`` is a numpy array of exactly five strings.
test = { 'name': 'q3_1_2',
    'points': 1,
    'suites': [ { 'cases': [ {'code': '>>> type(event_result) in set([np.ndarray])\nTrue', 'hidden': False, 'locked': False},
                   {'code': '>>> # Your list should have 5 elements.;\n>>> len(event_result) == 5\nTrue', 'hidden': False, 'locked': False},
                   { 'code': '>>> # Every element of your list should be a string.;\n>>> [type(i) in set([np.str_, str]) for i in event_result]\n[True, True, True, True, True]',
                     'hidden': False,
                     'locked': False}],
         'scored': True,
         'setup': '',
         'teardown': '',
         'type': 'doctest'}]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.