seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
71129782185 | from urllib.request import urlopen
import random
import datetime
from Initialize import sqlitewrite, sqliteread, settings, sqliteFetchAll, getmoderators
# Command dispatch table: maps a chat trigger (e.g. "!ping") to a handler spec.
# Entries are tuples of strings; some begin with a permission tag
# ("STREAMER" / "MOD") ahead of the handler path — presumably the dispatcher
# checks that tag before resolving the handler; TODO confirm against the
# dispatcher code (not visible in this file).
commands_BotCommands = {
    "!ping": ('bot.ping', 'cmdarguments', 'user'),
    "!uptime": ('bot.uptime', 'cmdarguments', 'user'),
    "!roll": ('bot.roll', 'cmdarguments', 'user'),
    "!r": ('bot.roll', 'cmdarguments', 'user'), # Alias
    "!reloaddb": ("STREAMER", 'dbCloner.manualCloneDb', 'None', 'None'),
    "!quote": ('quotes', 'cmdarguments', 'user'),
    "!addquote": ('quotes.addQuote', 'cmdarguments', 'user'),
    "!removequote": ("MOD", 'quotes.rmQuote', 'cmdarguments', 'user'),
    "!deletequote": ("MOD", 'quotes.rmQuote', 'cmdarguments', 'user'), # Alias
    "!test": ('getCurrentGame', 'cmdarguments', 'user'),
}
def is_number(s):
    """Return True if *s* parses as a base-10 integer, else False."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def todaysDate():
    """Return the current local date formatted as MM/DD/YY."""
    return datetime.datetime.now().strftime("%m/%d/%y")
class BotCommands:
    """General-purpose chat commands: liveness check, stream uptime, dice rolls."""

    def __init__(self):
        pass

    def ping(self, arg, user):
        """Liveness check; always answers Pong."""
        return "Pong"

    def uptime(self, arg, user):
        """Ask decapi.me how long the configured channel has been live."""
        # Context manager closes the HTTP response (the old code leaked it).
        with urlopen("https://beta.decapi.me/twitch/uptime/" + settings['CHANNEL']) as f:
            file = f.read().decode("utf-8")
        if "offline" in file:
            return file + "."
        else:
            return "The stream has been live for: " + file

    def roll(self, arg, user):
        """Roll dice given '<amt>d<size>[<op><mod>]', e.g. '2d6+3'.

        amt defaults to 1 when omitted (e.g. 'd20'); op is one of + - / *.
        """
        arg = arg.replace("\r", "")
        splitCmd = arg.split("d")
        try:
            amt = int(splitCmd[0])
        except ValueError:
            amt = 1  # "d20"-style rolls default to a single die
        sizePart = splitCmd[1]
        op = ''
        mod = 0
        for item in ("+", "-", "/", "*"):
            if item in sizePart:
                op = item
                sizePart, modPart = sizePart.split(item, 1)
                mod = int(modPart)
                break
        size = int(sizePart)
        rolls = [random.randint(1, size) for _ in range(amt)]
        total = sum(rolls)
        # Apply the modifier with explicit arithmetic. The previous code ran
        # eval() on strings assembled from raw chat input -- a code-injection
        # risk -- and also crashed when mod was an int (str + int concat).
        if op == '+':
            total += mod
        elif op == '-':
            total -= mod
        elif op == '/':
            total /= mod  # eval used true division; keep the float result
        elif op == '*':
            total *= mod
        if (len(rolls) == 1) or (len(rolls) > 20):
            return "You rolled: >[ " + str(total) + " ]<"
        return "You rolled: " + str(rolls) + " with a total of: >[ " + str(total) + " ]<"
class QuoteControl:
    """Stores and serves chat quotes backed by the sqlite helpers in Initialize."""

    def __init__(self):
        self.usedQuotes = []  # ids already shown; avoids repeats until exhausted

    def __call__(self, arg, user):
        """Dispatch '!quote' subcommands: bare -> random, number -> by id, add/remove/delete."""
        if not arg.strip():
            return self.displayQuote()
        firstArg = arg.split()[0].lower()
        arg = (arg.replace(arg.split()[0], '').strip())
        if is_number(firstArg):
            return self.displayQuoteById(firstArg)
        elif firstArg == "add":
            return self.addQuote(arg, user)
        elif firstArg in ("remove", "delete"):
            # Bug fix: the old check `not (user in mods) or (user == "Hotkey")`
            # denied "Hotkey" (and only passed mods by accident of precedence);
            # the intent is that moderators OR "Hotkey" may delete.
            if not ((user in getmoderators()) or (user == "Hotkey")):
                return user + " >> You need to be a moderator to delete a quote."
            return self.rmQuote(arg, user)

    def displayQuote(self):
        """Return a random quote not shown recently; reset the seen list when exhausted."""
        if not self.usedQuotes:  # Don't filter if theres nothing to filter
            data = sqliteFetchAll('''SELECT * FROM quotes ORDER BY RANDOM()''')
        else:
            # Build an "id NOT IN (...)" filter from the already-used ids.
            strUsedQuotes = ", ".join('"%s"' % item for item in self.usedQuotes)
            data = sqliteFetchAll('''SELECT * FROM quotes WHERE id NOT IN (%s) ORDER BY RANDOM()''' % strUsedQuotes)
        if not data:  # Every quote was shown once; reset and grab a random one
            self.usedQuotes = []
            data = sqliteFetchAll('''SELECT * FROM quotes ORDER BY RANDOM()''')
        if not data:  # No quotes in db at all
            return "There are currently no quotes. Add one with !quote add"
        quote = data[0]  # Row layout: (id, quote, date)
        self.usedQuotes.append(quote[0])
        if "''" in quote[1]:
            return '%s (%s)' % (quote[1], quote[2])
        else:
            return '"%s" (%s)' % (quote[1], quote[2])

    def displayQuoteById(self, id):
        """Return the quote with the given id, or an error message."""
        # id comes from __call__ only after an is_number() check.
        data = sqliteread("SELECT * FROM quotes WHERE id=%s" % id)
        if not data:
            return "No quote exists with that ID."
        if "''" in data[1]:
            return '%s (%s)' % (data[1], data[2])
        else:
            return '"%s" (%s)' % (data[1], data[2])

    def addQuote(self, arg, user):
        """Insert a new quote and report its id."""
        if not arg or (arg == " "):
            return "You need to specify something to be quoted."
        arg = arg.strip()
        if arg[0] in ["'", '"'] and arg[-1] in ["'", '"']:
            arg = arg.strip("'")
            arg = arg.strip('"')
        arg = arg.replace('"', "''")  # Replace double quotes with two single quotes
        # NOTE(security): arg is chat input interpolated into SQL; the sqlite
        # helpers only accept raw SQL strings, so proper parameterization would
        # require changing Initialize -- flagged here for follow-up.
        if sqlitewrite('''INSERT INTO quotes(quote, date) VALUES("%s", "%s");''' % (arg, todaysDate())):
            newId = str(sqliteread('SELECT id FROM quotes ORDER BY id DESC LIMIT 1')[0])
            return "Quote successfully added [ID: %s]" % newId
        else:
            print(user + " >> Your quote was not successfully added. Please try again.")

    def rmQuote(self, arg, user):
        """Delete the quote with the given id if it exists."""
        if not arg or (arg == " "):
            return "You need to specify a quote ID to remove."
        arg = arg.strip()
        # NOTE(security): arg is chat input interpolated into SQL (see addQuote).
        idExists = sqliteread('''SELECT id FROM quotes WHERE id = "%s";''' % arg)
        if idExists:
            sqlitewrite('''DELETE FROM quotes WHERE id = "%s";''' % arg)
            return "Quote %s successfully removed." % arg
        else:
            return "Quote %s does not exist." % arg
quotes = QuoteControl()
bot = BotCommands() | gcfrxbots/rxbot | RxBot/Bot.py | Bot.py | py | 6,155 | python | en | code | 6 | github-code | 36 |
24591814026 | s = "hgfygtfytfuybvj iughiuhfinbk jbnio"
target = "u"
indexes = []
for i, symbol in enumerate(s):
if symbol == target:
indexes.append(i)
# print(i)
# break
# else:
# print("symbol was not found")
if indexes:
print(indexes)
else:
print("symbol was not found")
count = {}
for i in s:
if i not in count:
count[i] = 1
else:
count[i] += 1
print(count)
l = [3,6,4,2,4,67,98,65,3,34,67,4]
m = l[0]
for i in l:
if i > m:
m = i
print(m) | MikitaTsiarentsyeu/Md-PT1-69-23 | Lessons/lesson 16.07/practice.py | practice.py | py | 511 | python | en | code | 0 | github-code | 36 |
12244843214 | # BINARY SEARCH
def search(lst, p):
    """Binary-search sorted *lst* for *p*.

    Returns (True, 1-based position) when found, otherwise (False, 'none').
    """
    lo, hi = 0, len(lst) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if lst[mid] == p:
            return True, mid + 1
        if lst[mid] < p:
            lo = mid + 1
        else:
            hi = mid - 1
    return False, 'none'
lst = [int(x) for x in input('enter list\n').split()] # [2,4,1,3,21,32]
lst.sort()
print(lst) # [1,2,3,4,21,32]
p = int(input('enter number to search\n'))
a,b = search(lst,p)
if a: print('found at location',b)
else: print('not found')
| NighatRaza/Data-Structures-Using-Python | binarysearch.py | binarysearch.py | py | 572 | python | en | code | 0 | github-code | 36 |
#Number 1
def fizzbuzz(n):
    """Print 1..n, replacing multiples of 3 with 'fizz', of 5 with 'buzz',
    and of both with 'fizzbuzz'.

    Bug fix: the combined test must run FIRST -- checking % 3 before
    (% 3 and % 5) made the 'fizzbuzz' branch unreachable.
    """
    number = 1
    while number <= n:
        if number % 3 == 0 and number % 5 == 0:
            print ('fizzbuzz')
        elif number % 3 == 0:
            print ('fizz')
        elif number % 5 == 0:
            print ('buzz')
        else:
            print (number)
        number = number + 1
#Number 2
def pal(n):
m = n
while m:
n, m = n * 10 + m % 10, m // 10
return n
#Number 3
def make_zipper(f1, f2, sequence):
zipper = lambda x: x
helper = lambda f, g: lambda x: f(g(x))
while sequence > 0:
if sequence % 10 == 1:
zipper = helper(f1, zipper)
else:
zipper = helper(f2, zipper)
sequence = sequence // 10
return zipper
#Number 4
def sum_digits(x):
sum = 0
while x > 0:
sum = sum + x % 10
x = x // 10
return sum
#Number 4
from operator import mul
def falling(n, k):
total, stop = 1, n - k
while n > stop:
total, n = total * n, n - 1
return total
#Number 5
def double_eights(n):
prev_eight = False
while n > 0:
last_digit = n % 10
if last_digit == 8 and prev_eight:
return True
elif last_digit == 8:
prev_eight = True
else:
prev_eight = False
n = n // 10
return False
#Number 6
def wears_jacket_with_if(temp, raining):
if temp <= 60 or raining == True:
return True
else:
return False
#Number 7
def wears_jacket(temp, raining):
return temp <= 60 or raining == True
#Number 8
def is_prime(n):
def prime_helper(k):
if k == n:
return True
elif n % k == 0 or n == 1:
return False
else:
return prime_helper(k + 1)
return prime_helper (2)
#Number 9
from operator import add, sub
def a_plus_abs_b(a, b):
if b >= 0:
h = add
else:
h = sub
return h(a, b)
#Number 10
def two_of_three(x, y, z):
return min(x*x + y*y, x*x + z*z, y*y + z*z)
#Number 11
def largest_factor(x):
i = x - 1
while i >0:
if x%i == 0:
return i
i -= 1
#Number 12
def with_if_statement():
if c():
return t()
else:
return f()
def with_if_function():
return if_function(c(), t(), f())
def c():
return False
def t():
print(5)
def f():
print(6)
#Number 13
def hailstone(n):
while n > 1:
yield n
if n % 2 == 0:
n = n // 2
else:
n = 3 * n + 1
yield n
#Number 14
from operator import add, mul, sub
square = lambda x: x * x
identity = lambda x: x
def summation(n, f):
i = 1
sum = 0
while i <= n:
sum = sum + f(i)
i += 1
return sum
def product(n, f):
i = 1
product = 1
while i <= n:
product = product * f(i)
i += 1
return product
#Number 15
def accumulate(combiner, base, n, f):
result = base
i = 1
while i <= n:
result = combiner(result, f(i))
i += 1
return result
#Number 16
def summation_using_accumulate(n, f):
return accumulate(add, 0, n, f)
def product_using_accumulate(n, f):
return accumulate(mul, 1, n, f)
#Number 17
def compose1(h, g):
def f(x):
return h(g(x))
return f
def make_repeater(h, n):
def helper(x):
i = 0
while i < n:
x = h(x)
i += 1
return x
return helper
#Number 18
from operator import add, mul, mod
def lambda_curry2(func):
return lambda x: lambda y: func(x, y)
#Number 19
def count_cond(condition):
def helper(x):
k = 1
count = 0
while k <= x:
if condition(x, k):
count += 1
k += 1
return count
return helper
#Number 20
def both_paths(sofar= 'S'):
print (sofar)
def left():
return both_paths(sofar + "L")
def right():
return both_paths(sofar + "R")
return left, right
#Number 21
def compose(f, g):
return lambda x: f(g(x))
def composite_identity(f, g):
def helper(x):
return compose(f, g)(x) == compose(g, f)(x)
return helper
class Tree:
def __init__(self, label, branches=[]):
self.label = label
for branch in branches:
assert isinstance(branch, Tree)
self.branches = list(branches)
def is_leaf(self):
return not self.branches
class Link:
empty = ()
def __init__(self, first, rest=empty):
assert rest is Link.empty or isinstance(rest, Link)
self.first = first
self.rest = rest
def __repr__(self):
if self.rest:
rest = ', ' + repr(self.rest)
else:
rest =''
return 'Link(' +repr(self.first)+rest+')'
def __str__(self):
string = '<'
while self.rest is not Link.empty:
string += str(self.first) + ''
self = self.rest
return string + str(self.first) + '>'
#Number 22
def cycle(f1, f2, f3):
def helper1(n):
def helper2(x):
i = 0
while i < n:
if i % 3 == 0:
x = f1(x)
elif i % 3 == 1:
x = f2(x)
else:
x = f3(x)
i += 1
return x
return helper2
return helper1
#Number 23
def keep_ints(cond, n):
i = 1
while i <= n:
if cond(i):
print (i)
i += 1
#Number 24
def make_keeper(n):
def helper(cond):
i = 1
while i <= n:
if cond(i):
print (i)
i += 1
return helper
#Number 25
def print_delayed(x):
def delay_print(y):
print (x)
return print_delayed(y)
return delay_print
#Number 26
def print_n(n):
def inner_print(x):
if n <= 0:
print("done")
else:
print(x)
return print_n(n - 1)
return inner_print
#Number 27
def count_digits(n):
count = 0
while n > 0:
count += 1
n = n // 10
return count
#Number 28
def count_matches(n, m):
count = 0
while n > 0 or m > 0:
if n % 10 == m % 10:
count += 1
n, m = n // 10, m // 10
return count
#Number 29
def make_skipper(n):
def helper(x):
for i in range(x + 1):
if i % n != 0 :
print (i)
return helper
#Number 30
def ordered_digits(x):
    """Return True if the digits of positive integer x are nondecreasing
    left to right (e.g. 127 -> True, 21 -> False).

    Bug fix: the original compared the current digit against itself (always
    False), returned on the first iteration, and had unreachable code after
    `return False`.
    """
    last = x % 10  # rightmost digit seen so far; digits to its left must be <= it
    x //= 10
    while x > 0:
        d = x % 10
        if d > last:
            return False
        last = d
        x //= 10
    return True
#Number 31
def is_palindrome(n):
x, y = n, 0
f = lambda: y * 10 + x % 10
while x > 0:
x, y = x // 10, f()
return y == n
#Number 32
def same_digits(a, b):
assert a > 0 and b > 0
while a and b:
if a % 10 == b % 10:
end = a % 10
while a % 10 == end:
a = a // 10
while b % 10 == end:
b = b //10
else:
return False
return a == b
#Number 33
def search(f, x):
while not f(x):
x += 1
return x
def no_repeats(a):
return search(lambda b: same_digits(a, b), 1)
#Number 34
def unique_largest(n):
assert n > 0
top = 0
while n:
n, d = n // 10, n % 10
if d > top:
top, unique = d, True
elif d == top:
unique = False
return unique
#Number 35
def transitive(p):
    """Return True if binary predicate p is transitive over digits 0-9:
    p(a,b) and p(b,c) imply p(a,c) for all digit triples.

    Bug fixes: the loop bound was 100 (so `a = abc // 100` was always 0 and
    most triples were never tested), and the violation test checked
    `not p(b,c)` instead of `not p(a,c)`.
    """
    abc = 0
    while abc < 1000:  # enumerate every (a, b, c) digit triple
        a, b, c = abc // 100, (abc // 10) % 10, abc % 10
        if p(a, b) and p(b, c) and not p(a, c):
            return False
        abc = abc + 1
    return True
#Number 36
def compose(n):
assert n >0
if n == 1:
return lambda f: f
def call(f):
def on(g):
return compose(n-1)(lambda x: f(g(x)))
return on
return call
#Number 37
def num_sevens(x):
if x % 10 == 7:
return 1 + num_sevens(x // 10)
elif x < 10:
return 0
else:
return num_sevens(x // 10)
#Number 38
def pingpong(n):
def helper(result, i, step):
if i == n:
return result
elif i % 7 == 0 or num_sevens(i) > 0:
return helper(result - step, i + 1, -step)
else:
return helper(result + step, i + 1, step)
return helper(1, 1, 1)
#Number 39
def count_change(total):
def constrained_count(total, smallest_coin):
if total == 0:
return 1
if smallest_coin > total:
return 0
without_coin = constrained_count(total, smallest_coin * 2)
with_coin = constrained_count(total - smallest_coin, smallest_coin)
return without_coin + with_coin
return constrained_count(total, 1)
#Number 40
def missing_digits(n):
last = n % 10
rest = n // 10
if n < 10:
return n
elif last == rest % 10:
return missing_digits(rest)
elif last != ((rest) % 10 + 1):
return last - (rest) % 10 -1 + missing_digits(rest)
return missing_digits(rest)
#Number 41
def multiply(m, n):
if n == 1:
return m
else:
return m + multiply(m, n-1)
#Number 42
def merge(n1, n2):
n1_last, n2_last = n1 % 10, n2 % 10
if n1 == 0:
return n2
elif n2 == 0:
return n1
elif n1_last < n2_last:
return merge(n1 // 10, n2) * 10 + n1 % 10
else:
return merge(n1, n2 // 10) * 10 + n2 % 10
#Number 43
def make_func_repeater(f, x):
def repeat(k):
if k == 0:
return x
else:
return f(repeat(k-1))
return repeat
#Number 44
def skip_add(n):
if n <= 0:
return 0
return n + skip_add(n - 2)
#Number 45
def summation(n, term):
assert n >= 1
if n == 1:
return term(n)
return term(n) + summation(n - 1, term)
#Number 46
def gcd(a, b):
a, b = max(a, b), min(a, b)
if a % b == 0:
return b
else:
return gcd(b, a % b)
#Number 47
def paths(m, n):
if m == 1 or n == 1:
return 1
else:
return paths(m - 1, n) + paths(m, n - 1)
#Number 48
def max_subseq(n, l):
if n == 0 or l == 0:
return 0
with_last = max_subseq(n // 10, l - 1) * 10 + n % 10
without_last = max_subseq(n // 10, l)
return max(with_last, without_last)
#Number 49
def can_win(number):
if number <= 0:
return False
action = 1
while action <= 3:
new = number - action
if not can_win(new):
return True
action += 1
return False
#Number 50
def pal(n):
m = n
while m:
n, m = n * 10 + m % 10, m // 10
return n
#nUmber 51
def contains(a, b):
if a == b:
return True
if a > b:
return False
if a % 10 == b % 10:
return contains(a // 10, b // 10)
else:
return contains(a, b // 10)
#Number 52
def clear_negatives(lst):
if not lst:
return []
elif lst[0] < 0:
return clear_negatives(lst[-lst[0]:])
else:
return [lst[0]] + clear_negatives(lst[1:])
#Number 53
def count_stair_ways(n):
if n == 0:
return 1
elif n == 1:
return 1
else:
return count_stair_ways(n - 1) + count_stair_ways(n - 2)
#Number 54
def count_k(n, k):
if n == 0:
return 1
elif n < 0:
return 0
else:
total = 0
i = 1
while i <= k:
total += count_k(n - i, k)
i += 1
return total
#Number 55
def even_weighted(s):
return [i * s[i] for i in range(len(s)) if i % 2 == 0]
#Number 56
def max_product(s):
if s == []:
return 1
else:
return max(max_product(s[1:]), s[0] * max_product(s[2:]))
#Number 57
def check_hole_number(n):
if n // 10 == 0:
return True
return ((n // 10) % 10) < (n % 10) and ((n // 10) % 10) < ((n // 100) % 10) and check_hole_number(n // 100)
#Number 58
def check_mountain_number(n):
def helper(x, is_increasing):
if x // 10 == 0:
return True
if is_increasing and (x % 10) < ((x // 10) % 10):
return helper(x // 10, is_increasing)
return (x % 10) > ((x // 10) % 10) and helper(x // 10, False)
return helper(n, True)
#Number 59
def map_mut(f, L):
for i in range(len(L)):
L[i] = f(L[i])
#Number 60
#Number 60
def merge_list(s1, s2):
    """Merge two sorted lists into one sorted list (recursive merge step).

    Bug fix: the recursive calls referenced an undefined `merge` and passed a
    single argument built by adding an element (scalar) to a list; the element
    must be wrapped in a list and prepended to the recursive merge of the rest.
    """
    if s2 == []:
        return s1
    elif s1 == []:
        return s2
    elif s1[0] < s2[0]:
        return [s1[0]] + merge_list(s1[1:], s2)
    else:
        return [s2[0]] + merge_list(s1, s2[1:])
#Number 61
#Number 61
def mario_number(level):
    """Count the ways Mario can traverse *level* from tile 0 to the last tile,
    stepping 1 or 2 tiles at a time; only 'P' tiles are landable.

    Bug fix: the original recursed when the tile was out of range or a pit
    (infinite recursion past the end) and had no 0 base case; unplayable or
    out-of-range positions must contribute 0 ways.
    """
    def ways(n):
        if n == len(level) - 1:
            return 1  # reached the final tile
        if n >= len(level) or level[n] != 'P':
            return 0  # overshot the level or landed on a non-'P' tile
        return ways(n + 1) + ways(n + 2)
    return ways(0)
#Number 62
def couple(lst1, lst2):
assert len(lst1) == len(lst2)
return [[lst1[i], lst2[i]] for i in range(0, len(lst1))]
#Number 63
def add_this_many(x, el, lst):
i = 0
for element in lst:
if element == x:
i += 1
while i > 0:
lst.append(el)
i -= 1
#Number 64
def group_by(s, fn):
grouped = {}
for e in s:
key = fn(e)
if key in grouped:
grouped[key].append(e)
else:
grouped[key] = [e]
return grouped
#Number 65
def partition_options(total, biggest):
if total == 0:
return [[]]
elif total < 0 or biggest == 0:
return []
else:
with_biggest = partition_options(total - biggest, biggest)
without_biggest = partition_options(total, biggest - 1)
with_biggest = [[biggest] + elem for elem in with_biggest]
return with_biggest + without_biggest
def minimum(s, key):
if not s:
return None
m = s[0]
for v in s[1:]:
if key(v) < key(m):
m = v
return m
def tightest(bounds, t):
return minimum([b for b in bounds if t > b(0) and t < b(1)], lambda b: [abs(t - x) for x in b][0])
#Number 66
def make_adder_inc(n):
def make_helper(x):
nonlocal n
value = n + x
n += 1
return value
return make_helper
#Number 67
def make_fib():
x = 0
y = 1
def fib_helper():
nonlocal x, y
next_value = x
x, y = y, x + y
return next_value
return fib_helper
#Number 68
def make_sassy_function(f, msg):
sassy = True
def helper(x):
nonlocal sassy
sassy = not sassy
if sassy:
return msg
return f(x)
return helper
#Number 69
def sentence_buffer():
sentence = ''
def sentence_helper(x):
nonlocal sentence
sentence += x + ' '
if x[-1] == '.':
result, sentence = sentence, ''
return result.strip()
return sentence_helper
#Number 70
def scale(it, multiplier):
for i in it:
i = i * multiplier
yield i
#Number 71
def memory(n):
def mem(f):
nonlocal n
n = f(n)
return n
return mem
#Number 72
def nonlocalist():
get = lambda x: "Index out of range!"
def prepend(value):
nonlocal get
f = get
def get(i):
if i == 0:
return value
return f(i -1)
return prepend, lambda x: get(x)
#Number 73
def merge(a, b):
first_a, first_b = next(a), next(b)
while True:
if first_a == first_b:
yield first_a
first_a, first_b = next(a), next(b)
elif first_a < first_b:
yield first_a
first_a = next(a)
else:
yield first_b
first_b = next(b)
def sequence(start, step):
while True:
yield start
start += step
#Number 74
#Number 74
def generate_subsets():
    """Yield the list of all subsets of {1..n} for n = 0, 1, 2, ...

    Bug fix: the original referenced an undefined name `subsets` (the local
    was called `subset`), raising NameError on the second iteration.
    """
    subsets = [[]]
    n = 1
    while True:
        yield subsets
        # Every existing subset, plus each existing subset extended with n.
        subsets = subsets + [s + [n] for s in subsets]
        n += 1
#Number 75
def sum_paths_gen(t):
if is_leaf(t):
yield label(t)
for b in branches(t):
for s in sum_paths_gen(b):
yield s + label(t)
#Number76
def collect_words(t):
if is_leaf(t):
return [label(t)]
words = []
for b in branches(t):
words += [label(t) + word for word in collect_words(b)]
return words
#Number 77
def is_min_heap(t):
for b in branches(t):
if label(t) > label(b) or not is_min_heap(b):
return False
return True
#Number 78
def largest_product_path(tree):
if tree == None:
return 0
elif is_leaf(tree):
return label(tree)
else:
for t in branches(tree):
paths = [largest_product_path(t)]
return label(tree) * max(paths)
#Number 79
def max_tree(t):
if is_leaf(t):
return tree(root(t))
else:
new_branches = [max_tree(b) for b in branches(t)]
new_label = max([root(t)] + [root(s) for s in new_branches])
return tree(new_label, new_branches)
#number 80
def level_order(tree):
if not tree:
return []
curr, next = [label(tree)], [tree]
while next:
find_next = []
for b in next:
find_next.extend(branches(b))
next = find_next
curr.extend([label(t) for t in next])
return curr
#Number 81
#Number 81
def all_paths(t):
    """Return a list of all root-to-leaf label paths in tree t.

    Bug fix: `paths = paths.append(...)` rebinds paths to None (list.append
    returns None); append must be called for its side effect only.
    """
    if is_leaf(t):
        return [[label(t)]]
    else:
        paths = []
        for b in branches(t):
            for path in all_paths(b):
                paths.append([label(t)] + path)
        return paths
#Number 82
def make_max_finder():
finder = 0
def function(list):
nonlocal finder
if max(list) > finder:
finder = max(list)
return finder
return function
#Number 83
def generate_constant(x):
while True:
yield x
#Number 84
def black_hole(seq, trap):
for i in seq:
if i == trap:
yield from generate_constant(trap)
yield i
#Number 84
def gen_inf(lst):
while True:
for i in lst:
yield i
#Number 85
def naturals():
i = 1
while True:
yield i
i += 1
def filter(iterable, fn):
for i in iterable:
if fn(i):
yield i
#Number 86
def tree_sequence(t):
yield label(t)
for b in branches(t):
for value in tree_sequence(b):
yield value
#Number 87
def make_digit_getter(n):
total = 0
def get_next():
nonlocal n, total
if n == 0:
return total
i = n % 10
n //= 10
total += i
return i
return get_next
#number 88
class VendingMachine:
    """A vending machine selling one product, tracking balance and stock."""

    def __init__(self, name, balance, price):
        self.name = name
        # NOTE: balance starts at 0 regardless of the constructor argument;
        # the parameter is kept for interface compatibility.
        self.balance = 0
        self.price = price
        self.stock = 0

    def add_funds(self, amount):
        """Accept money; refuse (and return it) when the machine is empty."""
        if self.stock == 0:
            return 'Machine is out of stock. Here is your ${0}'.format(amount)
        else:
            self.balance += amount
            return 'Current balance: {0}'.format(self.balance)

    def restock(self, amount):
        """Add stock and report the new level."""
        self.stock += amount
        return 'Current {0} stock: {1}'.format(self.name, self.stock)

    def vend(self, amount):
        """Dispense one item if funds suffice, returning change in the message."""
        if self.stock == 0:
            return 'Machine is out of stock. '
        difference = self.price - self.balance
        if difference > 0:
            return 'You must add ${0} more funds. '.format(difference)
        # Bug fix: the .format call was embedded inside the string literal,
        # so the message printed the placeholder text instead of the name.
        message = 'Here is your {0}'.format(self.name)
        if difference != 0:
            message += ' and ${0} change'.format(-difference)
        self.balance = 0
        self.stock -= 1
        return message + '.'
#Number 89
def preorder(t):
if is_leaf(t):
return [label(t)]
tree = []
for b in branches(t):
tree += preorder(b)
return [label(t)] + tree
#Number 90
def store_digits(n):
result = Link.empty
while n > 0:
result = Link(n % 10, result)
n //= 10
return result
#Number 91
def tops(t):
if t.is_leaf():
yield t.label
else:
for b in t.branches:
if t.label < b.label:
yield from tops(b)
#Number 92
def generate(t, value):
if t.label == value:
yield [value]
for b in branches(t):
for val in generate(b, value):
yield [t.label] + val
#Number 93
class Mint:
current_year = 2020
def __init__(self):
self.update()
def create(self, kind):
return kind(self.year)
def update(self):
self.year = Mint.current_year
class Coin:
def __init__(self, year):
self.year = year
def worth(self):
return self.cents + max(0, Mint.current_year - self.year - 50)
class Nickel(Coin):
cents = 5
class Dime(Coin):
cents = 10
#Number 94
def remove_all(link, value):
if link is Link.empty or link.rest is Link.empty:
return
if link.rest.first == value:
link.rest = link.rest.rest
remove_all(link, value)
else:
remove_all(link.rest, value)
#Number 95
def deep_map(f, link):
if link is Link.empty:
return link
if isinstance(link.first, Link):
first = deep_map(f, link.first)
else:
first = f(link.first)
return Link(first, deep_map(f, link.rest))
#Number 96
class Card(object):
cardtype = 'Staff'
def __init__(self, name, attack, defense):
self.name = name
self.attack = attack
self.defense = defense
def power(self, other_card):
return self.attack - self.defense / 2
class Player(object):
def __init__(self, deck, name):
self.deck = deck
self.name = name
self.hand = [deck.draw() for _ in range(5)]
def draw(self):
assert not self.deck.is_empty()
self.hand.append(self.deck.draw())
def play(self, card_index):
return self.hand.pop(card_index)
#Number 97
def link_to_list(link):
new_list = []
while link is not Link.empty:
new_list.append(link.first)
link = link.rest
return new_list
def link_to_list(link):
if link is Link.empty:
return []
return [link.first] + link_to_list(link.rest)
#Number 98
def cummulative_mul(t):
for b in branches(t):
cummulative_mul(b)
total = t.label
for b in branches(t):
total *= b.label
t.label = total
#Number 99
class Email:
def __init__(self, msg, sender_name, recipient_name):
self.msg = msg
self.sender_name = sender_name
self.recipient_name = recipient_name
class Server:
def __init__(self):
self.clients = {}
def send(self, email):
client = self.clients(email.recipient_name)
client.receive(email)
def register_client(self, client, client_name):
self.clients[client_name] = client
class Client:
def __init__(self, server, name):
self.inbox = []
self.server = server
self.name = name
self.server.register_client(self, self.name)
def compose(self, msg, recipient_name):
email = Email(msg, self.name, recipient_name)
self.server.send(email)
def receive(self, email):
self.inbox.append(email)
#Number 100
def sum_nums(lnk):
if lnk is Link.empty:
return 0
return lnk.first + sum_nums(lnk.rest)
#number 101
def multiply_lnks(lst_of_lnks):
    """Return a linked list whose kth element is the product of the kth
    elements of every list in lst_of_lnks; stop at the shortest list.

    Bug fix: the accumulator was initialized as `multiply` but used as
    `product`, raising NameError on the first multiplication.
    """
    product = 1
    for lnk in lst_of_lnks:
        if lnk is Link.empty:
            return Link.empty  # shortest list exhausted; end the result here
        product *= lnk.first
    lst_of_lnks_rest = [lnk.rest for lnk in lst_of_lnks]
    return Link(product, multiply_lnks(lst_of_lnks_rest))
#Number 102
def filter_link(link, f):
while link is not Link.empty:
if f(link.first):
yield link.first
link = link.rest
#Number 103
def filter_no_iter(link, f):
if link is Link.empty:
return
elif f(link.first):
yield link.first
yield from filter_no_iter(link.rest, f)
#Number 104
class User:
def __init__(self, identifier):
self.identifier = identifier
def attend(self, meeting):
if self.identifier in [meeting.waiting, meeting.enlisted]:
print(self.identifier, 'is already attending')
else:
users = meeting.waiting
if self.identifier == meeting.host:
users = meetings.enlisted.append(self.identifier)
users.append(self.identifier)
def __repr__(self):
return 'User(' + repr(self.identifier) +')'
class Meeting:
def __init__(self, host):
self.waiting = []
self.enlisted = []
self.host = host
def admit(self, f):
for x in self.waiting:
if f(x):
self.enlisted.append(x)
self.waiting = self.waiting.remove(self.identifier)
#Number 105
def mystery(t):
def e(r, y):
assert type(r.label) == int
myst = [e(b, max(y, r.label)) for b in r.branches]
if r.label > y:
myst.append(r.label)
return sum(myst)
return e(t, 0)
#Number 106
def multiples(k, n):
if k < n:
for jay in multiples(k, n - k):
yield n
yield k
| aannhvo/CS61A | study.py | study.py | py | 24,997 | python | en | code | 0 | github-code | 36 |
24569667329 | import tensorflow as tf
import cv2
import time
import argparse
import posenet
from joblib import dump, load
import pandas as pd
column_names = ['Eye_L_x', 'Eye_L_y', 'Eye_R_x', 'Eye_R_y', 'Hip_L_x', 'Hip_L_y',
'Knee_L_x', 'Knee_L_y', 'Ankle_L_x', 'Ankle_L_y', 'Toes_L_x',
'Toes_L_y', 'ToesEnd_L_x', 'ToesEnd_L_y', 'Shoulder_L_x',
'Shoulder_L_y', 'Elbow_L_x', 'Elbow_L_y', 'Wrist_L_x', 'Wrist_L_y',
'Hip_R_x', 'Hip_R_y', 'Knee_R_x', 'Knee_R_y', 'Ankle_R_x', 'Ankle_R_y',
'Shoulder_R_x', 'Shoulder_R_y', 'Elbow_R_x', 'Elbow_R_y', 'Wrist_R_x',
'Wrist_R_y']
UNITY_PART_MAP = {
# 'nose' : '',
'leftEye' : 'Eye_L',
'rightEye' : 'Eye_R',
# 'leftEar' : '',
# 'rightEar' : '',
'leftShoulder' : 'Shoulder_L',
'rightShoulder' : 'Shoulder_R',
'leftElbow' : 'Elbow_L',
'rightElbow' : 'Elbow_R',
'leftWrist' : 'Wrist_L',
'rightWrist' : 'Wrist_R',
'leftHip' : 'Hip_L',
'rightHip' : 'Hip_R',
'leftKnee' : 'Knee_L',
'rightKnee' : 'Knee_R',
'leftAnkle' : 'Ankle_L',
'rightAnkle' : 'Ankle_R',
}
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--cam_id', type=int, default=0)
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera")
parser.add_argument('--notxt', action='store_true')
parser.add_argument("-s", "--size", type=int, default=5, help="size of queue for averaging")
args = parser.parse_args()
def unitCoords(coords, oldResolution):
unitCoords = {}
unitCoords['x'] = coords['x'] / oldResolution['x'];
unitCoords['y'] = coords['y'] / oldResolution['y']
return unitCoords;
def addText(image, text):
# font
font = cv2.FONT_HERSHEY_SIMPLEX
# org
org = (50, 50)
# fontScale
fontScale = 1
# Blue color in BGR
color = (255, 0, 0)
# Line thickness of 2 px
thickness = 2
# Using cv2.putText() method
image = cv2.putText(image, text, org, font,
fontScale, color, thickness, cv2.LINE_AA)
return image
from collections import deque, Counter
Q = deque(maxlen=args.size)
def main():
with tf.Session() as sess:
model_cfg, model_outputs = posenet.load_model(args.model, sess)
output_stride = model_cfg['output_stride']
cap = cv2.VideoCapture(args.cam_id) # default value
if args.file is not None:
cap = cv2.VideoCapture(args.file)
else:
cap = cv2.VideoCapture(args.cam_id)
cap.set(3, args.cam_width)
cap.set(4, args.cam_height)
start = time.time()
frame_count = 0
while True:
input_image, display_image, output_scale = posenet.read_cap(
cap, scale_factor=args.scale_factor, output_stride=output_stride)
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
model_outputs,
feed_dict={'image:0': input_image}
)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
heatmaps_result.squeeze(axis=0),
offsets_result.squeeze(axis=0),
displacement_fwd_result.squeeze(axis=0),
displacement_bwd_result.squeeze(axis=0),
output_stride=output_stride,
max_pose_detections=10,
min_pose_score=0.15)
keypoint_coords *= output_scale
cp_keypoint_coords = keypoint_coords.copy() # copy
keypoint_coords /= [input_image.shape[1], input_image.shape[2]]
# keypoint_coords *= 400
clf = load('synthpose.joblib')
# TODO this isn't particularly fast, use GL for drawing and display someday...
overlay_image = posenet.draw_skel_and_kp(
display_image, pose_scores, keypoint_scores, cp_keypoint_coords,
min_pose_score=0.15, min_part_score=0.1)
if not args.notxt:
for pi in range(len(pose_scores)):
if pose_scores[pi] == 0.:
break
# print('Pose #%d, score = %f' % (pi, pose_scores[pi]))
t_row = {} #
f_df = pd.DataFrame(columns = column_names)
for ki, (s, c) in enumerate(zip(keypoint_scores[pi, :], keypoint_coords[pi, :, :])):
# print('Keypoint %s, score = %f, coord = %s' % (posenet.PART_NAMES[ki], s, c))
if posenet.PART_NAMES[ki] in UNITY_PART_MAP:
t_row[UNITY_PART_MAP[posenet.PART_NAMES[ki]] + '_x'] = c[1];
t_row[UNITY_PART_MAP[posenet.PART_NAMES[ki]] + '_y'] = c[0];
f_df = f_df.append(t_row, ignore_index=True)
f_df = f_df.fillna(0)
y = clf.predict(f_df)[0]
# print(y, pose_scores[pi])
if pose_scores[pi] > 0.4:
Q.append(y)
b = Counter(Q).most_common(1)[0][0]
print (b)
overlay_image = addText(overlay_image, b)
cv2.imshow('posenet', overlay_image)
frame_count += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print('Average FPS: ', frame_count / (time.time() - start))
if __name__ == "__main__":
main() | rahul-islam/posenet-python | webcam_demo.py | webcam_demo.py | py | 5,792 | python | en | code | null | github-code | 36 |
12486442970 | """
Objects and Classes
Check your solution: https://judge.softuni.bg/Contests/Practice/Index/950#4
SUPyF Objects and Classes - 05. Optimized Banking System
Problem:
Create a class BankAccount which has a Name (string), Bank (string) and Balance (decimal).
You will receive several input lines, containing information in the following way:
{bank} | {accountName} | {accountBalance}
You need to store every given Account. When you receive the command “end” you must stop the input sequence.
Then you must print all Accounts, ordered by their balance, in descending order, and then by length of the bank name,
in ascending order.
The accounts must be printed in the following way “{accountName} -> {balance} ({bank})”.
Note: Numbers must be printed rounded to the 2nd decimal digit.
Examples:
----------------------------------------------------------------------------------------------
Input: | Output:
----------------------------------------------------------------------------------------------
DSK | Ivan | 504.403 |Aleksander -> 20000.00 (DSK)
DSK | Pesho | 2000.4031 |Aleksander -> 20000.00 (Piraeus)
DSK | Aleksander | 20000.0001 |Pesho -> 2000.40 (DSK)
Piraeus | Ivan | 504.403 |Ivan -> 504.40 (DSK)
Piraeus | Aleksander | 20000.0001 |Ivan -> 504.40 (Piraeus)
end |
----------------------------------------------------------------------------------------------
"""
class BankAccount:
    """A single bank account record: holder name, bank, and balance."""

    def __init__(self, name, bank, balance):
        """Store the holder's *name*, the *bank* and the current *balance*."""
        self.name, self.bank, self.balance = name, bank, balance
# Read "{bank} | {name} | {balance}" records until the "end" sentinel.
all_accounts = []
while True:
    command = input()
    if command == "end":
        break
    parts = command.split(" | ")
    all_accounts.append(BankAccount(parts[1], parts[0], float(parts[2])))
# Spec (see module docstring): order by balance descending, then by *length*
# of the bank name ascending. BUGFIX: the original keyed the secondary sort
# on the bank name itself, which only coincidentally matched the sample data
# ("DSK" < "Piraeus" both lexicographically and by length).
all_accounts.sort(key=lambda acc: (-acc.balance, len(acc.bank)))
for account in all_accounts:
    print(f"{account.name} -> {account.balance:.2f} ({account.bank})")
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Fundamentals June 2019/Problems and Files/07. OBJECT AND CLASSES/05. Optimized Banking System.py | 05. Optimized Banking System.py | py | 2,123 | python | en | code | 9 | github-code | 36 |
22807184796 | # Makes some radial plots of gas density and temperature.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Profile configuration.
x_field = 'Radiuspc'
y_fields = ['Density', 'Temperature']
weight_field = 'CellMass'
x_min = 1.0e-1
x_max = 2.0e2
fns = sys.argv[1:]          # datasets to process, given on the command line
plot_folder = 'profiles'
n_bins = 128
# Make a folder in which to save the profiles.
if not os.path.isdir(plot_folder):
    os.mkdir(plot_folder)
# Make plots for each dataset.
# NOTE(review): `load` and `BinnedProfile1D` are neither defined nor imported
# in this file -- they presumably come from a star-import of yt; confirm the
# intended yt version and make the import explicit.
for fn in fns:
    pf = load(fn)
    profile = BinnedProfile1D(pf.h.all_data(), n_bins, x_field, x_min, x_max)
    profile.add_fields(y_fields, weight = weight_field)
    # Make one log-log plot per profiled field.
    for y_field in y_fields:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.loglog(profile[x_field], profile[y_field])
        ax.set_xlabel(x_field)
        ax.set_ylabel(y_field)
        plt.savefig('%s/%s_%s.png' % (plot_folder, pf, y_field))
        # BUGFIX: close each figure so they do not accumulate (and leak
        # memory) across many datasets/fields; also dropped a stray ';'.
        plt.close(fig)
| enzo-project/enzo-dev | run/Hydro/Hydro-3D/RotatingSphere/profile_script.py | profile_script.py | py | 936 | python | en | code | 72 | github-code | 36 |
74253274663 | """ Module containing implementation of evolutionary computation algorithms, such as:
- basic Evolutionary Algorithm
- Genetic Programming
- Evolutionary Strategies
for solving the cases (see 'cases' module).
"""
import random
import copy
from typing import Tuple, Union, Dict, Any, List
from deap import base, creator, tools
import evolution.solution as sol
import evolution.evaluator as evaluator
import evolution.ec_utils as ecu
from utils.logger import glob_logger
# Some default values used in the algorithms
# Population size, No. Generations, Mutation prob., Crossover prob.
NPOP, NGEN, PMUT, PCX = 20, 100, 0.5, 0.2
class EvolutionParameters:
    """ Holds the parameters that drive an evolution process.

    Only the parameters shared by every evolutionary technique are modelled
    explicitly:
    - Population size
    - Number of generations
    - Probability of mutation
    - Probability of crossover

    Any additional keyword argument given to the constructor becomes a
    regular attribute reachable through the . (dot) notation, i.e.
    ``EvolutionParameters(..., my_parameter=13.5)`` exposes
    ``evo_param.my_parameter``. The conventions are that 1) each case
    supplies values differing from the defaults and 2) each algorithm starts
    with an initialization phase that calls :meth:`update_defaults` to fill
    in every parameter it still needs.

    :ivar pop: the requested population size
    :ivar gen: the number of generations
    :ivar pmut: the probability of mutation
    :ivar pcx: the probability of crossover
    """
    def __init__(
        self, popsize: int = NPOP, generations: int = NGEN, prob_mut: float = PMUT,
        prob_cx: float = PCX, **kwargs: Any
    ) -> None:
        """ Constructor

        :param popsize: the requested population size
        :param generations: the number of generations
        :param prob_mut: the probability of mutation
        :param prob_cx: the probability of crossover
        """
        self.pop = popsize
        self.gen = generations
        self.pmut = prob_mut
        self.pcx = prob_cx
        # Every extra keyword argument becomes an attribute of the object.
        for extra_name, extra_value in kwargs.items():
            setattr(self, extra_name, extra_value)
    def update_defaults(self, defaults: Dict[str, Any]) -> None:
        """ Set default values for parameters the object does not have yet.

        No restriction is imposed on the type of the parameter values.

        :param defaults: map of default parameters and their values
        """
        for param_name, default_value in defaults.items():
            if hasattr(self, param_name):
                continue
            setattr(self, param_name, default_value)
    def __setattr__(self, name: str, value: Any) -> None:
        """ Setattr override that keeps dynamic attributes type-check friendly.

        https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html#when-you-re-puzzled-or-when-things-are-complicated

        :param name: name of the attribute
        :param value: value of the attribute
        """
        super().__setattr__(name, value)
    def __getattr__(self, name: str) -> Any:
        """ Getattr override that keeps dynamic attributes type-check friendly.

        https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html#when-you-re-puzzled-or-when-things-are-complicated

        :param name: name of the attribute
        :return: value of the attribute
        """
        return super().__getattribute__(name)
# def opt_ga(
# solutions: sol.SolutionSet,
# evo_params: EvolutionParameters,
# apply_func: sol.ApplySignature,
# fitness_func: sol.FitnessSignature,
# workers: int,
# **apply_params: Union[int, float]
# ) -> ecu.OptIndividual:
# """ TODO: doc
# """
# evo_params.update_defaults({
# 'pop_lambda': evo_params.pop,
# 'tourn_size': 4
# })
# hof = ecu.BestTracker()
# creator.create('FitnessMax', base.Fitness, weights=(1.0,))
# str_limits = solutions.get_strength_limits()
# try:
# # Create an evaluator context that handles the multiprocess / single process evaluation
# with evaluator.Evaluator(workers, solutions, apply_func, fitness_func) as ev:
# # Initialize the population
# population = [
# ecu.OptIndividual(str_limits, creator.FitnessMax())
# for _ in range(evo_params.pop)
# ]
# # Evaluate the initial population
# for ind in population:
# # Make it tuple of one element
# ind.fitness.values = ev.evaluate(
# None, None, strength=ind.str_map, **apply_params
# )[0],
# hof.update(population)
# for g in range(evo_params.gen):
# # Randomly select 'lambda' parents and deepcopy them => offsprings
# chosen = tools.selTournament(population, k=evo_params.pop_lambda, tournsize=evo_params.tourn_size)
# offsprings: List[ecu.OptIndividual] = list(map(copy.deepcopy, chosen))
# # Perform the crossover (mating) among the offsprings
# for o1, o2 in zip(offsprings[::2], offsprings[1::2]):
# if random.random() < evo_params.pcx:
# ecu.crossover_strength(o1, o2)
# # Mutate some of the offsprings
# for o in offsprings:
# ecu.mutate_strength(o, evo_params.pmut)
# # Recalculate the fitness for offsprings that have changed
# for ind in [o for o in offsprings if not o.fitness.valid]:
# ind.fitness.values = ev.evaluate(
# None, None, strength=ind.str_map, **apply_params
# )[0],
# # Select 'mu' best offsprings and make them the new population
# population[:] = tools.selBest(offsprings, evo_params.pop)
# hof.update(population)
# print(f'Gen {g}: fitness={hof[0].fitness:.2f}; str=[{",".join(hof[0].str_map.values())}]')
# # Re-evaluate the solutions to contain the all-time best results
# ev.evaluate(None, None, strength=hof[0].str_map, **apply_params)
# return hof[0]
# finally:
# # Make sure we remove the created FitnessMax class when multiple algorithms
# # are run back to back in one session
# del creator.FitnessMax
def opt_es_plus(
    solutions: sol.SolutionSet,
    evo_params: EvolutionParameters,
    apply_func: sol.ApplySignature,
    fitness_func: sol.FitnessSignature,
    workers: int,
    **apply_params: Union[int, float]
) -> ecu.OptIndividual:
    """ (mu + lambda) Evolution Strategy over optimization-strength vectors.

    Each generation draws 'pop_lambda' offsprings from the population,
    mutates them and keeps the 'pop' best individuals out of the *joined*
    parent + offspring pool (the "plus" selection scheme).

    :param solutions: a collection of solutions, one for each workload being solved
    :param evo_params: the supplied parameters for the evolution process
    :param apply_func: function to use for genotype -> phenotype mapping
    :param fitness_func: function to use for fitness evaluation
    :param workers: number of worker processes
    :param apply_params: additional parameters for the apply function

    :return: the all-time best individual found during the run
    """
    evo_params.update_defaults({
        'pop_lambda': evo_params.pop * 2
    })
    hof = ecu.BestTracker()
    creator.create('FitnessMax', base.Fitness, weights=(1.0,))
    str_limits = solutions.get_strength_limits()
    sol_count = len(solutions)
    opt_goal = next(iter(solutions)).result.opt_goal
    try:
        # Create an evaluator context that handles the multiprocess / single process evaluation
        with evaluator.Evaluator(workers, solutions, apply_func, fitness_func) as ev:
            # Initialize the population
            population = [
                ecu.OptIndividual(str_limits, creator.FitnessMax())
                for _ in range(evo_params.pop)
            ]
            # Evaluate the initial population
            for ind in population:
                # Make it tuple of one element
                ind.fitness.values = ev.evaluate(
                    None, None, strength=ind.str_map, **apply_params
                )[0],
            hof.update(population)
            for g in range(evo_params.gen):
                gen_best = ecu.BestTracker()
                # Randomly select 'lambda' parents and deepcopy them => offsprings
                offsprings: List[ecu.OptIndividual] = list(
                    map(copy.deepcopy, random.choices(population, k=evo_params.pop_lambda))
                )
                # Mutate some of the offsprings
                for o in offsprings:
                    ecu.mutate_strength(o)
                # Recalculate the fitness for offsprings that have changed
                for ind in [o for o in offsprings if not o.fitness.valid]:
                    ind.fitness.values = ev.evaluate(
                        None, None, strength=ind.str_map, **apply_params
                    )[0],
                # Select 'mu' best out of parents + offsprings ("plus" scheme)
                population[:] = tools.selBest(population + offsprings, evo_params.pop)
                hof.update(population)
                gen_best.update(population)
                best = gen_best.get_best()
                # Re-apply the generation winner so the solutions hold its results
                ev.evaluate(None, None, strength=best.str_map, **apply_params)
                # Log record: gen, goal, fitness, strength, data_ratio, time_ratio.
                # BUGFIX: the loop variable was named 'sol', shadowing the
                # 'evolution.solution' module alias imported at the top.
                r_time, r_data = 0.0, 0.0
                for s in solutions:
                    r_time += s.result.time_ratio
                    r_data += s.result.data_ratio
                glob_logger.add_record(
                    (g, opt_goal, best.fitness.values[0], best.get_str(), r_data / sol_count, r_time / sol_count)
                )
            # Re-evaluate the solutions to contain the all-time best results
            ev.evaluate(None, None, strength=hof.get_best().str_map, **apply_params)
            best = hof.get_best()
            print("Strength legend: [CGP, SB, DT, DB, DS]")
            print(
                f'ALL TIME BEST: fitness={best.fitness.values[0]}; '\
                f'str=[{",".join(map(str, best.str_map.values()))}]'
            )
            print(best.str_map)
            for s in solutions:
                print(f'[{s.workload.name}]({s.result.opt_goal}); Time ratio: {s.result.time_ratio:.8f}; Data ratio: {s.result.data_ratio:.8f}')
                s.result.print_effect()
            return hof.get_best()
    finally:
        # Make sure we remove the created FitnessMax class when multiple algorithms
        # are run back to back in one session
        del creator.FitnessMax
def opt_es_comma(
    solutions: sol.SolutionSet,
    evo_params: EvolutionParameters,
    apply_func: sol.ApplySignature,
    fitness_func: sol.FitnessSignature,
    workers: int,
    **apply_params: Union[int, float]
) -> ecu.OptIndividual:
    """ (mu, lambda) Evolution Strategy over optimization-strength vectors.

    Each generation draws 'pop_lambda' offsprings from the population,
    mutates them and keeps the 'pop' best individuals *from the offsprings
    only* (the "comma" selection scheme - parents are always discarded).

    :param solutions: a collection of solutions, one for each workload being solved
    :param evo_params: the supplied parameters for the evolution process
    :param apply_func: function to use for genotype -> phenotype mapping
    :param fitness_func: function to use for fitness evaluation
    :param workers: number of worker processes
    :param apply_params: additional parameters for the apply function

    :return: the all-time best individual found during the run
    """
    evo_params.update_defaults({
        'pop_lambda': evo_params.pop * 2
    })
    hof = ecu.BestTracker()
    creator.create('FitnessMax', base.Fitness, weights=(1.0,))
    str_limits = solutions.get_strength_limits()
    sol_count = len(solutions)
    opt_goal = next(iter(solutions)).result.opt_goal
    try:
        # Create an evaluator context that handles the multiprocess / single process evaluation
        with evaluator.Evaluator(workers, solutions, apply_func, fitness_func) as ev:
            # Initialize the population
            population = [
                ecu.OptIndividual(str_limits, creator.FitnessMax())
                for _ in range(evo_params.pop)
            ]
            # Evaluate the initial population
            for ind in population:
                # Make it tuple of one element
                ind.fitness.values = ev.evaluate(
                    None, None, strength=ind.str_map, **apply_params
                )[0],
            hof.update(population)
            for g in range(evo_params.gen):
                gen_best = ecu.BestTracker()
                # Randomly select 'lambda' parents and deepcopy them => offsprings
                offsprings: List[ecu.OptIndividual] = list(
                    map(copy.deepcopy, random.choices(population, k=evo_params.pop_lambda))
                )
                # Mutate some of the offsprings
                for o in offsprings:
                    ecu.mutate_strength(o)
                # Recalculate the fitness for offsprings that have changed
                for ind in [o for o in offsprings if not o.fitness.valid]:
                    ind.fitness.values = ev.evaluate(
                        None, None, strength=ind.str_map, **apply_params
                    )[0],
                # Select 'mu' best from the offsprings only ("comma" scheme)
                population[:] = tools.selBest(offsprings, evo_params.pop)
                hof.update(population)
                gen_best.update(population)
                best = gen_best.get_best()
                # Re-apply the generation winner so the solutions hold its results
                ev.evaluate(None, None, strength=best.str_map, **apply_params)
                # Log record: gen, goal, fitness, strength, data_ratio, time_ratio.
                # BUGFIX: the loop variable was named 'sol', shadowing the
                # 'evolution.solution' module alias imported at the top.
                r_time, r_data = 0.0, 0.0
                for s in solutions:
                    r_time += s.result.time_ratio
                    r_data += s.result.data_ratio
                glob_logger.add_record(
                    (g, opt_goal, best.fitness.values[0], best.get_str(), r_data / sol_count, r_time / sol_count)
                )
            # Re-evaluate the solutions to contain the all-time best results
            ev.evaluate(None, None, strength=hof.get_best().str_map, **apply_params)
            best = hof.get_best()
            print("Strength legend: [CGP, SB, DT, DB, DS]")
            print(
                f'ALL TIME BEST: fitness={best.fitness.values[0]}; '\
                f'str=[{",".join(map(str, best.str_map.values()))}]'
            )
            print(best.str_map)
            for s in solutions:
                print(f'[{s.workload.name}]({s.result.opt_goal}); Time ratio: {s.result.time_ratio:.8f}; Data ratio: {s.result.data_ratio:.8f}')
                s.result.print_effect()
            return hof.get_best()
    finally:
        # Make sure we remove the created FitnessMax class when multiple algorithms
        # are run back to back in one session
        del creator.FitnessMax
def basic_ea(
    solutions: sol.SolutionSet,
    evo_params: EvolutionParameters,
    apply_func: sol.ApplySignature,
    fitness_func: sol.FitnessSignature,
    workers: int,
    **apply_params: Union[int, float]
) -> Tuple[float, float]:
    """ Evolutionary algorithm for solving the 'basic' variants of the initial sampling function
    where we tune only the 'base' parameter.

    Heavily inspired by 'https://deap.readthedocs.io/en/master/overview.html'.

    :param solutions: a collection of solutions, one for each workload being solved
    :param evo_params: the supplied parameters for the evolution process
    :param apply_func: function to use for genotype -> fenotype mapping
    :param fitness_func: function to use for fitness evaluation
    :param workers: number of worker processes
    :param apply_params: additional parameters for the apply function

    :return: the best individual's 'base' value and its fitness value
    """
    # First make sure that we have all the parameters we need
    evo_params.update_defaults({
        'attr_low': 0.0, 'attr_high': 100.0,
        'cx_eta': 2.0,
        'mut_eta': 2.0, 'mut_mu': 1, 'mut_sigma': 5,
        'tourn_size': 3
    })
    # Store the all-time best individual
    hof = tools.HallOfFame(1)
    # Create maximization fitness and an individual class
    creator.create('FitnessMax', base.Fitness, weights=(1.0,))
    creator.create('Individual', list, fitness=creator.FitnessMax)
    try:
        # Create an evaluator context that handles the multiprocess / single process evaluation
        with evaluator.Evaluator(workers, solutions, apply_func, fitness_func) as ev:
            # Set the individual and population initializers.
            # An individual is a 1-element list holding the 'base' value.
            toolbox = base.Toolbox()
            toolbox.register(
                'attribute',
                random.uniform, evo_params.attr_low, evo_params.attr_high
            )
            toolbox.register(
                'individual',
                tools.initRepeat, creator.Individual, toolbox.attribute, n=1
            )
            toolbox.register(
                'population',
                tools.initRepeat, list, toolbox.individual
            )
            # Set the evolution operators: crossover, mutation, selection
            # The evaluation will be performed by the evaluator
            toolbox.register(
                'mate',
                tools.cxSimulatedBinary, eta=evo_params.cx_eta
            )
            toolbox.register(
                'mutate',
                tools.mutGaussian, mu=evo_params.mut_mu, sigma=evo_params.mut_sigma,
                indpb=evo_params.pmut
            )
            # k = pop - 1 so that appending the hall-of-fame clone below
            # keeps the population size constant.
            toolbox.register(
                'select',
                tools.selTournament, tournsize=evo_params.tourn_size, k=evo_params.pop - 1
            )
            # Evaluate fitness of the initial random population
            pop = toolbox.population(evo_params.pop)
            for ind in pop:
                ind.fitness.values = tuple(ev.evaluate(None, None, base=ind[0], **apply_params))
            hof.update(pop)
            # Run all the generations
            for g in range(evo_params.gen):
                print(f'Generation: {g}')
                # Create new offsprings, always include the all-time best solution
                # The cloning is necessary since crossover and mutations work in-situ
                offsprings = list(map(toolbox.clone, toolbox.select(pop))) + [toolbox.clone(hof[0])]
                # Perform the crossover (mating) among the offsprings
                for o1, o2 in zip(offsprings[::2], offsprings[1::2]):
                    if random.random() < evo_params.pcx:
                        toolbox.mate(o1, o2)
                        del o1.fitness.values
                        del o2.fitness.values
                # Additionally mutate some of the new offsprings.
                # NOTE(review): mutate runs on every offspring and its fitness
                # is invalidated unconditionally (even when mutGaussian's
                # per-gene indpb mutated nothing), so the whole population is
                # re-evaluated each generation -- confirm this is intended.
                for mutant in offsprings:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values
                # Recalculate the fitness for the modified offsprings (mating, mutation)
                for ind in [o for o in offsprings if not o.fitness.valid]:
                    ind.fitness.values = tuple(ev.evaluate(None, None, base=ind[0], **apply_params))
                # Update the population and all-time best
                pop[:] = offsprings
                hof.update(pop)
            # Re-evaluate the solutions to contain the all-time best results
            ev.evaluate(None, None, base=hof[0][0], **apply_params)
            return hof[0][0], hof[0].fitness.values[0]
    finally:
        # Make sure we remove the created Individual and Fitness classes when multiple algorithms
        # are run back to back in one session
        del creator.Individual
        del creator.FitnessMax
| JiriPavela/perun-optimization-evolution | src/evolution/ec.py | ec.py | py | 19,236 | python | en | code | 0 | github-code | 36 |
16722490719 | import sys
if __name__ == "__main__":
    # Sum the first whitespace-separated integer on every line of the file
    # named by the first command-line argument.
    fname = sys.argv[1]
    with open(fname, 'r') as file:
        number = 0
        for line in file:
            fields = line.strip().split(' ')
            number += int(fields[0])
        print("Total point number: %d" % (number + 20)) | Enigmatisms/LiDARSim2D | py/point_num.py | point_num.py | py | 273 | python | en | code | 23 | github-code | 36 |
74476200743 |
def encontrar_chave(dicionario, valor_procurado):
    """Return the first key in *dicionario* mapped to *valor_procurado*.

    Yields None when no entry holds that value (same as the implicit
    fall-through of the original loop).
    """
    return next(
        (chave for chave, valor in dicionario.items() if valor == valor_procurado),
        None,
    )
# Read a month number (1-12) from stdin and print its English name by
# reverse-looking it up in the name -> number mapping.
month = int(input())
months_dict = {
    'January': 1,
    'February': 2,
    'March': 3,
    'April': 4,
    'May': 5,
    'June': 6,
    'July': 7,
    'August': 8,
    'September': 9,
    'October': 10,
    'November': 11,
    'December': 12
}
print(encontrar_chave(months_dict, month))
| luis-sardinha/desafio-python-DIO | desafio-twitter/desafio_mes.py | desafio_mes.py | py | 513 | python | es | code | 0 | github-code | 36 |
35436780435 | import urllib.request
from bs4 import BeautifulSoup
# Target: a local development server on port 8000.
url = 'http://127.0.0.1:8000/'
res = urllib.request.urlopen(url)
data = res.read()
# Decode the raw bytes and parse the document with BeautifulSoup.
html = data.decode("utf-8")
soup = BeautifulSoup(html, 'html.parser')
print(soup)
# Navigate directly to the first <h1> inside <body>.
h1 = soup.html.body.h1
print('h1:', h1.string) | SeungYeopB/bigdata | crawling1/sample01.py | sample01.py | py | 266 | python | en | code | 0 | github-code | 36 |
24623808952 | import matplotlib.pyplot as plt
from generator import Generator
import torch
from discriminator import Discriminator
import torch.nn as nn
import utils
import torch.utils.data as data
# Build the generator and run one smoke-test forward pass on random noise.
G = Generator()
input_z = torch.randn(1, 20)
input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
fake_image = G(input_z)
D = Discriminator()
# Apply the custom weight initialization to both networks.
G.apply(utils.weights_init)
D.apply(utils.weights_init)
print('ネットワーク初期化完了')
# Build the training dataset / dataloader (images normalized with mean=std=0.5).
train_img_list = utils.make_datapath_list()
mean = (0.5,)
std = (0.5,)
train_dataset = utils.GAN_Img_Dataset(filelist=train_img_list, transform=utils.ImageTransform(mean, std))
batch_size = 64
train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
num_epochs = 200
G_updated, D_updated = utils.train_model(G, D, dataloader=train_dataloader, num_epochs=num_epochs)
# NOTE(review): device is hard-coded to the first CUDA GPU -- this assumes
# CUDA is available and that train_model left G_updated on that device;
# confirm.
device = torch.device('cuda:0')
batch_size = 8
z_dim = 20
fixed_z = torch.randn(batch_size, z_dim)
fixed_z = fixed_z.view(fixed_z.size(0), fixed_z.size(1), 1, 1)
# Generate images with the trained generator.
fake_image = G_updated(fixed_z.to(device))
# Fetch one batch of real training data for comparison.
batch_iterator = iter(train_dataloader)
imgs = next(batch_iterator)
# Top row: 5 real images; bottom row: 5 generated images.
fig = plt.figure(figsize=(15, 6))
for i in range(0, 5):
    plt.subplot(2, 5, i+1)
    plt.imshow(imgs[i][0].cpu().detach().numpy(), 'gray')
    plt.subplot(2, 5, 5+i+1)
    plt.imshow(fake_image[i][0].cpu().detach().numpy(), 'gray')
plt.show()
| TOnodera/pytorch-advanced | gan/main.py | main.py | py | 1,442 | python | en | code | 0 | github-code | 36 |
20665600389 | from ast import arg
from brownie import (
accounts,
config,
network
)
import eth_utils
LOCAL_BLOCKCHAIN_ENVIRONMENTS=["development", "ganache-local"]
FORKED_LOCAL_ENVIRONMENTS=["mainnet-fork-dev"]
OPENSEA_URL = "https://testnets.opensea.io/assets/{}/{}"
def get_account_v2(index=None, id=None):
    """Resolve the brownie account to transact with.

    Priority: an explicit *index* into brownie's accounts, then a saved
    account *id*, then the first auto-funded account on local/forked test
    networks, and finally the private key from the project config for live
    networks.
    """
    if index:
        return accounts[index]
    if id:
        return accounts.load(id)
    active = network.show_active()
    if active in LOCAL_BLOCKCHAIN_ENVIRONMENTS or active in FORKED_LOCAL_ENVIRONMENTS:
        return accounts[0]
    return accounts.add(config["wallets"]["from_key"])
# initializer = box.store, 1
def encode_function_data(initializer=None, *args):
    """Encode an initializer call so it can ride along with a proxy deploy.

    Args:
        initializer: the contract function to call (e.g. ``box.store``);
            when omitted, nothing is encoded.
        args: the arguments to pass to the initializer function.

    Returns:
        bytes: the ABI-encoded call, or ``b''`` when there is nothing to
        encode.
    """
    if not args:
        args = b''
    if not initializer:
        return b''
    return initializer.encode_input(*args)
def upgrade(
    account,
    proxy,
    newimplementation_address,
    proxy_admin_contract=None,
    initializer=None,
    *args
):
    """Point *proxy* at a new implementation contract.

    :param account: account that sends the upgrade transaction
    :param proxy: the proxy contract to upgrade
    :param newimplementation_address: address of the new implementation
    :param proxy_admin_contract: optional ProxyAdmin contract; when given,
        the upgrade goes through it instead of calling the proxy directly
    :param initializer: optional function to call on the new implementation
        right after the upgrade (encoded together with *args)
    :param args: arguments for the initializer
    :return: the upgrade transaction (BUGFIX: the function previously ended
        with a bare ``return``, silently discarding the transaction it built)
    """
    transaction = None
    if proxy_admin_contract:
        if initializer:
            encoded_function_call = encode_function_data(initializer, *args)
            transaction = proxy_admin_contract.upgradeAndCall(
                proxy.address,
                newimplementation_address,
                encoded_function_call,
                {"from": account},
            )
            transaction.wait(1)
        else:
            transaction = proxy_admin_contract.upgrade(
                proxy.address, newimplementation_address, {"from": account}
            )
            transaction.wait(1)
    else:
        # No admin contract: upgrade straight through the proxy itself.
        if initializer:
            encoded_function_call = encode_function_data(initializer, *args)
            transaction = proxy.upgradeToAndCall(
                newimplementation_address, encoded_function_call, {"from": account}
            )
        else:
            transaction = proxy.upgradeTo(newimplementation_address, {"from": account})
    return transaction
| ckt22/upgradeable-contract-template | scripts/helpful_scripts.py | helpful_scripts.py | py | 2,352 | python | en | code | 0 | github-code | 36 |
33403799483 | import os.path
import bitarray
# Keeps the previously produced block so consecutive blocks can be
# XOR-chained (CBC); the very first block is XORed with the init key/IV.
class CBCEncrypter:
    def __init__(self, init_key: bitarray.bitarray) -> None:
        super().__init__()
        # Initialization key (acts as the IV for the first block).
        self.key = init_key
        self.prev_block: bitarray.bitarray = None
    def save_block(self, block):
        # Remember the ciphertext block produced by the caller; it becomes
        # the chaining value for the next block.
        self.prev_block = block
    def xor_block(self, block: bitarray.bitarray):
        # First block chains with the IV, later blocks with the saved block.
        if self.prev_block is None:
            return block ^ self.key
        else:
            res = block ^ self.prev_block
            # NOTE(review): this stores the *input* block, but encrypt()
            # immediately overwrites it via save_block() with the ciphertext,
            # so this assignment appears to be dead -- confirm.
            self.prev_block = block
            return res
class CBCDecrypter:
    def __init__(self, init_key: bitarray.bitarray) -> None:
        super().__init__()
        # Initialization key (acts as the IV for the first block).
        self.key = init_key
        self.prev_block: bitarray.bitarray = None
        # Raw ciphertext of the block currently being decrypted; promoted to
        # prev_block once the block has been processed.
        self.lazy_block = None
    def save_lazy_block(self, block):
        # Stash the ciphertext block *before* decryption; it is the chaining
        # value for the following block.
        self.lazy_block = block
    def xor_block(self, block: bitarray.bitarray):
        if self.prev_block is None:
            # First block: un-chain with the IV.
            self.prev_block = self.lazy_block
            return block ^ self.key
        else:
            res = block ^ self.prev_block
            self.prev_block = self.lazy_block
            return res
def read_file_to_bit_arr(filename: str) -> bitarray:
    """
    Read an entire file into a bit array.
    :param filename: name of the file
    :return: bit array with the file's bits
    """
    filesize = os.path.getsize(filename)
    with open(filename, 'rb') as f:
        # bitarray(1) starts with a single undefined bit; fromfile() appends
        # the file's bits after it, and the [1:] slice drops that first bit.
        bit_arr = bitarray.bitarray(1)
        bit_arr.fromfile(f, filesize)
        return bit_arr[1:]
    pass  # unreachable
def write_bit_arr_to_file(crypto_msg, filename='output.crypto'):
    """Dump the raw bytes of *crypto_msg* into *filename* (binary mode)."""
    with open(filename, 'wb') as out_file:
        out_file.write(crypto_msg)
def read_key(filename: str) -> bitarray:
    """
    Read the encryption key from a file. A 64-bit (8-byte) key is expected.

    The loading logic was a byte-for-byte duplicate of
    :func:`read_file_to_bit_arr`, so this now simply delegates to it.
    :param filename: name of the file
    :return: bit array holding the key
    """
    return read_file_to_bit_arr(filename)
def ror(value: "bitarray", shift: int) -> "bitarray":
    """
    Cyclic (circular) shift to the right: the last ``shift`` elements move
    to the front.

    BUGFIX: the original returned ``value[:shift] + value[:-shift]``, which
    duplicated the first ``shift`` elements and dropped the last ones
    instead of rotating, and produced an *empty* result whenever ``shift``
    was a multiple of ``len(value)``. (No other code in this file calls
    ror, so nothing depended on the broken behavior.)
    :param value: sequence to rotate (bitarray or any sliceable sequence)
    :param shift: rotation amount
    :return: the rotated sequence
    """
    shift = shift % len(value)
    if shift == 0:
        return value[:]
    # Move the last `shift` elements to the front.
    return value[-shift:] + value[:-shift]
def rol(value: "bitarray", shift: int) -> "bitarray":
    """
    Cyclic (circular) shift to the left: the first ``shift`` elements move
    to the back. (Behavior unchanged; removed an unreachable ``pass`` and
    quoted the annotations so they are not evaluated at import time.)
    :param value: sequence to rotate (bitarray or any sliceable sequence)
    :param shift: rotation amount
    :return: the rotated sequence
    """
    shift = shift % len(value)
    return value[shift:] + value[:shift]
def gen_key_vector(secret_key: "bitarray", round_col: int):
    """
    Derive one 16-bit round key per round from the secret key.

    Round i: rotate the key left by 3*i bits, keep the odd-indexed bits of
    the result, then keep the second half of those.

    NOTE(review): step2 is created as ``bitarray.bitarray(32)`` whose bits
    are *undefined*; if the key holds fewer than 64 bits, part of step2 is
    never assigned and the round keys become nondeterministic -- confirm
    the key is always 64 bits.
    :param secret_key: the secret key bits
    :param round_col: number of rounds (and thus of round keys)
    :return: list of 16-bit round keys
    """
    res = []
    for round_idx in range(round_col):
        step1 = rol(secret_key, round_idx * 3)
        step2 = bitarray.bitarray(32)
        pointer = 0
        # BUGFIX: the inner loop variable was named `i`, shadowing the outer
        # loop variable.
        for bit_idx, elem in enumerate(step1):
            if bit_idx % 2 == 1:
                step2[pointer] = elem
                pointer += 1
        res.append(step2[16:])
    return res
def encrypt(msg, init_key, iv, round_cols=1, block_size=64, minimum_bits_block=16):
    """
    Encrypt a message with a Feistel-like block cipher chained in CBC mode.
    :param minimum_bits_block: minimal sub-block size in bits (multiple of 8)
    :param block_size: encryption block size in bits (multiple of minimum_bits_block)
    :param msg: message bits to encrypt
    :param init_key: starting key used to derive the round keys
    :param iv: initialization vector for the CBC chaining
    :param round_cols: number of encryption rounds
    :return: encrypted message bits
    """
    # Validate the block geometry, then pad the tail up to a full block.
    if minimum_bits_block % 8 != 0 or block_size % minimum_bits_block != 0:
        raise Exception("Неверные размеры блоков для шифрования!!!")
    tail_cell_size = len(msg) % block_size
    if tail_cell_size != 0:
        msg += '0' * (block_size - tail_cell_size)
    block_col = len(msg) // block_size
    crypto_msg = bitarray.bitarray(0)
    key_vec = gen_key_vector(init_key, round_cols)
    cbc = CBCEncrypter(iv)
    # NOTE(review): crypto_msg is not reset between rounds and block_col is
    # computed once, so with round_cols > 1 the output grows every round
    # while only the first block_col blocks are re-processed -- confirm
    # this is intended.
    for round_num in range(round_cols):
        for block_num in range(block_col):
            start = block_num * block_size
            end = block_num * block_size + block_size
            block = msg[start:end]
            # CBC chaining: XOR with the IV / previous ciphertext block.
            block = cbc.xor_block(block)
            # Split the block into 4 sub-blocks for the round function.
            blocks = [block[i * minimum_bits_block: i * minimum_bits_block + minimum_bits_block] for i in range(4)]
            del block
            res_block = [None for i in range(4)]
            res_block[0] = blocks[1]
            res_block[1] = blocks[2] ^ blocks[0]
            res_block[2] = ((blocks[1] ^ key_vec[round_num]) ^ blocks[3]) ^ (blocks[2] ^ blocks[0])
            res_block[3] = blocks[0]
            # Reassemble the 4 sub-blocks into one ciphertext block.
            res = bitarray.bitarray(0)
            for r in res_block:
                res += r
            cbc.save_block(res)
            crypto_msg += res
        msg = crypto_msg
    return msg
def decrypt(crypto_msg, init_key, iv, round_cols=1, block_size=64, minimum_bits_block=16, clear_output=True):
    """
    Decrypt a message produced by :func:`encrypt`.
    :param crypto_msg: encrypted message bits
    :param init_key: initial key used to derive the round keys
    :param iv: initialization vector for the CBC chaining
    :param round_cols: number of rounds
    :param block_size: encryption block size in bits (multiple of minimum_bits_block)
    :param minimum_bits_block: minimal coding block in bits (multiple of 8)
    :param clear_output: strip NULL bytes from the output (may be disabled
        if the message intentionally contains them).
        NOTE(review): this flag is accepted but never checked below -- NULL
        bytes are always stripped; confirm and wire it up.
    :return: decrypted message bits
    """
    if minimum_bits_block % 8 != 0 or block_size % minimum_bits_block != 0:
        raise Exception("Неверные размеры блоков для шифрования!!!")
    tail_cell_size = len(crypto_msg) % block_size
    if tail_cell_size != 0:
        crypto_msg += '0' * (block_size - tail_cell_size)
    block_col = len(crypto_msg) // block_size
    decrypt_msg = bitarray.bitarray(0)
    key_vec = gen_key_vector(init_key, round_cols)
    cbc = CBCDecrypter(iv)
    for round_num in range(round_cols):
        for block_num in range(block_col):
            start = block_num * block_size
            end = block_num * block_size + block_size
            block = crypto_msg[start:end]
            # Keep the raw ciphertext block for the CBC chaining.
            cbc.save_lazy_block(block)
            # Split the block into 4 sub-blocks (inverse of encrypt()).
            blocks = [block[i * minimum_bits_block: i * minimum_bits_block + minimum_bits_block] for i in range(4)]
            res_block = [None for i in range(4)]
            res_block[0] = blocks[3]
            res_block[1] = blocks[0]
            res_block[2] = blocks[1] ^ blocks[3]
            res_block[3] = (blocks[2] ^ blocks[1]) ^ (key_vec[round_num] ^ blocks[0])
            res = bitarray.bitarray(0)
            for r in res_block:
                res += r
            # Undo the CBC chaining after the round function.
            res = cbc.xor_block(res)
            decrypt_msg += res
        crypto_msg = decrypt_msg
    # Split into bytes and drop NULL padding bytes from the plaintext.
    # (The first assignment below is immediately overwritten -- dead code.)
    decrypt_msg = bitarray.bitarray(0)
    decrypt_msg = [crypto_msg[i * 8: i * 8 + 8] for i in range(len(crypto_msg) // 8)]
    NULL = bitarray.bitarray('0' * 8)
    while bitarray.bitarray(NULL) in decrypt_msg:
        decrypt_msg.remove(NULL)
    # Re-join the remaining bytes into one bit array.
    crypto_msg = bitarray.bitarray(0)
    for partition in decrypt_msg:
        crypto_msg += partition
    return crypto_msg
    pass  # unreachable
def get_iv(data: str):
    """Turn a text string into the initialization-vector bit array."""
    bits = bitarray.bitarray(1)
    bits.frombytes(data.encode())
    # Drop the single undefined bit that bitarray(1) starts with.
    return bits[1:]
if __name__ == '__main__':
    # Read the IV text, the secret key and the encrypted flag, then decrypt
    # and store the result.
    # BUGFIX: the IV file handle was previously opened and never closed.
    with open('iv.txt') as f:
        iv_v = f.readline()
    iv_data = get_iv(iv_v)
    key = read_key('./key.txt')
    crypto = read_file_to_bit_arr('./flag.bin')
    new_text = decrypt(crypto, key, iv_data)
    # NOTE(review): the label "Шифротекст" means "ciphertext", but new_text
    # holds the *decrypted* message -- confirm the intended wording.
    print("Шифротекст: ", new_text)
    write_bit_arr_to_file(new_text, 'decrypt_message.txt')
| remoppou/CTF | crypto/Feistel-song/solution/solve.py | solve.py | py | 8,778 | python | ru | code | 0 | github-code | 36 |
4130598030 | #By Alexandros Panagiotakopoulos - alexandrospanag.github.io
class Lifecycle:
    """Small demo of object construction and a per-instance call counter."""

    # Class-level defaults; party() rebinds `x` on the instance, so each
    # object counts its own calls independently.
    x = 0
    name = ''

    def __init__(self, nam):
        """Remember the object's name and announce its construction."""
        self.name = nam
        print(self.name, 'constructed')

    def party(self):
        """Increment this instance's counter and report it."""
        self.x += 1
        print(self.name, 'total counter', self.x)
# Two independent instances: each keeps its own `x` call counter.
a = Lifecycle('Object1')
b = Lifecycle('Object2')
a.party()
b.party()
a.party() | AlexandrosPanag/My_Python_Projects | Object-Oriented Programming (OOP)/Object Lifecycle/Object Lifecycle.py | Object Lifecycle.py | py | 554 | python | en | code | 1 | github-code | 36 |
30478421727 | import pprint
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_ranking as tfr
import tensorflow_recommenders as tfrs
import collections
def _create_feature_dict():
"""Helper function for creating an empty feature dict for defaultdict."""
return {"embeddings": [], "ranking": []}
def _sample_list(
    feature_lists,
    num_examples_per_list,
    random_state,
):
    """Function for sampling a list example from given feature lists.

    Args:
      feature_lists: dict with "embeddings" and "ranking" lists of equal
        length (one entry per example of a single cc).
      num_examples_per_list: how many entries to sample, without replacement.
      random_state: np.random.RandomState to draw from; a fresh (unseeded)
        one is created when None is passed.

    Returns:
      Tuple of (list of sampled embeddings, the matching rankings
      concatenated with tf.concat along axis 0).
    """
    if random_state is None:
        random_state = np.random.RandomState()
    # Pick num_examples_per_list distinct positions.
    sampled_indices = random_state.choice(
        range(len(feature_lists["embeddings"])),
        size=num_examples_per_list,
        replace=False,
    )
    sampled_embeddings = [
        feature_lists["embeddings"][idx] for idx in sampled_indices
    ]
    sampled_rankings = [
        feature_lists["ranking"][idx]
        for idx in sampled_indices
    ]
    return (
        sampled_embeddings,
        # Concatenate the sampled rankings into a single tensor.
        tf.concat(sampled_rankings, 0),
    )
def sample_listwise(
    ranking_dataset,
    num_list_per_cc,
    num_examples_per_list,
    seed,
):
    """Function for converting the rankings dataset to a listwise dataset.

    Args:
      ranking_dataset:
        The training dataset with [CC,embeddinga,rank] for the specified time period
      num_list_per_cc:
        An integer representing the number of lists that should be sampled for
        each cc in the training dataset.
      num_examples_per_list:
        An integer representing the number of store ranks to be sampled for each list
        from the list of stores ranked "by" the cc. Like a user ranking movies.
      seed:
        An integer for creating `np.random.RandomState.

    Returns:
      A tf.data.Dataset containing list examples.

      Each example contains three keys: "cc_id", "embeddings", and
      "ranking". "cc_id" maps to a integer tensor that represents the
      cc_id for the example. "embeddings" maps to a tensor of shape
      [sum(num_example_per_list)] with dtype tf.Tensor. It represents the list
      of store,cc embedding descriptions. "ranking" maps to a tensor of shape
      [sum(num_example_per_list)] with dtype tf.float32. It represents the
      ranking of each store attached to the cc_id in the candidate list.
    """
    random_state = np.random.RandomState(seed)
    # Group every example's embedding/ranking by its cc_id.
    example_lists_by_cc = collections.defaultdict(_create_feature_dict)
    for example in ranking_dataset:
        user_id = example["cc_id"].numpy()
        example_lists_by_cc[user_id]["embeddings"].append(
            example["embeddings"])
        example_lists_by_cc[user_id]["ranking"].append(
            example["ranking"])
    # Sample num_list_per_cc lists for every cc that has enough examples.
    tensor_slices = {"cc_id": [], "embeddings": [], "ranking": []}
    for cc_id, feature_lists in example_lists_by_cc.items():
        for _ in range(num_list_per_cc):
            # Skip the cc if it doesn't have enough examples to fill a list.
            # (A `break` would be cheaper than `continue`, since the length
            # never changes inside this loop.)
            if len(feature_lists["embeddings"]) < num_examples_per_list:
                continue
            sampled_embeddings, sampled_rankings = _sample_list(
                feature_lists,
                num_examples_per_list,
                random_state=random_state,
            )
            tensor_slices["cc_id"].append(cc_id)
            tensor_slices["embeddings"].append(sampled_embeddings)
            tensor_slices["ranking"].append(sampled_rankings)
    return tf.data.Dataset.from_tensor_slices(tensor_slices)
# Sweet-potato class: models roasting a sweet potato and adding condiments.
class SweetPotato:
    def __init__(self):
        """Start out raw: no cooking time, no condiments."""
        self.cookedLevel = 0           # accumulated roasting time (minutes)
        self.cookedString = "生的"      # human-readable doneness label
        self.condiments = []           # condiments added so far

    def __str__(self):
        """Describe the current state and any condiments."""
        msg = "您的地瓜已经处于 " + self.cookedString + "的状态"
        if len(self.condiments) > 0:
            msg = msg + " ,添加的佐料为:"
            for temp in self.condiments:
                msg = msg + temp + ", "
            msg = msg.strip(", ")
        # (Dropped the stray trailing ")" the original appended -- it had no
        # matching "(".)
        return msg

    def cook(self, time):
        """Roast for `time` more minutes and update the doneness label."""
        self.cookedLevel += time
        # BUGFIX: these assignments used to target `self.cookString` (a
        # typo), so __str__ -- which reads `cookedString` -- always showed
        # the raw state no matter how long the potato was cooked.
        if self.cookedLevel > 8:
            self.cookedString = "烤成灰了"
        elif self.cookedLevel > 5:
            self.cookedString = "烤好了"
        elif self.cookedLevel > 3:
            self.cookedString = "半生不熟"
        else:
            self.cookedString = "生的"

    def addCondiments(self, condiments):
        """Add one condiment to the potato."""
        self.condiments.append(condiments)
mySweetPotato=SweetPotato()
# mySweetPotato.cook(4)
# print(mySweetPotato.cookedLevel)
# print(mySweetPotato.cookString)
# print(mySweetPotato.coodiments)
print("------有了一个地瓜,还没有烤------")
print(mySweetPotato.cookedLevel)
print(mySweetPotato.cookedString)
print(mySweetPotato.condiments)
print("------接下来要进行烤地瓜了------")
print("------地瓜已经烤了4分钟了------")
mySweetPotato.cook(4)#靠4分钟
print(mySweetPotato)
print("------地瓜已经烤了3分钟了------")
mySweetPotato.cook(3)#靠3分钟
print(mySweetPotato)
print("------接下来要添加配料-番茄酱------")
mySweetPotato.addCondiments("番茄酱")
print(mySweetPotato)
print("------地瓜已经烤了5分钟了------")
mySweetPotato.cook(5)#又烤了5分钟
print(mySweetPotato)
print("------接下来要添加配料-芥末酱------")
mySweetPotato.addCondiments('芥末酱')
print(mySweetPotato)
| sjr125697/PythonBasicDemo | 烤地瓜.py | 烤地瓜.py | py | 2,262 | python | en | code | 1 | github-code | 36 |
14300436310 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 20 20:16:15 2021
@author: RISHBANS
"""
import pandas as pd
mnist_data = pd.read_csv("mnist-train.csv")
features = mnist_data.columns[1:]
X = mnist_data[features]
y = mnist_data['label']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X/255, y, test_size =0.15, random_state = 0)
import numpy as np
from keras.utils import np_utils
print(np.unique(y_train, return_counts = True))
n_classes = 10
y_train = np_utils.to_categorical(y_train, n_classes)
y_test = np_utils.to_categorical(y_test, n_classes)
from keras.models import Sequential
from keras.layers import Dense, Dropout
mnist_nn = Sequential()
#Hidden Layer
mnist_nn.add(Dense(units = 100, kernel_initializer='uniform', activation = 'relu', input_dim=784))
mnist_nn.add(Dropout(0.2))
mnist_nn.add(Dense(units = 10, kernel_initializer='uniform', activation = 'softmax'))
mnist_nn.compile(optimizer = 'adam', loss = 'categorical_crossentropy',metrics = ['accuracy'])
mnist_nn.fit(X_train, y_train, batch_size = 64, epochs = 20, validation_data = (X_test, y_test))
y_pred = mnist_nn.predict(X_test)
y_pred = ( y_pred > 0.9)
| edyoda/ML-with-Rishi | mnist_nn.py | mnist_nn.py | py | 1,205 | python | en | code | 4 | github-code | 36 |
30160636048 | import pytest
from django.urls import reverse
from mixer.backend.django import mixer
pytestmark = [pytest.mark.django_db]
def test_get_user_list(api_client):
"""Получение списка пользователей."""
url = reverse('users')
response = api_client.get(url)
assert response.status_code == 200
def test_new_user_in_list(api_client):
"""Появление нового пользователя."""
url = reverse('users')
response = api_client.get(url)
mixer.blend('users.User')
new_response = api_client.get(url)
assert response.content != new_response.content
| X-Viktor/FLStudy | users/tests/api/test_users.py | test_users.py | py | 627 | python | en | code | 1 | github-code | 36 |
35685627050 | """ Classes for basic manipulation of GraphNet """
import numpy as np
import tensorflow as tf
def _copy_any_ds(val):
"""
Copy semantics for different datatypes accepted.
This affects what happens when copying nodes, edges and graphs.
In order to trace gradients,
and defines a consistent interface regardless of the input data-structure.
"""
valout = val
if isinstance(val , np.ndarray) or isinstance(val, list):
valout = val.copy()
if isinstance(val, tf.Variable) or isinstance(val,tf.Tensor):
valout = tf.identity(val) # TODO: maybe have a flag to override this? Adding more ops does not always make sense.
return valout
class Node:
def __init__(self, node_attr_tensor):
if len(node_attr_tensor.shape) <2:
raise ValueError("The shape of the input for nodes and edges should have at least 2 dimensions!")
self.node_attr_tensor = node_attr_tensor
self.incoming_edges = [];
self.shape = self.node_attr_tensor.shape
def get_state(self):
return self.node_attr_tensor
def set_tensor(self, tensor):
self.node_attr_tensor = tensor
self.shape = self.shape = tensor.shape
def copy(self):
return Node(_copy_any_ds(self.node_attr_tensor))
def __add__(self, n):
return Node(self.node_attr_tensor + n.node_attr_tensor)
def __sub__(self, n):
return Node(self.node_attr_tensor - n.node_attr_tensor)
class Edge:
def __init__(self, edge_attr_tensor, node_from, node_to):
self.edge_tensor = edge_attr_tensor
self.node_from = node_from
self.node_to = node_to
self.shape = self.edge_tensor.shape
# Keep a reference to this edge since it is needed for aggregation afterwards.
node_to.incoming_edges.append(self)
def set_tensor(self, edge_tensor):
self.edge_tensor = edge_tensor
self.shape = edge_tensor.shape
def copy(self, nodes_correspondence):
edge_tensor = _copy_any_ds(self.edge_tensor)
node_from = nodes_correspondence[self.node_from]
node_to = nodes_correspondence[self.node_to]
return Edge(edge_tensor, node_from, node_to)
def __add__(self, edge):
Exception("Edge addition is not implemented! This is due to potentially unclear semantics. Perform this manually.")
class Graph:
def __init__(self, nodes, edges, global_attr = None,NO_VALIDATION=True):
"""
Creates a graph from a set of edges and nodes
"""
self.nodes = nodes
self.edges = edges
self.global_attr = global_attr
self.has_global = self.global_attr is not None
if not NO_VALIDATION:
self.validate_graph()
def is_equal_by_value(self,g2):
"""
Checks if the graphs have the same values for node and edge attributes
"""
is_equal = True
for n1,n2 in zip(self.nodes, g2.nodes):
is_equal = is_equal and tf.reduce_all(n1.node_attr_tensor == n2.node_attr_tensor)
for e1, e2 in zip(self.edges, g2.edges):
is_equal = is_equal and tf.reduce_all(e1.edge_tensor== e2.edge_tensor)
if self.has_global:
is_equal = is_equal and (g2.global_attr == self.global_attr)
return bool(is_equal)
def compare_connectivity(self,g2):
"""
Checks if the connectivity of two graphs is the same.
"""
g1 = self
nodes_from_match = [(g1.nodes.index(e1.node_from) == g2.nodes.index(e2.node_from)) for e1,e2 in zip(g1.edges,g2.edges)]
nodes_to_match = [(g1.nodes.index(e1.node_to) == g2.nodes.index(e2.node_to)) for e1,e2 in zip(g1.edges,g2.edges)]
all_matching = True
for matches in [*nodes_from_match, *nodes_to_match]:
all_matching = all_matching and matches
return all_matching
@staticmethod
def validate_graph(self):
# validate that the edges are all
for e in self.edges:
if ((e.node_from in self.nodes)):
raise AssertionError("The source node {nn} for edge {ee} is not in the graph!".format(nn = e.node_from, ee = e))
if (e.node_to in self.nodes):
raise AssertionError("The destination node {nn} for edge {ee} is not in the graph!".format(nn = e.node_to, ee = e))
def copy(self):
# copy attributes of nodes and edges and re-create graph connectivity:
nodes_coppied = [n.copy() for n in self.nodes]
nodes_correspondence = {s:c for s , c in zip(self.nodes,nodes_coppied)}
# Instantiate the new edges:
coppied_edge_instances = []
for e in self.edges:
enew = e.copy(nodes_correspondence)
coppied_edge_instances.append(enew)
return Graph(nodes_coppied, coppied_edge_instances)
def get_subgraph_from_nodes(self, nodes, edge_trimming_mode = "+from+to"):
"""
Node should belong to graph. Creates a new graph with coppied edge and
node properties, defined from a sub-graph of the original graph.
parameters:
self (type = Graph): the graph we want a sub-graph from
nodes: the nodes of the graph we want the subgraph of.
mode: "+from+to" - keep an edge if there is a "from" node or a "to" node at that edge (and the corresponding node)
"-from-to" - keep an edge if there is NOT a "from" node and NOT a "to" node at that edge (and the corresponding node)
"+from" - keep an edge only if it has a "from" node that coincides with any of the nodes in the list (not implemented)
"+to" - keep an edge only if it has a "to" node that coincides with any of the nodes in the list (not implemented)
"-from" - keep an edge only if it DOESN't have a "from" node that concides with any of the nodes in the list (not implemented)
"""
def check_edge_trimming_condition(e_):
if edge_trimming_mode == "+from+to":
return (e.node_from in nodes) and (e.node_to in nodes)
if edge_trimming_mode == "-from+to":
return (e.node_from not in nodes) and (e.node_to not in nodes)
sg_nodes_copy = [n.copy() for n in nodes]
original_copy_nodes_correspondence = {n:nc for n, nc in zip(nodes, sg_nodes_copy)}
sg_edges_copy = [];
if len(self.edges) > 0:
for e in self.edges:
if check_edge_trimming_condition(e):
sg_edges_copy.append(e.copy(original_copy_nodes_correspondence))
g = Graph(sg_nodes_copy, sg_edges_copy)
return g
def __add__(self, graph):
"""
This should only work with graphs that have compatible node and edge features
Assumed also that the two graphs have the same connectivity (otherwise this will fail ugly)
"""
nodes = [nself + n for nself,n in zip(self.nodes,graph.nodes)]
correspondence = {s:t for s, t in zip(self.nodes,nodes)}
added_edges = [];
for eself,e in zip(self.edges, graph.edges):
enew = Edge(eself.edge_tensor + e.edge_tensor,
correspondence[eself.node_from],
correspondence[eself.node_to])
added_edges.append(enew);
return Graph(nodes, added_edges)
def make_graph_tuple_from_graph_list(list_of_graphs):
"""
Takes in a list of graphs (with consistent sizes - not checked)
and creates a graph tuple (input tensors + some book keeping)
Because there is some initial functionality I don't want to throw away currently, that implements special treatment for nodes and edges
coming from graphs with the same topology, it is currently required that the first dimension of nodes and edges
for the list of graphs that are entered in this function is always 1 (this dimension is the batch dimension in the previous implementation.)
"""
# check the first dimension is 1 - instruct to split graphs if not.
problematic_graphs = []
# TODO: Support splitting a list of same graphs with the first dimension of node and edge
# features different than one and constructing a GraphTuple. Currently the first
# dimension is required to be "1" (but squeezed later on!)
for g_index,g in enumerate(list_of_graphs):
problem = ''
all_sizes_same = True
if g.nodes[0].get_state().shape[0] != 1:
problem += 'First size of node attributes should be 1 - found %i '%g.edges[0].get_state().shape[0]
if g.edges[0].edge_tensor.shape[0] != 1:
problem += 'First size of node attributes should be 1 - found %i '%g.edges[0].get_state().shape[0]
# graph_id = [id_ for id_, dummy in enumerate(list_of_graphs)]
all_edges, all_nodes, n_nodes,n_edges =[[],[],[],[]]
for g in list_of_graphs:
all_edges.extend(g.edges)
all_nodes.extend(g.nodes)
n_nodes.append(len(g.nodes))
n_edges.append(len(g.edges))
edge_attr_tensor, nodes_attr_tensor, senders, receivers = [[],[],[],[]];
for e in all_edges:
edge_attr_tensor.append(e.edge_tensor)
senders.append(all_nodes.index(e.node_from))
receivers.append(all_nodes.index(e.node_to))
for n in all_nodes:
nodes_attr_tensor.append(n.node_attr_tensor)
# The 2nd dimension (dimension index 1) should be of size 1 (there is a test in the start of the constructor).
# The same framework supports efficient computation on graphs of the same topology batched together where the first dimension
# is the batched size. It is required that such graphs were provided for the construction (or at least the first dimension is "1").
edges_attr_stacked = tf.squeeze(tf.stack(edge_attr_tensor,0),1)
nodes_attr_stacked = tf.squeeze(tf.stack(nodes_attr_tensor,0),1)
return GraphTuple(nodes_attr_stacked, edges_attr_stacked,senders, receivers, n_nodes, n_edges)# , graph_id)
class GraphTuple:
def __init__(self, nodes, edges,senders,receivers, n_nodes, n_edges, global_attr = None,sort_receivers_to_edges = False , global_reps_for_nodes = None, global_reps_for_edges = None, n_graphs = None):
"""
A graph tuple contains multiple graphs for faster batched computation.
parameters:
nodes : a `tf.Tensor` containing all the node attributes
edges : a `tf.Tensor` containing all the edge attributes
senders : a list of sender node indices defining the graph connectivity. The indices are unique accross graphs
receivers : a list of receiver node indices defining the graph connectivity. The indices are unique accross graphs
n_nodes : a list, a numpy array or a tf.Tensor containing how many nodes are in each graph represented by the nodes and edges in the object
n_edges : a list,a numpy array or a tf.Tensor containing how many edges are in each graph represented by the nodes and edges in the object
global_attr: (optional) a `tf.Tensor` or a `np.array` containing global attributes (first size - self.n_graphs)
sort_receivers : (optional) whether to sort the edges on construction, allowing for not needing to sort the output of the node receiver aggregators.
global_reps_for_edges : (optional) used for the aggregation of the global var.
global_reps_for_nodes : (optional) used for the aggregation of the global var.
n_graphs : (optional)
"""
# Sort edges according to receivers and sort receivers:
assert(len(n_nodes) == len(n_edges))
self.nodes = nodes # floats tensor
self.edges = edges # floats tensor
self.senders = senders # integers
self.receivers = receivers # integers
self.n_nodes = n_nodes # integers
self.n_edges = n_edges # integers
if n_graphs is None:
self.n_graphs = len(self.n_nodes) # assuming the n_nodes is a list containing the number of nodes for each graph.
self.global_attr = global_attr
self.has_global = self.global_attr is not None
graph_indices_nodes = []
for k_,k in enumerate(self.n_nodes):
graph_indices_nodes.extend(np.ones(k).astype("int")*k_)
graph_indices_edges = []
for k_,k in enumerate(self.n_edges):
graph_indices_edges.extend(np.ones(k).astype("int")*k_)
if self.has_global: # <- default global is "None". If it was provided, set the global variable (together with some aggregator indices for convenience and performance).
self.assign_global(global_attr)
self.graph_indices_nodes , self.graph_indices_edges = graph_indices_nodes, graph_indices_edges
if (global_reps_for_edges is None ) and (global_reps_for_nodes is None):
self.update_reps_for_globals()
self.n_graphs = len(self.n_nodes)
def update_reps_for_globals(self):
"""
Some flat vectors for segment sums when dealing with global variables.
This is created even when there are no globals (one just needs the node
and edge counts for each graph.)
"""
global_reps_for_edges = [] # <- used to cast the global tensor to a compatible size for the edges.
for k, e in enumerate(self.n_edges):
global_reps_for_edges.extend([k]*int(e))
self._global_reps_for_edges = global_reps_for_edges
global_reps_for_nodes = [] # <- similarly for nodes:
for k, e in enumerate(self.n_nodes):
global_reps_for_nodes.extend([k]*int(e))
self._global_reps_for_nodes = global_reps_for_nodes
def assign_global(self, global_attr, check_shape = False):
self.has_global = True
if check_shape:
assert(tf.shape(global_attr)[0] == self.n_graphs)
self.global_attr = global_attr
def is_equal_by_value(self, other_graph_tuple):
v1 = self.edges,self.nodes, self.receivers,self.senders, self.n_nodes, self.n_edges, self.n_graphs
v2 = other_graph_tuple.edges,other_graph_tuple.nodes, other_graph_tuple.receivers,other_graph_tuple.senders, other_graph_tuple.n_nodes, other_graph_tuple.n_edges, other_graph_tuple.n_graphs
def _equals_or_all_equals(v1_,v2_):
if isinstance(v1_, list) and isinstance(v2_, list):
return v1_ == v2_
if isinstance(v1_, tf.Variable) and isinstance(v2_, tf.Variable):
return all(v1_ == v2_)
if isinstance(v1_, np.array) and isinstance(v2_. np.array):
return all(v1_ == v2_)
if self.has_global:
global_same = _equals_or_all_equals(other_graph_tuple.global_attr,self.global_attr)
assert(other_graph_tuple.has_global)
else:
global_same = True
return all([_equals_or_all_equals(v1__,v2__) for v1__, v2__ in zip(v1,v2)]) and global_same
def copy(self):
n = _copy_any_ds(self.nodes)
e = _copy_any_ds(self.edges)
s = _copy_any_ds(self.senders)
r = _copy_any_ds(self.receivers)
nnodes = _copy_any_ds(self.n_nodes)
nedges = _copy_any_ds(self.n_edges)
ngraphs = _copy_any_ds(self.n_graphs)
return GraphTuple(n,e,s,r,nnodes,nedges, global_attr = self.global_attr)
def __add__(self, g2):
nodes = self.nodes + g2.nodes
edges = self.edges + g2.edges
s = self.senders
r = self.receivers
n_nodes = self.n_nodes
n_edges = g2.n_edges
if self.has_global and g2.has_global:
new_global = self.global_attr + g2.global_attr
gt = GraphTuple(nodes,edges,s,r,n_nodes, n_edges, global_attr = new_global)
gt._global_reps_for_edges = self._global_reps_for_edges
gt._global_reps_for_nodes = self._global_reps_for_nodes
else:
gt = GraphTuple(nodes, edges, s,r,n_nodes, n_edges)
return gt
def get_graph(self, graph_index):
"""
Returns a new graph with the same properties as the original graph.
gradients are not traced through this operation.
"""
assert(graph_index >=0 )
if graph_index > self.n_graphs:
raise ValueError("The provided index is larger than the available graphs in this GraphTuple object.")
get_start_stop_index = lambda sizes_list, index : np.cumsum([0,*sizes_list[0:index+1]])[-2:]
start_idx_nodes , end_idx_nodes = get_start_stop_index(self.n_nodes, graph_index)
start_idx_edges , end_idx_edges = get_start_stop_index(self.n_edges, graph_index)
nodes_attrs = self.nodes[start_idx_nodes:end_idx_nodes]
senders, receivers, edge_attr = [v[start_idx_edges:end_idx_edges] for v in [self.senders, self.receivers,self.edges]]
senders = senders-start_idx_nodes
receivers = receivers - start_idx_nodes
nodes = [Node(node_attr[tf.newaxis]) for node_attr in nodes_attrs]
edges = [Edge(edge_attr_tensor[tf.newaxis], nodes[node_from_idx], nodes[node_to_idx]) for edge_attr_tensor, node_from_idx, node_to_idx in zip(edge_attr, senders,receivers)]
if self.has_global:
global_attr = self.global_attr[graph_index]
else:
global_attr = None
return Graph(nodes, edges, global_attr = global_attr)
def to_tensor_dict(self):
return _graphtuple_to_tensor_dict(self)
def _graphtuple_to_tensor_dict(gt_):
"""
Transform a GT to a dictionary.
Used for employing the traceable graph_dict evaluation function.
"""
def _tf_constant_or_none(v):
if v is None:
return None
else:
return tf.constant(v)
return {'edges' : _tf_constant_or_none(gt_.edges),
'nodes' : _tf_constant_or_none(gt_.nodes),
'senders' : _tf_constant_or_none(gt_.senders),
'receivers' :_tf_constant_or_none(gt_.receivers),
'n_edges' : _tf_constant_or_none(gt_.n_edges),
'n_nodes' : _tf_constant_or_none(gt_.n_nodes),
'n_graphs' : _tf_constant_or_none(gt_.n_graphs),
'global_attr' : _tf_constant_or_none(gt_.global_attr),
'global_reps_for_edges' : _tf_constant_or_none(gt_._global_reps_for_edges),
'global_reps_for_nodes' : _tf_constant_or_none(gt_._global_reps_for_nodes)}
| mylonasc/tf_gnns | tf_gnns/datastructures.py | datastructures.py | py | 18,665 | python | en | code | 9 | github-code | 36 |
3592662594 | from fastapi import APIRouter, Depends, HTTPException, UploadFile
from sqlalchemy.orm import Session
from typing import List
from db.database import get_db
from security.auth import oauth2_scheme, get_current_user
from . import schemas, crud
router = APIRouter()
@router.post("/events/add")
async def add_event(text: str, image: UploadFile, db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
user = get_current_user(db, token)
if not user.is_admin:
raise HTTPException(status_code=400, detail="No permision")
return crud.add_event(db, text, image)
@router.delete("/events/{event_id}/delete")
async def delete_event(event_id: int, db: Session = Depends(get_db), token: str = Depends(oauth2_scheme)):
user = get_current_user(db, token)
if not user.is_admin:
raise HTTPException(status_code=400, detail="No permision")
return crud.delete_event(db, event_id)
@router.get("/events", response_model=List[schemas.Events])
async def read_events(db: Session = Depends(get_db)):
return crud.get_events(db)
| ostrekodowanie/Synapsis | backend/api/events/routes.py | routes.py | py | 1,070 | python | en | code | 0 | github-code | 36 |
27158727859 | """
CartoonPhoto
Yotam Levit
Date: 13/11/2020
"""
import cv2
def read_image(image_name):
return cv2.imread(image_name)
def get_edged(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
grey = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 9, 9)
return edges
def cartoonization(image, edges):
color = cv2.bilateralFilter(image, 9, 250, 250)
cartoon = cv2.bitwise_and(color, color, mask=edges)
return cartoon
def show_images(image, edges, cartoon):
cv2.imshow("Image" ,image)
cv2.imshow("edges", edges)
cv2.imshow("Cartoon", cartoon)
cv2.waitKey(0)
cv2.destroyAllWindows()
def convert(image_name):
image = read_image(image_name)
edges = get_edged(image)
cartoon = cartoonization(image, edges)
show_images(image, edges, cartoon)
convert("dana4.png") | yotamlevit/CartoonPhoto | Convertor.py | Convertor.py | py | 961 | python | en | code | 0 | github-code | 36 |
9019137787 | from chessboard import *
import pygame
import sys
def redraw(screen, board, pieces, square_size, WHITE, GREY):
# Draw the chess board
for row in range(8):
for col in range(8):
if (row + col) % 2 == 0:
color = WHITE
else:
color = GREY
pygame.draw.rect(screen, color, [col * square_size, row * square_size, square_size, square_size])
if board[row][col] != 0:
# images are 55x55
piece_image = pieces[str(board[row][col])]
piece_width, piece_height = piece_image.get_size()
max_size = min(square_size - 25, piece_width, piece_height)
# If bigger than square_size, resize it to 55x55
piece_image = pygame.transform.smoothscale(piece_image, (max_size, max_size))
# Make sure the chess pieces are centered on the squares
center = (col * square_size + square_size // 2 - max_size // 2, row * square_size + square_size // 2 - max_size // 2)
screen.blit(piece_image, center)
# Update the display
pygame.display.flip()
def main():
chessboard = ChessBoard()
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREY = (128, 128, 128)
# Set up the display
pygame.init()
screen = pygame.display.set_mode((640, 640))
pygame.display.set_caption("Chess")
# Load the chess pieces
pieces = {
str(Pawn("white")): pygame.image.load("images/white_pawn.png"),
str(Rook("white")): pygame.image.load("images/white_rook.png"),
str(Knight("white")): pygame.image.load("images/white_knight.png"),
str(Bishop("white")): pygame.image.load("images/white_bishop.png"),
str(Queen("white")): pygame.image.load("images/white_queen.png"),
str(King("white")): pygame.image.load("images/white_king.png"),
str(Pawn("black")): pygame.image.load("images/black_pawn.png"),
str(Rook("black")): pygame.image.load("images/black_rook.png"),
str(Knight("black")): pygame.image.load("images/black_knight.png"),
str(Bishop("black")): pygame.image.load("images/black_bishop.png"),
str(Queen("black")): pygame.image.load("images/black_queen.png"),
str(King("black")): pygame.image.load("images/black_king.png"),
}
# Define the chess board
board = chessboard.current_position
# Define the square size
square_size = 80
# Draw the chess board
redraw(screen, board, pieces, square_size, WHITE, GREY)
# Update the display
pygame.display.flip()
# Main game loop
selected_piece = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
# if playr is in check and checkmate, game over
# Get the clicked square
x, y = pygame.mouse.get_pos()
row = y // square_size
col = x // square_size
if selected_piece == 0:
# If no piece is selected, select the piece on the clicked square
selected_piece = board[row][col]
if selected_piece != 0 and selected_piece.color != chessboard.current_player:
selected_piece = 0
elif selected_piece != 0 and selected_piece.color == chessboard.current_player:
# Highlight the possible moves for the selected piece
moves, caps = selected_piece.possible_moves(chessboard, pre_check =True)
for move in (moves+caps):
# Highlight the possible moves for the selected piece and lower the saturation of the highlighted squares
pygame.draw.rect(screen, (255, 204, 229), [move[1] * square_size, move[0] * square_size, square_size, square_size])
pygame.display.flip()
else:
# If no white piece is on the clicked square, do nothing
pass
elif selected_piece != 0 and selected_piece.color == chessboard.current_player:
# If a piece is already selected, try to move the selected piece to the clicked square
# moves, caps = selected_piece.possible_moves(row, col, selected_piece, chessboard)
if (row, col) in moves or caps:
# Move the selected piece to the clicked square
selected_piece.move(row, col, chessboard)
# Switch players
chessboard.current_player = "black" if chessboard.current_player == "white" else "white"
if chessboard.is_check():
print("Check!")
print(chessboard.is_checkmate())
if chessboard.is_check() and chessboard.is_checkmate():
print(f"GG, {chessboard.current_player} wins")
pygame.quit()
sys.exit()
selected_piece = 0
# Redraw the board
redraw(screen, board, pieces, square_size, WHITE, GREY)
else:
# If the clicked square is not a valid move for the selected piece, do nothing
selected_piece = 0
redraw(screen, board, pieces, square_size, WHITE, GREY)
else:
selected_piece = 0
redraw(screen, board, pieces, square_size, WHITE, GREY)
if __name__ == '__main__':
main() | Miesjell/chess | main.py | main.py | py | 6,010 | python | en | code | 0 | github-code | 36 |
8403047178 | from typing import Any, List, Dict
class Config:
"""
Contains parsed yaml config
"""
def __init__(self, config_yaml: Any) -> None:
# self.query: Dict[str, TableConfig] = {}
self._parse_conf(config_yaml)
def _parse_conf(self, conf_yaml: Any) -> None:
"""
Parses yaml config and init python structures
:param conf_yaml: config
:return: None
"""
for conf_name, conf_dict in conf_yaml.items():
if conf_name == 'sources':
self._parse_sources_conf(conf_dict)
def _parse_sources_conf(self, conf_yaml: dict):
"""
Parses "sources" config params
:param conf_yaml: config
:return: None
"""
for conf_name, conf_dict in conf_yaml.items():
if conf_name == 'relational_db':
self.querys = _get_key_2_conf(conf_dict, QueryConfig)
class QueryConfig:
"""
Parses table config. Example:
user_table:
db: 'datatp'
schema: 'detail'
connector_type: 'mysql_db'
query: 'select * from [schema].[name]'
"""
db: str
schema: str
connector_type: str
query: str
def __init__(self, conf: Dict):
self.schema = conf.get('schema', '')
self.name = conf.get('name', '')
self.storage_key = conf.get('storage', '')
self.storage_type = conf.get('connector_type', '')
self.query_template = conf.get('query_template', '')
self.expected_columns = conf.get('expected_columns', [])
self.allow_empty = True if conf.get('allow_empty', 'no') == 'yes' else False
class TableConfig:
"""
Parses table config. Example:
user_table:
schema: 'trading_2018'
name: 'All_Users_Table'
storage: 'trading_db'
connector_type: 'mock'
expected_columns: [ 'LOGIN', 'NAME' ]
query_template: 'select * from [schema].[name]'
"""
schema: str
name: str
storage_key: str
storage_type: str
query_template: str
expected_columns: List[str]
def __init__(self, conf: Dict):
self.schema = conf.get('schema', '')
self.name = conf.get('name', '')
self.storage_key = conf.get('storage', '')
self.storage_type = conf.get('connector_type', '')
self.query_template = conf.get('query_template', '')
self.expected_columns = conf.get('expected_columns', [])
self.allow_empty = True if conf.get('allow_empty', 'no') == 'yes' else False
def _get_key_2_conf(conf_dict: dict, class_name: Any) -> Dict[str, Any]:
"""
Parses deep yaml structures into key-class_object structure
:param conf_dict: structures config
:param class_name: structure, that describes in config
:return: key-class_object
"""
key_2_conf_obj = {}
for key, conf in conf_dict.items():
key_2_conf_obj[key] = class_name(conf)
return key_2_conf_obj | parkroyal/Data_Loader | configlayer/models.py | models.py | py | 2,975 | python | en | code | 0 | github-code | 36 |
8135943468 | from accounts.serializers import UserSerializer
from django.shortcuts import redirect
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework.generics import CreateAPIView
from rest_framework.views import APIView
from rest_framework import serializers, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework_jwt.settings import api_settings
from .utils import google_obtain_access_token, google_get_user_info, user_get_or_create, jwt_login, get_user_info
BASE_URL = settings.BASE_URL
BASE_FRONTEND_URL= settings.BASE_FRONTEND_URL
LOGIN_URL = f'{BASE_URL}/accounts/auth/login'
UserModel = get_user_model()
class GetUserApi(APIView):
"""
Determine current user. Return user name, email
and profile image
"""
permission_classes = [AllowAny]
def get(self, request, *args, **kwargs):
if request.user.is_authenticated:
return Response(get_user_info(user=request.user))
return Response(status=status.HTTP_204_NO_CONTENT)
class GoogleLoginAPI(APIView):
"""
Manage login with Google
Get token from request and obtain user information: email,
user name and profile image
"""
permission_classes = []
class InputSerializer(serializers.Serializer):
code = serializers.CharField(required=True)
def get(self, request, *args, **kwargs):
input_serializer = self.InputSerializer(data=request.GET)
input_serializer.is_valid(raise_exception=True)
validated_data = input_serializer.validated_data
code = validated_data.get('code')
if not code:
return redirect(f'{LOGIN_URL}?error')
redirect_uri = f'{LOGIN_URL}/google'
access_token = google_obtain_access_token(
code=code, redirect_uri=redirect_uri)
user_info = google_get_user_info(access_token)
profile_data = {
'first_name': user_info.get('given_name'),
'last_name': user_info.get('family_name'),
'email': user_info.get('email'),
'profile_image': user_info.get('picture'),
}
user = user_get_or_create(profile_data)
res = redirect(BASE_FRONTEND_URL)
res = jwt_login(response=res, user=user)
return res
class LogoutAPI(APIView):
"""
Log out user by removing JWT cookie header
"""
permission_classes = [IsAuthenticated]
def post(self, request, *args, **kwargs):
response = Response(status=status.HTTP_202_ACCEPTED)
params = {
'expires': 'Thu, 01 Jan 1970 00:00:00 GMT',
'domain': api_settings.JWT_AUTH_COOKIE_DOMAIN,
'path': api_settings.JWT_AUTH_COOKIE_PATH,
'secure': api_settings.JWT_AUTH_COOKIE_SECURE,
'samesite': api_settings.JWT_AUTH_COOKIE_SAMESITE,
'httponly': True
}
response.set_cookie(api_settings.JWT_AUTH_COOKIE, **params)
return response
class SignUpUserApi(CreateAPIView):
"""
Create new user with email and password and log user in
"""
serializer_class = UserSerializer
permission_classes = [AllowAny]
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
email = serializer.data['email']
user = UserModel.objects.get(email=email)
res = Response(serializer.data, status=status.HTTP_201_CREATED)
res = jwt_login(response=res, user=user)
return res
| QuocHung52/course-pool-react | backend/accounts/views.py | views.py | py | 3,658 | python | en | code | 0 | github-code | 36 |
24401520806 | from tkinter import *
import Mediator
from Model.BoardModel import BoardModel
from View.BoardView import BoardView
from Controller.GemController import GemController
from random import randint
from Handler import Handler
class BoardController:
    """
    The main class that is responsible for the board.
    The functions of the class work both on the graphical interface and on the logical part.
    Creates classes BoardModel, BoardView and controls them from the outside.
    """
    # Canvas size in pixels; the gems are drawn inside this area.
    _BOARD_HEIGHT = 288
    _BOARD_WIDTH = 288
    _BACKGROUND_COLOR = "blue"
    # Inclusive range of gem type ids passed to GemController.
    _GEM_TYPE_MIN_VALUE = 1
    _GEM_TYPE_MAX_VALUE = 7
    def __init__(self, root: Tk, width: int, height: int, mediator: Mediator):
        """
        initializes the canvas for further work with it.
        creates instances of Board classes.
        Generates the correct random playing field

        :param root: Tk root window the canvas is attached to
        :param width: board width in cells
        :param height: board height in cells
        :param mediator: mediator object forwarded to the view layer
        """
        self._root = root
        self._width = width
        self._height = height
        self._create_canvas()
        self._score_points = 0
        self.board_model = BoardModel(width, height)
        self._board_view = BoardView(root, width, height, self._canvas, mediator)
        self._control = Handler(self)
        self._set_random_board()
    def draw(self):
        """shows graphic elements"""
        self._board_view.draw_board()
    def hide(self):
        """hides graphic elements"""
        self._board_view.hide_board()
    def swap_gems(self, gem1, gem2):
        """Replaces gems, removes streaks and updates statistics"""
        # NOTE(review): the arguments are forwarded in reversed order
        # (gem2, gem1) — presumably matches the model's API; confirm.
        self.board_model.swap_gems(gem2, gem1)
        self._update_points()
    def _update_points(self):
        # Pull the recomputed score from the model and refresh the score label.
        self._score_points = self.board_model.get_points() # receives data processing
        self._board_view.update_score(self._score_points)
    def _create_canvas(self):
        # Tk canvas shared with BoardView for all drawing.
        self._canvas = Canvas(self._root, background=self._BACKGROUND_COLOR, width=self._BOARD_WIDTH,
                              height=self._BOARD_HEIGHT, relief=SOLID, borderwidth=2)
    def _set_gem(self, row, column, gem_controller):
        # Keep model and view in sync for a single cell.
        self.board_model.set_gem(row, column, gem_controller)
        self._board_view.set_gem(row, column, gem_controller)
    def _set_random_board(self):
        # Fill every cell with a random gem, then let the model clean up any
        # ready-made matches so the starting position is valid.
        for i in range(self._width):
            for j in range(self._height):
                rand_type = self._get_random_gem_type()
                gem_controller = GemController(self._root, rand_type, i, j, self._control, self._canvas)
                self._set_gem(i, j, gem_controller) # uses a random stone to initialize the field
        self.board_model.fix_board() # removes groups of gems more than 3
    def _get_random_gem_type(self):
        # Uniformly random gem type id within the configured range.
        return randint(self._GEM_TYPE_MIN_VALUE, self._GEM_TYPE_MAX_VALUE)
    def destroy(self):
        """cleans and destroys the game board"""
        self._canvas.destroy()
| vladbochok/university-tasks | c1s2/labwork-4/Controller/BoardController.py | BoardController.py | py | 2,928 | python | en | code | 3 | github-code | 36 |
20270208049 | """
Module containing the constants needed for the numerical solution of the laplace equation.
"""
# DIMENSIONS OF THE PLATES [CM]
l_y = 5 # Side 1
l_z = 10 # Side 2
d = 1 # Separation in between plates
# DIMENSIONS OF THE BOX [CM]
L_X = 10
L_Y = 15
L_Z = 30
# POTENTIALS [V]
V_1 = 10
V_2 = -5
# MESHING PARAMETERS
ds = 0.1  # grid spacing [cm]
x_min = -5
x_max = 5
M_X = 101  # nodes along x: (x_max - x_min) / ds + 1
y_min = -7.5
y_max = 7.5
M_Y = 151  # nodes along y: (y_max - y_min) / ds + 1
z_min = -15
z_max = 15
M_Z = 301  # nodes along z: (z_max - z_min) / ds + 1
# Iteration tolerance. NOTE(review): 10e-2 == 0.1 — possibly 1e-2 was
# intended; confirm against the solver's convergence criterion.
r_tol = 10e-2
| nmonrio/laplace-eq-sim | params.py | params.py | py | 500 | python | en | code | 0 | github-code | 36 |
28704089727 | #!/usr/bin/env python3
import os
import sys
from pathlib import Path
import logging
from pdf_tool import PDF_Tool
from form import *
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import Qt, QObject, QEvent
from PySide2.QtGui import QIcon, QMouseEvent
os.environ["QT_AUTO_SCREEN_SCALE_FACTOR"] = "1"
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class TestListView(QListWidget):
    """List widget that accepts drag-and-dropped PDF files.

    Emits ``fileDropped`` with the list of newly added local paths;
    paths that are already queued are filtered out.
    """

    fileDropped = Signal(list)

    def __init__(self, parent=None):
        super(TestListView, self).__init__(parent)
        self.setAcceptDrops(True)
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setIconSize(QSize(72, 72))
        self.file_paths = []  # all PDF paths currently shown in the list
        self.files = []       # paths from the most recent drop / selection

    def dragEnterEvent(self, event):
        # Bug fix: ``hasUrls`` is a method — the original truth-tested the
        # bound method itself, which is always truthy. It must be called.
        if event.mimeData().hasUrls():
            event.accept()
        else:
            event.ignore()

    def dragMoveEvent(self, event):
        if event.mimeData().hasUrls():
            event.setDropAction(Qt.CopyAction)
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        if not event.mimeData().hasUrls():
            event.ignore()
            return
        event.setDropAction(Qt.CopyAction)
        event.accept()
        # Keep only local files with a .pdf extension.
        self.files = [u.toLocalFile() for u in event.mimeData().urls()
                      if u.toLocalFile().endswith('.pdf')]
        # Announce only paths we are not already tracking.
        difference = list(set(self.files) - set(self.file_paths))
        if difference:
            self.fileDropped.emit(difference)
            self.file_paths.extend(difference)
class MainWindow(QMainWindow):
    """Frameless main window of the PDF tool.

    Maintains a queue of PDF paths (in ``self.ui.view``) and runs the
    ``PDF_Tool`` conversions (docx / images / text / split / merge) on
    every queued file, reporting errors via message boxes.
    """

    def __init__(self, parent=None):
        QMainWindow.__init__(self)
        self.old_position = None  # last cursor position while dragging the header
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.header.installEventFilter(self)
        self.ui.view.installEventFilter(self)
        self.setWindowIcon(QIcon('icons/pdf.ico'))
        # Frameless window: the custom header widget replaces the title bar.
        flags = Qt.WindowFlags(Qt.FramelessWindowHint | Qt.WindowMaximizeButtonHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setWindowFlags(flags)
        # Wire up the toolbar buttons.
        self.ui.maximize_button.clicked.connect(self.window_full_screen)
        self.ui.exit_button.clicked.connect(self.close)
        self.ui.minimize_button.clicked.connect(self.showMinimized)
        self.ui.search_button.clicked.connect(self.get_files)
        self.ui.word_button.clicked.connect(self.extract_to_docx)
        self.ui.image_button.clicked.connect(self.extract_images)
        self.ui.text_botton.clicked.connect(self.extract_text)
        self.ui.view.fileDropped.connect(self.picture_dropped)
        self.ui.split_button.clicked.connect(self.split_files)
        self.ui.merge_button.clicked.connect(self.merge_files)

    def eventFilter(self, object: QObject, event: QMouseEvent) -> bool:
        """Implement window dragging / double-click maximize on the header.

        NOTE(review): the Delete/Backspace branch only triggers for events
        delivered to the 'header' widget although it edits the list view's
        selection — it looks like it was meant for `view`; confirm before
        changing.
        """
        if object.objectName() == 'header':
            if event.type() == QEvent.MouseButtonDblClick:
                self.window_full_screen()
                return True
            if event.type() == QEvent.MouseButtonPress:
                self.old_position = event.globalPos()
                return True
            if event.type() == QEvent.MouseMove:
                # Guard: a move event before any press would leave
                # old_position as None and crash the subtraction.
                if self.old_position is not None:
                    delta = QPoint(event.globalPos() - self.old_position)
                    self.move(self.x() + delta.x(), self.y() + delta.y())
                    self.old_position = event.globalPos()
                return True
            if event.type() == QEvent.KeyPress:
                if event.key() in (Qt.Key_Backspace, Qt.Key_Delete):
                    self.delete_from_list()
                    return True
        return QMainWindow.eventFilter(self, object, event)

    def window_full_screen(self):
        """Toggle full-screen mode."""
        self.setWindowState(self.windowState() ^ Qt.WindowFullScreen)

    def get_files(self):
        """Open a file dialog and queue any newly selected PDFs."""
        dlg = QFileDialog()
        dlg.setFileMode(QFileDialog.ExistingFiles)
        dlg.setNameFilters(["Pdf files (*.pdf)"])
        if dlg.exec_():
            self.ui.view.files = dlg.selectedFiles()
            new_paths = list(set(self.ui.view.files) - set(self.ui.view.file_paths))
            if new_paths:
                self.ui.view.fileDropped.emit(new_paths)
                self.ui.view.file_paths.extend(new_paths)

    def _for_each_pdf(self, convert):
        """Run ``convert(path, file)`` for every queued PDF.

        Shows a warning when nothing is queued, one critical box per
        failing file, and a single success box when all files converted.
        Replaces four near-identical copies of the same loop.
        """
        if not self.ui.view.file_paths:
            QMessageBox.warning(
                self,
                "Fehler!",
                "Es ist kein Pfad ausgewählt",
                defaultButton=QMessageBox.Ok,
            )
            return
        error = False
        for file in self.ui.view.file_paths:
            try:
                convert(Path(file), file)
            except Exception as e:
                logger.error(e)
                error = True
                QMessageBox.critical(self, 'Fehler!', 'Es ist ein Fehler aufgetreten')
        if not error:
            QMessageBox.information(self, 'Info', "Alles erfolgreich erstellt")

    def extract_to_docx(self):
        """Convert every queued PDF to <stem>-output/<stem>.docx."""
        def convert(path, file):
            output_path = '{}/{}-output/'.format(path.parent, path.stem)
            os.makedirs(output_path, exist_ok=True)
            PDF_Tool.convert_to_docx(file, '{}{}.docx'.format(output_path, path.stem))
        self._for_each_pdf(convert)

    def extract_images(self):
        """Extract embedded images into <stem>-output/images/."""
        def convert(path, file):
            output_path = '{}/{}-output/images'.format(path.parent, path.stem)
            os.makedirs(output_path, exist_ok=True)
            PDF_Tool.extract_images(file, output_path)
        self._for_each_pdf(convert)

    def extract_text(self):
        """Extract text into <stem>-output/<stem>.txt, overwriting any old file."""
        def convert(path, file):
            output_path = '{}/{}-output/'.format(path.parent, path.stem)
            # Bug fix: this converter never created its output directory,
            # unlike the other three.
            os.makedirs(output_path, exist_ok=True)
            text_file = '{}{}.txt'.format(output_path, path.stem)
            if os.path.exists(text_file):
                os.remove(text_file)
            PDF_Tool.convert_to_txt(file, text_file)
        self._for_each_pdf(convert)

    def split_files(self):
        """Split every queued PDF into single pages under <stem>-output/einzelne-seiten/."""
        def convert(path, file):
            output_path = '{}/{}-output/einzelne-seiten'.format(path.parent, path.stem)
            os.makedirs(output_path, exist_ok=True)
            PDF_Tool.split_files(file, output_path)
        self._for_each_pdf(convert)

    def merge_files(self):
        """Merge all queued PDFs into one file named by the user."""
        if not self.ui.view.file_paths:
            QMessageBox.warning(
                self,
                "Fehler!",
                "Es ist kein Pfad ausgewählt",
                defaultButton=QMessageBox.Ok,
            )
            return
        # The merged file is written next to the first queued PDF.
        parent = Path(self.ui.view.file_paths[0]).parent
        text, ok = QInputDialog.getText(self, 'Pdf-Files vereinen', 'Name eingeben')
        if ok:
            try:
                output_path = '{}/{}.pdf'.format(str(parent), text)
                PDF_Tool.merge_files(self.ui.view.file_paths, output_path)
                QMessageBox.information(self, 'Info', "Alles erfolgreich erstellt")
            except Exception as e:
                logger.error(e)
                QMessageBox.critical(self, 'Fehler!', 'Es ist ein Fehler aufgetreten')

    def delete_from_list(self):
        """Remove all selected entries from the widget and the path cache."""
        # Iterate in reverse so row indices stay valid while removing.
        for item in reversed(self.ui.view.selectedItems()):
            item_text = str(item.text())
            self.ui.view.takeItem(self.ui.view.file_paths.index(item_text))
            self.ui.view.file_paths.remove(item_text)

    def picture_dropped(self, files):
        """Append newly dropped/selected files to the list with a preview icon."""
        for url in files:
            if os.path.exists(url):
                pixmap = QIcon(url).pixmap(72, 72)
                item = QListWidgetItem(url, self.ui.view)
                item.setIcon(QIcon(pixmap))
                item.setStatusTip(url)
if __name__ == "__main__":
    # Application entry point: create the Qt application, show the main
    # window, and hand control to the Qt event loop until it closes.
    app = QApplication([])
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
| GschoesserPhilipp/Pdf-Tool-GUI | mainwindow.py | mainwindow.py | py | 10,229 | python | en | code | 0 | github-code | 36 |
33236543359 | from il_utils import *
def make_cfg(insFile,varsFile,printIns=False):
    """Build a control-flow graph from an IL instruction/variable dump.

    Returns a dict keyed by basic-block id; each entry holds the block's
    successor list ('succ'), matching edge labels ('succ_labels'), and the
    gen/kill variable sets for dataflow analysis.

    NOTE(review): the helpers op/i0/i1/output/block/jumpto_* come from
    il_utils via the star import — their exact semantics are assumed here.
    """
    cfg={}
    #load all instructions
    instructions,varmap=load_il(insFile,varsFile)
    # Opcodes whose operands count as "used" (gen) for liveness purposes.
    gen_ins=('Plus','Minus','Times','Greater','And','Or','GreaterEq','Equal','Not','Move')
    # An operand contributes to gen only if it is a real, known variable.
    check_gen=lambda i,f: True if (f(i)!='null' and op(i) in gen_ins and f(i) in list(varmap)) else False
    # An instruction kills when it writes a known variable.
    check_kill=lambda i: True if (output(i)!='null' and output(i) in list(varmap)) else False
    # gen(b): variables read (through operand slots i0/i1) in block b.
    gen=lambda b: set([varmap[i0(i)] for i in instructions_in_block(b,instructions) if check_gen(i,i0)]+
        [varmap[i1(i)] for i in instructions_in_block(b,instructions) if check_gen(i,i1)])
    # kill(b): variables written in block b.
    kill=lambda b: set([varmap[output(i)] for i in instructions_in_block(b,instructions) if check_kill(i)])
    def connect(i,x,edge_annotation=''):
        # Record an edge from instruction i's block to block x,
        # creating the source block's entry on first use.
        assert i!=x,'self-connection detected'
        if block(i) not in cfg: cfg[block(i)]={'succ':[],'succ_labels':[]}
        # if x not in [z[0] for z in cfg[block(i)]]:
        cfg[block(i)]['succ'].append(x)
        cfg[block(i)]['succ_labels'].append(edge_annotation)
    if printIns: print_ins(instructions)
    #main loop
    for ni,i in enumerate(instructions[:-1]):
        next_i=instructions[ni+1]
        # NOTE(review): prev_i is computed but never used.
        if ni>0: prev_i=instructions[ni-1]
        # Unconditional jump: single successor via the jump target.
        if op(i) == 'Jump': connect(i,jumpto_uncond(i,instructions),edge_annotation=' j ')
        # Conditional jump: taken edge plus fall-through edge.
        if op(i) in ('JumpIfNonzero','JumpIfZero'):
            connect(i,jumpto_cond(i,instructions),edge_annotation=' if ')
            connect(i,block(next_i),edge_annotation=' else ')
        # Plain fall-through across a block boundary (non-jump instruction).
        if (block(i) != block(next_i)) and (op(i)[:4]!='Jump'): connect(i,block(next_i))
    # Ensure every block has an entry and attach its gen/kill sets.
    for i in set([block(j) for j in instructions]):
        if i not in cfg:cfg[i]={'succ':[],'succ_labels':[]}
        cfg[i]['gen']=gen(i)
        cfg[i]['kill']=kill(i)
    return cfg
| jorgeypcb/ImpCodeGenerator | riscv/cfg.py | cfg.py | py | 1,858 | python | en | code | 0 | github-code | 36 |
490651592 | # import tcod
from random import randint
from game_messages import Message
class BasicMonster:
    """Default chase-and-attack AI for a monster entity."""

    def take_turn(self, target, game_map, entities):
        """Advance toward *target* if out of reach, otherwise attack it.

        Returns a list of result dicts produced by the attack (empty when
        the monster only moved or the target is already dead).
        """
        monster = self.owner
        if monster.distance_to(target) >= 2:
            # Too far to strike: close the distance instead.
            # monster.move_astar(target, entities, game_map)
            monster.move_towards(target.x, target.y, game_map, entities)
            return []
        if target.fighter.hp > 0:
            return list(monster.fighter.attack(target))
        return []
class ConfusedMonster:
    """Temporary AI that makes its owner stumble randomly for a fixed
    number of turns, then restores the previous AI."""

    def __init__(self, previous_ai, number_of_turns=10):
        self.previous_ai = previous_ai      # AI component to restore afterwards
        self.number_of_turns = number_of_turns

    def take_turn(self, target, game_map, entities):
        """Wander one random step, or end the confusion when turns run out.

        Returns a list of message results (non-empty only on the turn the
        confusion wears off).
        """
        results = []
        if self.number_of_turns > 0:
            # Pick a random adjacent-ish cell (offset in {-1, 0, 1} per axis).
            random_x = self.owner.x + randint(0, 2) - 1
            random_y = self.owner.y + randint(0, 2) - 1
            if random_x != self.owner.x and random_y != self.owner.y:
                self.owner.move_towards(random_x, random_y, game_map, entities)
            # Bug fix: this decrement was commented out, so the confusion
            # never expired and the restore branch below was unreachable.
            self.number_of_turns -= 1
        else:
            self.owner.ai = self.previous_ai
            results.append({'message': Message(
                'The {0} is no longer confused'.format(self.owner.name),
                'red')})
        return results
| Denrur/map_as_dict | ai.py | ai.py | py | 1,543 | python | en | code | 0 | github-code | 36 |
41310446545 | from nltk import CFG
from nltk import ChartParser # parse_cfg, ChartParser
from random import choice
import re
from enum import Enum, auto
from argparse import ArgumentParser
from os import listdir
from os.path import isfile, join
import os
this_dir = os.path.dirname(os.path.abspath(__file__))
name_segment_folder = join(this_dir, "../../name-segments/")
class EnumAutoName(Enum):
    """Enum base class whose ``auto()`` values equal the member names.

    ``str(member)`` therefore yields the bare member name, which keeps
    these enums friendly for argparse choices and user-facing output.
    """

    def __str__(self):
        return self.value

    def _generate_next_value_(name, start, count, last_values):
        # Hook consulted by auto(): use the member's own name as its value.
        return name
def StringToEnum(s):
    """Look up *s* as a Name.NameBank member name; raise ValueError if unknown."""
    known = {member.name for member in Name.NameBank}
    if s not in known:
        raise ValueError
    return Name.NameBank[s]
def get_files_from_path(path, extension_filter=".txt"):
    """Return the names of regular files in *path* ending with *extension_filter*.

    Bug fix: the original tested ``extension_filter in full_f`` — a substring
    match over the whole joined path, which also matched parent directory
    names and files like ``a.txt.bak``. An ``endswith`` check on the file
    name is the intended extension filter.
    """
    files = []
    for f in listdir(path):
        if f.endswith(extension_filter) and isfile(join(path, f)):
            files.append(f)
    return files
def get_available_namebanks_and_syllables():
    """Merge namebank and syllable discovery into one dict.

    Syllable entries overwrite namebank entries on key collision,
    matching the original ``{**namebanks, **syllables}`` semantics.
    """
    combined = dict(get_available_namebanks())
    combined.update(get_available_syllables())
    return combined
def get_available_syllables(where="syllables"):
    """Discover syllable banks on disk, keyed by capitalized base name.

    Gender suffixes (-male/-female) and positional digits (-0..-9) are
    stripped so all files of one bank collapse to a single key.
    """
    global name_segment_folder
    folder = join(name_segment_folder, where)
    available = {}
    for fname in get_files_from_path(folder):
        base = fname.replace(".txt", "").replace("-female", "").replace("-male", "")
        for digit in range(10):
            base = base.replace(f"-{digit}", "")
        available[base.capitalize()] = auto()
    return available
def get_available_namebanks(where="forenames"):
    """Discover whole-name banks on disk, keyed by capitalized base name."""
    global name_segment_folder
    available = {}
    for fname in get_files_from_path(join(name_segment_folder, where)):
        base = fname.replace(".txt", "").replace("-female", "").replace("-male", "")
        available[base.capitalize()] = auto()
    return available
def get_available_origins(where="nouns"):
    """Discover origin word banks on disk, keyed by capitalized base name."""
    global name_segment_folder
    available = {}
    for fname in get_files_from_path(join(name_segment_folder, where)):
        base = fname.replace(".txt", "").replace("-female", "").replace("-male", "")
        available[base.capitalize()] = auto()
    return available
class Name:
    """Configuration object describing how a name should be generated."""

    class NameOrder(EnumAutoName):
        Eastern = auto()
        Forename_Only = auto()
        Surname_Only = auto()
        Western = "Western"

    # Name banks are discovered from files on disk at import time.
    namebank_values = get_available_namebanks_and_syllables()
    NameBank = EnumAutoName('NameBank', namebank_values)

    # Kept for backward compatibility. The dynamically built
    # ``Origin = EnumAutoName('Origin', origin_values)`` that used to
    # follow was dead code — it was immediately shadowed by the static
    # Origin class below — and has been removed.
    origin_values = get_available_origins()

    class NameType(EnumAutoName):
        Forename = auto()
        Surname = auto()

    class Origin(EnumAutoName):
        Aquatic = auto()
        Desert = auto()
        Mountain = auto()
        Tundra = auto()
        Urban = auto()
        Forest = auto()
        Air = auto()

    def __init__(self):
        # Gender flags all start False ("unspecified"); FileFetcher
        # defaults to gender-neutral when none is selected.
        self.gender_male = False
        self.gender_female = False
        self.gender_neutral = False
        self.has_position = False
        self.order = Name.NameOrder.Western
class FileFetcher():
    """Resolves which name-segment files apply to a given Name config."""

    def __init__(self):
        pass

    def get_gender_endings(self, config, always_neutral=False):
        """Return the file-name gender suffixes selected by *config*.

        Falls back to gender-neutral (and records that on the config)
        when no gender flag is set.
        """
        endings = []
        if config.gender_male:
            endings.append("male")
        if config.gender_female:
            endings.append("female")
        if config.gender_neutral or always_neutral:
            endings.append("")
        if not endings:
            print("No Gender Selection. Defaulting to gender neutral")
            config.gender_neutral = True
            endings.append("")
        return endings

    def get_position_files(self, config):
        """Relative paths of the position/title files matching *config*."""
        files = []
        for g in self.get_gender_endings(config):
            suffix = f"-{g}" if g != "" else g
            files.append(f'prefixes/positions{suffix}.txt')
        return files

    def SyllableLength(self, namebank):
        """Number of distinct syllable positions available for *namebank*."""
        global name_segment_folder
        path = join(name_segment_folder, "syllables")
        unique = set()
        for f in get_files_from_path(path):
            if namebank in f:
                # Collapse gendered variants of the same position.
                unique.add(f.replace("-female", "").replace("-male", ""))
        return len(unique)
class Grammar:
    """Builds a CFG description (written to a .grammar file) from a Name config.

    Non-terminals are stored in ``self.obj`` as ``{lhs: [rhs tokens]}``;
    a ``None`` entry in an rhs list becomes an alternation (``|``).
    Tokens of the form ``['relative/file.txt']`` are placeholders that
    ``resolve_grammar()`` later expands into terminal alternatives.
    """

    def __init__(self, config):
        self.config = config
        self.obj = {}          # lhs -> list of rhs tokens (None == "|")
        self.root = "S"
        self.ff = FileFetcher()

    def initialize(self):
        """Create the start symbol and the shared terminal tokens."""
        self.obj[self.root] = ["PRE", "CORE", "POST"]
        self.basic_tokens()

    def basic_tokens(self):
        # Terminals shared across rules: a space and the letters of "of".
        self.obj["SPC"] = ["' '"]
        self.obj["OF"] = ["'o'", "'f'"]

    def define_position(self, config, optional=False):
        """Add a title prefix ("Captain …") and an origin postfix ("… of <place>")."""
        positions = [f"['{p}']" for p in self.ff.get_position_files(config)]
        self.obj["PRE"] = ["TITLE", "SPC"]
        if optional:
            self.obj["PRE"].append(None)
        self.obj["TITLE"] = positions
        origin = config.origin.name.lower()
        self.obj["POST"] = ["SPC", "OF", "SPC", "WHERE"]
        if optional:
            self.obj["POST"].append(None)
        # TODO: Allow multiple origins
        self.obj["WHERE"] = [f"['postfixes/{origin}.txt']"]

    def setNameOrder(self, order):
        """Define CORE according to the requested name order."""
        if order == Name.NameOrder.Western:
            self.obj["CORE"] = ["FORENAME", "SPC", "SURNAME"]
        elif order == Name.NameOrder.Eastern:
            self.obj["CORE"] = ["SURNAME", "SPC", "FORENAME"]
        elif order == Name.NameOrder.Forename_Only:
            self.obj["CORE"] = ["FORENAME"]
        elif order == Name.NameOrder.Surname_Only:
            # Bug fix: this branch previously emitted FORENAME (a
            # copy/paste slip), making Surname_Only identical to
            # Forename_Only.
            self.obj["CORE"] = ["SURNAME"]
        else:
            print("Unimplemented Name Order: ", order, ". Defaulting to Western")
            self.setNameOrder(Name.NameOrder.Western)

    def getNamesFromSyllables(self, config, name_type):
        """Define *name_type* as a chain of per-position syllable files."""
        ges = self.ff.get_gender_endings(config)
        namebank = config.namebank.name.lower()
        name_type = name_type.name.upper()
        global name_segment_folder
        # TODO: Check compatible with namebanks
        syls = self.ff.SyllableLength(namebank)
        self.obj[name_type] = [f"SYLLABLE{x}" for x in range(syls)]
        for x in range(syls):
            pt = []
            for g in ges:
                g = f"-{g}" if g != "" else g
                f = f'syllables/{namebank}{g}-{x}.txt'
                if os.path.exists(join(name_segment_folder, f)):
                    pt.append(f)
                else:
                    print(f"Warn/Err: No syllable file found: {f}. May produce bad name.")
            self.obj[f"SYLLABLE{x}"] = [pt]

    def getNamesFromBank(self, config, name_type):
        """Define *name_type* directly from whole-name bank files."""
        ges = self.ff.get_gender_endings(config)
        namebank = config.namebank.name.lower()
        name_type = name_type.name.upper()
        pt = []
        for g in ges:
            g = f"-{g}" if g != "" else g
            # The data folders are pluralized ("forenames/", "surnames/").
            pt.append(f'{name_type.lower()}s/{namebank}{g}.txt')
        self.obj[name_type] = [pt]

    def constructName(self, config, name_type):
        """Define *name_type* as an adjective+noun compound (e.g. Goldheart)."""
        name_type = name_type.name.upper()
        self.obj[name_type] = ["ADJ", "NOUN"]
        self.buildAdjBank(config)
        self.buildNounBank(config)

    def buildAdjBank(self, config):
        origin = config.origin.name.lower()
        # TODO: Dodginess/Alignment. John Bloodsword seems more evil than John Goldheart
        self.obj["ADJ"] = [f"['adjectives/{origin}.txt']"]

    def buildNounBank(self, config):
        origin = config.origin.name.lower()
        # TODO: Dodginess/Alignment. John Poisonblood seems more evil than John Goldheart
        self.obj["NOUN"] = [f"['nouns/{origin}.txt']"]

    def write(self, dir="", filename="custom.grammar"):
        """Serialize the grammar to *dir*/*filename*; return the absolute path.

        Also caches the serialized text on ``self.string_repr`` for __str__.
        (Removed: leftover debug print statements.)
        """
        # TODO: order carefully
        lines = []
        for key, value in self.obj.items():
            rhs = "".join(f"{' | ' if v is None else v} " for v in value)
            lines.append(f"{key} -> {rhs}\n")
        self.string_repr = "".join(lines)
        filename = os.path.join(dir, filename)
        with open(filename, "w") as f:
            f.write(self.string_repr)
        return os.path.abspath(filename)

    def __str__(self):
        # Only meaningful after write() has produced string_repr.
        if hasattr(self, "string_repr"):
            return self.string_repr
        return "Not Finalized"
def define_grammar(config, where=""):
    """Build and persist the grammar file for *config*; return the written path."""
    grammar = Grammar(config)
    grammar.initialize()
    if config.has_position:
        grammar.define_position(config)
    grammar.setNameOrder(config.order)
    # Prefer syllable-built forenames when syllable files exist for the bank.
    bank = config.namebank.name.lower()
    if grammar.ff.SyllableLength(bank) > 0:
        grammar.getNamesFromSyllables(config, Name.NameType.Forename)
    else:
        grammar.getNamesFromBank(config, Name.NameType.Forename)
    # TODO: Use namebank for Surnames
    grammar.constructName(config, Name.NameType.Surname)
    return grammar.write(where)
def resolve_grammar(G):
    """Expand every ``['relative/path.txt']`` placeholder in grammar text *G*.

    Each placeholder is replaced by a ``'term1' | 'term2' | ...`` alternation
    built from the non-comment lines of the referenced file; missing files
    produce a warning and an empty expansion.
    """
    def file_contents(match):
        global name_segment_folder
        filename = join(name_segment_folder, str(match.group(1)))
        try:
            raw_lines = open(filename).readlines()
        except FileNotFoundError:
            print("Warn/Err: File doesn't exist:", filename, ". May produce bad names.")
            return ""
        parts = []
        for idx, term in enumerate(raw_lines):
            term = term.replace("\n", "")
            # Lines containing '#' are treated as comments.
            if "#" not in term:
                sep = "|" if idx > 0 else ""
                parts.append(f"{sep} '{term}' ")
        return "".join(parts)
    return re.sub(r"\[\'([a-zA-Z\-\.\/0-9]*)\'\]", file_contents, G)
def generate_name(G, ):
    """Parse resolved grammar text *G* and emit one random, title-cased name."""
    parsed = ChartParser(CFG.fromstring(G)).grammar()
    tokens = produce(parsed, parsed.start())
    return ''.join(tokens).title()
def produce(grammar, symbol):
    """Randomly expand *symbol* with *grammar*, returning terminal tokens."""
    production = choice(grammar.productions(lhs=symbol))
    words = []
    for sym in production.rhs():
        if isinstance(sym, str):
            words.append(sym)          # terminal: keep as-is
        else:
            words.extend(produce(grammar, sym))  # non-terminal: recurse
    return words
def generate(args, where=""):
    """Translate parsed CLI *args* into a Name config and produce one name."""
    config = Name()
    config.has_position = True
    config.origin = args.origin
    config.namebank = args.namebank
    config.order = args.order
    config.gender_male = args.gender_male
    config.gender_female = args.gender_female
    config.gender_neutral = args.gender_neutral
    # Build the grammar file, resolve its file placeholders, then sample.
    grammar_path = define_grammar(config, where)
    resolved = resolve_grammar(open(grammar_path).read())
    name = generate_name(resolved)
    if args.verbose:
        print("Your Character:", name)
    return name
def parse_args():
    """Build the CLI parser and return parsed args (verbose forced on)."""
    parser = ArgumentParser(description="Generate a character name")
    # Gender toggles
    for flag in ("--gender-male", "--gender-female", "--gender-neutral"):
        parser.add_argument(flag, action="store_true")
    # Origins
    parser.add_argument('--order', type=Name.NameOrder, choices=list(Name.NameOrder), nargs="?", default=Name.NameOrder.Western)
    parser.add_argument('--origin', type=Name.Origin, choices=list(Name.Origin), nargs="?", default=Name.Origin.Mountain)
    parser.add_argument('--namebank', type=Name.NameBank, choices=Name.NameBank, nargs="?", default=Name.NameBank.Dwarf)
    parsed = parser.parse_args()
    parsed.verbose = True
    return parsed
if __name__ == "__main__":
    # CLI entry point: parse flags, then generate and print one name.
    a = parse_args()
    generate(a)
| Mimic-Tools/name-generation | src/name_generation/generate.py | generate.py | py | 12,855 | python | en | code | 12 | github-code | 36 |
24389835444 | import re as _re
from argparse import *
from .. import path as _path
_ArgumentParser = ArgumentParser
_Action = Action
# Add some simple wrappers to make it easier to specify shell-completion
# behaviors.
def _add_complete(argument, complete):
    """Attach a shell-completion hint to *argument* and return it.

    An explicit *complete* wins; otherwise File/Directory ``type``
    callables imply 'file'/'directory' completion.
    """
    if complete is not None:
        argument.complete = complete
        return argument
    if isinstance(argument.type, File):
        argument.complete = 'file'
    elif isinstance(argument.type, Directory):
        argument.complete = 'directory'
    return argument
class Action(_Action):
    # argparse.Action subclass that additionally accepts a ``complete=``
    # keyword carrying a shell-completion hint (see _add_complete).
    def __init__(self, *args, complete=None, **kwargs):
        super().__init__(*args, **kwargs)
        _add_complete(self, complete)
class ToggleAction(Action):
    """Boolean action exposing paired option strings.

    Each supplied ``--foo`` becomes a true form and a false form derived
    from the subclass's ``_true_prefix``/``_false_prefix`` (e.g.
    ``--enable-foo`` / ``--disable-foo``); which spelling was used on the
    command line decides the stored boolean.
    """

    def __init__(self, option_strings, dest, default=False, required=False,
                 complete=None, help=None):
        if not option_strings:
            raise ValueError('option string must begin with "--"')
        self.true_strings = [self._prefix(s, self._true_prefix)
                             for s in option_strings]
        self.false_strings = [self._prefix(s, self._false_prefix)
                              for s in option_strings]
        super().__init__(self.true_strings + self.false_strings, dest=dest,
                         nargs=0, default=default, required=required,
                         complete=complete, help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        # True iff the user typed one of the "true" spellings.
        setattr(namespace, self.dest, option_string in self.true_strings)

    @staticmethod
    def _prefix(s, prefix):
        # Insert *prefix* right after the leading '--' (or '--x-').
        if not s.startswith('--'):
            raise ValueError('option string must begin with "--"')
        return _re.sub('(^--(x-)?)', r'\1' + prefix, s)
class EnableAction(ToggleAction):
    # Paired boolean flags: --enable-foo / --disable-foo.
    _true_prefix = 'enable-'
    _false_prefix = 'disable-'
class WithAction(ToggleAction):
    # Paired boolean flags: --with-foo / --without-foo.
    _true_prefix = 'with-'
    _false_prefix = 'without-'
class BaseFile:
    """Callable argparse ``type`` that normalizes a path argument.

    Subclasses supply ``_kind`` plus the ``_abspath``/``_check_type``
    hooks. When the path exists it must be of the expected kind; when it
    does not, ``must_exist`` decides whether that is an error.
    """

    def __init__(self, must_exist=False):
        self.must_exist = must_exist

    def __call__(self, string):
        p = self._abspath(string)
        if not _path.exists(p):
            if self.must_exist:
                raise ArgumentTypeError("'{}' does not exist".format(string))
            return p
        if not self._check_type(p):
            raise ArgumentTypeError("'{}' is not a {}"
                                    .format(string, self._kind))
        return p
class Directory(BaseFile):
    # argparse ``type`` callable for directory-valued arguments.
    _kind = 'directory'

    @staticmethod
    def _abspath(p):
        # Normalize as a directory path (no drive-absolutizing).
        return _path.abspath(p, directory=True, absdrive=False)

    @staticmethod
    def _check_type(p):
        return _path.isdir(p)
class File(BaseFile):
    # argparse ``type`` callable for file-valued arguments.
    _kind = 'file'

    @staticmethod
    def _abspath(p):
        # Normalize as a file path (no drive-absolutizing).
        return _path.abspath(p, directory=False, absdrive=False)

    @staticmethod
    def _check_type(p):
        return _path.isfile(p)
class ArgumentParser(_ArgumentParser):
    # argparse.ArgumentParser variant whose registered action types all
    # accept a ``complete=`` keyword (shell-completion hint) and which
    # disables long-option abbreviation without breaking combined short
    # options.

    @staticmethod
    def _wrap_complete(action):
        # Wrap an action class/factory so it accepts ``complete=`` and
        # tags the produced action via _add_complete().
        def wrapper(*args, complete=None, **kwargs):
            return _add_complete(action(*args, **kwargs), complete)
        return wrapper

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Retro-fit every already-registered action type with
        # ``complete=`` support, then add the enable/with toggles.
        for k, v in self._registries['action'].items():
            self._registries['action'][k] = self._wrap_complete(v)
        self.register('action', 'enable', EnableAction)
        self.register('action', 'with', WithAction)

    def _get_option_tuples(self, option_string):
        # Don't try to check prefixes for long options; this is similar to
        # Python 3.5's `allow_abbrev=False`, except this doesn't break combined
        # short options. See <https://bugs.python.org/issue26967>.
        if option_string[:2] == self.prefix_chars * 2:
            return []
        return super()._get_option_tuples(option_string)
# It'd be nice to just have a UserArgumentParser class with this method but it
# wouldn't propagate to argument groups, so it's easier to just do it this way.
def add_user_argument(parser, *names, **kwargs):
    """Add a user-facing ``--option`` to *parser*.

    All names must be long options and must not use the reserved ``--x-``
    prefix; when the parser's usage mode is 'parse', a hidden ``--x-*``
    alias is added for each name.
    """
    for name in names:
        if not name.startswith('--'):
            raise ValueError('option string must begin with "--"')
    for name in names:
        if name.startswith('--x-'):
            raise ValueError('"x-" prefix is reserved')
    if parser.usage == 'parse':
        names += tuple('--x-' + name[2:] for name in names)
    return parser.add_argument(*names, **kwargs)
| jimporter/bfg9000 | bfg9000/arguments/parser.py | parser.py | py | 4,385 | python | en | code | 73 | github-code | 36 |
34181491873 | import json
import logging
import traceback
import warnings
from datetime import datetime
from collections import OrderedDict
from typing import Dict, Callable, Optional, Union, List, Any, Type, Sequence
from qiskit.providers.backend import BackendV1 as Backend
from qiskit.providers.provider import ProviderV1 as Provider
from qiskit.providers.exceptions import QiskitBackendNotFoundError
from qiskit.providers.providerutils import filter_backends
from qiskit.providers.models import (
PulseBackendConfiguration,
QasmBackendConfiguration,
)
from qiskit_ibm_provider.proxies import ProxyConfiguration
from qiskit_ibm_provider.utils.hgp import to_instance_format, from_instance_format
from qiskit_ibm_provider.utils.backend_decoder import configuration_from_server_data
from qiskit_ibm_runtime import ibm_backend
from .utils.utils import validate_job_tags
from .accounts import AccountManager, Account, ChannelType
from .api.clients import AuthClient, VersionClient
from .api.clients.runtime import RuntimeClient
from .api.exceptions import RequestsApiError
from .constants import QISKIT_IBM_RUNTIME_API_URL
from .exceptions import IBMNotAuthorizedError, IBMInputValueError, IBMAccountError
from .exceptions import (
IBMRuntimeError,
RuntimeProgramNotFound,
RuntimeJobNotFound,
)
from .hub_group_project import HubGroupProject # pylint: disable=cyclic-import
from .utils.result_decoder import ResultDecoder
from .runtime_job import RuntimeJob
from .utils import RuntimeDecoder, to_python_identifier
from .api.client_parameters import ClientParameters
from .runtime_options import RuntimeOptions
from .ibm_backend import IBMBackend
logger = logging.getLogger(__name__)
SERVICE_NAME = "runtime"
class QiskitRuntimeService(Provider):
"""Class for interacting with the Qiskit Runtime service.
Qiskit Runtime is a new architecture offered by IBM Quantum that
streamlines computations requiring many iterations. These experiments will
execute significantly faster within its improved hybrid quantum/classical
process.
A sample workflow of using the runtime service::
from qiskit_ibm_runtime import QiskitRuntimeService, Session, Sampler, Estimator, Options
from qiskit.test.reference_circuits import ReferenceCircuits
from qiskit.circuit.library import RealAmplitudes
from qiskit.quantum_info import SparsePauliOp
# Initialize account.
service = QiskitRuntimeService()
# Set options, which can be overwritten at job level.
options = Options(optimization_level=1)
# Prepare inputs.
bell = ReferenceCircuits.bell()
psi = RealAmplitudes(num_qubits=2, reps=2)
H1 = SparsePauliOp.from_list([("II", 1), ("IZ", 2), ("XI", 3)])
theta = [0, 1, 1, 2, 3, 5]
with Session(service=service, backend="ibmq_qasm_simulator") as session:
# Submit a request to the Sampler primitive within the session.
sampler = Sampler(session=session, options=options)
job = sampler.run(circuits=bell)
print(f"Sampler results: {job.result()}")
# Submit a request to the Estimator primitive within the session.
estimator = Estimator(session=session, options=options)
job = estimator.run(
circuits=[psi], observables=[H1], parameter_values=[theta]
)
print(f"Estimator results: {job.result()}")
The example above uses the dedicated :class:`~qiskit_ibm_runtime.Sampler`
and :class:`~qiskit_ibm_runtime.Estimator` classes. You can also
use the :meth:`run` method directly to invoke a Qiskit Runtime program.
If the program has any interim results, you can use the ``callback``
parameter of the :meth:`run` method to stream the interim results.
Alternatively, you can use the :meth:`RuntimeJob.stream_results` method to stream
the results at a later time, but before the job finishes.
The :meth:`run` method returns a
:class:`RuntimeJob` object. You can use its
methods to perform tasks like checking job status, getting job result, and
canceling job.
"""
global_service = None
def __init__(
    self,
    channel: Optional[ChannelType] = None,
    token: Optional[str] = None,
    url: Optional[str] = None,
    filename: Optional[str] = None,
    name: Optional[str] = None,
    instance: Optional[str] = None,
    proxies: Optional[dict] = None,
    verify: Optional[bool] = None,
    channel_strategy: Optional[str] = None,
) -> None:
    """QiskitRuntimeService constructor

    An account is selected in the following order:

        - Account with the input `name`, if specified.
        - Default account for the `channel` type, if `channel` is specified but `token` is not.
        - Account defined by the input `channel` and `token`, if specified.
        - Account defined by the `default_channel` if defined in filename
        - Account defined by the environment variables, if defined.
        - Default account for the ``ibm_cloud`` account, if one is available.
        - Default account for the ``ibm_quantum`` account, if one is available.

    `instance`, `proxies`, and `verify` can be used to overwrite corresponding
    values in the loaded account.

    Args:
        channel: Channel type. ``ibm_cloud`` or ``ibm_quantum``.
        token: IBM Cloud API key or IBM Quantum API token.
        url: The API URL.
            Defaults to https://cloud.ibm.com (ibm_cloud) or
            https://auth.quantum-computing.ibm.com/api (ibm_quantum).
        filename: Full path of the file where the account is created.
            Default: _DEFAULT_ACCOUNT_CONFIG_JSON_FILE
        name: Name of the account to load.
        instance: The service instance to use.
            For ``ibm_cloud`` runtime, this is the Cloud Resource Name (CRN) or the service name.
            For ``ibm_quantum`` runtime, this is the hub/group/project in that format.
        proxies: Proxy configuration. Supported optional keys are
            ``urls`` (a dictionary mapping protocol or protocol and host to the URL of the proxy,
            documented at https://docs.python-requests.org/en/latest/api/#requests.Session.proxies),
            ``username_ntlm``, ``password_ntlm`` (username and password to enable NTLM user
            authentication)
        verify: Whether to verify the server's TLS certificate.
        channel_strategy: Error mitigation strategy.

    Returns:
        An instance of QiskitRuntimeService.

    Raises:
        IBMInputValueError: If an input is invalid.
    """
    super().__init__()

    # Resolve which stored/explicit account to use (precedence documented above).
    self._account = self._discover_account(
        token=token,
        url=url,
        instance=instance,
        channel=channel,
        filename=filename,
        name=name,
        proxies=ProxyConfiguration(**proxies) if proxies else None,
        verify=verify,
        channel_strategy=channel_strategy,
    )
    self._client_params = ClientParameters(
        channel=self._account.channel,
        token=self._account.token,
        url=self._account.url,
        instance=self._account.instance,
        proxies=self._account.proxies,
        verify=self._account.verify,
    )
    # An explicit constructor argument wins over the strategy stored with the account.
    self._channel_strategy = channel_strategy or self._account.channel_strategy
    self._channel = self._account.channel
    self._backends: Dict[str, "ibm_backend.IBMBackend"] = {}
    self._backend_configs: Dict[str, Any] = {}
    if self._channel == "ibm_cloud":
        self._api_client = RuntimeClient(self._client_params)
        # TODO: We can make the backend discovery lazy
        self._backends = self._discover_cloud_backends()
        QiskitRuntimeService.global_service = self
        self._validate_channel_strategy()
        return
    else:
        # ``ibm_quantum``: authenticate first, then point the client at the
        # runtime URL and access token returned by the auth service.
        auth_client = self._authenticate_ibm_quantum_account(self._client_params)
        # Update client parameters to use authenticated values.
        self._client_params.url = auth_client.current_service_urls()["services"]["runtime"]
        if self._client_params.url == "https://api.de.quantum-computing.ibm.com/runtime":
            warnings.warn(
                "Features in versions of qiskit-ibm-runtime greater than and including "
                "0.13.0 may not be supported in this environment"
            )
        self._client_params.token = auth_client.current_access_token()
        self._api_client = RuntimeClient(self._client_params)
        self._hgps = self._initialize_hgps(auth_client)
        # Backend objects are created lazily later; only register names here.
        for hgp in self._hgps.values():
            for backend_name in hgp.backends:
                if backend_name not in self._backends:
                    self._backends[backend_name] = None
        self._current_instance = self._account.instance
        if not self._current_instance:
            # Fall back to the default (first) hub/group/project.
            self._current_instance = self._get_hgp().name
            logger.info("Default instance: %s", self._current_instance)
    QiskitRuntimeService.global_service = self
    # TODO - it'd be nice to allow some kind of autocomplete, but `service.ibmq_foo`
    # just seems wrong since backends are not runtime service instances.
    # self._discover_backends()
def _discover_account(
    self,
    token: Optional[str] = None,
    url: Optional[str] = None,
    instance: Optional[str] = None,
    channel: Optional[ChannelType] = None,
    filename: Optional[str] = None,
    name: Optional[str] = None,
    proxies: Optional[ProxyConfiguration] = None,
    verify: Optional[bool] = None,
    channel_strategy: Optional[str] = None,
) -> Account:
    """Discover account.

    Resolves which account to use, in this precedence order: named account,
    explicit channel (optionally with token), then the AccountManager default.
    Explicit ``instance``/``proxies``/``verify`` arguments override the
    values loaded from disk.

    Raises:
        ValueError: If an invalid channel/channel_strategy combination is
            given, or if 'token'/'url' are supplied without 'channel' or 'name'.
    """
    account = None
    # NOTE(review): ``verify or True`` always evaluates to True, so an
    # explicit verify=False never reaches Account.create_account here; it is
    # only applied by the ``account.verify = verify`` override further below.
    verify_ = verify or True
    if channel_strategy:
        if channel_strategy not in ["q-ctrl", "default"]:
            raise ValueError(f"{channel_strategy} is not a valid channel strategy.")
        if channel and channel != "ibm_cloud":
            raise ValueError(
                f"The channel strategy {channel_strategy} is "
                "only supported on the ibm_cloud channel."
            )
    if name:
        # A named account wins; explicit channel/token/url are ignored (with a warning).
        if filename:
            if any([channel, token, url]):
                logger.warning(
                    "Loading account from file %s with name %s. Any input "
                    "'channel', 'token' or 'url' are ignored.",
                    filename,
                    name,
                )
        else:
            if any([channel, token, url]):
                logger.warning(
                    "Loading account with name %s. Any input "
                    "'channel', 'token' or 'url' are ignored.",
                    name,
                )
        account = AccountManager.get(filename=filename, name=name)
    elif channel:
        if channel and channel not in ["ibm_cloud", "ibm_quantum"]:
            raise ValueError("'channel' can only be 'ibm_cloud' or 'ibm_quantum'")
        if token:
            account = Account.create_account(
                channel=channel,
                token=token,
                url=url,
                instance=instance,
                proxies=proxies,
                verify=verify_,
                channel_strategy=channel_strategy,
            )
        else:
            if url:
                logger.warning("Loading default %s account. Input 'url' is ignored.", channel)
            account = AccountManager.get(filename=filename, name=name, channel=channel)
    elif any([token, url]):
        # Let's not infer based on these attributes as they may change in the future.
        raise ValueError(
            "'channel' is required if 'token', or 'url' is specified but 'name' is not."
        )
    # channel is not defined yet, get it from the AccountManager
    if account is None:
        account = AccountManager.get(filename=filename)
    # Explicit constructor arguments override whatever was loaded from disk.
    if instance:
        account.instance = instance
    if proxies:
        account.proxies = proxies
    if verify is not None:
        account.verify = verify
    # resolve CRN if needed
    self._resolve_crn(account)
    # ensure account is valid, fail early if not
    account.validate()
    return account
def _validate_channel_strategy(self) -> None:
    """Fail fast when the configured channel strategy and the service
    instance's Q-CTRL capability disagree.

    Raises:
        IBMNotAuthorizedError: If the strategy does not match the instance.
    """
    wants_qctrl = self._channel_strategy == "q-ctrl"
    has_qctrl = self._api_client.is_qctrl_enabled()
    if wants_qctrl and not has_qctrl:
        raise IBMNotAuthorizedError(
            "The instance passed in is not compatible with Q-CTRL channel strategy. "
            "Please switch to or create an instance with the Q-CTRL strategy enabled. "
            "See https://cloud.ibm.com/docs/quantum-computing?"
            "topic=quantum-computing-get-started for more information"
        )
    if not wants_qctrl and has_qctrl:
        raise IBMNotAuthorizedError(
            "The instance passed in is only compatible with Q-CTRL performance "
            "management strategy. "
            "To use this instance, set channel_strategy='q-ctrl'."
        )
def _discover_cloud_backends(self) -> Dict[str, "ibm_backend.IBMBackend"]:
    """Fetch and instantiate every remote backend visible to this cloud
    service instance.

    Returns:
        A dict of the remote backend instances, keyed by backend name.
    """
    discovered = OrderedDict()  # type: ignore[var-annotated]
    backend_names = self._api_client.list_backends(channel_strategy=self._channel_strategy)
    for backend_name in backend_names:
        raw_config = self._api_client.backend_configuration(backend_name=backend_name)
        parsed = configuration_from_server_data(
            raw_config=raw_config, instance=self._account.instance
        )
        # Skip backends whose configuration could not be parsed.
        if not parsed:
            continue
        discovered[parsed.backend_name] = ibm_backend.IBMBackend(
            configuration=parsed,
            service=self,
            api_client=self._api_client,
        )
    return discovered
def _resolve_crn(self, account: Account) -> None:
    """Resolve the account's Cloud Resource Name (CRN) in place, delegating
    to the Account object itself."""
    account.resolve_crn()
def _authenticate_ibm_quantum_account(self, client_params: ClientParameters) -> AuthClient:
    """Authenticate against IBM Quantum and populate the hub/group/projects.

    Args:
        client_params: Parameters used for server connection.

    Raises:
        IBMInputValueError: If the URL specified is not a valid IBM Quantum authentication URL.
        IBMNotAuthorizedError: If the account is not authorized to use runtime.

    Returns:
        Authentication client.
    """
    version_info = self._check_api_version(client_params)
    # Check the URL is a valid authentication URL.
    if not version_info["new_api"] or "api-auth" not in version_info:
        raise IBMInputValueError(
            "The URL specified ({}) is not an IBM Quantum authentication URL. "
            "Valid authentication URL: {}.".format(
                client_params.url, QISKIT_IBM_RUNTIME_API_URL
            )
        )
    auth_client = AuthClient(client_params)
    service_urls = auth_client.current_service_urls()
    # The auth response must advertise the runtime service; otherwise this
    # account cannot use Qiskit Runtime at all.
    if not service_urls.get("services", {}).get(SERVICE_NAME):
        raise IBMNotAuthorizedError(
            "This account is not authorized to use ``ibm_quantum`` runtime service."
        )
    return auth_client
def _initialize_hgps(
    self,
    auth_client: AuthClient,
) -> Dict:
    """Authenticate against IBM Quantum and populate the hub/group/projects.

    Args:
        auth_client: Authentication data.

    Raises:
        IBMInputValueError: If the URL specified is not a valid IBM Quantum authentication URL.
        IBMAccountError: If no hub/group/project could be found for this account.
        IBMInputValueError: If instance parameter is not found in hgps.

    Returns:
        The hub/group/projects for this account, ordered so that the default
        instance (if any) comes first.
    """
    # pylint: disable=unsubscriptable-object
    hgps: OrderedDict[str, HubGroupProject] = OrderedDict()
    service_urls = auth_client.current_service_urls()
    user_hubs = auth_client.user_hubs()
    for hub_info in user_hubs:
        # Build credentials.
        hgp_params = ClientParameters(
            channel=self._account.channel,
            token=auth_client.current_access_token(),
            url=service_urls["services"]["runtime"],
            instance=to_instance_format(
                hub_info["hub"], hub_info["group"], hub_info["project"]
            ),
            proxies=self._account.proxies,
            verify=self._account.verify,
        )
        # Build the hgp.
        try:
            hgp = HubGroupProject(
                client_params=hgp_params, instance=hgp_params.instance, service=self
            )
            hgps[hgp.name] = hgp
        except Exception:  # pylint: disable=broad-except
            # Catch-all for errors instantiating the hgp.
            # A single bad hgp should not prevent the rest from loading.
            logger.warning(
                "Unable to instantiate hub/group/project for %s: %s",
                hub_info,
                traceback.format_exc(),
            )
    if not hgps:
        raise IBMAccountError(
            "No hub/group/project that supports Qiskit Runtime could "
            "be found for this account."
        )
    # Move open hgp to end of the list
    # (the first entry is demoted by popping with last=False and re-inserting;
    # presumably the first hgp returned is the open-access one — TODO confirm)
    if len(hgps) > 1:
        open_key, open_val = hgps.popitem(last=False)
        hgps[open_key] = open_val
    default_hgp = self._account.instance
    if default_hgp:
        if default_hgp in hgps:
            # Move user selected hgp to front of the list
            hgps.move_to_end(default_hgp, last=False)
        else:
            raise IBMInputValueError(
                f"Hub/group/project {default_hgp} could not be found for this account."
            )
    return hgps
@staticmethod
def _check_api_version(params: ClientParameters) -> Dict[str, Union[bool, str]]:
    """Query the remote server described by *params* for its version info.

    Args:
        params: Parameters used for server connection.

    Returns:
        A dictionary with version information.
    """
    finder = VersionClient(url=params.url, **params.connection_parameters())
    return finder.version()
def _get_hgp(
    self,
    instance: Optional[str] = None,
    backend_name: Optional[Any] = None,
) -> HubGroupProject:
    """Return the `HubGroupProject` matching the given criteria.

    When ``instance`` is given it is validated and returned; otherwise the
    first hgp is returned, or — when ``backend_name`` is given — the first
    hgp that provides that backend.

    Args:
        instance: The hub/group/project to use.
        backend_name: Name of the IBM Quantum backend.

    Returns:
        An instance of `HubGroupProject` that matches the specified criteria or the default.

    Raises:
        IBMInputValueError: If no hub/group/project matches the specified criteria,
            or if the input value is in an incorrect format.
        QiskitBackendNotFoundError: If backend cannot be found.
    """
    if instance:
        _ = from_instance_format(instance)  # Verify format
        if instance not in self._hgps:
            raise IBMInputValueError(
                f"Hub/group/project {instance} " "could not be found for this account."
            )
        selected = self._hgps[instance]
        if backend_name and not selected.has_backend(backend_name):
            raise QiskitBackendNotFoundError(
                f"Backend {backend_name} cannot be found in " f"hub/group/project {instance}"
            )
        return selected
    if not backend_name:
        # Default hgp: the first one (ordering is set up at initialization).
        return next(iter(self._hgps.values()))
    for candidate in self._hgps.values():
        if candidate.has_backend(backend_name):
            return candidate
    message = (
        f"Backend {backend_name} cannot be found in any " f"hub/group/project for this account."
    )
    if not isinstance(backend_name, str):
        message += (
            f" {backend_name} is of type {type(backend_name)} but should "
            f"instead be initialized through the {self}."
        )
    raise QiskitBackendNotFoundError(message)
def _discover_backends(self) -> None:
    """Attach each already-known backend as an attribute on this service."""
    for backend in self._backends.values():
        attr_name = to_python_identifier(backend.name)
        # De-duplicate by appending underscores until the name is free.
        while attr_name in self.__dict__:
            attr_name += "_"
        setattr(self, attr_name, backend)
# pylint: disable=arguments-differ
def backends(
    self,
    name: Optional[str] = None,
    min_num_qubits: Optional[int] = None,
    instance: Optional[str] = None,
    filters: Optional[Callable[[List["ibm_backend.IBMBackend"]], bool]] = None,
    **kwargs: Any,
) -> List["ibm_backend.IBMBackend"]:
    """Return all backends accessible via this account, subject to optional filtering.

    Args:
        name: Backend name to filter by.
        min_num_qubits: Minimum number of qubits the backend has to have.
        instance: This is only supported for ``ibm_quantum`` runtime and is in the
            hub/group/project format.
        filters: More complex filters, such as lambda functions.
            For example::

                QiskitRuntimeService.backends(
                    filters=lambda b: b.max_shots > 50000)
                QiskitRuntimeService.backends(
                    filters=lambda x: ("rz" in x.basis_gates )

        **kwargs: Simple filters that require a specific value for an attribute in
            backend configuration or status.
            Examples::

                # Get the operational real backends
                QiskitRuntimeService.backends(simulator=False, operational=True)
                # Get the backends with at least 127 qubits
                QiskitRuntimeService.backends(min_num_qubits=127)
                # Get the backends that support OpenPulse
                QiskitRuntimeService.backends(open_pulse=True)

            For the full list of backend attributes, see the `IBMBackend` class documentation
            <https://docs.quantum.ibm.com/api/qiskit/providers_models>

    Returns:
        The list of available backends that match the filter.

    Raises:
        IBMInputValueError: If an input is invalid.
        QiskitBackendNotFoundError: If the backend is not in any instance.
    """
    # TODO filter out input_allowed not having runtime
    backends: List[IBMBackend] = []
    instance_filter = instance if instance else self._account.instance
    if self._channel == "ibm_quantum":
        if name:
            if name not in self._backends:
                raise QiskitBackendNotFoundError("No backend matches the criteria.")
            # NOTE(review): this branch compares against the raw ``instance``
            # argument rather than ``instance_filter`` — confirm whether the
            # account's default instance should also apply here.
            if not self._backends[name] or instance != self._backends[name]._instance:
                self._set_backend_config(name)
                self._backends[name] = self._create_backend_obj(
                    self._backend_configs[name],
                    instance,
                )
            if self._backends[name]:
                backends.append(self._backends[name])
        elif instance_filter:
            hgp = self._get_hgp(instance=instance_filter)
            for backend_name in hgp.backends:
                # (Re)create the backend object when it is missing or was
                # cached under a different instance.
                if (
                    not self._backends[backend_name]
                    or instance_filter != self._backends[backend_name]._instance
                ):
                    self._set_backend_config(backend_name, instance_filter)
                    self._backends[backend_name] = self._create_backend_obj(
                        self._backend_configs[backend_name], instance_filter
                    )
                if self._backends[backend_name]:
                    backends.append(self._backends[backend_name])
        else:
            # No name and no instance: lazily materialize every known backend.
            for backend_name, backend_config in self._backends.items():
                if not backend_config:
                    self._set_backend_config(backend_name)
                    self._backends[backend_name] = self._create_backend_obj(
                        self._backend_configs[backend_name]
                    )
                if self._backends[backend_name]:
                    backends.append(self._backends[backend_name])
    else:
        if instance:
            raise IBMInputValueError(
                "The 'instance' keyword is only supported for ``ibm_quantum`` runtime."
            )
        backends = list(self._backends.values())
    if name:
        kwargs["backend_name"] = name
    if min_num_qubits:
        backends = list(
            filter(lambda b: b.configuration().n_qubits >= min_num_qubits, backends)
        )
    return filter_backends(backends, filters=filters, **kwargs)
def _set_backend_config(self, backend_name: str, instance: Optional[str] = None) -> None:
    """Fetch and cache the configuration for *backend_name* if not cached yet.

    Args:
        backend_name: backend whose configuration should be cached.
        instance: the current hub/group/project.
    """
    if backend_name in self._backend_configs:
        return
    raw_config = self._api_client.backend_configuration(backend_name)
    self._backend_configs[backend_name] = configuration_from_server_data(
        raw_config=raw_config, instance=instance
    )
def _create_backend_obj(
    self,
    config: Union[QasmBackendConfiguration, PulseBackendConfiguration],
    instance: Optional[str] = None,
) -> IBMBackend:
    """Given a backend configuration return the backend object.

    Args:
        config: backend configuration.
        instance: the current h/g/p.

    Returns:
        A backend object, or ``None`` when ``config`` is falsy.

    Raises:
        QiskitBackendNotFoundError: if the backend is not in the hgp passed in.
    """
    if config:
        if not instance:
            # No instance given: adopt the first hgp that provides this backend.
            for hgp in list(self._hgps.values()):
                if config.backend_name in hgp.backends:
                    instance = to_instance_format(hgp._hub, hgp._group, hgp._project)
                    break
        elif config.backend_name not in self._get_hgp(instance=instance).backends:
            raise QiskitBackendNotFoundError(
                f"Backend {config.backend_name} is not in "
                f"{instance}: please try a different hub/group/project."
            )
        return ibm_backend.IBMBackend(
            instance=instance,
            configuration=config,
            service=self,
            api_client=self._api_client,
        )
    return None
def active_account(self) -> Optional[Dict[str, str]]:
    """Return the IBM Quantum account currently in use for the session.

    Returns:
        A dictionary with information about the account currently in the session.
    """
    # The Account object owns the canonical saved-format serialization.
    return self._account.to_saved_format()
@staticmethod
def delete_account(
    filename: Optional[str] = None,
    name: Optional[str] = None,
    channel: Optional[ChannelType] = None,
) -> bool:
    """Delete a saved account from disk.

    Static: operates purely on the on-disk store via AccountManager, without
    touching any live service instance.

    Args:
        filename: Name of file from which to delete the account.
        name: Name of the saved account to delete.
        channel: Channel type of the default account to delete.
            Ignored if account name is provided.

    Returns:
        True if the account was deleted.
        False if no account was found.
    """
    return AccountManager.delete(filename=filename, name=name, channel=channel)
@staticmethod
def save_account(
    token: Optional[str] = None,
    url: Optional[str] = None,
    instance: Optional[str] = None,
    channel: Optional[ChannelType] = None,
    filename: Optional[str] = None,
    name: Optional[str] = None,
    proxies: Optional[dict] = None,
    verify: Optional[bool] = None,
    overwrite: Optional[bool] = False,
    channel_strategy: Optional[str] = None,
    set_as_default: Optional[bool] = None,
) -> None:
    """Save the account to disk for future use.

    Args:
        token: IBM Cloud API key or IBM Quantum API token.
        url: The API URL.
            Defaults to https://cloud.ibm.com (ibm_cloud) or
            https://auth.quantum-computing.ibm.com/api (ibm_quantum).
        instance: The CRN (ibm_cloud) or hub/group/project (ibm_quantum).
        channel: Channel type. `ibm_cloud` or `ibm_quantum`.
        filename: Full path of the file where the account is saved.
        name: Name of the account to save.
        proxies: Proxy configuration. Supported optional keys are
            ``urls`` (a dictionary mapping protocol or protocol and host to the URL of the proxy,
            documented at https://docs.python-requests.org/en/latest/api/#requests.Session.proxies),
            ``username_ntlm``, ``password_ntlm`` (username and password to enable NTLM user
            authentication)
        verify: Verify the server's TLS certificate.
        overwrite: ``True`` if the existing account is to be overwritten.
        channel_strategy: Error mitigation strategy.
        set_as_default: If ``True``, the account is saved in filename,
            as the default account.
    """
    # Persisting is delegated entirely to AccountManager; the proxies dict is
    # wrapped in a ProxyConfiguration the same way the constructor does it.
    AccountManager.save(
        token=token,
        url=url,
        instance=instance,
        channel=channel,
        filename=filename,
        name=name,
        proxies=ProxyConfiguration(**proxies) if proxies else None,
        verify=verify,
        overwrite=overwrite,
        channel_strategy=channel_strategy,
        set_as_default=set_as_default,
    )
@staticmethod
def saved_accounts(
    default: Optional[bool] = None,
    channel: Optional[ChannelType] = None,
    filename: Optional[str] = None,
    name: Optional[str] = None,
) -> dict:
    """List the accounts saved on disk.

    Args:
        default: If set to True, only default accounts are returned.
        channel: Channel type. `ibm_cloud` or `ibm_quantum`.
        filename: Name of file whose accounts are returned.
        name: If set, only accounts with the given name are returned.

    Returns:
        A dictionary with information about the accounts saved on disk.

    Raises:
        ValueError: If an invalid account is found on disk.
    """
    saved = AccountManager.list(default=default, channel=channel, filename=filename, name=name)
    # A dict comprehension replaces the previous dict(map(lambda ...)) chain —
    # same result, far more readable.
    return {
        account_name: Account.to_saved_format(account)
        for account_name, account in saved.items()
    }
def backend(
self,
name: str = None,
instance: Optional[str] = None,
) -> Backend:
"""Return a single backend matching the specified filtering.
Args:
name: Name of the backend.
instance: This is only supported for ``ibm_quantum`` runtime and is in the
hub/group/project format. If an instance is not given, among the providers
with access to the backend, a premium provider will be prioritized.
For users without access to a premium provider, the default open provider will be used.
Returns:
Backend: A backend matching the filtering.
Raises:
QiskitBackendNotFoundError: if no backend could be found.
"""
# pylint: disable=arguments-differ, line-too-long
backends = self.backends(name, instance=instance)
if not backends:
cloud_msg_url = ""
if self._channel == "ibm_cloud":
cloud_msg_url = (
" Learn more about available backends here "
"https://cloud.ibm.com/docs/quantum-computing?topic=quantum-computing-choose-backend "
)
raise QiskitBackendNotFoundError("No backend matches the criteria." + cloud_msg_url)
return backends[0]
def get_backend(self, name: str = None, **kwargs: Any) -> Backend:
    """Return a single backend matching ``name``; thin alias of :meth:`backend`
    kept for compatibility with the legacy provider interface."""
    return self.backend(name, **kwargs)
def run(
    self,
    program_id: str,
    inputs: Dict,
    options: Optional[Union[RuntimeOptions, Dict]] = None,
    callback: Optional[Callable] = None,
    result_decoder: Optional[Union[Type[ResultDecoder], Sequence[Type[ResultDecoder]]]] = None,
    session_id: Optional[str] = None,
    start_session: Optional[bool] = False,
) -> RuntimeJob:
    """Execute the runtime program.

    Args:
        program_id: Program ID.
        inputs: Program input parameters. These input values are passed
            to the runtime program.
        options: Runtime options that control the execution environment.
            See :class:`RuntimeOptions` for all available options.
        callback: Callback function to be invoked for any interim results and final result.
            The callback function will receive 2 positional parameters:

                1. Job ID
                2. Job result.

        result_decoder: A :class:`ResultDecoder` subclass used to decode job results.
            If more than one decoder is specified, the first is used for interim results and
            the second final results. If not specified, a program-specific decoder or the default
            ``ResultDecoder`` is used.
        session_id: Job ID of the first job in a runtime session.
        start_session: Set to True to explicitly start a runtime session. Defaults to False.

    Returns:
        A ``RuntimeJob`` instance representing the execution.

    Raises:
        IBMInputValueError: If input is invalid.
        RuntimeProgramNotFound: If the program cannot be found.
        IBMRuntimeError: An error occurred running the program.
    """
    # Normalize ``options`` into a RuntimeOptions instance before validating.
    qrt_options: RuntimeOptions = options
    if options is None:
        qrt_options = RuntimeOptions()
    elif isinstance(options, Dict):
        qrt_options = RuntimeOptions(**options)
    qrt_options.validate(channel=self.channel)
    hgp_name = None
    if self._channel == "ibm_quantum":
        # Find the right hgp
        hgp = self._get_hgp(instance=qrt_options.instance, backend_name=qrt_options.backend)
        hgp_name = hgp.name
        if hgp_name != self._current_instance:
            self._current_instance = hgp_name
            logger.info("Instance selected: %s", self._current_instance)
        backend = self.backend(name=qrt_options.backend, instance=hgp_name)
        status = backend.status()
        # Warn (but do not fail) when the backend is up yet not accepting jobs normally.
        if status.operational is True and status.status_msg != "active":
            warnings.warn(
                f"The backend {backend.name} currently has a status of {status.status_msg}."
            )
    try:
        response = self._api_client.program_run(
            program_id=program_id,
            backend_name=qrt_options.backend,
            params=inputs,
            image=qrt_options.image,
            hgp=hgp_name,
            log_level=qrt_options.log_level,
            session_id=session_id,
            job_tags=qrt_options.job_tags,
            max_execution_time=qrt_options.max_execution_time,
            start_session=start_session,
            session_time=qrt_options.session_time,
            channel_strategy=None
            if self._channel_strategy == "default"
            else self._channel_strategy,
        )
        if self._channel == "ibm_quantum":
            # Surface any server-side advisory messages to the user.
            messages = response.get("messages")
            if messages:
                warning_message = messages[0].get("data")
                warnings.warn(warning_message)
    except RequestsApiError as ex:
        if ex.status_code == 404:
            raise RuntimeProgramNotFound(f"Program not found: {ex.message}") from None
        raise IBMRuntimeError(f"Failed to run program: {ex}") from None
    # Prefer the backend the server actually scheduled the job on.
    backend = (
        self.backend(name=response["backend"], instance=hgp_name)
        if response["backend"]
        else qrt_options.backend
    )
    job = RuntimeJob(
        backend=backend,
        api_client=self._api_client,
        client_params=self._client_params,
        job_id=response["id"],
        program_id=program_id,
        user_callback=callback,
        result_decoder=result_decoder,
        image=qrt_options.image,
        service=self,
    )
    return job
def job(self, job_id: str) -> RuntimeJob:
"""Retrieve a runtime job.
Args:
job_id: Job ID.
Returns:
Runtime job retrieved.
Raises:
RuntimeJobNotFound: If the job doesn't exist.
IBMRuntimeError: If the request failed.
"""
try:
response = self._api_client.job_get(job_id, exclude_params=True)
except RequestsApiError as ex:
if ex.status_code == 404:
raise RuntimeJobNotFound(f"Job not found: {ex.message}") from None
raise IBMRuntimeError(f"Failed to delete job: {ex}") from None
return self._decode_job(response)
def jobs(
    self,
    limit: Optional[int] = 10,
    skip: int = 0,
    backend_name: Optional[str] = None,
    pending: bool = None,
    program_id: str = None,
    instance: Optional[str] = None,
    job_tags: Optional[List[str]] = None,
    session_id: Optional[str] = None,
    created_after: Optional[datetime] = None,
    created_before: Optional[datetime] = None,
    descending: bool = True,
) -> List[RuntimeJob]:
    """Retrieve all runtime jobs, subject to optional filtering.

    Args:
        limit: Number of jobs to retrieve. ``None`` means no limit.
        skip: Starting index for the job retrieval.
        backend_name: Name of the backend to retrieve jobs from.
        pending: Filter by job pending state. If ``True``, 'QUEUED' and 'RUNNING'
            jobs are included. If ``False``, 'DONE', 'CANCELLED' and 'ERROR' jobs
            are included.
        program_id: Filter by Program ID.
        instance: This is only supported for ``ibm_quantum`` runtime and is in the
            hub/group/project format.
        job_tags: Filter by tags assigned to jobs. Matched jobs are associated with all tags.
        session_id: Job ID of the first job in a runtime session.
        created_after: Filter by the given start date, in local time. This is used to
            find jobs whose creation dates are after (greater than or equal to) this
            local date/time.
        created_before: Filter by the given end date, in local time. This is used to
            find jobs whose creation dates are before (less than or equal to) this
            local date/time.
        descending: If ``True``, return the jobs in descending order of the job
            creation date (i.e. newest first) until the limit is reached.

    Returns:
        A list of runtime jobs.

    Raises:
        IBMInputValueError: If an input value is invalid.
    """
    hub = group = project = None
    if instance:
        if self._channel == "ibm_cloud":
            raise IBMInputValueError(
                "The 'instance' keyword is only supported for ``ibm_quantum`` runtime."
            )
        hub, group, project = from_instance_format(instance)
    if job_tags:
        validate_job_tags(job_tags)
    job_responses = []  # type: List[Dict[str, Any]]
    current_page_limit = limit or 20
    offset = skip
    # Page through server results until the limit, or the end, is reached.
    while True:
        jobs_response = self._api_client.jobs_get(
            limit=current_page_limit,
            skip=offset,
            backend_name=backend_name,
            pending=pending,
            program_id=program_id,
            hub=hub,
            group=group,
            project=project,
            job_tags=job_tags,
            session_id=session_id,
            created_after=created_after,
            created_before=created_before,
            descending=descending,
        )
        job_page = jobs_response["jobs"]
        # count is the total number of jobs that would be returned if
        # there was no limit or skip
        count = jobs_response["count"]
        job_responses += job_page
        if len(job_responses) == count - skip:
            # Stop if there are no more jobs returned by the server.
            break
        if limit:
            if len(job_responses) >= limit:
                # Stop if we have reached the limit.
                break
            # Request only the remaining number of jobs on the next page.
            current_page_limit = limit - len(job_responses)
        else:
            current_page_limit = 20
        offset += len(job_page)
    return [self._decode_job(job) for job in job_responses]
def delete_job(self, job_id: str) -> None:
    """Delete a runtime job.

    Note that this operation cannot be reversed.

    Args:
        job_id: ID of the job to delete.

    Raises:
        RuntimeJobNotFound: If the job doesn't exist.
        IBMRuntimeError: If the request failed.
    """
    try:
        self._api_client.job_delete(job_id)
    except RequestsApiError as ex:
        # Map transport-level errors onto the package's exception hierarchy.
        if ex.status_code == 404:
            raise RuntimeJobNotFound(f"Job not found: {ex.message}") from None
        raise IBMRuntimeError(f"Failed to delete job: {ex}") from None
def _decode_job(self, raw_data: Dict) -> RuntimeJob:
    """Decode job data received from the server.

    Args:
        raw_data: Raw job data received from the server.

    Returns:
        Decoded job data.
    """
    instance = None
    if self._channel == "ibm_quantum":
        hub = raw_data.get("hub")
        group = raw_data.get("group")
        project = raw_data.get("project")
        if all([hub, group, project]):
            instance = to_instance_format(hub, group, project)
    # Try to find the right backend
    try:
        if "backend" in raw_data:
            backend = self.backend(raw_data["backend"], instance=instance)
        else:
            backend = None
    except QiskitBackendNotFoundError:
        # The backend may have been retired since the job ran; fall back to a
        # placeholder object so the job can still be inspected.
        backend = ibm_backend.IBMRetiredBackend.from_name(
            backend_name=raw_data["backend"],
            api=None,
        )
    # ``params`` may arrive as a list, a dict, or an already-serialized string;
    # normalize to a JSON string before decoding with RuntimeDecoder.
    params = raw_data.get("params", {})
    if isinstance(params, list):
        if len(params) > 0:
            params = params[0]
        else:
            params = {}
    if not isinstance(params, str):
        params = json.dumps(params)
    decoded = json.loads(params, cls=RuntimeDecoder)
    return RuntimeJob(
        backend=backend,
        api_client=self._api_client,
        client_params=self._client_params,
        service=self,
        job_id=raw_data["id"],
        program_id=raw_data.get("program", {}).get("id", ""),
        params=decoded,
        creation_date=raw_data.get("created", None),
        session_id=raw_data.get("session_id"),
        tags=raw_data.get("tags"),
    )
def least_busy(
    self,
    min_num_qubits: Optional[int] = None,
    instance: Optional[str] = None,
    filters: Optional[Callable[[List["ibm_backend.IBMBackend"]], bool]] = None,
    **kwargs: Any,
) -> ibm_backend.IBMBackend:
    """Return the least busy available backend.

    Args:
        min_num_qubits: Minimum number of qubits the backend has to have.
        instance: This is only supported for ``ibm_quantum`` runtime and is in the
            hub/group/project format.
        filters: Filters can be defined as for the :meth:`backends` method.
            An example to get the operational backends with 5 qubits::

                QiskitRuntimeService.least_busy(min_num_qubits=5, operational=True)

    Returns:
        The backend with the fewest number of pending jobs.

    Raises:
        QiskitBackendNotFoundError: If no backend matches the criteria.
    """
    backends = self.backends(
        min_num_qubits=min_num_qubits, instance=instance, filters=filters, **kwargs
    )
    candidates = []
    for back in backends:
        backend_status = back.status()
        # Only keep backends that are up and actively accepting jobs.
        if not backend_status.operational or backend_status.status_msg != "active":
            continue
        candidates.append(back)
    if not candidates:
        raise QiskitBackendNotFoundError("No backend matches the criteria.")
    # NOTE: status() is queried again per candidate inside the key function.
    return min(candidates, key=lambda b: b.status().pending_jobs)
def instances(self) -> List[str]:
    """Return the IBM Quantum instances available in this session.

    Returns:
        A list with instances currently in the session (empty for
        non-``ibm_quantum`` channels).
    """
    if self._channel != "ibm_quantum":
        return []
    # Iterating the mapping yields its keys (the instance names).
    return list(self._hgps)
@property
def channel(self) -> str:
    """Return the channel type used.

    Returns:
        The channel type used (``ibm_cloud`` or ``ibm_quantum``).
    """
    return self._channel
@property
def runtime(self):  # type:ignore
    """Return self for compatibility with IBMQ provider.

    Kept so code written against the legacy ``provider.runtime`` access
    pattern continues to work.

    Returns:
        self
    """
    return self
def __repr__(self) -> str:
return "<{}>".format(self.__class__.__name__)
| Qiskit/qiskit-ibm-runtime | qiskit_ibm_runtime/qiskit_runtime_service.py | qiskit_runtime_service.py | py | 47,557 | python | en | code | 106 | github-code | 36 |
5603106879 | import discord
from utils.auth import AuthManager
class Account(discord.Cog):
    """Cog exposing account-registration slash commands."""

    @discord.slash_command(name="register", description="Register using your discord username")
    async def register(self, ctx):
        """Register the invoking user (and their guild), then reply privately."""
        AuthManager.registerGuild(ctx.author)
        AuthManager.registerUser(ctx.author)
        reply = AuthManager.signUp(ctx.author)
        await ctx.response.send_message(content=reply, ephemeral=True)
def setup(bot):
    """Entry point called by Pycord to register this cog on the bot."""
    bot.add_cog(Account(bot))
| liang799/rivenDealer | cogs/account.py | account.py | py | 517 | python | en | code | 1 | github-code | 36 |
10739040334 | from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from QLed import QLed
class Box(QGroupBox):
    """Three-state (Hand/AUS/AUTO) control group with one LED per state.

    Each radio button selects a mode (currently only printed with the OPC id;
    presumably forwarded to an OPC server elsewhere -- TODO confirm) and lights
    exactly one LED; :meth:`update` mirrors externally read values back onto
    the widgets.
    """

    # Class-level registry of every Box ever created; set_all_states walks it.
    # NOTE(review): entries are never removed, so closed widgets stay
    # referenced here for the life of the process -- confirm intended.
    instances = []
    def __init__(self, name, opcID='opcID', horizontal_spacing=10, width=100):
        """Build the group box with three radio buttons and three LEDs.

        The widget starts disabled (self.state is False) until enabled via
        set_all_states.
        """
        #self.setTitle(name)
        super().__init__(name)
        self.instances.append(self)
        self.opcName=name
        mainLayout = QFormLayout()
        self.state = False
        self.setEnabled(self.state)
        self.led1=QLed(onColour=QLed.Green, shape=QLed.Circle)
        self.led2=QLed(onColour=QLed.Green, shape=QLed.Circle)
        self.led3=QLed(onColour=QLed.Green, shape=QLed.Circle)
        self.radioBtn1=QRadioButton('Hand')
        self.radioBtn2=QRadioButton('AUS')
        #self.radioBtn2.setChecked(True)
        #self.led2.value = True
        self.radioBtn3=QRadioButton('AUTO')
        self.opcID=opcID
        # Each button drives its own writer; the writers enforce LED exclusivity.
        self.radioBtn1.clicked.connect(self.write1)
        self.radioBtn2.clicked.connect(self.write2)
        self.radioBtn3.clicked.connect(self.write3)
        mainLayout.addRow(self.radioBtn1,self.led1)
        mainLayout.addRow(self.radioBtn2,self.led2)
        mainLayout.addRow(self.radioBtn3,self.led3)
        #Settings:
        mainLayout.setVerticalSpacing(8)
        mainLayout.setFormAlignment(Qt.AlignLeft)
        mainLayout.setHorizontalSpacing(horizontal_spacing)
        self.setFixedHeight(120)
        self.setFixedWidth(width)
        self.setLayout(mainLayout)
    @classmethod
    def set_all_states(cls, state):
        """Enable or disable every Box instance created so far."""
        for instance in cls.instances:
            instance.state = state
            instance.setEnabled(state)
    def write1(self):
        """Select 'Hand': report the choice and light only LED 1."""
        if self.led1.value==False:
            print(self.opcID+': '+ self.radioBtn1.text())
            self.led1.setValue(True)
            self.led2.setValue(False) # keep the three LEDs mutually exclusive
            self.led3.setValue(False) # keep the three LEDs mutually exclusive
    def write2(self):
        """Select 'AUS': report the choice and light only LED 2."""
        if self.led2.value==False:
            print(self.opcID+': '+ self.radioBtn2.text())
            self.led2.setValue(True)
            self.led1.setValue(False)
            self.led3.setValue(False)
    def write3(self):
        """Select 'AUTO': report the choice and light only LED 3."""
        if self.led3.value==False:
            print(self.opcID+': '+ self.radioBtn3.text())
            self.led2.setValue(False)
            self.led1.setValue(False)
            self.led3.setValue(True)
    def update(self,val):
        """Mirror externally read OPC values onto the buttons and LEDs.

        val is a mapping keyed by '<opcName>.Hand' / '.AUS' / '.AUTO' --
        presumably booleans read from the OPC server; TODO confirm.
        NOTE(review): this overrides QWidget.update(), shadowing Qt's repaint
        method -- confirm that is intended.
        """
        # self.led1.value=val[self.opcName+'.Hand']
        # self.led2.value=val[self.opcName+'.AUS']
        # self.led3.value=val[self.opcName+'.AUTO']
        if (val[self.opcName+'.Hand']):
            self.radioBtn2.setChecked(False)
            self.radioBtn3.setChecked(False)
            self.radioBtn1.setChecked(True)
            self.led2.setValue(False)
            self.led3.setValue(False)
            self.led1.setValue(True)
            print("Led1 is true")
        elif (val[self.opcName+'.AUS']):
            self.radioBtn1.setChecked(False)
            self.radioBtn2.setChecked(True)
            self.radioBtn3.setChecked(False)
            self.led1.setValue(False)
            self.led2.setValue(True)
            self.led3.setValue(False)
            print("Led2 is true")
        elif (val[self.opcName+'.AUTO']):
            self.radioBtn1.setChecked(False)
            self.radioBtn2.setChecked(False)
            self.radioBtn3.setChecked(True)
            self.led1.setValue(False)
            self.led2.setValue(False)
            self.led3.setValue(True)
            print("Led3 is true")
        #print(val[self.opcName+'.Hand'])
| ValdsteiN/metabolon-gui | components/widgets/box.py | box.py | py | 3,168 | python | en | code | null | github-code | 36 |
30569661147 | """
Привет! ID успешной посылки: 54853357
_____________________________________
Задача:
Гоша реализовал структуру данных Дек, максимальный размер которого определяется заданным числом. Методы push_back(x),
push_front(x), pop_back(), pop_front() работали корректно. Но, если в деке было много элементов, программа работала
очень долго. Дело в том, что не все операции выполнялись за O(1). Помогите Гоше! Напишите эффективную реализацию.
Внимание: при реализации нельзя использовать связный список.
____________________________________________________________
Так как связный список использовать нельзя, я подумал и пришел к тому, что самое оптимальное решение это использовать
два массива, НО потом мне подсказали и я подумал еще получше - циклический буфер будет лучшим выбором!
Будем использовать для метода push_back и push_front для вставки значений в конец и начало буфера.
Также мы заведем в классе Deque поле size, в котором будем хранить максимальный размер Дека, задаваемый во входных
данных. Поля tail и head для хранения индексов хвоста буфера и начала. Поле count - хранение количества элементов в
буфере. Ну и соответственно проинициализируем саму очередь на заданный размер.
Идея алгоритма простая, мы считываем данные, упаковываем их в массив, далее через оператор if - elif на нужные команды
вызываем нужные методы.
Для реализации алгоритма нам нужен класс, в котором будет реализованы следующие методы:
* push_back(value) - добавляет элемент в конец буфера
* push_front(value) - добавляем элемент в начало буфера
* pop_back() - удаляет элемент из конца буфера
* pop_front() - удаляет элемент из начала буфера
Два дополнительных метода is_full() и is_empty() позволят нам отлавливать моменты, когда дека заполнена или пуста, и
выкидывать в методах добавления и удаления элементом исключения, которые мы будем обрабатывать снаружи.
При добавление в конец проверяем, что буфер не заполнен, далее проверяем, что элементы в буфере уже есть, проверяем
если tail + 1 == size, то обнуляем tail, в противном случае увеличиваем tail на 1, для того, чтобы не перезатереть
значение, которое уже лежит в буфере. Если буфер пустой, то tail и head обнуляем и записываем по индексу tail значение
value. Увеличиваем счетчик элементов буфере на 1.
Аналогичная ситуация для добавления в начало. Только здесь необходимо следить за индексом в head для того, чтобы не
перезатереть значение, которое уже записано в буфер. Добавление происходит по индексу head, и увеличение счетчика на 1.
Далее методы удаления элементов.
Удаление с конца. Проверяем буфер на пустоту. Сохраняем текущий индекс в idx из tail во временную переменную именно по
этому индексу мы и извлечем значение. Далее нам нужно переопределить индексы в tail и head, чтобы они указывали на
правильные позиции буфера после удаления элемента. Уменьшаем счетчик на 1. Берем элемент по индексу idx из буфера, а на
его место записываем None. Удалять элементы нельзя, чтобы не изменился размер буфера. По идее элементы можно не заменять
на None, а просто сдвигать tail и head на нужные новые позиции и уменьшать счетчик. Но в задании указано удалить и мы
его удаляем.
Удаление с начала аналогичное. В idx сохраняем индекс из head, далее переопределяем tail и head для новых позиций,
уменьшаем счетчик на 1 и возвращаем элемент.
------------------------------------------------------------
Про сложность.
Алгоритм выполняется за линейное время O(n), где n - количество команд.
Сами операции выполняются за O(1).
Мы тратим на работу алгоритма O(n) памяти, потому что длина буфера не превосходят 0 <= n <= 50_000,
где n это максимальный размер Дека.
------------------------------------------------------------
Данные посылки:
0.56s 19.71Mb
"""
from typing import List, Tuple, NoReturn
class Deque:
    """Fixed-capacity double-ended queue backed by a circular buffer.

    All four push/pop operations run in O(1); the underlying list never
    changes length after construction (removed slots are reset to None).
    """

    def __init__(self, n: int):
        self.queue = [None] * n   # ring-buffer storage of fixed length n
        self.head = 0             # index of the first element
        self.tail = 0             # index of the last element
        self.size = n             # fixed capacity
        self.count = 0            # number of stored elements

    def is_full(self):
        """Return True when no more elements can be added."""
        return self.count == self.size

    def is_empty(self):
        """Return True when the deque holds no elements."""
        return self.count == 0

    def push_back(self, value):
        """Append *value* after the tail; raise IndexError when full."""
        if self.is_full():
            raise IndexError()
        if self.count:
            # Advance the tail one slot, wrapping around the buffer end.
            self.tail = (self.tail + 1) % self.size
        else:
            self.tail = self.head = 0
        self.queue[self.tail] = value
        self.count += 1

    def push_front(self, value: int):
        """Insert *value* before the head; raise IndexError when full."""
        if self.is_full():
            raise IndexError()
        if self.count:
            # Step the head one slot backwards, wrapping below zero.
            self.head = (self.head - 1) % self.size
        else:
            self.tail = self.head = 0
        self.queue[self.head] = value
        self.count += 1

    def pop_back(self):
        """Remove and return the tail element; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError()
        idx = self.tail
        if self.count == 1:
            self.tail = self.head = -1
        else:
            self.tail = (self.tail - 1) % self.size
        self.count -= 1
        item, self.queue[idx] = self.queue[idx], None
        return item

    def pop_front(self):
        """Remove and return the head element; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError()
        idx = self.head
        if self.count == 1:
            self.tail = self.head = -1
        else:
            self.head = (self.head + 1) % self.size
        self.count -= 1
        item, self.queue[idx] = self.queue[idx], None
        return item
def input_data() -> Tuple[int, List[Tuple[str, ...]]]:
    """Read the command count, deque capacity, and command list from stdin."""
    num_commands = int(input().strip())
    capacity = int(input().strip())
    commands = [tuple(input().strip().split()) for _ in range(num_commands)]
    return capacity, commands
def solution(deque_length: int, command_list: List[Tuple[str, ...]]) -> None:
    """Execute deque commands, printing pop results and 'error' on over/underflow.

    :param deque_length: maximum capacity of the deque.
    :param command_list: parsed commands, e.g. ('push_back', '3') or ('pop_front',).

    Note: the return annotation is ``None`` -- the function returns normally;
    the original ``NoReturn`` annotation incorrectly claimed it never returns.
    """
    deque = Deque(deque_length)
    for command in command_list:
        op = command[0]
        try:
            if op in ('push_back', 'push_front'):
                # Push commands print nothing on success.
                getattr(deque, op)(int(command[1]))
            elif op in ('pop_back', 'pop_front'):
                print(getattr(deque, op)())
        except IndexError:
            # Raised by Deque when full (push) or empty (pop).
            print('error')
if __name__ == '__main__':
    # Read the capacity and command list from stdin, then run them.
    solution(*input_data())
| fenixguard/yandex_algorithms | sprint_2/final_tasks/deque.py | deque.py | py | 9,249 | python | ru | code | 2 | github-code | 36 |
1942587161 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return every structurally unique BST storing the values 1..n.

        Returns an empty list for n == 0 (the original returned [None]).
        """
        def build(start: int, end: int) -> List[TreeNode]:
            # Empty value range: the only "tree" is the absent child.
            if start > end:
                return [None]
            trees = []
            for root_val in range(start, end + 1):
                # Every pairing of a left subtree over [start, root_val-1]
                # with a right subtree over [root_val+1, end] is one BST.
                for left in build(start, root_val - 1):
                    for right in build(root_val + 1, end):
                        root = TreeNode(root_val)
                        root.left = left
                        root.right = right
                        trees.append(root)
            return trees

        return build(1, n) if n else []
| hellojukay/leetcode-cn | src/unique-binary-search-trees-ii.py | unique-binary-search-trees-ii.py | py | 879 | python | en | code | 3 | github-code | 36 |
20422534772 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="webull",
version="0.6.1",
author="ted chou",
description="The unofficial python interface for the WeBull API",
license='MIT',
author_email="ted.chou12@gmail.com",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/tedchou12/webull.git",
packages=setuptools.find_packages(),
install_requires=[
"certifi>=2020.4.5.1",
"chardet>=3.0.4",
"idna>=2.9",
"numpy>=1.18.4",
"pandas>=0.25.3",
"python-dateutil>=2.8.1",
"pytz>=2020.1",
"requests>=2.23.0",
"six>=1.14.0",
"urllib3>=1.25.9",
"email-validator>=1.1.0",
"paho-mqtt>=1.6.0"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| tedchou12/webull | setup.py | setup.py | py | 1,036 | python | en | code | 576 | github-code | 36 |
14231634112 | #!/usr/bin/env python3
import fire
import logging
import os, sys, traceback
from IsoNet.util.dict2attr import Arg,check_parse,idx2list
from fire import core
from IsoNet.util.metadata import MetaData,Label,Item
class ISONET:
    """
    ISONET: Train on tomograms and restore missing-wedge\n
    For a detailed description, run one of the following commands:
    isonet.py prepare_star -h
    isonet.py prepare_subtomo_star -h
    isonet.py deconv -h
    isonet.py make_mask -h
    isonet.py extract -h
    isonet.py refine -h
    isonet.py predict -h
    isonet.py resize -h
    isonet.py gui -h
    """
    # Fire exposes each public method below as a CLI subcommand; this class
    # docstring is the top-level help banner.
    #log_file = "log.txt"
def prepare_star(self,folder_name, output_star='tomograms.star',pixel_size = 10.0, defocus = 0.0, number_subtomos = 100):
"""
\nThis command generates a tomograms.star file from a folder containing only tomogram files (.mrc or .rec).\n
isonet.py prepare_star folder_name [--output_star] [--pixel_size] [--defocus] [--number_subtomos]
:param folder_name: (None) directory containing tomogram(s). Usually 1-5 tomograms are sufficient.
:param output_star: (tomograms.star) star file similar to that from "relion". You can modify this file manually or with gui.
:param pixel_size: (10) pixel size in angstroms. Usually you want to bin your tomograms to about 10A pixel size.
Too large or too small pixel sizes are not recommended, since the target resolution on Z-axis of corrected tomograms should be about 30A.
:param defocus: (0.0) defocus in Angstrom. Only need for ctf deconvolution. For phase plate data, you can leave defocus 0.
If you have multiple tomograms with different defocus, please modify them in star file or with gui.
:param number_subtomos: (100) Number of subtomograms to be extracted in later processes.
If you want to extract different number of subtomograms in different tomograms, you can modify them in the star file generated with this command or with gui.
"""
md = MetaData()
md.addLabels('rlnIndex','rlnMicrographName','rlnPixelSize','rlnDefocus','rlnNumberSubtomo','rlnMaskBoundary')
tomo_list = sorted(os.listdir(folder_name))
i = 0
for tomo in tomo_list:
if tomo[-4:] == '.rec' or tomo[-4:] == '.mrc':
i+=1
it = Item()
md.addItem(it)
md._setItemValue(it,Label('rlnIndex'),str(i))
md._setItemValue(it,Label('rlnMicrographName'),os.path.join(folder_name,tomo))
md._setItemValue(it,Label('rlnPixelSize'),pixel_size)
md._setItemValue(it,Label('rlnDefocus'),defocus)
md._setItemValue(it,Label('rlnNumberSubtomo'),number_subtomos)
md._setItemValue(it,Label('rlnMaskBoundary'),None)
md.write(output_star)
def prepare_subtomo_star(self, folder_name, output_star='subtomo.star', pixel_size: float=10.0, cube_size = None):
"""
\nThis command generates a subtomo star file from a folder containing only subtomogram files (.mrc).
This command is usually not necessary in the traditional workflow, because "isonet.py extract" will generate this subtomo.star for you.\n
isonet.py prepare_subtomo_star folder_name [--output_star] [--cube_size]
:param folder_name: (None) directory containing subtomogram(s).
:param output_star: (subtomo.star) output star file for subtomograms, will be used as input in refinement.
:param pixel_size: (10) The pixel size in angstrom of your subtomograms.
:param cube_size: (None) This is the size of the cubic volumes used for training. This values should be smaller than the size of subtomogram.
And the cube_size should be divisible by 8. If this value isn't set, cube_size is automatically determined as int(subtomo_size / 1.5 + 1)//16 * 16
"""
#TODO check folder valid, logging
if not os.path.isdir(folder_name):
print("the folder does not exist")
import mrcfile
md = MetaData()
md.addLabels('rlnSubtomoIndex','rlnImageName','rlnCubeSize','rlnCropSize','rlnPixelSize')
subtomo_list = sorted(os.listdir(folder_name))
for i,subtomo in enumerate(subtomo_list):
subtomo_name = os.path.join(folder_name,subtomo)
try:
with mrcfile.open(subtomo_name, mode='r', permissive=True) as s:
crop_size = s.header.nx
except:
print("Warning: Can not process the subtomogram: {}!".format(subtomo_name))
continue
if cube_size is not None:
cube_size = int(cube_size)
if cube_size >= crop_size:
cube_size = int(crop_size / 1.5 + 1)//16 * 16
print("Warning: Cube size should be smaller than the size of subtomogram volume! Using cube size {}!".format(cube_size))
else:
cube_size = int(crop_size / 1.5 + 1)//16 * 16
it = Item()
md.addItem(it)
md._setItemValue(it,Label('rlnSubtomoIndex'),str(i+1))
md._setItemValue(it,Label('rlnImageName'),subtomo_name)
md._setItemValue(it,Label('rlnCubeSize'),cube_size)
md._setItemValue(it,Label('rlnCropSize'),crop_size)
md._setItemValue(it,Label('rlnPixelSize'),pixel_size)
# f.write(str(i+1)+' ' + os.path.join(folder_name,tomo) + '\n')
md.write(output_star)
    def deconv(self, star_file: str,
            deconv_folder:str="./deconv",
            voltage: float=300.0,
            cs: float=2.7,
            snrfalloff: float=None,
            deconvstrength: float=None,
            highpassnyquist: float=0.02,
            chunk_size: int=None,
            overlap_rate: float= 0.25,
            ncpu:int=4,
            tomo_idx: str=None):
        """
        CTF deconvolution for the tomograms.
        isonet.py deconv star_file [--deconv_folder] [--snrfalloff] [--deconvstrength] [--highpassnyquist] [--overlap_rate] [--ncpu] [--tomo_idx]
        This step is recommended because it enhances low resolution information for a better contrast. No need to do deconvolution for phase plate data.
        :param deconv_folder: (./deconv) Folder created to save deconvoluted tomograms.
        :param star_file: (None) Star file for tomograms.
        :param voltage: (300.0) Acceleration voltage in kV.
        :param cs: (2.7) Spherical aberration in mm.
        :param snrfalloff: (1.0) SNR fall rate with the frequency. High values means losing more high frequency.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 1.0 will be used.
        :param deconvstrength: (1.0) Strength of the deconvolution.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 1.0 will be used.
        :param highpassnyquist: (0.02) Highpass filter for at very low frequency. We suggest to keep this default value.
        :param chunk_size: (None) When your computer has enough memory, please keep the chunk_size as the default value: None . Otherwise, you can let the program crop the tomogram into multiple chunks for multiprocessing and assembly them into one. The chunk_size defines the size of individual chunk. This option may induce artifacts along edges of chunks. When that happen, you may use larger overlap_rate.
        :param overlap_rate: (None) The overlapping rate for adjecent chunks.
        :param ncpu: (4) Number of cpus to use.
        :param tomo_idx: (None) If this value is set, process only the tomograms listed in this index. e.g. 1,2,4 or 5-10,15,16
        """
        from IsoNet.util.deconvolution import deconv_one
        logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
        datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        logging.info('\n######Isonet starts ctf deconvolve######\n')
        try:
            md = MetaData()
            md.read(star_file)
            # First run on this star file: seed per-tomogram deconvolution
            # defaults so later runs can be driven from the star file alone.
            if not 'rlnSnrFalloff' in md.getLabels():
                md.addLabels('rlnSnrFalloff','rlnDeconvStrength','rlnDeconvTomoName')
                for it in md:
                    md._setItemValue(it,Label('rlnSnrFalloff'),1.0)
                    md._setItemValue(it,Label('rlnDeconvStrength'),1.0)
                    md._setItemValue(it,Label('rlnDeconvTomoName'),None)
            if not os.path.isdir(deconv_folder):
                os.mkdir(deconv_folder)
            tomo_idx = idx2list(tomo_idx)
            for it in md:
                if tomo_idx is None or str(it.rlnIndex) in tomo_idx:
                    # CLI values take precedence over what is stored in the star file.
                    if snrfalloff is not None:
                        md._setItemValue(it,Label('rlnSnrFalloff'), snrfalloff)
                    if deconvstrength is not None:
                        md._setItemValue(it,Label('rlnDeconvStrength'),deconvstrength)
                    tomo_file = it.rlnMicrographName
                    base_name = os.path.basename(tomo_file)
                    deconv_tomo_name = '{}/{}'.format(deconv_folder,base_name)
                    # rlnDefocus is stored in Angstrom; /10000.0 presumably converts
                    # to the unit deconv_one expects (micrometers) -- TODO confirm.
                    deconv_one(it.rlnMicrographName,deconv_tomo_name,voltage=voltage,cs=cs,defocus=it.rlnDefocus/10000.0, pixel_size=it.rlnPixelSize,snrfalloff=it.rlnSnrFalloff, deconvstrength=it.rlnDeconvStrength,highpassnyquist=highpassnyquist,chunk_size=chunk_size,overlap_rate=overlap_rate,ncpu=ncpu)
                    md._setItemValue(it,Label('rlnDeconvTomoName'),deconv_tomo_name)
            md.write(star_file)
            logging.info('\n######Isonet done ctf deconvolve######\n')
        except Exception:
            # Persist the full traceback to log.txt so GUI users can inspect it.
            error_text = traceback.format_exc()
            f =open('log.txt','a+')
            f.write(error_text)
            f.close()
            logging.error(error_text)
    def make_mask(self,star_file,
                mask_folder: str = 'mask',
                patch_size: int=4,
                mask_boundary: str=None,
                density_percentage: int=None,
                std_percentage: int=None,
                use_deconv_tomo:bool=True,
                z_crop:float=None,
                tomo_idx=None):
        """
        generate a mask that include sample area and exclude "empty" area of the tomogram. The masks do not need to be precise. In general, the number of subtomograms (a value in star file) should be lesser if you masked out larger area.
        isonet.py make_mask star_file [--mask_folder] [--patch_size] [--density_percentage] [--std_percentage] [--use_deconv_tomo] [--tomo_idx]
        :param star_file: path to the tomogram or tomogram folder
        :param mask_folder: path and name of the mask to save as
        :param patch_size: (4) The size of the box from which the max-filter and std-filter are calculated.
        :param density_percentage: (50) The approximate percentage of pixels to keep based on their local pixel density.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 50 will be used.
        :param std_percentage: (50) The approximate percentage of pixels to keep based on their local standard deviation.
        If this value is not set, the program will look for the parameter in the star file.
        If this value is not set and not found in star file, the default value 50 will be used.
        :param use_deconv_tomo: (True) If CTF deconvolved tomogram is found in tomogram.star, use that tomogram instead.
        :param z_crop: If exclude the top and bottom regions of tomograms along z axis. For example, "--z_crop 0.2" will mask out the top 20% and bottom 20% region along z axis.
        :param tomo_idx: (None) If this value is set, process only the tomograms listed in this index. e.g. 1,2,4 or 5-10,15,16
        """
        from IsoNet.bin.make_mask import make_mask
        logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
        datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        logging.info('\n######Isonet starts making mask######\n')
        try:
            if not os.path.isdir(mask_folder):
                os.mkdir(mask_folder)
            # write star percentile threshold
            md = MetaData()
            md.read(star_file)
            # First run on this star file: seed per-tomogram masking defaults.
            if not 'rlnMaskDensityPercentage' in md.getLabels():
                md.addLabels('rlnMaskDensityPercentage','rlnMaskStdPercentage','rlnMaskName')
                for it in md:
                    md._setItemValue(it,Label('rlnMaskDensityPercentage'),50)
                    md._setItemValue(it,Label('rlnMaskStdPercentage'),50)
                    md._setItemValue(it,Label('rlnMaskName'),None)
            tomo_idx = idx2list(tomo_idx)
            for it in md:
                if tomo_idx is None or str(it.rlnIndex) in tomo_idx:
                    # CLI values take precedence over what is stored in the star file.
                    if density_percentage is not None:
                        md._setItemValue(it,Label('rlnMaskDensityPercentage'),density_percentage)
                    if std_percentage is not None:
                        md._setItemValue(it,Label('rlnMaskStdPercentage'),std_percentage)
                    # Prefer the CTF-deconvolved tomogram when one was recorded.
                    if use_deconv_tomo and "rlnDeconvTomoName" in md.getLabels() and it.rlnDeconvTomoName not in [None,'None']:
                        tomo_file = it.rlnDeconvTomoName
                    else:
                        tomo_file = it.rlnMicrographName
                    tomo_root_name = os.path.splitext(os.path.basename(tomo_file))[0]
                    if os.path.isfile(tomo_file):
                        logging.info('make_mask: {}| dir_to_save: {}| percentage: {}| window_scale: {}'.format(tomo_file,
                        mask_folder, it.rlnMaskDensityPercentage, patch_size))
                        #if mask_boundary is None:
                        if "rlnMaskBoundary" in md.getLabels() and it.rlnMaskBoundary not in [None, "None"]:
                            mask_boundary = it.rlnMaskBoundary
                        else:
                            mask_boundary = None
                        mask_out_name = '{}/{}_mask.mrc'.format(mask_folder,tomo_root_name)
                        make_mask(tomo_file,
                                mask_out_name,
                                mask_boundary=mask_boundary,
                                side=patch_size,
                                density_percentage=it.rlnMaskDensityPercentage,
                                std_percentage=it.rlnMaskStdPercentage,
                                surface = z_crop)
                        md._setItemValue(it,Label('rlnMaskName'),mask_out_name)
            md.write(star_file)
            logging.info('\n######Isonet done making mask######\n')
        except Exception:
            # Persist the full traceback to log.txt so GUI users can inspect it.
            error_text = traceback.format_exc()
            f =open('log.txt','a+')
            f.write(error_text)
            f.close()
            logging.error(error_text)
    def extract(self,
        star_file: str,
        use_deconv_tomo: bool = True,
        subtomo_folder: str = "subtomo",
        subtomo_star: str = "subtomo.star",
        cube_size: int = 64,
        crop_size: int = None,
        log_level: str="info",
        tomo_idx = None
        ):
        """
        Extract subtomograms
        isonet.py extract star_file [--subtomo_folder] [--subtomo_star] [--cube_size] [--use_deconv_tomo] [--tomo_idx]
        :param star_file: tomogram star file
        :param subtomo_folder: (subtomo) folder for output subtomograms.
        :param subtomo_star: (subtomo.star) star file for output subtomograms.
        :param cube_size: (64) Size of cubes for training, should be divisible by 8, eg. 32, 64. The actual sizes of extracted subtomograms are this value adds 16.
        :param crop_size: (None) The size of subtomogram, should be larger then the cube_size The default value is 16+cube_size.
        :param log_level: ("info") level of the output, either "info" or "debug"
        :param use_deconv_tomo: (True) If CTF deconvolved tomogram is found in tomogram.star, use that tomogram instead.
        """
        # Snapshot the CLI arguments; must run before any other locals exist
        # so the Arg object contains exactly the user-supplied parameters.
        d = locals()
        d_args = Arg(d)
        if d_args.log_level == "debug":
            logging.basicConfig(format='%(asctime)s, %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
            ,datefmt="%H:%M:%S",level=logging.DEBUG,handlers=[logging.StreamHandler(sys.stdout)])
        else:
            logging.basicConfig(format='%(asctime)s, %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
            ,datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
        logging.info("\n######Isonet starts extracting subtomograms######\n")
        try:
            # A previous extraction is discarded wholesale before re-extracting.
            if os.path.isdir(subtomo_folder):
                logging.warning("subtomo directory exists, the current directory will be overwritten")
                import shutil
                shutil.rmtree(subtomo_folder)
            os.mkdir(subtomo_folder)
            from IsoNet.preprocessing.prepare import extract_subtomos
            if crop_size is None:
                d_args.crop_size = cube_size + 16
            else:
                d_args.crop_size = crop_size
            d_args.subtomo_dir = subtomo_folder
            d_args.tomo_idx = idx2list(tomo_idx)
            extract_subtomos(d_args)
            logging.info("\n######Isonet done extracting subtomograms######\n")
        except Exception:
            # Persist the full traceback to log.txt so GUI users can inspect it.
            error_text = traceback.format_exc()
            f =open('log.txt','a+')
            f.write(error_text)
            f.close()
            logging.error(error_text)
    def refine(self,
        subtomo_star: str,
        gpuID: str = None,
        iterations: int = None,
        data_dir: str = None,
        pretrained_model: str = None,
        log_level: str = None,
        result_dir: str='results',
        remove_intermediate: bool =False,
        select_subtomo_number: int = None,
        preprocessing_ncpus: int = 16,
        continue_from: str=None,
        epochs: int = 10,
        batch_size: int = None,
        steps_per_epoch: int = None,
        noise_level:  tuple=(0.05,0.10,0.15,0.20),
        noise_start_iter: tuple=(11,16,21,26),
        noise_mode: str = None,
        noise_dir: str = None,
        learning_rate: float = None,
        drop_out: float = 0.3,
        convs_per_depth: int = 3,
        kernel: tuple = (3,3,3),
        pool: tuple = None,
        unet_depth: int = 3,
        filter_base: int = None,
        batch_normalization: bool = True,
        normalize_percentile: bool = True,
        ):
        """
        train neural network to correct missing wedge
        isonet.py refine subtomo_star [--iterations] [--gpuID] [--preprocessing_ncpus] [--batch_size] [--steps_per_epoch] [--noise_start_iter] [--noise_level]...
        :param subtomo_star: (None) star file containing subtomogram(s).
        :param gpuID: (0,1,2,3) The ID of gpu to be used during the training. e.g 0,1,2,3.
        :param pretrained_model: (None) A trained neural network model in ".h5" format to start with.
        :param iterations: (30) Number of training iterations.
        :param data_dir: (data) Temporary folder to save the generated data used for training.
        :param log_level: (info) debug level, could be 'info' or 'debug'
        :param continue_from: (None) A Json file to continue from. That json file is generated at each iteration of refine.
        :param result_dir: ('results') The name of directory to save refined neural network models and subtomograms
        :param preprocessing_ncpus: (16) Number of cpu for preprocessing.
        ************************Training settings************************
        :param epochs: (10) Number of epoch for each iteraction.
        :param batch_size: (None) Size of the minibatch.If None, batch_size will be the max(2 * number_of_gpu,4). batch_size should be divisible by the number of gpu.
        :param steps_per_epoch: (None) Step per epoch. If not defined, the default value will be min(num_of_subtomograms * 6 / batch_size , 200)
        ************************Denoise settings************************
        :param noise_level: (0.05,0.1,0.15,0.2) Level of noise STD(added noise)/STD(data) after the iteration defined in noise_start_iter.
        :param noise_start_iter: (11,16,21,26) Iteration that start to add noise of corresponding noise level.
        :param noise_mode: (None) Filter names when generating noise volumes, can be 'ramp', 'hamming' and 'noFilter'
        :param noise_dir: (None) Directory for generated noise volumes. If set to None, the Noise volumes should appear in results/training_noise
        ************************Network settings************************
        :param drop_out: (0.3) Drop out rate to reduce overfitting.
        :param learning_rate: (0.0004) learning rate for network training.
        :param convs_per_depth: (3) Number of convolution layer for each depth.
        :param kernel: (3,3,3) Kernel for convolution
        :param unet_depth: (3) Depth of UNet.
        :param filter_base: (64) The base number of channels after convolution.
        :param batch_normalization: (True) Use Batch Normalization layer
        :param pool: (False) Use pooling layer instead of stride convolution layer.
        :param normalize_percentile: (True) Normalize the 5 percent and 95 percent pixel intensity to 0 and 1 respectively. If this is set to False, normalize the input to 0 mean and 1 standard dievation.
        """
        from IsoNet.bin.refine import run
        # NOTE(review): locals() is captured after the import above, so the
        # snapshot also contains a 'run' entry -- presumably Arg ignores
        # unknown keys; confirm.
        d = locals()
        d_args = Arg(d)
        # Record the exact command line in log.txt for reproducibility.
        with open('log.txt','a+') as f:
            f.write(' '.join(sys.argv[0:]) + '\n')
        run(d_args)
def predict(self, star_file: str, model: str, output_dir: str='./corrected_tomos', gpuID: str = None, cube_size:int=64,
crop_size:int=96,use_deconv_tomo=True, batch_size:int=None,normalize_percentile: bool=True,log_level: str="info", tomo_idx=None):
"""
\nPredict tomograms using trained model\n
isonet.py predict star_file model [--gpuID] [--output_dir] [--cube_size] [--crop_size] [--batch_size] [--tomo_idx]
:param star_file: star for tomograms.
:param output_dir: file_name of output predicted tomograms
:param model: path to trained network model .h5
:param gpuID: (0,1,2,3) The gpuID to used during the training. e.g 0,1,2,3.
:param cube_size: (64) The tomogram is divided into cubes to predict due to the memory limitation of GPUs.
:param crop_size: (96) The side-length of cubes cropping from tomogram in an overlapping patch strategy, make this value larger if you see the patchy artifacts
:param batch_size: The batch size of the cubes grouped into for network predicting, the default parameter is four times number of gpu
:param normalize_percentile: (True) if normalize the tomograms by percentile. Should be the same with that in refine parameter.
:param log_level: ("debug") level of message to be displayed, could be 'info' or 'debug'
:param tomo_idx: (None) If this value is set, process only the tomograms listed in this index. e.g. 1,2,4 or 5-10,15,16
:param use_deconv_tomo: (True) If CTF deconvolved tomogram is found in tomogram.star, use that tomogram instead.
:raises: AttributeError, KeyError
"""
d = locals()
d_args = Arg(d)
from IsoNet.bin.predict import predict
if d_args.log_level == "debug":
logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
datefmt="%m-%d %H:%M:%S",level=logging.DEBUG,handlers=[logging.StreamHandler(sys.stdout)])
else:
logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',
datefmt="%m-%d %H:%M:%S",level=logging.INFO,handlers=[logging.StreamHandler(sys.stdout)])
try:
predict(d_args)
except:
error_text = traceback.format_exc()
f =open('log.txt','a+')
f.write(error_text)
f.close()
logging.error(error_text)
    def resize(self, star_file:str, apix: float=15, out_folder="tomograms_resized"):
        '''
        Rescale every tomogram listed in a STAR file to a given pixel size.
        :param star_file: STAR file whose entries carry rlnMicrographName and rlnPixelSize
        :param apix: target pixel size (Angstrom) for the rescaled tomograms
        :param out_folder: directory that receives the rescaled volumes (created if missing)
        Writes a new "<star_file>_resized.star" pointing at the rescaled tomograms.
        '''
        md = MetaData()
        md.read(star_file)
        #print(md._data[0].rlnPixelSize)
        # Lazy imports keep scipy/mrcfile optional for other sub-commands
        from scipy.ndimage import zoom
        #from skimage.transform import rescale
        #import numpy as np
        import mrcfile
        if not os.path.isdir(out_folder):
            os.makedirs(out_folder)
        for item in md._data:
            ori_apix = item.rlnPixelSize
            tomo_name = item.rlnMicrographName
            # zoom < 1 shrinks the volume: ratio of original to target pixel size
            zoom_factor = float(ori_apix)/apix
            new_tomo_name = "{}/{}".format(out_folder,os.path.basename(tomo_name))
            with mrcfile.open(tomo_name) as mrc:
                data = mrc.data
            print("scaling: {}".format(tomo_name))
            # Cubic interpolation; prefilter disabled (matches original behaviour)
            new_data = zoom(data, zoom_factor,order=3, prefilter=False)
            #new_data = rescale(data, zoom_factor,order=3, anti_aliasing = True)
            #new_data = new_data.astype(np.float32)
            with mrcfile.new(new_tomo_name,overwrite=True) as mrc:
                mrc.set_data(new_data)
                mrc.voxel_size = apix
            # Update the in-memory star entry so the written star file points
            # at the rescaled tomogram with its new pixel size
            item.rlnPixelSize = apix
            print(new_tomo_name)
            item.rlnMicrographName = new_tomo_name
            print(item.rlnMicrographName)
        md.write(os.path.splitext(star_file)[0] + "_resized.star")
        print("scale_finished")
    def check(self):
        # Installation sanity check: each import below is intentionally
        # "unused" — a failing import reveals a broken dependency.
        from IsoNet.bin.predict import predict
        from IsoNet.bin.refine import run
        import skimage
        import PyQt5
        import tqdm
        print('IsoNet --version 0.2 installed')
    def gui(self):
        """
        \nGraphic User Interface\n
        """
        # Imported lazily so CLI-only usage does not require PyQt
        import IsoNet.gui.Isonet_star_app as app
        app.main()
def Display(lines, out):
    """Write each entry of *lines* to *out*, one per line, ending with a newline."""
    out.write("\n".join(lines) + "\n")
def pool_process(p_func,chunks_list,ncpu):
    """Map *p_func* over *chunks_list* with a pool of *ncpu* worker processes.

    maxtasksperchild bounds per-worker memory growth by recycling workers.
    Returns the results in input order.
    """
    from multiprocessing import Pool
    with Pool(ncpu,maxtasksperchild=1000) as p:
        results = list(p.map(p_func,chunks_list))
    # BUG FIX: the results were computed but never returned (the return was
    # commented out), making the function useless to callers
    return results
if __name__ == "__main__":
core.Display = Display
# logging.basicConfig(format='%(asctime)s, %(levelname)-8s %(message)s',datefmt="%m-%d %H:%M:%S",level=logging.INFO)
if len(sys.argv) > 1:
check_parse(sys.argv[1:])
fire.Fire(ISONET)
| IsoNet-cryoET/IsoNet | bin/isonet.py | isonet.py | py | 26,881 | python | en | code | 49 | github-code | 36 |
9627005158 | import numpy as np
import pandas as pd
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from PyQt5 import QtGui
from PyQt5.QtWidgets import *
import sys
from PIL import Image
from wordCloud.WC import Ui_MainWindow
from wordcloud import WordCloud
from wordcloud import ImageColorGenerator
from collections import Counter
from konlpy.tag import Hannanum
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
class Main(QMainWindow, Ui_MainWindow):
    """Word-cloud viewer window.

    Loads a speech transcript (English or Korean), shows the 20 most frequent
    words, and renders word clouds — rectangular or shaped by a mask image.
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.lines = ""            # transcript text: str (English) or list of lines (Korean)
        self.nowlang = self.lang.currentText()  # language selected in the combo box
        self.eg_wordlist = []      # tokenized English words
        self.kr_wordlist = []      # extracted Korean nouns (pandas Series)
        self.mask = None           # mask image as a numpy array (set by choseImage)
        self.canvas = None         # matplotlib canvas currently embedded in the UI
        self.textbutton.clicked.connect(self.choseText)
        self.imgbutton.clicked.connect(self.choseImage)
        self.langbutton.clicked.connect(self.choselang)

    def clearAll(self):
        """Reset the transcript, the top-20 text box, and both plot layouts."""
        self.lines = ""
        self.plainTextEdit.clear()
        # Re-parenting a widget to None detaches it from its layout
        for i in reversed(range(self.verticalLayout.count())):
            self.verticalLayout.itemAt(i).widget().setParent(None)
        for i in reversed(range(self.verticalLayout_2.count())):
            self.verticalLayout_2.itemAt(i).widget().setParent(None)

    def choselang(self):
        """Remember the language currently chosen in the combo box."""
        self.nowlang = self.lang.currentText()

    def choseImage(self):
        """Pick a PNG mask and redraw the word cloud shaped by it."""
        for i in reversed(range(self.verticalLayout_2.count())):
            self.verticalLayout_2.itemAt(i).widget().setParent(None)
        fileName, _ = QFileDialog.getOpenFileName(self, '불러올 img file을 선택하세요.', '', 'img Files(*.png)')
        if fileName:
            self.mask = np.array(Image.open(fileName))
            if self.nowlang == "영어":
                self.makeImgWordCloud(self.eg_wordlist)
            else:
                self.makeImgWordCloud(self.kr_wordlist)
            self.label_2.setPixmap(QtGui.QPixmap(fileName).scaled(400, 300))

    def choseText(self):
        """Pick a transcript file and rebuild the word list for the current language."""
        self.clearAll()
        fileName, _ = QFileDialog.getOpenFileName(self, '불러올 txt file을 선택하세요.', '', 'txt Files(*.txt)')
        self.label.setText(fileName.split("/")[-1].split(".")[0] + " WordCloud")
        if fileName:
            f = open(fileName, "r", encoding="cp949")
            if self.nowlang == "영어":
                # English transcripts are assumed to be a single line —
                # TODO confirm for multi-line inputs
                self.lines = f.readlines()[0]
                f.close()
                self.makeEgWordList()
            else:
                self.lines = f.readlines()
                f.close()
                self.makeKrWordList()

    def makeEgWordList(self):
        """Tokenize the English transcript, dropping stopwords and 1-char tokens."""
        tokenizer = RegexpTokenizer(r"[\w]+")  # split on runs of word characters
        stop_words = stopwords.words("english")  # frequent but meaningless words
        words = self.lines.lower()
        tokens = tokenizer.tokenize(words)
        stopped_tokens = [i for i in list(tokens) if not i in stop_words]
        self.eg_wordlist = [i for i in stopped_tokens if len(i) > 1]
        self.makeTop20Word(self.eg_wordlist)
        self.makeWordCloud(self.eg_wordlist)

    def flatten(self, l):
        """Flatten one level of nesting: [[a, b], c] -> [a, b, c]."""
        flatList = []
        for elem in l:
            if type(elem) == list:
                for e in elem:
                    flatList.append(e)
            else:
                flatList.append(elem)
        return flatList

    def makeKrWordList(self):
        """Extract Korean nouns line by line with Hannanum, keeping len > 1."""
        hannanum = Hannanum()
        temp = []
        for i in range(len(self.lines)):
            temp.append(hannanum.nouns(self.lines[i]))
        word_list = self.flatten(temp)
        self.kr_wordlist = pd.Series([x for x in word_list if len(x) > 1])
        self.makeTop20Word(self.kr_wordlist)
        self.makeWordCloud(self.kr_wordlist)

    def makeTop20Word(self, wordlist):
        """List the 20 most frequent words with their counts in the text box."""
        # Compute value_counts() once (was computed twice before)
        counts = pd.Series(wordlist).value_counts().head(20)
        keys = counts.keys()
        values = counts.values
        for i in range(len(keys)):
            self.plainTextEdit.appendPlainText("{} : {}개".format(keys[i], values[i]))

    def makeWordCloud(self, wordlist):
        """Render a rectangular word cloud into the first plot layout."""
        font_path = '/Library/Fonts/AppleGothic.ttf'
        wordcloud = WordCloud(font_path=font_path, width=800, height=800, background_color="white")
        count = Counter(wordlist)
        wordcloud = wordcloud.generate_from_frequencies(count)
        # (Removed two dead nested function definitions that were never called)
        array = wordcloud.to_array()
        fig = plt.figure()
        plt.imshow(array, interpolation="bilinear")
        self.canvas = FigureCanvas(fig)
        self.canvas.draw()
        self.verticalLayout.addWidget(self.canvas)
        self.canvas.show()

    def makeImgWordCloud(self, wordlist):
        """Render a mask-shaped word cloud, recolored from the mask image."""
        font_path = '/Library/Fonts/AppleGothic.ttf'
        count = Counter(wordlist)
        wc = WordCloud(font_path=font_path, mask=self.mask, background_color="white")
        wc = wc.generate_from_frequencies(count)
        # Color each word from the underlying mask pixels
        image_color = ImageColorGenerator(self.mask)
        fig = plt.figure(figsize=(8, 8))
        plt.imshow(wc.recolor(color_func=image_color), interpolation="bilinear")
        plt.axis("off")
        self.canvas = FigureCanvas(fig)
        self.canvas.draw()
        self.verticalLayout_2.addWidget(self.canvas)
        self.canvas.show()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # then enter the event loop until the window is closed
    app = QApplication(sys.argv)
    you_viewer_main = Main()
    you_viewer_main.show()
    app.exec_()
| LeeDong-Min/WordCloud | text_mining(moon_and_trump).py | text_mining(moon_and_trump).py | py | 5,583 | python | en | code | 0 | github-code | 36 |
1080065395 | from django.contrib import admin
from django.urls import path
from form1 import views
urlpatterns = [
    # Incident report forms
    path('form1', views.index,name='index'),
    path('form2', views.form2, name='Supervisor'),
    # Authentication / navigation
    path('', views.login_view, name='home'),
    path('signup', views.signup_view, name='signup'),
    path('menu', views.menu_view, name='menu'),
    path('login', views.login_view, name='login'),
    path('logout', views.logout_view, name='logout'),
    # Case management
    path('movetohistory/<int:case_id>', views.move_to_history, name='MoveToHistory'),
    path('add_frv', views.create_frv ,name='AddFrv'),
    path('driver', views.driver, name='driver'),
    #path('drivermap', views.drivermap, name='drivermap'),
    # Case-location endpoints (FRV location endpoints currently disabled)
    path('location/case/get', views.get_case_location, name='GetCaseLocation'),
    path('location/case/set', views.set_case_location, name='SaveCaseLocation'),
    path('assignfrv', views.assign_frv, name='AssignFRV' ),
    #path('location/frv/get', views.get_frv_location, name='GetFRVocation'),
    #path('location/frv/gset', views.set_frv_location, name='SetFRVLocation'),
]
| prajwalgh/QuantumGIS-SIH-PH | mainbody/form1/urls.py | urls.py | py | 1,079 | python | en | code | 0 | github-code | 36 |
22136785531 | import os
import time
import json
import torch
import random
import warnings
import torchvision
import numpy as np
import pandas as pd
import pathlib
from utils import *
from data import HumanDataset
from data import process_df
from data import process_submission_leakdata_full
from data import process_loss_weight
from data import process_together_labels
from tqdm import tqdm
from config import config
from datetime import datetime
from models.model import *
from torch import nn, optim
from collections import OrderedDict
from torch.autograd import Variable
from torch.utils.data.sampler import WeightedRandomSampler
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from sklearn.model_selection import train_test_split
from timeit import default_timer as timer
from sklearn.metrics import f1_score
from PIL import Image
import matplotlib.pyplot as plt
# 1. set random seed
# Fix every RNG (python, numpy, torch CPU and all GPUs) for reproducibility
random.seed(2050)
np.random.seed(2050)
torch.manual_seed(2050)
torch.cuda.manual_seed_all(2050)
# Restrict execution to the first GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Let cuDNN auto-tune conv algorithms (fastest for fixed input sizes)
torch.backends.cudnn.benchmark = True
warnings.filterwarnings('ignore')
# Class index -> human-readable protein localization name (28 classes)
index_class_dict = {
    0: "Nucleoplasm",
    1: "Nuclear membrane",
    2: "Nucleoli",
    3: "Nucleoli fibrillar center",
    4: "Nuclear speckles",
    5: "Nuclear bodies",
    6: "Endoplasmic reticulum",
    7: "Golgi apparatus",
    8: "Peroxisomes",
    9: "Endosomes",
    10: "Lysosomes",
    11: "Intermediate filaments",
    12: "Actin filaments",
    13: "Focal adhesion sites",
    14: "Microtubules",
    15: "Microtubule ends",
    16: "Cytokinetic bridge",
    17: "Mitotic spindle",
    18: "Microtubule organizing center",
    19: "Centrosome",
    20: "Lipid droplets",
    21: "Plasma membrane",
    22: "Cell junctions",
    23: "Mitochondria",
    24: "Aggresome",
    25: "Cytosol",
    26: "Cytoplasmic bodies",
    27: "Rods & rings"
}
def check(check_loader, model, folds, val_data_list):
    """Interactively review model predictions over a validation loader.

    For each sample (batch size 1) the four stain channels are displayed next
    to the merged RGB image, titled with true vs. predicted class indices.
    Every reviewed sample is also appended to wrong_classification.csv.

    :param check_loader: DataLoader yielding (image, target) pairs, batch size 1
    :param model: trained network; moved to GPU and set to eval mode here
    :param folds: fold index (currently unused, kept for call compatibility)
    :param val_data_list: DataFrame whose .Id column matches the loader order
    """
    model.cuda()
    model.eval()
    count = 0
    wrong_id = []
    wrong_class = []
    true_target = []
    wrong_target = []
    pred = []
    for i, (image, target) in enumerate(tqdm(check_loader)):
        with torch.no_grad():
            image = image.cuda(non_blocking=True)
            y_pred = model(image)
            # Per-class probabilities of the single sample
            label = y_pred.sigmoid().cpu().data.numpy()
            label_orig = label.copy().reshape((-1))
            # Binarize at the configured global threshold
            label = (label >= config.threshold).reshape(-1)
            target = np.array(target).reshape(-1)
            # flag is True when the binarized prediction matches the target exactly
            flag = True
            for j in range(len(label)):
                if label[j] != target[j]:
                    flag = False
                    break
            # NOTE(review): `not flag or flag` is always True, so *every* sample
            # is displayed and logged, not just misclassified ones; preserved as-is.
            if not flag or flag:
                count += 1
                name = val_data_list.iloc[i].Id
                wrong_img_path = os.path.join(config.train_data, name)
                # BUG FIX: np.nonzero returns a *tuple* of index arrays — index [0]
                # to iterate the actual class indices (iterating the tuple yielded
                # whole arrays and crashed the dict lookup below)
                target1 = ' '.join(str(k) for k in np.nonzero(target)[0])
                label1 = ' '.join(str(k) for k in np.nonzero(label)[0])
                label1_name = '-&-'.join(index_class_dict[k] for k in np.nonzero(label)[0])
                label_orig = ' '.join(str('%1.2f' % k) for k in label_orig)
                wrong_id.append(str(name))
                wrong_class.append(str(flag))
                true_target.append(target1)
                wrong_target.append(label1)
                pred.append(label_orig)
                # BUG FIX: np.float was removed from NumPy (1.24+); use builtin float
                images = np.zeros(shape=(512, 512, 3), dtype=float)
                r = Image.open(wrong_img_path + "_red.png")
                g = Image.open(wrong_img_path + "_green.png")
                b = Image.open(wrong_img_path + "_blue.png")
                y = Image.open(wrong_img_path + "_yellow.png")
                # Fold the yellow stain into the red channel (RGBY -> RGB)
                images[:, :, 0] = np.array(r) / 2 + np.array(y) / 2
                images[:, :, 1] = g
                images[:, :, 2] = b
                images = images.astype(np.uint8)
                f0 = plt.figure(0, figsize=(20, 25))
                f0.suptitle('%s True:%s Pred:%s Pred_name%s' % (str(flag), target1, label1, label1_name))
                # Top row: the four raw channels; bottom block: merged RGB image
                ax1 = plt.subplot2grid((5, 4), (0, 0), fig=f0)
                ax2 = plt.subplot2grid((5, 4), (0, 1))
                ax3 = plt.subplot2grid((5, 4), (0, 2))
                ax4 = plt.subplot2grid((5, 4), (0, 3))
                ax5 = plt.subplot2grid((5, 4), (1, 0), rowspan=4, colspan=4)
                ax1.imshow(np.array(r), cmap="Reds")
                ax1.set_title("true:")
                ax2.imshow(np.array(g), cmap="Greens")
                ax2.set_title("pred:")
                ax3.imshow(np.array(b), cmap="Blues")
                ax4.imshow(np.array(y), cmap="Oranges")
                ax5.imshow(images)
                # Block until a key/mouse press, then close before the next sample
                plt.waitforbuttonpress(0)
                plt.close(f0)
    # BUG FIX: `wrong_id is not []` compared identity against a brand-new list
    # and was therefore always True; test emptiness instead
    if wrong_id:
        df = pd.DataFrame({
            'Id': wrong_id,
            'True': wrong_class,
            'True_Target': true_target,
            'Pred_Target': wrong_target,
            'pred': pred
        })
        df.to_csv('wrong_classification.csv')
def main():
    """Load the best checkpoint for the configured fold, rebuild the internal
    validation split, and run the interactive check() viewer over it."""
    fold = config.fold
    model = get_net()
    model.cuda()
    checkpoint = torch.load(
        "%s/%s_fold_%s_model_best_%s.pth.tar" % (config.best_models, config.model_name, str(fold), config.best))
    model.load_state_dict(checkpoint["state_dict"])
    # Rebuild the exact train/val split used during training (same seed/ratio)
    frames = [pd.read_csv(path) for path in (config.train_csv, config.external_csv, config.test_csv)]
    all_files, _, _ = process_df(*frames)
    _, val_data_list = train_test_split(all_files, test_size=0.13, random_state=2050)
    # Keep only the original (non-external) samples for inspection
    val_data_list = val_data_list[val_data_list['is_external'] == 0]
    dataset = HumanDataset(val_data_list, augument=False, mode="train")
    loader = DataLoader(dataset, 1, shuffle=False, pin_memory=True, num_workers=6)
    check(loader, model, fold, val_data_list)
if __name__ == "__main__":
main()
| felixchen9099/kaggle_human_protein | my_utils/wrong_classification.py | wrong_classification.py | py | 6,274 | python | en | code | 31 | github-code | 36 |
4999987358 | import asyncio
import json
import random
import re
import requests
from discord import Intents
from discord import Colour
from discord import Embed
from discord.ext import commands
from discord.utils import get
from environment_variables import (
DISCORD,
REDDIT,
OPTION_FLAGS
)
from links import (
hummer_links,
tesla_link,
prius_link,
honk_links
)
from compliments import (
compliment_base,
compliment_subject,
compliment_descriptor,
compliment_ending
)
##################################
#          DECLARATION           #
##################################
# Declare the bot; all-intents is required for member/role introspection
bot = commands.Bot(
    command_prefix = '!',
    intents = Intents.all()
)
# Remove the {syntax}help option if required
if OPTION_FLAGS["REMOVE_HELP"]:
    bot.remove_command("help")
# Initialize the required queue(s)
# (reddit_queue caches fetched posts between !reddit invocations)
reddit_queue = []
# Initialize synchronization lock(s) — serializes the !RGB colour cycle
lock = asyncio.Lock()
##################################
# AUXILLIARY FUNCTIONS #
##################################
def administrator(roles):
    """Return True when any role in *roles* grants administrator permission."""
    for role in roles:
        if role.permissions.administrator:
            return True
    return False
async def handle_EV(ctx, message):
    """
    Handle any talk of EVs with brutal honesty

    Arguments:
        ctx: the message to reply to
        message: lower-cased message text to scan

    Returns:
        N/A

    Raises:
        N/A
    """
    # Scan for EV brands and positive sentiment
    mentions_tesla = "tesla" in message
    mentions_prius = "prius" in message
    praises = any(word in message for word in ("best", "great", "fantastic", "love"))

    # Reply with a Hummer counterpoint when an EV is being praised
    if (mentions_tesla or mentions_prius) and praises:
        if mentions_tesla:
            await ctx.reply(f"{random.choice(hummer_links)}\n{tesla_link}")
        else:
            await ctx.reply(f"{random.choice(hummer_links)}\n{prius_link}")
def get_posts(client, secret, username, password, queue, subreddit, max_posts):
    """
    Populate `queue` with up to `max_posts` top posts (past year) from
    `subreddit`, authenticating with Reddit's script-type OAuth
    (password grant) flow.

    Arguments:
        client (str): The client ID associated with the bot
        secret (str): The secret token associated with the bot
        username (str): The username of the Reddit account
        password (str): The passsword of the Reddit account
        queue (List): The queue to store posts into
        subreddit (str): The desired subreddit
        max_posts (int): The maximum amount of posts allowed

    Returns:
        N/A (posts are appended to `queue` in place)

    Raises:
        N/A
    """
    # Exchange the app credentials + account login for an OAuth token
    authorization = requests.auth.HTTPBasicAuth(client, secret)
    data = {
        "grant_type": "password",
        "username": username,
        "password": password
    }
    headers = {"User-Agent": "Thanatos/0.0.1"}
    response = requests.post(
        "https://www.reddit.com/api/v1/access_token",
        auth = authorization,
        data = data,
        headers = headers
    )
    access_token = response.json()["access_token"]
    # All subsequent calls carry the bearer token
    headers = {
        **headers,
        **{"Authorization": f"bearer {access_token}"}
    }
    requests.get("https://oauth.reddit.com/api/v1/me", headers = headers)
    # Fetch the subreddit's top posts of the past year
    parameters = {"limit": 250, 't': "year"}
    response = requests.get(
        f"https://oauth.reddit.com/r/{subreddit}/top",
        headers = headers,
        params = parameters
    )
    # Keep only the fields the embed builder needs; skip pinned posts
    # (removed an unused `response_data` accumulator)
    for post in response.json()["data"]["children"]:
        if post["data"]["stickied"] == True:
            continue
        queue.append({
            "title": post["data"]["title"],
            "selftext": post["data"]["selftext"],
            "ups": post["data"]["ups"],
            "downs": post["data"]["downs"],
            "url": post["data"]["url"],
            "thumbnail": post["data"]["thumbnail"]
        })
        if len(queue) >= max_posts:
            return
##################################
# EVENT HANDLING #
##################################
@bot.event
async def on_ready():
    """Log the bot's identity once the gateway connection is established."""
    print(f"Logged in as: {bot.user.name} (ID {bot.user.id})")
@bot.event
async def on_typing(channel, user, when):
    """Typing-indicator hook; currently only filters out the bot itself
    and takes no further action."""
    # Disregard input made by self
    if user == bot.user:
        return
@bot.event
async def on_message(message):
    """Global message hook: occasional status rotation, EV-talk handling,
    then normal command dispatch."""
    # Disregard input made by self
    if message.author == bot.user:
        return

    # Roughly 2-in-11 chance per message: rotate the bot's "playing" status
    if random.randint(0, 10) > 8:
        selection = random.choice([
            "with the fabric of reality",
            "the souls of mortals",
            "with fire",
            "with something he shouldn't",
            "Untitled Goose Game",
            "with explosions",
            "I use Arch BTW",
            "👍 Hitchiker Simulator 2022",
            f"with {message.author.name}"
        ])
        # BUG FIX: the selection was computed but never applied; the module
        # `discord` is not imported at file level, so import Game locally
        from discord import Game
        await bot.change_presence(activity = Game(name = selection))

    # Check for string input in the message
    # (e.g. not a raw media type alone)
    if len(message.content) > 0:
        # Act on the message as required
        await handle_EV(message, message.content.lower())

    # Try to process the message as a command
    try:
        await bot.process_commands(message)
    except Exception as exc:
        # BUG FIX: `except: print(exception)` referenced an undefined name
        # and swallowed every exception type, including KeyboardInterrupt
        print(exc)
##################################
# COMMAND CALLS #
##################################
@bot.command()
async def puppet(ctx):
    """
    Talk through the bot using
    the corresponding terminal (owner only)
    Arguments:
        N/A
    Returns:
        N/A
    Raises:
        N/A
    """
    # Verify the author of the command call
    if (ctx.author.id != DISCORD["OWNER_ID"]):
        return
    # Hide the original command call
    await ctx.message.delete()
    # NOTE(review): input() is a blocking call inside an async handler — the
    # entire event loop freezes while the owner types; confirm acceptable.
    # Take input repeatedly from the
    # terminal until no input is given
    while True:
        response = input("Message: ")
        if (response != ""):
            await ctx.send(response)
        else:
            print("__END__\n")
            return
@bot.command()
async def peptalk(ctx):
    """
    Generate and reply back with a peptalk,
    sending it to another user if called as
    a reply to a message that user made

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Assemble a compliment from one random fragment of each category
    response = ''.join([
        random.choice(compliment_base),
        random.choice(compliment_subject),
        random.choice(compliment_descriptor),
        random.choice(compliment_ending)
    ])

    # If the command was itself a reply, redirect the peptalk at the
    # replied-to message (removed an unused `msg` local)
    if ctx.message.reference is not None:
        ctx.message = await ctx.channel.fetch_message(ctx.message.reference.message_id)

    # Send message
    await ctx.message.reply(response)
@bot.command()
async def puptalk(ctx):
    """
    Reply with a random dog picture fetched from the random.dog API

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    payload = requests.get("https://random.dog/woof.json").json()
    await ctx.message.reply(payload["url"])
@bot.command()
async def cattalk(ctx):
    """
    Reply with a random cat from the cataas.com API

    Arguments:
        (tag): The filter to parse results from

    Returns:
        N/A

    Raises:
        N/A
    """
    base_url = "https://cataas.com/cat"

    # Append "/<tags>" when the caller supplied anything after "!cattalk "
    tag_suffix = ""
    if len(ctx.message.content) > 8:
        tag_suffix = f"/{ctx.message.content[9:]}"

    # A 404 means no cat matched the requested tags
    response = requests.get(f"{base_url}{tag_suffix}?json=true")
    if response.status_code == 404:
        await ctx.message.reply(f"Sorry, no results for '{ctx.message.content[9:]}' tags")
    else:
        await ctx.message.reply(f"{base_url}{response.json()['url'][4:]}")
@bot.command()
async def fire(ctx):
    """
    Run an API call to arcgis.com
    to check for fires in a given area
    Arguments:
        (county): The name of a county specified in
        the command call (e.g. '!fire butte').
        If not specified, all counties in the
        state are queried, and the first three
        returned are used in the text-response.
    Returns:
        N/A
    Raises:
        N/A
    """
    # Define the argument filter for county
    # from the command call
    # ("1%3D1" is URL-encoded "1=1", i.e. match every county)
    filter_county = "1%3D1"
    if len(ctx.message.content) >= 6:
        filter_county = f"irwin_POOCounty%20%3D%20'{ctx.message.content.upper()[6:]}'"
    # Define the spatial filter for the request
    # to look specifically in California
    filter_state = [
        "geometry=-138.176%2C31.172%2C-100.471%2C43.363",
        "geometryType=esriGeometryEnvelope",
        "inSR=4326",
        "spatialRel=esriSpatialRelIntersects",
        "outSR=4326",
        "returnGeometry=False"
    ]
    # Define the basic request information and the
    # desired response format
    request_base = '/'.join([
        f"https://services3.arcgis.com",
        f"T4QMspbfLg3qTGWY",
        f"arcgis",
        f"rest",
        f"services",
        f"CY_WildlandFire_Perimeters_ToDate",
        f"FeatureServer",
        f"0",
        f"query?where={filter_county}&outFields="
    ])
    request_format = f"f=json"
    # Define the requested information
    # for each event returned
    request_fields = [
        "poly_Acres_AutoCalc",
        "irwin_FireCause",
        "irwin_IncidentName",
        "irwin_IncidentShortDescription",
        "irwin_PrimaryFuelModel",
        "irwin_UniqueFireIdentifier",
        "irwin_PercentContained",
        "irwin_POOCounty"
    ]
    # Make the request to the API
    response = requests.get(
        request_base
        + ','.join(request_fields) + '&'
        + '&'.join(filter_state) + '&'
        + request_format
    )
    # Evaluate response JSON data
    # (cap the reply at three events to avoid flooding the channel)
    reply_amount = 0
    for item in response.json()['features']:
        # Iterate through each event found
        for event, attributes in item.items():
            # Check only 'big' events with incident descriptions
            # (which are typically locations and whatnot)
            if attributes['irwin_IncidentShortDescription'] == None:
                continue
            output = '\n'.join([
                f"\n---------------------------------------------------\n",
                f"**Incident Name:** {attributes['irwin_IncidentName']}",
                f"**Unique Fire ID:** {attributes['irwin_UniqueFireIdentifier']}",
                f"**County:** {attributes['irwin_POOCounty']}",
                f"**Description:** {attributes['irwin_IncidentShortDescription']}",
                f"**Primary Fuel:** {attributes['irwin_PrimaryFuelModel']}",
                f"**Percent Contained:** {attributes['irwin_PercentContained']}%",
                f"**Acres Affected:** {round(attributes['poly_Acres_AutoCalc'], 2)}",
                f"**Fire Cause:** {attributes['irwin_FireCause']}"
            ])
            await ctx.send(output)
            reply_amount += 1
            if reply_amount >= 3:
                return
    if reply_amount == 0:
        await ctx.message.reply(f"Sorry, no results for '{ctx.message.content[6:]}'")
@bot.command()
async def reddit(ctx):
    """
    Reply with a meme from the cached r/memes queue, refilling and
    reshuffling the cache from the Reddit API when it runs dry

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Refill the queue when empty
    if not reddit_queue:
        get_posts(
            client = REDDIT["CLIENT_ID"],
            secret = REDDIT["SECRET_TOKEN"],
            username = REDDIT["USERNAME"],
            password = REDDIT["PASSWORD"],
            queue = reddit_queue,
            subreddit = "memes",
            max_posts = 250
        )
        random.shuffle(reddit_queue)

    # Take the next cached post and build the embed around it
    post = reddit_queue.pop()
    embed = Embed(
        title = f"{post['title']}",
        url = post['url'],
        colour = Colour.from_rgb(*[random.randint(0, 255) for _ in range(3)])
    )
    embed.set_footer(text = f"-- r/memes")
    embed.set_image(url = post['url'])
    await ctx.reply(embed = embed)
@bot.command()
async def honk(ctx):
    """
    Honk

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Pick a random honk picture to accompany the honk
    response = random.choice(honk_links)

    # React before replying so they REALLY know he's angry
    for emoji in ('\N{ANGER SYMBOL}', '\N{SWAN}'):
        await ctx.message.add_reaction(emoji)

    # Release the kraken
    await ctx.send(f"**HONK**\n {response}")
@bot.command()
async def uwu(ctx):
    """
    Oh no...

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    responses = [
        "I\'m going to be 100\% upset at you for that.",
        "Do you wish to invoke the wrath of Thanatos, mortal?!",
        "Silence, mortal!",
        "No.",
        "Could you just *not* do that?",
        "Stop that, you stop that this instant!",
        "DO NOT. DO THIS.",
        "Why must you bring \'him\' back?!",
        "Oh no...he\'s back,",
        "Here he comes again...",
        "Ugh.",
        "No...no no no. *No*.",
        "Why are you like this, mortal?",
        "How DARE you?"
    ]
    reactions = [
        '\N{THUMBS DOWN SIGN}',
        '\N{ANGER SYMBOL}'
    ]

    # Pick the reply text first, then the reaction (order preserved for RNG)
    response = random.choice(responses)
    emoji = random.choice(reactions)

    # Respond to the user command call appropriately
    await ctx.message.add_reaction(emoji)
    await ctx.message.reply(response)
@bot.command()
async def RGB(ctx):
    """
    Briefly flash random colours on the server's RGB role (~20 seconds),
    then restore its original colour

    Arguments:
        N/A

    Returns:
        N/A

    Raises:
        N/A
    """
    # Serialize concurrent callers so two colour cycles never interleave
    async with lock:
        # Get the server's RGB role if it exists
        role = get(ctx.guild.roles, name = "RGB")
        if role is None:
            return
        # Only RGB-role holders and administrators may trigger the light show
        if role in ctx.author.roles or administrator(ctx.author.roles):
            original_colour = tuple((role.colour).to_rgb())
            for _ in range(20):
                # Pick and apply a new random colour, then pause briefly
                flash = tuple(random.randint(0, 255) for _ in range(3))
                await role.edit(colour = Colour.from_rgb(*flash))
                await asyncio.sleep(1)
            # Restore the colour the role had before the show started
            await role.edit(colour = Colour.from_rgb(*original_colour))
@bot.command()
async def set_activity(ctx):
    """
    Set the bot's "playing" activity text (owner only)

    Arguments:
        (message): A string message to display as the activity

    Returns:
        N/A

    Raises:
        N/A
    """
    # Check for caller authorization
    if (ctx.author.id != DISCORD["OWNER_ID"]):
        return

    # Strip the "!set_activity " prefix (14 chars) if arguments were given
    if len(ctx.message.content) > 13:
        arguments = ctx.message.content[14:]
    else:
        arguments = ""

    # BUG FIX: the module `discord` was never imported at file level (only
    # from-imports), so `discord.Game` raised NameError; import Game locally
    from discord import Game
    await bot.change_presence(activity = Game(name = arguments))
@bot.command()
async def announcement(ctx):
    """
    Publish the caller's message as an embed in the server's
    #announcements channel (owner or administrators only)

    Arguments:
        (message): A post to publish in the announcements channel

    Returns:
        N/A

    Raises:
        N/A
    """
    # Check for caller authorization
    if (ctx.author.id != DISCORD["OWNER_ID"] and not administrator(ctx.author.roles)):
        return

    # Find the announcements channel if it exists
    channel = get(ctx.guild.channels, name = "announcements")
    if channel is None:
        return

    # Build the embed: strip the "!announcement " prefix (14 chars)
    embed = Embed(
        description = f"{ctx.message.content[14:]}",
        colour = Colour.from_rgb(*[random.randint(0, 255) for _ in range(3)])
    )

    # Setup the optional flaires for the message
    embed.set_footer(text = "-- Sent via Thanatos")
    # NOTE(review): `avatar_url` was replaced by `display_avatar.url` in
    # discord.py 2.x — confirm the installed version still exposes it
    embed.set_author(
        name = ctx.message.author.name,
        icon_url = ctx.author.avatar_url
    )

    # Send the message
    await channel.send(embed = embed)
##################################
#        INITIALIZATION          #
##################################
# Start the bot (blocking call; runs until the process is stopped)
if __name__ == "__main__":
bot.run(DISCORD["TOKEN"]) | Haskili/Thanatos | main.py | main.py | py | 17,397 | python | en | code | 1 | github-code | 36 |
18561686919 | from typing import ForwardRef
import random
def get_reversed_array(arr):
    """Reverse *arr* in place (returns None, like list.reverse, despite the name)."""
    for i in range(len(arr) // 2):
        arr[i], arr[-1 - i] = arr[-1 - i], arr[i]
# Demo: draw 10 distinct digits in random order, then reverse them in place
numbers = random.sample(range(10), 10)
print(numbers)
get_reversed_array(numbers)
print(numbers)
| prithivirajmurugan/Notes | Algorithm_DataStructures/reverse_array.py | reverse_array.py | py | 324 | python | en | code | 0 | github-code | 36 |
32805054000 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
import time
# Load the Wisconsin breast-cancer dataset
data = pd.read_csv('wdbc.data')
# data.info()
# data.columns
# replace 'M' and 'B' with 1 and 0
data['diagnosis'] = data['diagnosis'].map({'M':1,'B':0})
print (data['diagnosis'])
# dataset[1] = dataset[1].map({'M' : 1, 'B' : 0})
# print (dataset[1])
# drop the column 0, which contains 'id' (useless)
data.drop('id', axis=1, inplace=True)
print (data.head(5))
# dataset.drop(columns=0, axis=1, inplace=True)
feature = ['radius_mean','texture_mean', 'smoothness_mean','compactness_mean','symmetry_mean', 'fractal_dimension_mean']
# visualization
# data[feature].hist(bins=50, figsize=(20, 15))
# plt.show()
from sklearn.model_selection import train_test_split
# 70/30 train/test split
train, test = train_test_split(data,test_size=0.3,train_size=0.7)
# NOTE(review): `feature` is redefined identically here — redundant
feature = ['radius_mean','texture_mean', 'smoothness_mean','compactness_mean','symmetry_mean', 'fractal_dimension_mean']
# 2, 3, 6, 7, 10, 11
print (train.shape)
train_X = train[feature]
train_y = train['diagnosis']
test_X = test[feature]
test_y = test['diagnosis']
print (train_X.head(5))
# min-max normalization
def MaxMinNormalization(x):
    """Linearly rescale x to the [0, 1] range (min-max normalization)."""
    lowest = np.min(x)
    highest = np.max(x)
    return (x - lowest) / (highest - lowest)
# NOTE(review): the test set is scaled with its *own* min/max rather than the
# training statistics — confirm this is intended (it leaks test distribution)
train_X = MaxMinNormalization(train_X)
test_X = MaxMinNormalization(test_X)
print (train_X)
print (train_y.shape)
def confusion_matrix(y_true, y_pred):
    """Build a 2x2 confusion matrix for binary labels.

    Layout: [[TP, FP], [FN, TN]] — row 0 holds positive predictions' hits and
    false alarms; row 1 holds misses and correct rejections.
    """
    matrix = np.zeros([2, 2])
    for actual, predicted in zip(y_true, y_pred):
        if actual == 1:
            if predicted == 1:
                matrix[0][0] += 1
            else:
                matrix[1][0] += 1
        else:
            if predicted == 1:
                matrix[0][1] += 1
            else:
                matrix[1][1] += 1
    return matrix
# Training...
# ------------------------
print ("Training...")
# Default SVC (C-support vector classification)
model = svm.SVC()
# thread_time() measures CPU time of the current thread, not wall-clock time
start = time.thread_time()
model.fit(train_X, train_y)
## step 3: testing
print ("Testing...")
prediction = model.predict(test_X)
end = time.thread_time()
print ('Time used: ', (end - start))
## step 4: show the result
print ("show the result...")
# Count mismatches to compute plain accuracy
errorCount = 0
for y_res, y_predict in zip(test_y, prediction):
    if y_res != y_predict:
        errorCount += 1
print ('The classify accuracy is: ', (len(test_y) - errorCount) / len(test_y))
# confusion_matrix layout: [[TP, FP], [FN, TN]]
c_matrix = confusion_matrix(test_y, prediction)
print (c_matrix)
| Siyuan-gwu/Machine-Learning-SVM-Diagnostic | venv/SVM.py | SVM.py | py | 2,386 | python | en | code | 1 | github-code | 36 |
38990098370 | from coleccion_vehiculos import ColeccionVehiculos
from interfaz_lista import ILista
from clase_nuevo import Nuevo
from clase_usado import Usado
import unittest
class TestLista(unittest.TestCase):
    """Tests for ILista insertion, retrieval and price-update operations."""

    def setUp(self):
        """Each test starts with three vehicles; agregarVehiculo inserts at
        the head, so the model order is ["M3 E30", "911 gt3", "Supra"]."""
        # (removed the useless class-level `__lista = object` placeholder)
        self.__lista = ILista(ColeccionVehiculos())
        v = Usado("Toyota", "Supra", 2, "Rojo", 32500, "AR 420 BR", 1998, 55631)
        self.__lista.agregarVehiculo(v)
        v1 = Usado("Porsche", "911 gt3", 2, "Amarillo", 11500, "AC 123 DE", 2020, 35000)
        self.__lista.agregarVehiculo(v1)
        v2 = Nuevo("BMW", "M3 E30", 3, "Negro", 115000, "Full")
        self.__lista.agregarVehiculo(v2)

    def test_insertarVehiculoPosicion0(self):
        v3 = Nuevo("BMW", "M4", 4, "Blanco", 119000, "Full")
        self.__lista.insertarElementoPosicionDeseada(v3, 1)
        self.assertListEqual(self.__lista.getModelos(), ["M4", "M3 E30", "911 gt3", "Supra"])

    def test_insertarVehiculoPosicionIntermedia(self):
        v4 = Usado("Subaru", "WRX STI", 2, "Azul plateado", 120299, "DJ 120 MO", 2000, 135678)
        self.__lista.insertarElementoPosicionDeseada(v4, 2)
        self.assertListEqual(self.__lista.getModelos(), ["M3 E30", "WRX STI", "911 gt3", "Supra"])

    def test_insertarVehiculoPosicionFinal(self):
        v5 = Usado("Audi", "TT", 2, "Blanco", 130000, "AM 107 EF", 2018, 99800)
        self.__lista.agregarElementoalFinal(v5)
        self.assertListEqual(self.__lista.getModelos(), ["M3 E30", "911 gt3", "Supra", "TT"])

    def test_agregarVehiculoColeccion(self):
        v6 = Nuevo("BMW", "M3 E36", 3, "Gris", 118999, "Full")
        self.__lista.agregarVehiculo(v6)
        self.assertListEqual(self.__lista.getModelos(), ["M3 E36", "M3 E30", "911 gt3", "Supra"])

    def test_obtenerObjetoSegunPosicion(self):
        # BUG FIX: assertTrue(x, "911 gt3") treated the expected value as the
        # failure *message* and passed for any truthy return; assert equality
        self.assertEqual(self.__lista.getModeloSegunPosicion(2), "911 gt3")

    def test_modificarPrecioBaseYMostrarPrecioVentaSegunPatente(self):
        self.__lista.modificarPrecioBase_segunPatenteIngresada("AR 420 BR", 40000)
        self.__lista.mostrarPrecioVenta_segunPatenteIngresada("AR 420 BR")
        self.assertEqual(self.__lista.getImporteVentaSegunPatente("AR 420 BR"), 22800)
| Nicolino-c137/Ejercicios-Unidad-3-POO | Ejercicio 9/clase_test.py | clase_test.py | py | 2,201 | python | pt | code | 0 | github-code | 36 |
22355060265 | import re
from collections import ChainMap
from os import environ
from pathlib import Path
from subprocess import run
import pytest
import yaml
here = Path(__file__).absolute().parent
tests_dir = here.parent
root = tests_dir.parent
# Need to be in root for docker context
tmp_dockerfile = Path(root / "Dockerfile.mlrun-test-nb")
with (here / "Dockerfile.test-nb").open() as fp:
dockerfile_template = fp.read()
docker_tag = "mlrun/test-notebook"
def mlrun_api_configured():
config_file_path = here / "test-notebooks.yml"
with config_file_path.open() as fp:
config = yaml.safe_load(fp)
return config["env"].get("MLRUN_DBPATH") is not None
def iterate_notebooks():
    """Yield one pytest.param per notebook test defined in test-notebooks.yml.

    Each notebook config's ``env`` mapping may reference the file-level
    ``env`` section with ``${VAR}`` placeholders; those references are
    resolved here. Raises ValueError for a placeholder that points at a
    missing general env var. Yields nothing when the MLRUN API is not
    configured.
    """
    if not mlrun_api_configured():
        # This function is a generator: a bare ``return`` ends the iteration.
        # The previous ``return []`` only set the (unused) StopIteration
        # value and was misleading.
        return
    config_file_path = here / "test-notebooks.yml"
    with config_file_path.open() as fp:
        config = yaml.safe_load(fp)
    general_env = config["env"]
    for notebook_test_config in config["notebook_tests"]:
        # fill env keys that reference the general env
        test_env = {}
        for key, value in notebook_test_config.get("env", {}).items():
            match = re.match(r"^\$\{(?P<env_var>.*)\}$", value)
            if match is not None:
                env_var = match.group("env_var")
                env_var_value = general_env.get(env_var)
                if env_var_value is None:
                    raise ValueError(
                        f"Env var {env_var} references general env, but it does not exist there"
                    )
                test_env[key] = env_var_value
            else:
                test_env[key] = value
        notebook_test_config["env"] = test_env
        yield pytest.param(
            notebook_test_config, id=notebook_test_config["notebook_name"]
        )
def args_from_env(env):
    """Build Dockerfile ARG lines and the matching --build-arg CLI flags.

    *env* is layered over any MLRUN_* variables from the process environment
    (*env* wins on key collisions, per ChainMap lookup order). Returns
    ``(args, cmd)``: a newline-joined block of ``ARG NAME`` lines and the
    flat ``--build-arg NAME=VALUE`` argument list.
    """
    inherited = {
        key: environ[key] for key in environ if key.startswith("MLRUN_")
    }
    combined = ChainMap(env, inherited)
    arg_lines = []
    cli_flags = []
    for name, value in combined.items():
        arg_lines.append(f"ARG {name}")
        cli_flags.extend(["--build-arg", f"{name}={value}"])
    return "\n".join(arg_lines), cli_flags
@pytest.mark.skipif(
    not mlrun_api_configured(),
    reason="This is an integration test, add the needed environment variables in test-notebooks.yml "
    "to run it",
)
@pytest.mark.parametrize("notebook", iterate_notebooks())
def test_notebook(notebook):
    """Render a Dockerfile for *notebook* and docker-build it.

    Building the image executes the notebook; a non-zero docker exit code
    fails the test.
    """
    path = f'./examples/{notebook["notebook_name"]}'
    args, args_cmd = args_from_env(notebook["env"])
    # Extra pip installs requested by the notebook config become RUN lines.
    deps = []
    for dep in notebook.get("pip", []):
        deps.append(f"RUN python -m pip install --upgrade {dep}")
    pip = "\n".join(deps)
    code = dockerfile_template.format(notebook=path, args=args, pip=pip)
    with tmp_dockerfile.open("w") as out:
        out.write(code)
    # Build from the repo root so the docker context contains the examples dir.
    cmd = (
        ["docker", "build", "--file", str(tmp_dockerfile), "--tag", docker_tag]
        + args_cmd
        + ["."]
    )
    out = run(cmd, cwd=root)
    assert out.returncode == 0, "cannot build"
| mlrun/mlrun | tests/integration/test_notebooks.py | test_notebooks.py | py | 3,076 | python | en | code | 1,129 | github-code | 36 |
class Solution(object):
    def nthUglyNumber(self, n):
        """
        Return the n-th ugly number (positive integers whose only prime
        factors are 2, 3 and 5); the sequence starts 1, 2, 3, 4, 5, 6, 8, ...

        :type n: int
        :rtype: int
        """
        # Three-pointer DP: each index marks the next already-generated ugly
        # number whose multiple (by 2, 3 or 5 respectively) is still pending.
        i2, i3, i5 = 0, 0, 0
        nums = [1]
        # range() instead of the Python-2-only xrange(); same behaviour on
        # Python 2 and 3.
        for _ in range(n - 1):
            u2, u3, u5 = nums[i2] * 2, nums[i3] * 3, nums[i5] * 5
            nxt = min(u2, u3, u5)
            nums.append(nxt)
            # Advance every pointer that produced the minimum so duplicates
            # (e.g. 6 = 2*3 = 3*2) are emitted only once.
            if u2 == nxt:
                i2 += 1
            if u3 == nxt:
                i3 += 1
            if u5 == nxt:
                i5 += 1
        return nums[-1]
| szhu3210/LeetCode_Solutions | LC/264.py | 264.py | py | 473 | python | en | code | 3 | github-code | 36 |
20638178622 | from tkinter import *
from tkinter.messagebox import *
# Full-screen main window for the bus-booking admin menu.
root=Tk()
h,w=root.winfo_screenheight(),root.winfo_screenwidth()
root.geometry('%dx%d+0+0'%(w,h))
def ope():
    # Close this menu; importing the module runs the next screen on import.
    # NOTE(review): 'operator' shadows the stdlib module name — presumably a
    # local operator.py; verify the import resolves to the project module.
    root.destroy()
    import operator
def newb():
    # Close this menu and open the "new bus" screen.
    root.destroy()
    import busdetails
def newr():
    # Close this menu and open the "new route" screen.
    root.destroy()
    import newroute
def newrun():
    # Close this menu and open the "new run" screen.
    root.destroy()
    import newrun
# Header image and titles.
img=PhotoImage(file=".\\Bus_for_project.png")
Label(root,image=img).grid(row=0,column=0,columnspan=4,padx=(500,0),pady=10)
Label(root,text='Online Bus Booking System',font='Arial 18 bold', bg='cadetblue1',fg='red').grid(row=1,column=0,columnspan=4,padx=(500,0))
Label(root,text='Add New Details to DataBase',font='Arial 14 bold',fg='green4').grid(row=2,column=0,columnspan=4,padx=(500,0),pady=20)
# One button per "add new ..." action, laid out on a single row.
Button(root,text='New Operator',bg='SpringGreen2',command=ope).grid(row=3,column=0,padx=(500,50))
Button(root,text='New Bus',bg='orange red',command=newb).grid(row=3,column=1,padx=50)
Button(root,text='New Route',bg='DodgerBlue3',command=newr).grid(row=3,column=2,padx=50)
Button(root,text='New Run',bg='pink4',command=newrun).grid(row=3,column=3,padx=50)
root.mainloop()
| aviraljain19/Python-Bus-Booking-Project | addbus.py | addbus.py | py | 1,153 | python | en | code | 0 | github-code | 36 |
30467454597 | # Given the running logs of n functions that are executed in a nonpreemptive single threaded CPU, find the exclusive time of these functions.
#
# Each function has a unique id, start from 0 to n-1. A function may be called recursively or by another function.
#
# A log is a string has this format : function_id:start_or_end:timestamp. For example, "0:start:0" means function 0 starts from the very beginning of time 0. "0:end:0" means function 0 ends to the very end of time 0.
#
# Exclusive time of a function is defined as the time spent within this function, the time spent by calling other functions should not be considered as this function's exclusive time. You should return the exclusive time of each function sorted by their function id.
#
# Example 1:
# Input:
# n = 2
# logs =
# ["0:start:0",
# "1:start:2",
# "1:end:5",
# "0:end:6"]
# Output:[3, 4]
# Explanation:
# Function 0 starts at time 0, then it executes 2 units of time and reaches the end of time 1.
# Now function 0 calls function 1, function 1 starts at time 2, executes 4 units of time and end at time 5.
# Function 0 is running again at time 6, and also end at the time 6, thus executes 1 unit of time.
# So function 0 totally execute 2 + 1 = 3 units of time, and function 1 totally execute 4 units of time.
# Note:
# Input logs will be sorted by timestamp, NOT log id.
# Your output should be sorted by function id, which means the 0th element of your output corresponds to the exclusive time of function 0.
# Two functions won't start or end at the same time.
# Functions could be called recursively, and will always end.
# 1 <= n <= 100
# 给一个array表示某个程序开始或结束时间点。求每个程序的总共运行时间,
# class Solution(object):
# def exclusiveTime(self, n, logs):
# """
# :type n: int
# :type logs: List[str]
# :rtype: List[int]
# """
# logs = [ele.split(':') for ele in logs]
# stack = []
# stack.append([logs[0][0],logs[0][1],int(logs[0][2]),0,0]) #stack-ele = [id,stadus,start-time,i,interval]
#
# res = {}
# for i in range(n):
# res[i] = 0
# i = 1
# while i < len(logs): #不能用stack != 【】这个条件
# if stack != [] and stack[-1][0] == logs[i][0] and stack[-1][1] == 'start' and logs[i][1] == 'end':
# if i == int(stack[-1][3])+1:
# interval = int(logs[i][2]) - int(stack.pop()[2]) + 1#如果是连续的,就时间相减+1
# res[int(logs[i][0])] += interval #res-ele = [id:runtime累加]
# if stack != []:
# stack[-1][4] += interval
# else: #如果不连续,
# cur = stack.pop()
#
# res[int(logs[i][0])] += int(logs[i][2])-cur[2]-cur[4]+1
# interval = int(logs[i][2]) - cur[2] + 1
# if stack != []:
#
# stack[-1][4] += interval
# i += 1
# else:
# stack.append([logs[i][0],logs[i][1],int(logs[i][2]),i,0]) #stack-ele = [id,stadus,start-time,i,interval]
# i += 1
#
# return [ele[1] for ele in sorted(res.items(),key = lambda x:x[0])]
#
# ##############################
# class Solution1:
# def exclusiveTime(self, n, logs):
# log_record = [0] * n
# log_stack = []
# # fake 爸爸 node,为了当root给前一个点加others_time时不出错
# log_stack.append(Node(-1, -1))
# for log in logs:
# ele = log.split(':')
# if ele[1] == 'start':
# node = Node(int(ele[0]), int(ele[2]))
# log_stack.append(node)
# elif ele[1] == 'end':
# node = log_stack.pop()
# log_stack[-1].others_time += int(ele[2]) + 1 - node.start_time
# log_record[int(ele[0])] += int(ele[2]) + 1 - node.start_time - node.others_time
# return log_record
#
#
# class Node:
# def __init__(self, id, start_time):
# self.id = id
# self.start_time = start_time
# self.others_time = 0
# task: start/end:timestamp
# if ele[1] == 'start':
# node = node(ele[2], ele[0], 0)
# stack.append(node)
# elif ele[1] == 'end':
# cur = stack[-1]
# total_time = ele[2] - cur.start - cur.exclude
# hash[ele[0]] += total_time
# node(time, task_id, exclude_time)
class Solution:
    def exclusiveTime(self, n: int, logs):
        """Compute per-function exclusive run time from start/end log lines.

        Each log is "task_id:start|end:timestamp". Timestamps are inclusive:
        a function that starts at time t and ends at time t ran for one full
        unit, so each finished span lasts end + 1 - start units (the +1 was
        missing before, undercounting every segment — cf. the commented
        reference solution above). Returns a dict mapping task id (string)
        to its total exclusive time.
        """
        hash_task_to_time = {}
        stack = []
        # Sentinel frame so the stack is never empty when attributing a
        # finished child's wall-clock time to its parent.
        stack.append(Node('-1', 0))
        for ele in logs:
            ele = ele.split(':')
            if ele[1] == 'start':
                stack.append(Node(ele[0], int(ele[2])))
            elif ele[1] == 'end':
                cur = stack.pop()
                # Inclusive end timestamp: the span covers end + 1 - start units.
                elapsed = int(ele[2]) + 1 - cur.start_time
                total_time = elapsed - cur.exclude
                hash_task_to_time[ele[0]] = hash_task_to_time.get(ele[0], 0) + total_time
                # The parent must not count the child's wall-clock span.
                stack[-1].exclude += elapsed
        return hash_task_to_time


class Node:
    """One call-stack frame: task id, start timestamp, time spent in callees."""
    def __init__(self, task, start_time):
        self.task = task
        self.start_time = start_time
        self.exclude = 0
if __name__ == '__main__':
    # Ad-hoc driver: prints the per-task time dict for a few sample logs.
    s = Solution()
    # n = 2
    # logs = ["0:start:0","1:start:2","1:end:5", "0:end:6"] #Output:[3, 4]
    #
    # print s.exclusiveTime(n,logs)
    #
    # n = 1
    # logs = ["0:start:0", "0:start:2", "0:end:5", "0:start:6", "0:end:6", "0:end:7"] #Output:[8]
    # print(s.exclusiveTime(n, logs))
    # n = 1
    # logs = ["0:start:0", "0:end:0"]
    # print s.exclusiveTime(n, logs)
    n = 1
    logs =["0:start:0", "0:start:1", "0:start:2", "0:end:3", "0:end:4", "0:end:5"]
    print(s.exclusiveTime(n, logs))
    n = 3 #[1, 1, 2]
    logs =["0:start:0","0:end:0","1:start:1","1:end:1","2:start:2","2:end:2","2:start:3","2:end:3"]
    print(s.exclusiveTime(n, logs))
| dundunmao/LeetCode2019 | 636. Exclusive Time of Functions.py | 636. Exclusive Time of Functions.py | py | 6,078 | python | en | code | 0 | github-code | 36 |
74470689064 | import os
import datetime
import glob
import urllib.request
import tqdm
import gzip
import pandas as pd
import re
import utils
import random
from time import gmtime, strftime
from multiprocessing import Process
config = __import__('0_config')
def clean_row(row):
    """Decode a raw bytes line as UTF-8 (dropping undecodable bytes) and strip whitespace."""
    text = row.decode('utf-8', 'ignore')
    return text.strip()
def compute_year_quarter(START_YEAR, START_QUARTER):
    """List (year, quarter) pairs from START_YEAR/START_QUARTER through the current quarter."""
    now = datetime.datetime.now()
    this_year = now.year
    this_quarter = (now.month - 1) // 3 + 1
    pairs = [
        (year, quarter)
        for year in range(START_YEAR, this_year + 1)
        for quarter in range(1, 4 + 1)
    ]
    # We have not reached the fourth quarter yet: drop the trailing quarters.
    if this_quarter < 4:
        pairs = pairs[0:-(4 - this_quarter)]
    # The first report is not in the first quarter: drop the leading quarters.
    if START_QUARTER > 1:
        pairs = pairs[START_QUARTER - 1:]
    return pairs
def download_files(year_x_quarter, URL_INDEX_PATTERN, DATA_GZ_FOLDER, START_YEAR):
    """Download missing quarterly index .gz files into DATA_GZ_FOLDER.

    Mutates *year_x_quarter* in place, removing pairs whose .gz file is
    already on disk, then fetches the remaining ones. Returns True if at
    least one new file was downloaded.
    """
    if not os.path.isdir(DATA_GZ_FOLDER):
        os.makedirs(DATA_GZ_FOLDER)
    # Don't download already downloaded files
    # NOTE: 'id' shadows the builtin; it counts how many pairs were already
    # removed, which shifts the remaining list indices left.
    for id, gz_file in enumerate(glob.glob(DATA_GZ_FOLDER + '/*.gz')):
        y, q = [int(x) for x in gz_file[gz_file.rfind('/') + 1: gz_file.rfind('.')].split('_')]
        idx = (y - START_YEAR) * 4 + q - 1
        idx -= id # Removing an element in the list will translate all indices
        assert (y,q) == year_x_quarter[idx]
        del year_x_quarter[idx]
    new_data = False
    # Download GZ files
    for y, q in tqdm.tqdm(year_x_quarter, desc='Downloading company\' indices'):
        url = URL_INDEX_PATTERN.format(quarter=q, year=y)
        filename = os.path.join(DATA_GZ_FOLDER, '{year}_{quarter}.gz'.format(year=y, quarter=q))
        urllib.request.urlretrieve(url, filename)
        new_data = True
    return new_data
def read_data(DATA_GZ_FOLDER, DATA_PD_FOLDER, PDF_MERGE_FILE):
    """Parse every quarterly index .gz into a pandas DataFrame and merge them.

    Per-quarter DataFrames are cached as pickles in DATA_PD_FOLDER; the
    combined frame is cached at PDF_MERGE_FILE and only rebuilt when at
    least one quarter had to be re-parsed. Returns (pdfs, pdfs_merged, keys).
    """
    extra_keys = ['year', 'quarter']
    if not os.path.isdir(DATA_PD_FOLDER):
        os.makedirs(DATA_PD_FOLDER)
    pdfs = {}
    keys = None
    pattern = re.compile(" *")
    can_safely_load_pdf_merge = True
    for file in tqdm.tqdm(glob.glob(DATA_GZ_FOLDER + '/*.gz'), desc='Processing company indices'):
        y, q = [int(x) for x in file[file.rfind('/')+1:file.rfind('.')].split('_')]
        if y not in pdfs:
            pdfs[y] = {}
        if q not in pdfs[y]:
            pdfs[y][q] = []
        filename = os.path.join(DATA_PD_FOLDER, '{year}_{quarter}.pd'.format(year=y, quarter=q))
        # Read dataframe or process GZ file it if not exists
        if os.path.isfile(filename):
            pdfs[y][q] = pd.read_pickle(filename)
            if keys is None:
                keys = list(pdfs[y][q].columns.values)
        else:
            can_safely_load_pdf_merge = False
            # Read file
            with gzip.open(file, 'r') as fp:
                # Skip the fixed-size banner; the header row supplies the
                # column names on the first file, then is skipped too.
                for i in range(8):
                    next(fp)
                if keys is None:
                    keys = re.split(pattern, clean_row(fp.readline())) + extra_keys
                else:
                    next(fp)
                next(fp)
                # Raw data
                # Columns are separated by runs of >= 2 spaces; the row is
                # scanned right-to-left accumulating one field at a time.
                for row in fp:
                    row = clean_row(row)
                    attributes = []
                    attribute = ''
                    spaces = 0
                    for c in row[::-1]:
                        if c == ' ':
                            spaces += 1
                        else:
                            spaces = 0
                        if spaces < 2:
                            attribute += c
                        elif attribute != ' ':
                            attributes.append(attribute[::-1].strip())
                            attribute = ''
                            spaces = 0
                    if attribute != '':
                        attributes.append(attribute[::-1].strip())
                    attributes = attributes[::-1]
                    # Fold any extra leading fields back into the first one
                    # (company names may contain double spaces).
                    if len(attributes) >= (len(keys) - len(extra_keys)):
                        while len(attributes) > (len(keys) - len(extra_keys)):
                            attributes[0] += ' ' + attributes[1]
                            del attributes[1]
                        pdfs[y][q].append(attributes)
            # Transform to Pandas dataframe
            pdfs[y][q] = pd.DataFrame.from_records([t + [y, q] for t in pdfs[y][q]], columns=keys)
            pdfs[y][q].to_pickle(os.path.join(DATA_PD_FOLDER, '{year}_{quarter}.pd'.format(year=y, quarter=q)))
    pdfs_merged = None
    if not can_safely_load_pdf_merge:
        pdfs_merged = pd.DataFrame([], columns=keys)
        # NOTE(review): the loop variable rebinds 'pdfs', so after this branch
        # the returned 'pdfs' is the LAST per-quarter DataFrame, not the dict
        # (the comprehension is evaluated before the rebinding). Callers that
        # only use pdfs_merged are unaffected — verify before relying on pdfs.
        for pdfs in tqdm.tqdm([pdfs[y][q] for y in sorted(pdfs.keys()) for q in sorted(pdfs[y].keys())], desc='Combining Pandas DataFrames'):
            pdfs_merged = pdfs_merged.append(pdfs, ignore_index=True)
        pdfs_merged.to_pickle(PDF_MERGE_FILE)
    else:
        pdfs_merged = pd.read_pickle(PDF_MERGE_FILE)
    return pdfs, pdfs_merged, keys
def download_annual_reports(pdfs_10k, DATA_AR_FOLDER, NAME_FILE_PER_CIK, URL_ROOT, LOG_FILE):
    """Create one folder per CIK (with its company-name file) and fetch all 10-K filings.

    Downloads run either across config.NUM_CORES processes or sequentially,
    depending on config.MULTITHREADING.
    """
    if not os.path.isdir(DATA_AR_FOLDER):
        os.makedirs(DATA_AR_FOLDER)
    # Create CIK folders and also the file containing all the related names
    for cik, pdf in tqdm.tqdm(pdfs_10k.groupby('CIK'), desc='Creating company folders'):
        company_names = pdf['Company Name'].unique()
        folder = os.path.join(DATA_AR_FOLDER, cik)
        if not os.path.isdir(folder):
            os.mkdir(folder)
        name_file = os.path.join(folder, NAME_FILE_PER_CIK)
        if not os.path.exists(name_file):
            with open(name_file, 'a', encoding='utf-8') as fp:
                for company_name in company_names:
                    fp.write(company_name + '\n')
    # Download all annual reports
    if config.MULTITHREADING:
        print('Downloading company\' annual reports')
        whole_entries = [row for idx, row in pdfs_10k.iterrows()]
        # NOTE(review): assumes utils.chunks yields at least NUM_CORES lists
        # (rows[i] below would raise otherwise) — verify utils.chunks.
        rows = utils.chunks(whole_entries, 1 + int(len(whole_entries) / config.NUM_CORES))
        random.shuffle(rows) # Better separate work load
        del whole_entries # High memory consumption
        procs = []
        for i in range(config.NUM_CORES):
            procs.append(Process(target=_download_annual_reports_process, args=(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, rows[i])))
            procs[-1].start()
        for p in procs:
            p.join()
    else:
        for idx, row in tqdm.tqdm(pdfs_10k.iterrows(), desc='Downloading company\' annual reports'):
            _download_annual_reports(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, row)
def _download_annual_reports_process(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, rows):
    """Worker entry point: download every annual report in *rows* sequentially."""
    for entry in rows:
        _download_annual_reports(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, entry)
def _download_annual_reports(DATA_AR_FOLDER, LOG_FILE, URL_ROOT, row):
    """Fetch one annual report into the company's CIK folder.

    Files that already exist are skipped. On a failed download the error is
    appended to LOG_FILE and any partially written file is removed.
    """
    target_dir = os.path.join(DATA_AR_FOLDER, row['CIK'])
    url = URL_ROOT + row['File Name']
    filename = os.path.join(target_dir, url[url.rfind('/') + 1:])
    if os.path.exists(filename):
        return
    try:
        urllib.request.urlretrieve(url, filename)
    except:
        with open(LOG_FILE, 'a') as fp:
            fp.write('{}: {}, {} couldn\'t be downloaded\n'.format(strftime("%d-%m-%Y %H:%M:%S", gmtime()), url, filename))
        if os.path.exists(filename):
            os.remove(filename)
if __name__ == "__main__":
    random.seed(config.SEED)
    # Start with a fresh log file for this run.
    if os.path.exists(config.LOG_FILE):
        os.remove(config.LOG_FILE)
    # Compute indices for the years and quarters
    year_x_quarter = compute_year_quarter(config.START_YEAR, config.START_QUARTER)
    # Download all indices related to the determined years and quarters
    need_to_process = download_files(year_x_quarter, config.URL_INDEX_PATTERN, config.DATA_GZ_FOLDER, config.START_YEAR)
    # If nothing has changed, load the final dataframe
    if need_to_process or not os.path.exists(config.PDF_MERGE_10K_FILE):
        # Process the data
        pdfs, pdfs_merge, keys = read_data(config.DATA_GZ_FOLDER, config.DATA_PD_FOLDER, config.PDF_MERGE_FILE)
        # Filter only 10k annual reports
        pdfs_10k = pdfs_merge[(pdfs_merge['Form Type'] == config.FORM_TYPE)]
        pdfs_10k.to_pickle(config.PDF_MERGE_10K_FILE)
    else:
        pdfs_10k = pd.read_pickle(config.PDF_MERGE_10K_FILE)
    # Download annual reports
    download_annual_reports(pdfs_10k, config.DATA_AR_FOLDER, config.NAME_FILE_PER_CIK, config.URL_ROOT, config.LOG_FILE)
| Diego999/Risk-Analysis-using-Topic-Models-on-Annual-Reports | 1_download_data.py | 1_download_data.py | py | 8,603 | python | en | code | 6 | github-code | 36 |
class Spreader(object):
    """Round-robin iterator spreading items from a pool of blocks across
    several Feeder instances that share one underlying block iterator.

    The class header on the original line was garbled by extraction
    ("16758722392 | class Spreader(object):"); restored here. The method
    bodies are unchanged (Python 2 iterator protocol).
    """
    def __init__(self, blockpool, spread):
        # One shared iterator so each feeder consumes distinct blocks.
        blockpool_iter = iter(blockpool)
        self.feeders = [Feeder(blockpool_iter) for _ in range(spread)]
        self.current = 0

    def __iter__(self):
        return self

    def next(self):
        """Return the next item, rotating across feeders.

        Exhausted feeders are removed; StopIteration is raised only once
        every feeder (and thus the pool) is exhausted.
        """
        # NOTE(review): a feeder that legitimately yields None would make
        # this loop spin forever — assumes blocks never contain None.
        next = None
        while next is None:
            try:
                next = self.feeders[self.current].next()
            except StopIteration:
                # Deleting shifts the remaining feeders left; re-wrap index.
                del self.feeders[self.current]
                if len(self.feeders) <= 0:
                    raise StopIteration
                self.current %= len(self.feeders)
                continue
            self.current = (self.current + 1) % len(self.feeders)
        return next
class Feeder(object):
    """Pull items one at a time from blocks handed out by a shared pool iterator."""
    def __init__(self, blockpool):
        self.blockpool = blockpool
        self.block = iter([])

    def next(self):
        """Return the next item, fetching a new block from the pool when the
        current block is exhausted; StopIteration propagates from the pool
        itself once it is empty.
        """
        while True:
            try:
                # Builtin next() (Python 2.6+) instead of the Python-2-only
                # .next() method call, so this also works on Python 3.
                return next(self.block)
            except StopIteration:
                self.block = iter(next(self.blockpool))

    # Python 3 iterator protocol; the Python 2 style .next() keeps working too.
    __next__ = next
| taavi/job_spreader | spreader1.py | spreader1.py | py | 1,027 | python | en | code | 5 | github-code | 36 |
class Solution(object):
    def totalFruit(self, f: List[int]) -> int:
        """Length of the longest window of *f* containing at most two distinct values.

        Classic sliding window: grow the right edge, shrink from the left
        whenever a third fruit type enters the basket. Returns 0 for an
        empty input (the previous float('-inf') initial value violated the
        declared -> int return type on empty lists).
        """
        res, left = 0, 0
        basket = collections.defaultdict(int)
        for right in range(len(f)):
            basket[f[right]] += 1
            # Shrink until at most two distinct types remain in the window.
            while len(basket) > 2:
                basket[f[left]] -= 1
                if basket[f[left]] == 0:
                    del basket[f[left]]
                left += 1
            res = max(res, right - left + 1)
        return res
| ZhengLiangliang1996/Leetcode_ML_Daily | contest/weekcontest102/fruitintoBaskets.py | fruitintoBaskets.py | py | 587 | python | en | code | 1 | github-code | 36 |
7737901543 | from django.contrib import admin
from django.urls import include, path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework import permissions
# Swagger/OpenAPI schema view, served under /schema/ below.
schema_view = get_schema_view(
    openapi.Info(
        title="Wallet API",
        default_version='v1',
        description="Application made for tracking your transactions",
        contact=openapi.Contact(email="eugene.osakovich@gmail.com"),
        license=openapi.License(name="License"),
    ),
    public=True,
    permission_classes=[permissions.AllowAny],
)
# URL routes: interactive API docs, Django admin, and the API application.
urlpatterns = [
    path('schema/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-wallet-api'),
    path('admin/', admin.site.urls),
    path('api/', include('api.urls')),
]
| sheirand/Wallet | core/urls.py | urls.py | py | 743 | python | en | code | 0 | github-code | 36 |
42576586601 | """" Detecção de Relógio """
import cv2
# Haar cascade trained to detect clocks ("relogios").
classificador = cv2.CascadeClassifier('cascades\\relogios.xml')
imagem = cv2.imread('outros\\relogio2.jpg')
# Cascade detection operates on a grayscale copy of the image.
imagemcinsa = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
detectado = classificador.detectMultiScale(imagemcinsa, scaleFactor= 1.01, minSize=(10,10), minNeighbors=10)
# Draw a red rectangle around each detection (x, y, width, height).
for (x, y, l, a) in detectado:
    cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)
cv2.imshow('itens',imagem)
cv2.waitKey() | alans96/PythonProject | Computer Vision/1 Detecção de Faces com Python e OpenCV/6 exe.py | 6 exe.py | py | 459 | python | pt | code | 0 | github-code | 36 |
# Module-level DP state shared with wildCard(); (re)initialised per word by
# the driver loop below. The first line was garbled by extraction
# ("35652531980 | cache = []"); restored here.
cache = []
answer = []
match = ""
dic = ""
lenMatch = 0
lenDic = 0
def wildCard(x, y):
    """Memoized wildcard match of pattern match[x:] against word dic[y:].

    '?' matches any single character, '*' matches any (possibly empty)
    string. Relies on the module-level globals set by the driver loop;
    cache holds -1 (unknown) / True / False per (x, y) pair.
    """
    global match, dic, lenMatch, lenDic
    if cache[x][y] != -1:
        return cache[x][y]
    elif x == lenMatch-1 and y == lenDic-1:
        # Both on their last character: direct comparison (or a wildcard).
        if match[x] == '*' or match[x] == '?' or match[x] == dic[y]:
            return True
        else:
            return False
    elif x == lenMatch-1:
        # Pattern on its last character but word has more: only '*' absorbs the rest.
        if match[x] == '*':
            return True
        else:
            return False
    elif y == lenDic:
        # Word exhausted: remaining pattern must be able to match the empty string.
        if match[x] == '*':
            return wildCard(x+1, y)
        else:
            return False
    else:
        if match[x] == '*':
            # '*' either consumes one more character of the word or matches empty here.
            # NOTE(review): several branches above return without filling
            # cache[x][y]; correctness is unaffected, only memoization coverage.
            cache[x][y] = wildCard(x, y+1) or wildCard(x+1, y)
            return cache[x][y]
        else:
            if match[x] == dic[y] or match[x] == '?':
                cache[x][y] = wildCard(x+1, y+1)
                return cache[x][y]
            else:
                return False
# Driver: for each test case, read a pattern and a word list, keep the words
# that match, and accumulate them (sorted per case) into the global 'answer'.
case = int(input())
for _ in range(case):
    match = str(input())
    matchNum = int(input())
    lenMatch = len(match)
    temp = []
    for _ in range(matchNum):
        dic = str(input())
        lenDic = len(dic)
        # Fresh memo table per word (problem bounds: lengths <= 100).
        cache = [[-1 for _ in range(101)] for _ in range(101)]
        if wildCard(0, 0):
            temp.append(dic)
    temp.sort()
    answer.extend(temp)
# All cases' matches are printed together at the end.
for i in range(len(answer)):
    print(answer[i])
| 0nandon/Algorithms_Practice | algospot/Dynamic programming/WILDCARD.py | WILDCARD.py | py | 1,365 | python | en | code | 0 | github-code | 36 |
40536503890 | import requests
import json
import os
import _G
from datetime import datetime
import utils
# Cache file holding the previously posted news list (newest first).
PREV_NEWS_FILE = '.mtd_prevnews.json'
# Endpoints come from the environment; either may be None when unset.
NEWS_URL = os.getenv('MTD_NEWS_URL')
WEBHOOK_URL = os.getenv('MTD_WEBHOOK_URL')

# News tag id -> internal tag name.
MTD_NEWS_TAG = {
  1: 'MAINTENANCE',
  2: 'UPDATE',
  3: 'GACHA',
  4: 'EVENT',
  5: 'CAMPAIGN',
  6: 'BUG',
  7: 'MISC',
}

# News tag id -> embed author icon URL.
MTD_NEWS_ICON = {
  1: 'https://cdn-icons-png.flaticon.com/512/777/777081.png',
  2: 'https://cdn.icon-icons.com/icons2/1508/PNG/512/updatemanager_104426.png',
  3: 'https://cdn-icons-png.flaticon.com/512/4230/4230567.png',
  4: 'https://cdn-icons-png.flaticon.com/512/4285/4285436.png',
  5: 'https://cdn-icons-png.flaticon.com/512/3867/3867424.png',
  6: 'https://www.iconsdb.com/icons/preview/red/error-7-xxl.png',
  7: 'https://cdn-icons-png.flaticon.com/512/1827/1827301.png'
}

# News tag id -> embed accent color (0xRRGGBB).
MTD_NEWS_COLOR = {
  1: 0xfc3aef,
  2: 0x5299f7,
  3: 0xfad73c,
  4: 0x50faf4,
  5: 0xff5cb0,
  6: 0xdb043e,
  7: 0xcccccc,
}

# Japanese display strings; NEWS_TAG parallels MTD_NEWS_TAG above.
MTD_VOCAB_JP = {
  'NEWS_TAG': {
    1: 'メンテナンス',
    2: 'アップデート',
    3: 'ガチャ',
    4: 'イベント',
    5: 'キャンペーン',
    6: '不具合',
    7: 'その他',
  }
}
def get_webhook_url():
    """Return the Discord webhook URL configured via MTD_WEBHOOK_URL."""
    # Reading a module-level name requires no ``global`` declaration; the
    # previous ``global WEBHOOK_URL`` was a no-op.
    return WEBHOOK_URL
def get_news_data():
    """Fetch and return the current news list from the MTD news endpoint."""
    response = requests.get(NEWS_URL)
    return response.json()['newsList']
def get_old_news():
    """Load the cached news list, seeding the cache from the live feed on first run."""
    if os.path.exists(PREV_NEWS_FILE):
        with open(PREV_NEWS_FILE, 'r') as fp:
            return json.load(fp)
    # First run: snapshot the live feed, newest first, and persist it.
    fresh = sorted(get_news_data(), key=lambda o: -o['id'])
    with open(PREV_NEWS_FILE, 'w') as fp:
        json.dump(fresh, fp)
    return fresh
async def update():
    """Poll the news feed and post every item newer than the cached snapshot.

    Compares the newest item's postedAt timestamp against the cached list;
    on new items, posts them oldest-first and rewrites the cache file.
    """
    news = {}
    try:
        news = get_news_data()
        news = sorted(news, key=lambda o: -o['id'])
    except Exception as err:
        utils.handle_exception(err)
        return
    # The endpoint signals maintenance by returning a 'service unavailable'
    # message as the first entry.
    if not news or 'service unavailable' in news[0]['message'].lower():
        _G.log_warning("News data endpoint failure:")
        if news:
            _G.log_warning(news[0]['message'])
        return
    olds = get_old_news()
    o_cksum = 0
    _G.log_debug("Checking MTD news")
    if olds:
        o_cksum = int(datetime.fromisoformat(olds[0]['postedAt']).timestamp())
    n_cksum = int(datetime.fromisoformat(news[0]['postedAt']).timestamp())
    if o_cksum > n_cksum:
        _G.log_warning(f"Old news newer than latest news ({o_cksum} > {n_cksum})")
    elif o_cksum == n_cksum:
        _G.log_debug("No news, skip")
        return
    _G.log_info("Gathering MTD news")
    # Collect items newer than the cached head; insert(0, ...) reverses the
    # newest-first order so messages are sent oldest-first.
    ar = []
    for n in news:
        if not olds or n['id'] > olds[0]['id']:
            ar.insert(0, n)
        else:
            break
    for a in ar:
        try:
            send_message(a)
        except Exception as err:
            utils.handle_exception(err)
    # Persist the new snapshot only after attempting all sends.
    with open(PREV_NEWS_FILE, 'w') as fp:
        json.dump(news, fp)
def send_message(obj):
    """Post one news item to the Discord webhook as a single embed."""
    posted_ts = int(datetime.fromisoformat(obj['postedAt']).timestamp())
    embed = {
        'author': {
            'name': MTD_VOCAB_JP['NEWS_TAG'][obj['tag']],
            'icon_url': MTD_NEWS_ICON[obj['tag']],
        },
        'title': f"**{obj['title']}**",
        'description': f"<t:{posted_ts}>",
        'color': MTD_NEWS_COLOR[obj['tag']],
        'fields': []
    }
    # this will fail if total length is over 6000
    for msg in utils.chunk(obj['message'], 1000):
        embed['fields'].append({
            'name': " \u200b",  # zero-width space
            'value': msg
        })
    payload = {'embeds': [embed]}
    return requests.post(get_webhook_url(), json=payload)
def init():
    """Module initialisation hook; nothing to set up for this module."""
    pass
def reload():
    """Module reload hook; nothing to refresh for this module."""
    pass
| ken1882/RD_Terminator_3k | module/mtd_news.py | mtd_news.py | py | 3,622 | python | en | code | 0 | github-code | 36 |
11580789914 | import re
import chatterbot
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
import logging
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
# Parse the Q:/A: prefixed dataset into a flat list of utterances.
# NOTE(review): hard-coded absolute Windows path — verify it matches the deployment.
train_data = []
# 'with' guarantees the dataset file is closed (the original leaked the handle).
with open('E:\\ProjectWork\\ImranV.1.0\\dataset.txt', 'r') as f:
    for line in f:
        m = re.search('(Q:|A:)?(.+)', line)
        if m:
            train_data.append(m.groups()[1])
chatbot = ChatBot(
    "Terminal",
    storage_adapter="chatterbot.storage.SQLStorageAdapter", #allows the chat bot to connect to SQL databases
    input_adapter="chatterbot.input.TerminalAdapter", #allows a user to type into their terminal to communicate with the chat bot.
    logic_adapters=['chatterbot.logic.BestMatch','chatterbot.logic.MathematicalEvaluation',],
    output_adapter="chatterbot.output.TerminalAdapter", # print chatbot responce
    #database="../database.db" # specify the path to the database that the chat bot will use
    database_uri='sqlite:///database.sqlite3'
)
trainer = ListTrainer(chatbot)
trainer.train(train_data)
print("Type your question here...")
# Simple terminal REPL; exits on Ctrl-C / Ctrl-D.
while True:
    try:
        chatbot_input = chatbot.get_response(input("Type here: "))
        print(chatbot_input)
    # Press ctrl-c or ctrl-d to exit
    except(KeyboardInterrupt, EOFError, SystemExit):
        break
| AakashMaheedar1998/ChatBot | Chatbot2.py | Chatbot2.py | py | 1,320 | python | en | code | 0 | github-code | 36 |
2946357970 | import heapq,copy,collections
from typing import List,Optional
from collections import deque
class ListNode:
    """Singly linked list node holding a value and a reference to the next node."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    """Classic singly-linked-list problems: sort, intersection, cycle, reverse."""
    # Sort List: given the head of a linked list, sort it ascending and
    # return the sorted list's head.
    def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Merge sort: split with slow/fast pointers, recurse, merge."""
        def mergeTwoList(head1: Optional[ListNode], head2: Optional[ListNode]) -> Optional[ListNode]:
            # Merge two sorted lists behind a dummy head; nodes are copied
            # while both lists remain, then the leftover tail is linked in.
            dummpy = ListNode()
            head = dummpy
            while head1 and head2:
                if head1.val < head2.val:
                    head.next = ListNode(head1.val)
                    head1 = head1.next
                else:
                    head.next = ListNode(head2.val)
                    head2 = head2.next
                head = head.next
            if head1:
                head.next = head1
            if head2:
                head.next = head2
            return dummpy.next
        if not head or not head.next:
            return head
        # Starting fast at head.next.next keeps the split balanced so the
        # recursion terminates on short lists.
        slow = head
        fast = head.next.next
        while slow and fast:
            if not fast.next:
                break
            fast = fast.next.next
            slow = slow.next
        mid_node = slow.next
        slow.next = None
        return mergeTwoList(self.sortList(head), self.sortList(mid_node))
    # Intersection of Two Linked Lists: return the node where the two lists
    # first intersect, or None if they never do.
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> Optional[ListNode]:
        """Measure both lengths, advance the longer list, then walk in lockstep."""
        ka = 0
        kb = 0
        tmpA = headA
        tmpB = headB
        while tmpA:
            ka += 1
            tmpA = tmpA.next
        while tmpB:
            kb += 1
            tmpB = tmpB.next
        tmpA = headA
        tmpB = headB
        # Skip the length difference so both cursors are equidistant from the end.
        while ka > kb and tmpA:
            tmpA = tmpA.next
            ka -= 1
        while ka < kb and tmpB:
            tmpB = tmpB.next
            kb -= 1
        while tmpA and tmpB:
            if tmpA == tmpB:
                return tmpA
            tmpA = tmpA.next
            tmpB = tmpB.next
        return None
    # Linked List Cycle II: return the node where the cycle begins, or None
    # if the list has no cycle.
    def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Floyd's tortoise/hare: after the pointers meet, restarting the
        slow pointer from head makes them meet again at the cycle entry."""
        if not head or not head.next:
            return None
        slow = head
        fast = head
        while slow and fast:
            if not fast.next:
                return None
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                break
        slow = head
        while slow and fast:
            if slow == fast:
                return slow
            slow = slow.next
            fast = fast.next
        return None
    # Reverse Linked List: reverse the list and return the new head.
    def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Head insertion: repeatedly move the node after the original head
        (which stays the tail) to the front of the list."""
        if not head or not head.next:
            return head
        tail = head
        while tail.next:
            cur_node = tail.next
            tail.next = cur_node.next
            cur_node.next = head
            head = cur_node
        return head
| gpj10054211/guoDeveloper | listnode.py | listnode.py | py | 3,590 | python | en | code | 0 | github-code | 36 |
12476621690 | #example 21 generating specific pattern"
'''
*
**
***
****
*****
'''
i=1
j=1
while i<=5:
j=1
# if (i==3):
# continue
# pass
while j<=i:
print("*",end="")
j+=1
print("")
i+=1
i=2
j=2
while i<=5:
j=5
while j>=i:
print("*",end="")
j-=1
print("")
i+=1
| Mahnoorahmed928/exercixes_practice_python | specific_pattern2.py | specific_pattern2.py | py | 354 | python | en | code | 0 | github-code | 36 |
16154335818 |
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EmailValidator
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import get_template
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, Field, Layout, Submit
# Form for contacting Web-CDI team. Asks for basic contact information and test ID. Simple format.
class ContactForm(forms.Form):
    """Contact form: sender name/email, the test URL being reported, and a message."""
    contact_name = forms.CharField(label=_("Your Name"), required=True, max_length=51)
    contact_email = forms.EmailField(
        label=_("Your Email Address"),
        required=True,
        max_length=201,
        validators=[EmailValidator()],
    )
    contact_id = forms.CharField(
        label=_("Your Test URL"), required=True, max_length=101
    )
    content = forms.CharField(
        label=_("What would you like to tell us?"),
        required=True,
        widget=forms.Textarea(attrs={"cols": 80, "rows": 6}),
        max_length=1001,
    )
    def __init__(self, *args, **kwargs):
        """Pre-fill contact_id from *redirect_url* and build the crispy layout."""
        self.redirect_url = kwargs.pop("redirect_url", "")
        super().__init__(*args, **kwargs)
        self.fields["contact_id"].initial = self.redirect_url
        # Horizontal Bootstrap layout; contact_id is shown read-only
        # (form-control-plaintext) since it is filled from the URL.
        self.helper = FormHelper()
        self.helper.form_class = "form-horizontal"
        self.helper.label_class = "col-lg-3"
        self.helper.field_class = "col-lg-9"
        self.helper.layout = Layout(
            Field("contact_name"),
            Field("contact_email"),
            Field("contact_id", css_class="form-control-plaintext"),
            Field("content"),
            Div(
                Submit("submit", _("Submit")),
                css_class="col-lg-offset-3 col-lg-9 text-center",
            ),
        )
    def send_email(self):
        """Render the notification template and email it to the site admin.

        Reply-To is set to the submitter's address; call only after
        is_valid() so cleaned_data is populated.
        """
        cleaned_data = self.cleaned_data
        template = get_template("cdi_forms/administration_contact_email_template.txt")
        context = {
            "contact_name": cleaned_data['contact_name'],
            "contact_id": cleaned_data['contact_id'],
            "contact_email": cleaned_data['contact_email'],
            "form_content": cleaned_data['content'],
        }
        content = template.render(context)
        email = EmailMessage(
            "New contact form submission",
            content,
            settings.CONTACT_EMAIL,
            [settings.USER_ADMIN_EMAIL],
            headers={"Reply-To": cleaned_data['contact_email']},
        )
        email.send()
email.send() | langcog/web-cdi | webcdi/cdi_forms/forms/contact_form.py | contact_form.py | py | 2,511 | python | en | code | 7 | github-code | 36 |
43284653684 | import setuptools
from pathlib import Path

# Read the long description with an explicit encoding (platform default
# encodings corrupt non-ASCII READMEs on some systems).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

with open("requirements.txt", encoding="utf-8") as fh:
    # splitlines() + filtering drops the blank entries that the previous
    # bare split("\n") left in install_requires.
    REQUIREMENTS = [line.strip() for line in fh.read().splitlines() if line.strip()]

setuptools.setup(
    name="port_env",
    version="0.0.3",
    author="Moist-Cat",
    author_email="moistanonpy@gmail.com",
    description="Make environments portable",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Moist-Cat/port_env",
    scripts=["port_env"],
    install_requires=REQUIREMENTS,
    include_package_data=True,
    package_dir={"":"src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: MIT License",
    ],
)
| Moist-Cat/port_env | setup.py | setup.py | py | 898 | python | en | code | 0 | github-code | 36 |
20832781197 | from pocket_coffea.utils.configurator import Configurator
from pocket_coffea.lib.cut_definition import Cut
from pocket_coffea.lib.cut_functions import get_nObj_min, get_HLTsel, get_nBtagEq
from pocket_coffea.parameters.cuts import passthrough
from pocket_coffea.parameters.histograms import *
from pocket_coffea.parameters.btag import btag_variations
import workflow
from workflow import ttHbbBaseProcessor
from pocket_coffea.lib.columns_manager import ColOut
import cloudpickle
import custom_cut_functions
# Register these local modules so cloudpickle serializes them by value
# rather than by reference (their source travels with the pickled objects).
cloudpickle.register_pickle_by_value(workflow)
cloudpickle.register_pickle_by_value(custom_cut_functions)
from custom_cut_functions import *
import os
localdir = os.path.dirname(os.path.abspath(__file__))
# Loading default parameters
from pocket_coffea.parameters import defaults
default_parameters = defaults.get_default_parameters()
defaults.register_configuration_dir("config_dir", localdir+"/params")
# Overlay the analysis-specific YAML files on top of the defaults.
parameters = defaults.merge_parameters_from_files(default_parameters,
                                                  f"{localdir}/params/object_preselection.yaml",
                                                  f"{localdir}/params/triggers.yaml",
                                                  update=True)
cfg = Configurator(
parameters = parameters,
datasets = {
"jsons": [f"{localdir}/datasets/backgrounds_MC_TTbb_dileptonic_redirector.json"
],
"filter" : {
"samples": ["TTbbDiLeptonic"],
"samples_exclude" : [],
"year": ["2018"]
}
},
workflow = ttHbbBaseProcessor,
skim = [get_nObj_min(1, 200., "FatJet"),
get_HLTsel(primaryDatasets=["DoubleEle","EleMu","DoubleMu"])],
preselections = [dilepton_presel,
get_nObj_min(2,25,"LeptonGood")],
categories = {
"baseline": [passthrough],
"1b" : [ get_nBtagEq(1, coll="BJetGood")],
"2b" : [ get_nBtagEq(2, coll="BJetGood")],
"3b" : [ get_nBtagEq(3, coll="BJetGood")],
"4b" : [ get_nBtagEq(4, coll="BJetGood")]
},
weights = {
"common": {
"inclusive": ["genWeight","lumi","XS",
"pileup",
"sf_ele_reco", "sf_ele_id",
"sf_mu_id","sf_mu_iso",
"sf_btag", "sf_jet_puId",
],
"bycategory" : {
}
},
"bysample": {
}
},
variations = {
"weights": {
"common": {
"inclusive": [ "pileup",
"sf_ele_reco", "sf_ele_id",
"sf_mu_id", "sf_mu_iso", "sf_jet_puId",
],
"bycategory" : {
}
},
"bysample": {
}
},
},
variables = {
**ele_hists(coll="ElectronGood", pos=0),
**muon_hists(coll="MuonGood", pos=0),
**count_hist(name="nElectronGood", coll="ElectronGood",bins=3, start=0, stop=3),
**count_hist(name="nMuonGood", coll="MuonGood",bins=3, start=0, stop=3),
**count_hist(name="nJets", coll="JetGood",bins=8, start=0, stop=8),
**count_hist(name="nBJets", coll="BJetGood",bins=8, start=0, stop=8),
**jet_hists(coll="JetGood", pos=0),
**jet_hists(coll="JetGood", pos=1),
**jet_hists(coll="JetGood", pos=2),
**jet_hists(coll="JetGood", pos=3),
**jet_hists(coll="JetGood", pos=4),
**jet_hists(name="bjet",coll="BJetGood", pos=0),
**jet_hists(name="bjet",coll="BJetGood", pos=1),
**jet_hists(name="bjet",coll="BJetGood", pos=2),
**fatjet_hists(name="fatjet",coll="FatJetGood"),
**fatjet_hists(name="bbfatjetTight",coll="BBFatJetGoodT"),
**fatjet_hists(name="bbfatjetMedium",coll="BBFatJetGoodM"),
**fatjet_hists(name="bbfatjetLoose",coll="BBFatJetGoodL"),
# 2D plots
"jet_eta_pt_leading": HistConf(
[
Axis(coll="JetGood", field="pt", pos=0, bins=40, start=0, stop=1000,
label="Leading jet $p_T$"),
Axis(coll="JetGood", field="eta", pos=0, bins=40, start=-2.4, stop=2.4,
label="Leading jet $\eta$"),
]
),
"jet_eta_pt_all": HistConf(
[
Axis(coll="JetGood", field="pt", bins=40, start=0, stop=1000,
label="Leading jet $p_T$"),
Axis(coll="JetGood", field="eta", bins=40, start=-2.4, stop=2.4,
label="Leading jet $\eta$")
]
),
},
columns = {
"common": {},
"bysample": {
"TTbbDiLeptonic": {
"bycategory": {
"baseline": [
ColOut("JetGood", ["eta","pt","phi","btagDeepFlavB"]),
ColOut("FatJetGood", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"]),
ColOut("LeptonGood",["eta","pt","phi","pdgId"]),
ColOut("BJetGood", ["eta","pt","phi","btagDeepFlavB"]),
ColOut("BBFatJetGoodT", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"]),
ColOut("BBFatJetGoodM", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"]),
ColOut("BBFatJetGoodL", ["eta", "pt", "phi", "mass", "msoftdrop", "tau1", "tau2", "tau3", "tau4", "btagDDBvLV2", "deepTagMD_ZHbbvsQCD", "deepTagMD_ZHccvsQCD", "deepTagMD_HbbvsQCD", "deepTagMD_bbvsLight", "btagHbb"])
]
}
}
}
}
)
# Executor/runtime options consumed by the pocket_coffea runner (not by
# Configurator): where the jobs run and how they scale out on lxplus HTCondor.
run_options = {
    "executor"       : "dask/lxplus",
    "env"            : "singularity",
    "workers"        : 1,
    "scaleout"       : 50,
    "worker_image"   : "/cvmfs/unpacked.cern.ch/gitlab-registry.cern.ch/cms-analysis/general/pocketcoffea:lxplus-cc7-latest",
    "queue"          : "microcentury",
    "walltime"       : "00:40:00",
    "mem_per_worker" : "4GB", # GB
    "disk_per_worker" : "1GB", # GB
    "exclusive"      : False,
    # Events per processing chunk and retry budget for flaky grid files.
    "chunk"          : 400000,
    "retries"        : 50,
    "treereduction"  : 20,
    "adapt"          : False,
    "skipbadfiles"   : 10
    }
| ryanm124/AnalysisConfigs | configs/ttHbb/example_config.py | example_config.py | py | 6,758 | python | en | code | null | github-code | 36 |
30148633270 | import logging
import time
from datetime import datetime
import pytz
from flask import Flask
from flask import json
from github import Github
import commands
import envvariables
from sendToRegression import bucket, administrative_issue, close
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("FlaskRest")
handler = logging.FileHandler(filename='log.txt')
handler.setLevel(logging.INFO)
logger.addHandler(handler)
props = json.load(open("properties.json", 'r'))
app = Flask(__name__)
def run():
    """Poll GitHub forever, dispatching "cihu" commands on unbucketed PRs.

    Every ``props["schedule"]`` seconds the bot re-authenticates, scans all
    open pull requests of the configured org/repo, and feeds every issue
    comment whose body starts with "cihu" to ``handle_command``.
    """
    while True:
        logging.info('Polling started: %s', datetime.now(pytz.timezone('Europe/Paris')))
        # Re-create the client each cycle so a fresh token/env value is picked up.
        g = Github(envvariables.github)
        org = g.get_organization(props["org"])
        repo = org.get_repo(props["repo"])
        prs = repo.get_pulls("open")
        for pr in prs:
            # Only PRs not yet labelled bucket-a/bucket-b/gauntlet are processed.
            if check_for_labels(pr):
                comments = pr.get_issue_comments()
                for comment in comments:
                    # Command comments are identified by the "cihu" prefix.
                    if comment.body[0:4] == "cihu":
                        handle_command(comment, pr)
        time.sleep(props["schedule"])
def check_for_labels(pr):
    """Return True when the PR carries none of the bucketing labels.

    A PR already labelled bucket-a/bucket-b/gauntlet has been processed
    before and must not be picked up again by the poller.
    """
    handled = {"bucket-a", "bucket-b", "gauntlet"}
    names = (entry["name"] for entry in pr.raw_data["labels"])
    return not any(name in handled for name in names)
def handle_command(comment, pr):
    """Dispatch a "cihu:..." issue comment to the matching bot action.

    Expected comment shape is colon-separated: ``cihu:<command>[:<argument>]``,
    e.g. ``cihu:bucket:a`` or ``cihu:review:<issue-kind>``. Only users listed
    under ``allowedSender`` in properties.json may trigger commands.
    """
    allowed_senders = props["allowedSender"]
    if comment.user.login in allowed_senders:
        # params[0] is the "cihu" prefix already checked by the caller.
        # NOTE(review): a malformed comment (no colon) raises IndexError on
        # params[1] — confirm callers guarantee at least one separator.
        params = str(comment.body).split(":")
        if params[1] == commands.BUCKETING_COMMAND:
            bucket(pr.number, params[2])
        elif params[1] == "review":
            if params[2] == commands.ADMINISTRATIVE_ISSUE_COMMAND:
                administrative_issue(pr.number)
        elif params[1] == "close":
            close(pr.number)
if __name__ == '__main__':
run()
| peterkungl/bucketservice | FlaskRest.py | FlaskRest.py | py | 1,635 | python | en | code | 0 | github-code | 36 |
70992284585 | import json, hashlib, hmac, requests
def json_encode(data):
    """Serialize *data* to canonical compact JSON (sorted keys, no spaces).

    The exchange signs the exact byte sequence, so the encoding must be
    deterministic across calls.
    """
    compact_separators = (',', ':')
    return json.dumps(data, separators=compact_separators, sort_keys=True)
def sign(data, secret):
    """Return the hex HMAC-SHA256 signature of *data*'s canonical JSON form.

    *secret* must be a bytes-like key (the Bitkub API secret).
    """
    payload = json_encode(data)
    print('Signing payload: ' + payload)
    digest = hmac.new(secret, msg=payload.encode(), digestmod=hashlib.sha256)
    return digest.hexdigest()
class bitkub_caller:
    """Thin client for the Bitkub REST API (signed private endpoints)."""

    def __init__(self, config):
        """*config* must provide 'API_KEY' and 'API_SECRET' (both str)."""
        self.API_HOST = 'https://api.bitkub.com'
        self.API_KEY = config['API_KEY']
        # HMAC wants a bytes-like key; keep the secret as a bytearray.
        # (The original also called self.API_SECRET.extend(...) *before* the
        # attribute existed, which raised AttributeError on every
        # construction — that dead line is removed here.)
        self.API_SECRET = bytearray(config['API_SECRET'].encode())
        self.header = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-BTK-APIKEY': self.API_KEY,
        }

    def create_payload(self, data=None):
        """Attach the request signature to *data* in-place and return it."""
        signature = sign(data, secret=self.API_SECRET)
        data['sig'] = signature
        return data

    def get_json_response(self, path, payload=None):
        """GET *path*; return the decoded JSON body, or None on any error."""
        try:
            r = requests.get(url=self.API_HOST + path, headers=self.header,
                             data=json_encode(payload))
            response = r.json()
            return response
        except Exception as e:
            # Best-effort client: log and signal failure with None.
            print(e)
            return None

    def post_json_response(self, path, payload=None):
        """POST *path*; return the decoded JSON body, or None on any error."""
        try:
            r = requests.post(url=self.API_HOST + path, headers=self.header,
                              data=json_encode(payload))
            print(r.content)  # debug dump of the raw response body
            response = r.json()
            return response
        except Exception as e:
            print(e)
            return None

    def get_server_timestamp(self):
        """Return the exchange's server time (unix seconds), or 0 on error."""
        try:
            response = requests.get(self.API_HOST + '/api/servertime')
            ts = int(response.text)
            return ts
        except Exception as e:
            print(e)
            return 0

    def get_status(self):
        """Print the public API status endpoint's response."""
        path = '/api/status'
        response = self.get_json_response(path)
        print(response)

    def get_wallet(self):
        """Print the wallet of the authenticated account (signed POST)."""
        path = '/api/market/wallet'
        ts = self.get_server_timestamp()
        data = {
            'ts': ts
        }
        payload = self.create_payload(data)
        response = self.post_json_response(path, payload)
        print(response)

    def get_balance(self):
        """Print the balances of the authenticated account (signed POST)."""
        path = '/api/market/balances'
        ts = self.get_server_timestamp()
        data = {
            'ts': ts
        }
        payload = self.create_payload(data)
        print(payload)
        response = self.post_json_response(path, payload)
        print(response)
5310951357 |
def gen_ol():
    """Print Verilog decode wires ol0..olf, one per 4-bit value of o[4:7]."""
    for value in range(16):
        terms = [
            ('' if value & (1 << (3 - idx)) else '~') + f'o[{bit}]'
            for idx, bit in enumerate(range(4, 8))
        ]
        print(f'  wire ol{value:x} = ' + ' & '.join(terms) + ';')
def gen_ou():
    """Print Verilog decode wires ou0..ou7, one per 3-bit value of o[1:3]."""
    for value in range(8):
        terms = [
            ('' if value & (1 << (2 - idx)) else '~') + f'o[{bit}]'
            for idx, bit in enumerate(range(1, 4))
        ]
        print(f'  wire ou{value:x} = ' + ' & '.join(terms) + ';')
# Mnemonic list used below to stamp out one Verilog task stub per instruction.
all_insts = '''
AD AH AI AIO AND ANLZ AW AWM BAL BCR BCS BDR BIR CAL1 CAL2 CAL3 CAL4 CB CBS CD CH CI CLM CLR CS CVA CVS CW
DA DC DD DH DL DM DS DSA DST DW EBS EOR EXU FAL FAS FDL FDS FML FMS FSL FSS HIO INT LAD LAH LAW LB LCD
LCF LCFI LCH LCW LD LH LI LM LPSD LRP LS LW MBS MH MI MMC MSP MTB MTH MTW MW OR PACK PLM PLW PSM PSW RD
S SD SF SH SIO STB STD STFC STH STM STS STW SW TBS TDV TIO TTBS UNPK WAIT WD XPSD XW
'''

if __name__ == '__main__':
    # Earlier generators kept around for reuse; uncomment to emit decode wires.
    # gen_ol()
    # gen_ou()
    # Emit an empty `exec_<MNEMONIC>` task skeleton for every instruction.
    for i in all_insts.split():
        #print(f'{i}: exec_{i};')
        print(f'    task automatic exec_{i}; begin')
        print(f'        phase <= PCP2;')
        print(f'        end endtask;')
        print()
| msiddalingaiah/Sigma | Verilog/statemachine/gen.py | gen.py | py | 1,240 | python | en | code | 0 | github-code | 36 |
41386874756 | arr = []
# Global tallies: total flashes seen so far and number of simulated steps.
flash_counter = 0
step_counter = 0
# Parse the puzzle grid: each cell is a mutable pair
# [energy_level, has_flashed_this_step].
with open('input.txt') as f:
    for i in f:
        arr.append([[int(e), False] for e in i.strip()])
def stage2(array, pos=(0, 0)):
    """Resolve a possible flash at *pos*, cascading to neighbours.

    A cell whose energy exceeds 9 flashes: it is reset to 0, marked as
    flashed for this step, counted in the global flash_counter, and all
    cells of its 3x3 neighbourhood gain one energy, recursing into any
    neighbour pushed over 9 that has not flashed yet.
    """
    global flash_counter
    x, y = pos
    if array[y][x][1]:
        # Already flashed this step: absorb the bump without re-flashing.
        # NOTE(review): this increment looks redundant since stage3 resets
        # flashed cells to 0 at end of step — confirm it is intentional.
        array[y][x][0] += 1
        return
    elif array[y][x][0] > 9:
        flash_counter += 1
        array[y][x] = [0, True]
        # Bump the 3x3 neighbourhood, clipped at the grid edges (the cell
        # itself is included but protected by its flashed flag above).
        for i in range(y-1, y+2):
            if 0 <= i < len(array):
                for j in range(x-1, x+2):
                    if 0 <= j < len(array[i]):
                        array[i][j][0] += 1
                        if array[i][j][0] > 9 and not array[i][j][1]:
                            stage2(array, (j, i))
def stage3(array):
    """Clear flash flags after a step; return True if the whole grid flashed.

    Flashed cells are reset to [0, False] in place.
    """
    everyone_flashed = True
    for row_idx, row in enumerate(array):
        everyone_flashed = everyone_flashed and all(cell[1] for cell in row)
        for col_idx, cell in enumerate(row):
            if cell[1]:
                array[row_idx][col_idx] = [0, False]
    return everyone_flashed
# Main simulation loop (AoC 2021 day 11 part 2): step until every octopus
# flashes in the same step, then report the totals.
while(True):
    step_counter += 1
    # Phase 1: every cell gains one energy; flash flags start cleared.
    for i, e in enumerate(arr):
        arr[i] = [[j[0]+1, False] for j in e]
    # Phase 2: resolve flashes (cascades handled recursively by stage2).
    for y, j in enumerate(arr):
        for x, k in enumerate(j):
            stage2(arr, (x, y))
    # Phase 3: clear flags; stop at the first synchronised all-flash step.
    if(stage3(arr)):
        break
print(f"Flash counter:\t{flash_counter}\nStep counter:\t{step_counter}")
| Oskar-V/advent-of-code-2021 | 11/solution.py | solution.py | py | 1,249 | python | en | code | 0 | github-code | 36 |
6752387766 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QDialog
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtCore import pyqtSlot, QDate
from warehouse.views.editregtuff import Ui_Dialog
from supplyer.controllers.supplyercontroller import SupplyerController
from stuff.controllers.stuffcontroller import StuffController
from warehouse.controllers.warehousecontroller import WarehouseController
from supplyer.modules.selectstuffModule import SelectstuffModule
from lib.utils.messagebox import MessageBox
import user
import datetime
import re
class EditRegStuffModule(QDialog, Ui_Dialog):
def __init__(self, spid=None, paperno=None, papertype=0, autoid=None, parent=None):
super(EditRegStuffModule, self).__init__(parent)
self.setupUi(self)
if '28' not in user.powers:
self.close()
if user.powers['28'] == 0:
self.close()
self.power = '{:03b}'.format(user.powers['28'])
if self.power[1] == '0':
self.pushButton_accept.setVisible(False)
self.pushButton_cancel.setVisible(False)
self.ori_detail = dict()
self.new_detail = dict()
self.SC = SupplyerController()
self.WC = WarehouseController()
self.SFC = StuffController()
self.spid = spid
self.autoid = autoid
self.paperno = paperno
self.papertype = papertype
self.get_detail()
self.set_amount_validator()
self.get_producer_list()
self.get_location_list()
def get_detail(self):
if not self.autoid:
self.toolButton_more.setEnabled(True)
return
self.toolButton_more.setEnabled(False)
key_dict = {
'autoid': self.autoid
}
res = self.WC.get_stuffcheckinlist(
False, *VALUES_TUPLE_CHECK_IN_LIST, **key_dict
)
if len(res) != 1:
return
self.ori_detail = res[0]
self.lineEdit_stuff.setText(
self.ori_detail['stuffid'] + ' ' + self.ori_detail['stuffname']
)
self.label_spec.setText(self.ori_detail['spec'])
self.label_package.setText(self.ori_detail['package'])
self.lineEdit_amount.setText(str(self.ori_detail['amount']))
self.label_unit.setText(self.ori_detail['unit'])
self.lineEdit_batchno.setText(self.ori_detail['batchno'])
self.lineEdit_mbatchno.setText(self.ori_detail['mbatchno'])
self.dateEdit_makedate.setDate(self.ori_detail['makedate'])
self.dateEdit_invaliddate.setDate(self.ori_detail['expireddate'])
def set_amount_validator(self):
doubleValitor = QDoubleValidator()
doubleValitor.setBottom(0)
doubleValitor.setDecimals(3)
doubleValitor.setNotation(QDoubleValidator.StandardNotation)
self.lineEdit_amount.setValidator(doubleValitor)
def get_location_list(self):
location_list = self.WC.get_stuffcheckinlist(
True, *VALUES_TUPLE_LOCATION).distinct()
if len(location_list):
self.comboBox_location.addItems(location_list)
if len(self.ori_detail):
self.comboBox_location.setCurrentText(self.ori_detail['position'])
else:
self.comboBox_location.setCurrentText("")
def get_producer_list(self, sdid=None):
if not (self.autoid or sdid):
return
if sdid is None:
stuffid = self.ori_detail['stuffid']
key_dict_stuff = {'stuffid': stuffid}
stufffid_list = self.SFC.get_stuffdict(
True, *VALUES_TUPLE_SDID, **key_dict_stuff
)
if len(stufffid_list):
sdid = stufffid_list[0]
if sdid and self.spid:
key_dict_producer = {
'sdid': sdid,
'spid': self.spid
}
producer_list = self.SC.get_stuffsupplyer(
True, *VALUES_TUPLE_PRODUCER , **key_dict_producer
)
if len(producer_list):
self.comboBox_producer.addItems(producer_list)
if len(self.ori_detail):
self.comboBox_producer.setCurrentText(
self.ori_detail['producer']
)
@pyqtSlot(str)
def on_lineEdit_amount_textChanged(self, p_str):
try:
if p_str != self.ori_detail['amount']:
self.new_detail['amount'] = p_str
self.new_detail['piamount'] = p_str
else:
try:
del self.new_detail['amount']
del self.new_detail['piamount']
except KeyError:
pass
except KeyError:
self.new_detail['amount'] = p_str
self.new_detail['piamount'] = p_str
@pyqtSlot(str)
def on_comboBox_producer_currentTextChanged(self, p_str):
try:
if p_str != self.ori_detail['producer']:
self.new_detail['producer'] = p_str
else:
try:
del self.new_detail['producer']
except KeyError:
pass
except KeyError:
self.new_detail['producer'] = p_str
@pyqtSlot(str)
def on_comboBox_location_currentTextChanged(self, p_str):
try:
if p_str != self.ori_detail['position']:
self.new_detail['position'] = p_str
else:
try:
del self.new_detail['position']
except KeyError:
pass
except KeyError:
self.new_detail['position'] = p_str
@pyqtSlot(str)
def on_lineEdit_batchno_textChanged(self, p_str):
try:
if p_str != self.ori_detail['batchno']:
self.new_detail['batchno'] = p_str
else:
try:
del self.new_detail['batchno']
except KeyError:
pass
except KeyError:
self.new_detail['batchno'] = p_str
@pyqtSlot(str)
def on_lineEdit_mbatchno_textChanged(self, p_str):
try:
if p_str != self.ori_detail['mbatchno']:
self.new_detail['mbatchno'] = p_str
else:
try:
del self.new_detail['mbatchno']
except KeyError:
pass
except KeyError:
self.new_detail['mbatchno'] = p_str
@pyqtSlot(QDate)
def on_dateEdit_makedate_dateChanged(self, q_date):
try:
if type(self.ori_detail['makedate']) is str:
self.new_detail['makedate'] = q_date.toPyDate()
return
if q_date != QDate(self.ori_detail['makedate']):
self.new_detail['makedate'] = q_date.toPyDate()
else:
try:
del self.new_detail['makedate']
except KeyError:
pass
except KeyError:
self.new_detail['makedate'] = q_date.toPyDate()
@pyqtSlot(QDate)
def on_dateEdit_invaliddate_dateChanged(self, q_date):
try:
if type(self.ori_detail['expireddate']) is str:
self.new_detail['expireddate'] = q_date.toPyDate()
return
if q_date != QDate(self.ori_detail['expireddate']):
self.new_detail['expireddate'] = q_date.toPyDate()
else:
try:
del self.new_detail['expireddate']
except KeyError:
pass
except KeyError:
self.new_detail['expireddate'] = q_date.toPyDate()
@pyqtSlot()
def on_toolButton_more_clicked(self):
detail = SelectstuffModule(self.spid, self)
detail.selected.connect(self.set_stuff)
detail.show()
def set_stuff(self, p_int):
key_dict = {'autoid': p_int}
res = self.SFC.get_stuffdict(False, *VALUES_TUPLE_STUFF, **key_dict)
if not len(res):
return
stuff = res[0]
self.lineEdit_stuff.setText(stuff['stuffid'] + stuff['stuffname'])
self.label_spec.setText(stuff['spec'])
self.label_package.setText(stuff['package'])
self.label_unit.setText(stuff['spunit'])
self.dateEdit_makedate.setDate(user.now_date)
self.dateEdit_invaliddate.setDate(
user.now_date + datetime.timedelta(days=stuff['expireddays'])
)
self.new_detail['stuffid'] = stuff['stuffid']
self.new_detail['stuffname'] = stuff['stuffname']
self.new_detail['spec'] = stuff['spec']
self.new_detail['package'] = stuff['package']
self.new_detail['unit'] = stuff['spunit']
self.new_detail['stufftype'] = stuff['stufftype']
self.lineEdit_batchno.setFocus()
self.get_producer_list(p_int)
@pyqtSlot()
def on_pushButton_accept_clicked(self):
text = ''
if self.lineEdit_stuff.text() == '':
text = "物料不能为空!\n"
if self.lineEdit_amount.text() in ('', '0'):
text += "到货数量不能为空!\n"
if self.lineEdit_batchno.text() == '':
text += "进厂批号不能为空!\n"
if len(text) > 0:
message = MessageBox(
self, text="以下信息填写错误",
informative=text
)
message.show()
return
if len(self.new_detail):
if self.spid:
key_dict_supplyer = {'autoid': self.spid}
supplyer_list = self.SC.get_supply(
False, *VALUES_TUPLE_SUPPLYER, **key_dict_supplyer
)
if len(supplyer_list):
supplyer = supplyer_list[0]
self.new_detail['supid'] = supplyer['supid']
self.new_detail['supname'] = supplyer['supname']
self.new_detail['paperno'] = self.paperno
self.new_detail['papertype'] = self.papertype
self.new_detail['checkindate'] = user.now_date
res = self.WC.update_stuffcheckinlist(self.autoid, **self.new_detail)
self.accept()
@pyqtSlot()
def on_pushButton_cancel_clicked(self):
self.close()
VALUES_TUPLE_SDID = ('autoid',)
VALUES_TUPLE_PRODUCER = ('producer',)
VALUES_TUPLE_LOCATION = ('position',)
VALUES_TUPLE_SUPPLYER = ('supid','supname')
VALUES_TUPLE_CHECK_IN_LIST = (
"autoid", "stuffid", "stuffname", "spec", "package", "producer", "batchno",
"mbatchno", "unit", "amount", "makedate", "expireddate", "position", "supid"
)
VALUES_TUPLE_STUFF = (
'stuffid', 'stuffname', 'stufftype', 'spec', 'package', 'spunit',
"expireddays"
)
| zxcvbnmz0x/gmpsystem | warehouse/modules/editregstuffmodule.py | editregstuffmodule.py | py | 10,777 | python | en | code | 0 | github-code | 36 |
40795182425 | import asyncio
from concurrent.futures import ThreadPoolExecutor
import nest_asyncio
from discord import Message, File
from ImageGenerator import ImageGenerator
from wiezenlibrary.Game import Game
_executor = ThreadPoolExecutor(10)
nest_asyncio.apply()
class DiscordWiezen(Game):
def __init__(self, bot, parent):
self.stop_notifier = parent
super().__init__()
def stop(self):
self.stop_notifier.stop(self)
def update_table_images(self):
# msg: Message
# for msg in self.table_messages.values():
# if msg:
# # loop = asyncio.get_event_loop()
# asyncio.ensure_future(
# msg.delete()
# )
self.send_tables()
def show_cards(self, players: list):
img_gen = ImageGenerator(1)
for player in players:
img_gen.hand_to_image(player)
img_file = File(img_gen.get_output('hand').strip())
# loop = asyncio.get_event_loop()
player.send_message("Hier zijn uwer kaarten")
player.send_message(img_file, is_file=True)
def send_to(self, players: list, message: str or File, img=False):
for player in players:
if img:
file = File(message)
self.sendMsg(file, player)
else:
player.send_message(message)
def sendMsg(self, file, player):
msg = player.send_message(file, is_file=True)
self.table_messages[player] = msg
def send_tables(self):
ImageGenerator(1).generate_table(self.current_slag, self.players, self.teams)
file = ImageGenerator(1).get_output('table').strip()
self.send_to(self.players, file, img=True)
| FreekDS/De-Grote-Wiezen-Bot | bot/DiscordWiezen.py | DiscordWiezen.py | py | 1,751 | python | en | code | 1 | github-code | 36 |
41484161294 | from random import *
from Boat import Boat
from State import States
boatlist = []
for i in range(2):
boat = Boat(randint(1, 6), randint(1, 6))
print(boat.x, boat.y)
if i > 0:
for ent in boatlist:
if boat == ent:
boat = Boat(randint(1, 6), randint(1, 6))
boatlist.append(boat)
def areAllAlive():
    """Return True while at least one boat in boatlist is still afloat.

    NOTE(review): despite the name, the semantics is "is ANY boat alive" —
    the game loop depends on that, so only the documentation says so here.
    """
    return any(boat.isAlive() for boat in boatlist)
while areAllAlive():
x = int(input("Colonne : "))
y = int(input("Ligne : "))
for boat in boatlist:
if boat.state == States.Dead:
print("Coulé !")
elif boat.x == x and boat.y == y:
boat.setState(States.Dead)
print("Coulé !")
elif boat.x == x or boat.y == y:
print("En vue !")
else:
print("A l'eau !")
print("Bravo ! Vous avez coulé tous les navires !")
| xStagg/bataille_navale | bataille-navale.py | bataille-navale.py | py | 1,004 | python | en | code | 0 | github-code | 36 |
18306677513 | import asyncio
from loguru import logger
from mipa.ext.commands import Bot
from mipac import (
Note,
NotificationFollowRequest,
LiteUser,
ClientManager,
NotificationFollow,
)
from catline.adapters import QueueStorageJSONAdapter, QueueStorageRedisAdapter
from catline.queue import IFQueueStorageAdapter
from src.config import config
from src.utils.common import get_name
from src.di_container import injector
INITIAL_EXTENSIONS = [
{'path': 'src.cogs.follow', 'is_enable': True},
{'path': 'src.cogs.reminder', 'is_enable': True},
{'path': 'src.cogs.avatar_fix', 'is_enable': config.features.notfound_fixer},
]
async def follow_user(user: LiteUser, client: ClientManager):
await user.api.follow.action.add()
await client.note.action.send(
visibility='specified',
visible_user_ids=[user.id],
content=f'{user.api.action.get_mention()} さん、よろしくね!',
)
STRIKE = {}
class Akari(Bot):
def __init__(self, **kwargs):
super().__init__(**kwargs)
async def connect_channel(self):
await self.router.connect_channel(['main', 'global'])
async def setup_hook(self) -> None:
for cog in INITIAL_EXTENSIONS:
if cog['is_enable']:
await self.load_extension(cog['path'])
async def on_reconnect(self, ws):
logger.warning('サーバーとの接続をロストしました。再接続します。')
await self.connect_channel()
async def on_ready(self, ws):
logger.success(f'Connected {get_name(self.user)}')
await self.connect_channel()
async def on_note(self, note: Note):
logger.info(f'{get_name(note.author)}: {note.content}')
async def on_follow_request(self, follow_request: NotificationFollowRequest):
logger.info(f'{get_name(follow_request.user)}さんからフォローリクエストが届きました')
await follow_request.user.api.follow.request.action.accept()
logger.success('フォローリクエストを承認しました')
await follow_user(follow_request.user, self.client)
async def on_user_followed(self, follow: NotificationFollow):
logger.info(f'{get_name(follow.user)}さんからフォローされました')
await follow_user(follow.user, self.client)
async def main():
bot = Akari()
injector.binder.bind(IFQueueStorageAdapter, QueueStorageJSONAdapter if config.job_queue.type == 'json' else QueueStorageRedisAdapter(**config.redis.to_dict))
await bot.start(config.url, config.token)
if __name__ == '__main__':
asyncio.run(main())
| TeamBlackCrystal/akari | main.py | main.py | py | 2,637 | python | en | code | 2 | github-code | 36 |
24778042796 | """
From https://brian2.readthedocs.io/en/stable/resources/tutorials/3-intro-to-brian-simulations.html
An experiment to inject current into a neuron and change the amplitude randomly every 10 ms. Model that using a Hodgkin-Huxley type neuron.
"""
from brian2 import *
import matplotlib.pyplot as plt
start_scope()
# Parameters
area = 20000 * umetre**2
Cm = 1 * ufarad * cm**-2 * area
gl = 5e-5 * siemens * cm**-2 * area
El = -65 * mV
EK = -90 * mV
ENa = 50 * mV
g_na = 100 * msiemens * cm**-2 * area
g_kd = 30 * msiemens * cm**-2 * area
VT = -63 * mV
# The model
eqs_HH = '''
dv/dt = (gl*(El-v) - g_na*(m*m*m)*h*(v-ENa) - g_kd*(n*n*n*n)*(v-EK) + I)/Cm : volt
dm/dt = 0.32*(mV**-1)*(13.*mV-v+VT)/
(exp((13.*mV-v+VT)/(4.*mV))-1.)/ms*(1-m)-0.28*(mV**-1)*(v-VT-40.*mV)/
(exp((v-VT-40.*mV)/(5.*mV))-1.)/ms*m : 1
dn/dt = 0.032*(mV**-1)*(15.*mV-v+VT)/
(exp((15.*mV-v+VT)/(5.*mV))-1.)/ms*(1.-n)-.5*exp((10.*mV-v+VT)/(40.*mV))/ms*n : 1
dh/dt = 0.128*exp((17.*mV-v+VT)/(18.*mV))/ms*(1.-h)-4./(1+exp((40.*mV-v+VT)/(5.*mV)))/ms*h : 1
I : amp
'''
group = NeuronGroup(1, eqs_HH,
threshold='v > -40*mV',
refractory='v > -40*mV',
method='exponential_euler')
group.v = El
statemon = StateMonitor(group, 'v', record=True)
spikemon = SpikeMonitor(group, variables='v')
# we replace the loop with a run_regularly
group.run_regularly('I = rand()*50*nA', dt=10 * ms)
run(50 * ms)
plt.figure(figsize=(9, 4))
# we keep the loop just to draw the vertical lines
for l in range(5):
plt.axvline(l * 10, ls='--', c='k')
plt.axhline(El / mV, ls='-', c='lightgray', lw=3)
plt.plot(statemon.t / ms, statemon.v[0] / mV, '-b')
plt.plot(spikemon.t / ms, spikemon.v / mV, 'ob')
plt.xlabel('Time (ms)')
plt.ylabel('v (mV)')
plt.show()
| seankmartin/NeuroModelling | hodgkin_huxley.py | hodgkin_huxley.py | py | 1,776 | python | en | code | 0 | github-code | 36 |
13492120891 | def read_msh(file, flag_plot):
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
#custom functions
from read_gmsh_V1 import read_gmsh
from elarea import elarea
#%% Create a structure for the output and assign
class structtype():
pass
f = structtype()
#%%
# Read Mesh
msh = read_gmsh("mesh/"+file)
#We have first order mesh
itri = 1 #triangle element indices in the .msh class
ilin = 0 #line element indices in the .msh class
maxel = int(msh.maxel[itri])
maxnp = int(msh.maxnp)
nop = np.array(msh.nop[itri])-1 # -1 is due to index starting from 0 in python
cord = msh.cord[:,0:2]
cordx = cord[:,0]
cordy = cord[:,1]
physs = np.array(msh.phys_group[itri]) #triangular element material indices
physsl = np.array(msh.phys_group[ilin]) #line element material indices
nodel = (msh.nodel[itri])[itri] #number of nodes of an element
nopl = np.array(msh.nop[ilin])-1 #line elements
#material indices defined as in gmsh file
f.AIR = 1000
f.IRON1 = 1001
f.IRON2 = 1004
f.COIL1 = 1002
f.COIL1_neg = 1012
f.COIL2 = 1003
f.COIL2_neg = 1013
f.DIR = 2000
f.DIR_disp = 2001
#dirichlet nodes
indd = np.unique(nopl[physsl == f.DIR,:])
#%% confirm counter-clockwise element numbering for triangles
for i in range(0,maxel):
corde = cord[nop[i,:],:]
if elarea(corde)<0:
nop[i,:] = nop[i,[1,0,2]]
#%% Air-gap line
noplb = nopl[np.where(physsl==3000)]
maxlb = np.size(noplb,0)
#%% Assign the output variables
f.msh = msh
f.maxel = maxel
f.maxnp = maxnp
f.nop = nop
f.cord = cord
f.cordx = cordx
f.cordy = cordy
f.physs = physs
f.physsl = physsl
f.nodel = nodel
f.nopl = nopl
f.indd = indd;
#%% Plotting mesh
if flag_plot:
cmap = mpl.cm.jet
plt.figure()
plt.triplot(cordx, cordy, nop[np.where(physs==f.AIR)], lw = 1.0, color=cmap(100,100))
plt.triplot(cordx, cordy, nop[np.where(physs==f.IRON1)], lw = 1.0, color=cmap(1,1))
plt.triplot(cordx, cordy, nop[np.where(physs==f.IRON2)], lw = 1.0, color=cmap(1,1))
#plt.triplot(cordx, cordy, nop[np.where(physs==IRON2)], lw = 1.0, color=cmap(130,302))
plt.triplot(cordx, cordy, nop[np.where(physs==f.COIL1)], lw = 1.0, color=cmap(150,150))
plt.triplot(cordx, cordy, nop[np.where(physs==f.COIL1_neg)], lw = 1.0, color=cmap(150,150))
plt.triplot(cordx, cordy, nop[np.where(physs==f.COIL2)], lw = 1.0, color=cmap(200,200))
plt.triplot(cordx, cordy, nop[np.where(physs==f.COIL2_neg)], lw = 1.0, color=cmap(200,200))
#dirichlet nodes
plt.plot(cordx[indd], cordy[indd], 'bo');
plt.axis('equal')
plt.xlabel('X (m)')
plt.ylabel('Y (m)')
plt.show()
return f | aydinu1/UA-fem | fem_util/read_msh.py | read_msh.py | py | 3,122 | python | en | code | 0 | github-code | 36 |
37749838738 | # coding=utf-8
from nuntium.models import OutboundMessage
from mailit.management.commands.handleemail import AnswerForManageCommand
from global_test_case import GlobalTestCase as TestCase
from mailit.bin.handleemail import EmailHandler
class ParsingMailsWithAttachments(TestCase):
def setUp(self):
super(ParsingMailsWithAttachments, self).setUp()
self.outbound_message = OutboundMessage.objects.get(id=1)
self.outbound_message.outboundmessageidentifier.key = '4aaaabbb'
self.outbound_message.outboundmessageidentifier.save()
self.mail_with_attachments = ""
with open('mailit/tests/fixture/mail_with_attachments.txt') as f:
self.mail_with_attachments += f.read()
f.close()
self.handler = EmailHandler(answer_class=AnswerForManageCommand)
def test_handle_mail_with_attachments(self):
'''Handle mails with attachments creates some'''
email_answer = self.handler.handle(self.mail_with_attachments)
answer = email_answer.send_back()
self.assertTrue(email_answer.attachments)
self.assertTrue(answer.attachments.all())
def test_attachments_with_names(self):
'''When I get an attachment it should have names'''
email_answer = self.handler.handle(self.mail_with_attachments)
answer = email_answer.send_back()
self.assertTrue(answer.attachments.filter(name="fiera_parque.jpg"))
self.assertTrue(answer.attachments.filter(name="hello.pd.pdf"))
self.assertTrue(answer.attachments.filter(name="hello.docx"))
| ciudadanointeligente/write-it | mailit/tests/email_parser/email_with_attachments_parser_tests.py | email_with_attachments_parser_tests.py | py | 1,576 | python | en | code | 38 | github-code | 36 |
70177547624 | import re
if __name__ == '__main__':
    # Tiny interactive regex tester: read a string and a pattern, then show
    # the resulting match object.
    string = input('Please enter string')
    regexQuery = input('Please enter regex query')
    try:
        p = re.compile(regexQuery)
        # re.match only matches at the *beginning* of the string; p.search
        # would be the "anywhere in the string" variant.
        if p.match(string) is not None:
            print(p.match(string))
        else:
            print('Returns nothing')
    except Exception as e:
        # An invalid pattern raises re.error; report it instead of crashing.
        print(str(e))
| krishna-kumar456/Code-Every-Single-Day | solutions/regexquery.py | regexquery.py | py | 353 | python | en | code | 0 | github-code | 36 |
16147236974 | from typing import List
from app.movements.base import Special
from app.movements.constants import Attacks
from app.movements.utils import replace_values_string
from app.settings import BASSIC_ATTACK_ENERGY, PLAYER_ENERGY
class Fighter:
    """A combatant that translates raw input combinations into moves."""

    def __init__(self, name, specials: List[Special]) -> None:
        self.name = name
        self.specials = specials
        self.energy = PLAYER_ENERGY

    def get_moves(self, move):
        """Resolve *move* to a {'name', 'energy'} dict.

        Specials are matched first by their exact combination string.
        Otherwise the combination is normalised; punches/kicks cost the
        basic attack energy, plain movement is free.
        """
        special = self.__move_is_special(move)
        if special:
            return special
        result_move = replace_values_string(move)
        is_attack = (Attacks.P.value in result_move
                     or Attacks.K.value in result_move)
        energy = BASSIC_ATTACK_ENERGY if is_attack else 0
        return {
            "name": result_move,
            "energy": energy
        }

    def __move_is_special(self, combination):
        """Return the matching special's payload, or None if no match."""
        for special in self.specials:
            info = special(self.name)
            if info.combination == combination:
                return {
                    "name": info.name,
                    "energy": info.energy
                }
        return None
19262571802 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from datetime import datetime, timedelta
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot import inventory
from pokemongo_bot.item_list import Item
from pokemongo_bot.human_behaviour import sleep, action_delay
class HealPokemon(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def __init__(self, bot, config):
super(HealPokemon, self).__init__(bot, config)
self.bot = bot
self.config = config
self.enabled = self.config.get("enabled", False)
self.revive_pokemon = self.config.get("revive", True)
self.heal_pokemon = self.config.get("heal", True)
self.next_update = None
self.to_heal = []
self.warned_about_no_revives = False
self.warned_about_no_potions = False
def work(self):
if not self.enabled:
return WorkerResult.SUCCESS
# Check for pokemon to heal or revive
to_revive = []
self.to_heal = []
pokemons = inventory.pokemons().all()
pokemons.sort(key=lambda p: p.hp)
for pokemon in pokemons:
if pokemon.hp < 1.0:
self.logger.info("Dead: %s (%s CP| %s/%s )" % (pokemon.name, pokemon.cp, pokemon.hp, pokemon.hp_max))
to_revive += [pokemon]
elif pokemon.hp < pokemon.hp_max:
self.logger.info("Heal: %s (%s CP| %s/%s )" % (pokemon.name, pokemon.cp, pokemon.hp, pokemon.hp_max))
self.to_heal += [pokemon]
if len(self.to_heal) == 0 and len(to_revive) == 0:
if self._should_print:
self.next_update = datetime.now() + timedelta(seconds=120)
#self.logger.info("No pokemon to heal or revive")
return WorkerResult.SUCCESS
# Okay, start reviving pokemons
# Check revives and potions
revives = inventory.items().get(Item.ITEM_REVIVE.value).count
max_revives = inventory.items().get(Item.ITEM_MAX_REVIVE.value).count
normal = inventory.items().get(Item.ITEM_POTION.value).count
super_p = inventory.items().get(Item.ITEM_SUPER_POTION.value).count
hyper = inventory.items().get(Item.ITEM_HYPER_POTION.value).count
max_p = inventory.items().get(Item.ITEM_MAX_POTION.value).count
self.logger.info("Healing %s pokemon" % len(self.to_heal))
self.logger.info("Reviving %s pokemon" % len(to_revive))
if self.revive_pokemon:
if len(to_revive) > 0 and revives == 0 and max_revives == 0:
if not self.warned_about_no_revives:
self.logger.info("No revives left! Can't revive %s pokemons." % len(to_revive))
self.warned_about_no_revives = True
elif len(to_revive) > 0:
self.logger.info("Reviving %s pokemon..." % len(to_revive))
self.warned_about_no_revives = False
for pokemon in to_revive:
self._revive_pokemon(pokemon)
if self.heal_pokemon:
if len(self.to_heal) > 0 and (normal + super_p + hyper + max_p) == 0:
if not self.warned_about_no_potions:
self.logger.info("No potions left! Can't heal %s pokemon" % len(self.to_heal))
self.warned_about_no_potions = True
elif len(self.to_heal) > 0:
self.logger.info("Healing %s pokemon" % len(self.to_heal))
self.warned_about_no_potions = False
for pokemon in self.to_heal:
self._heal_pokemon(pokemon)
if self._should_print:
self.next_update = datetime.now() + timedelta(seconds=120)
self.logger.info("Done healing/reviving pokemon")
def _revive_pokemon(self, pokemon):
item = Item.ITEM_REVIVE.value
amount = inventory.items().get(item).count
if amount == 0:
self.logger.info("No normal revives left, using MAX revive!")
item = Item.ITEM_MAX_REVIVE.value
amount = inventory.items().get(item).count
if amount > 0:
response_dict_revive = self.bot.api.use_item_revive(item_id=item, pokemon_id=pokemon.unique_id)
action_delay(2, 3)
if response_dict_revive:
result = response_dict_revive.get('responses', {}).get('USE_ITEM_REVIVE', {}).get('result', 0)
revive_item = inventory.items().get(item)
# Remove the revive from the iventory
revive_item.remove(1)
if result is 1: # Request success
self.emit_event(
'revived_pokemon',
formatted='Revived {name}.',
data={
'name': pokemon.name
}
)
if item == Item.ITEM_REVIVE.value:
pokemon.hp = int(pokemon.hp_max / 2)
self.to_heal.append(pokemon)
else:
# Set pokemon as revived
pokemon.hp = pokemon.hp_max
return True
else:
self.emit_event(
'revived_pokemon',
level='error',
formatted='Failed to revive {name}!',
data={
'name': pokemon.name
}
)
return False
def _heal_pokemon(self, pokemon):
if pokemon.hp == 0:
self.logger.info("Can't heal a dead %s" % pokemon.name)
return False
# normal = inventory.items().get(Item.ITEM_POTION.value).count
# super_p = inventory.items().get(Item.ITEM_SUPER_POTION.value).count
# hyper = inventory.items().get(Item.ITEM_HYPER_POTION.value).count
max_p = inventory.items().get(Item.ITEM_MAX_POTION.value).count
# Figure out how much healing needs to be done.
def hp_to_restore(pokemon):
pokemon = inventory.pokemons().get_from_unique_id(pokemon.unique_id)
return pokemon.hp_max - pokemon.hp
if hp_to_restore(pokemon) > 200 and max_p > 0:
# We should use a MAX Potion
self._use_potion(Item.ITEM_MAX_POTION.value, pokemon)
pokemon.hp = pokemon.hp_max
return True
# Okay, now we see to heal as effective as possible
potions = [103, 102, 101]
heals = [200, 50, 20]
for item_id, max_heal in zip(potions, heals):
if inventory.items().get(item_id).count > 0:
while hp_to_restore(pokemon) > max_heal:
if inventory.items().get(item_id).count == 0:
break
action_delay(2, 3)
# More than 200 to restore, use a hyper first
if self._use_potion(item_id, pokemon):
pokemon.hp += max_heal
if pokemon.hp > pokemon.hp_max:
pokemon.hp = pokemon.hp_max
else:
break
# return WorkerResult.ERROR
# Now we use the least
potion_id = 101 # Normals first
while hp_to_restore(pokemon) > 0:
action_delay(2, 4)
if inventory.items().get(potion_id).count > 0:
if potion_id == 104:
self.logger.info("Using MAX potion to heal a %s" % pokemon.name)
if self._use_potion(potion_id, pokemon):
if potion_id == 104:
pokemon.hp = pokemon.hp_max
else:
pokemon.hp += heals[potion_id - 101]
if pokemon.hp > pokemon.hp_max:
pokemon.hp = pokemon.hp_max
else:
if potion_id < 104:
self.logger.info("Failed with potion %s. Trying next." % potion_id)
potion_id += 1
else:
self.logger.info("Failed with MAX potion. Done.")
break
elif potion_id < 104:
potion_id += 1
else:
self.logger.info("Can't heal a %s" % pokemon.name)
break
def _use_potion(self, potion_id, pokemon):
if pokemon.hp >= pokemon.hp_max:
# Already at MAX health
return True
potion_count = inventory.items().get(potion_id).count
healing = 0
if potion_count == 0:
return False
if potion_id == 101:
self.logger.info("Healing with a normal potion we have %s left." % (potion_count - 1))
healing = 20
if potion_id == 102:
self.logger.info("Healing with a Super potion we have %s left." % (potion_count - 1))
healing = 50
if potion_id == 103:
self.logger.info("Healing with a HYper potion we have %s left." % (potion_count - 1))
healing = 200
if potion_id == 104:
self.logger.info("Healing with a MAX potion we have %s left." % (potion_count - 1))
healing = pokemon.hp_max - pokemon.hp
response_dict_potion = self.bot.api.use_item_potion(item_id=potion_id, pokemon_id=pokemon.unique_id)
# Select potion
sleep(2)
if response_dict_potion:
result = response_dict_potion.get('responses', {}).get('USE_ITEM_POTION', {}).get('result', 0)
if result is 1 or result is 0: # Request success
potion_item = inventory.items().get(potion_id)
# Remove the potion from the iventory
potion_item.remove(1)
self.emit_event(
'healing_pokemon',
formatted='Healing {name} ({hp} -> {hp_new}/{hp_max}).',
data={
'name': pokemon.name,
'hp': pokemon.hp,
'hp_new': pokemon.hp + healing,
'hp_max': pokemon.hp_max
}
)
return True
elif result == 3:
# ERROR_CANNOT_USE
pokemon.hp = pokemon.hp_max
self.logger.info("Can't use this to heal the %s" % pokemon.name)
return False
else:
self.logger.info("Result was: %s" % result)
self.emit_event(
'healing_pokemon',
level='error',
formatted='Failed to heal {name} ({hp} -> {hp_new}/{hp_max})!',
data={
'name': pokemon.name,
'hp': pokemon.hp,
'hp_new': pokemon.hp + healing,
'hp_max': pokemon.hp_max
}
)
return False
def _should_print(self):
"""
Returns a value indicating whether the pokemon should be displayed.
:return: True if the stats should be displayed; otherwise, False.
:rtype: bool
"""
return self.next_update is None or datetime.now() >= self.next_update
| PokemonGoF/PokemonGo-Bot | pokemongo_bot/cell_workers/heal_pokemon.py | heal_pokemon.py | py | 11,496 | python | en | code | 3,815 | github-code | 36 |
1889249844 | '''
Created on 08.02.2016.
@author: Lazar
'''
def static_link_procesor(object):
classesString = "";
if not object.classes is None:
for x in object.classes.htmlClasses:
if hasattr(x, 'value'):
classesString += " " + x.key + "=\"" + x.value + "\"";
else:
classesString += " " + x.key;
object.classes = classesString
else:
object.classes = ""
object.classes = classesString
class StaticLink(object):
'''
classdocs
'''
def __init__(self, parent, stLink, classes):
self.parent = parent
self.stLink = stLink
self.classes = classes
def accept(self, visitor):
return visitor.visit_other_selector("staticLink", link=self.stLink, classes=self.classes)
| lazer-nikolic/GenAn | src/concepts/static_link.py | static_link.py | py | 798 | python | en | code | 2 | github-code | 36 |
17883182675 | from PySide2.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QSpacerItem, QSizePolicy, QPushButton
from PySide2.QtCore import QSize, QCoreApplication
class PMReportWidget(QWidget):
def __init__(self):
super().__init__()
_translate = QCoreApplication.translate
self.setObjectName("tab_report")
self.verticalLayout_2 = QVBoxLayout(self)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widget_2 = QWidget(self)
self.widget_2.setMaximumSize(QSize(16777215, 30))
self.widget_2.setObjectName("widget_2")
self.horizontalLayout_5 = QHBoxLayout(self.widget_2)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.horizontalLayout_4 = QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
spacerItem1 = QSpacerItem(
40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem1)
self.pushButton_browser_open = QPushButton(self.widget_2)
self.pushButton_browser_open.setMinimumSize(QSize(80, 0))
self.pushButton_browser_open.setObjectName("pushButton_browser_open")
self.horizontalLayout_4.addWidget(self.pushButton_browser_open)
self.horizontalLayout_5.addLayout(self.horizontalLayout_4)
self.verticalLayout_2.addWidget(self.widget_2)
self.horizontalLayout_result = QHBoxLayout()
self.horizontalLayout_result.setObjectName("horizontalLayout_result")
spacerItem2 = QSpacerItem(
20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
self.horizontalLayout_result.addItem(spacerItem2)
self.verticalLayout_2.addLayout(self.horizontalLayout_result)
self.pushButton_browser_open.setText(_translate("MainWindow", "浏览器打开"))
| pyminer/pyminer | pyminer/lib/ui/widgets/reportwidget.py | reportwidget.py | py | 1,899 | python | en | code | 77 | github-code | 36 |
3101135100 | class Node(object):
def __init__(self, data):
self.data = data
self.next = None
def length(node):
length = 0
if node is None:
return length
while node is not None:
length += 1
node = node.next
return length
def count(node,data):
count = 0
if node is None:
return count
while node is not None:
if data == node.data:
count = count + 1
node = node.next
return count
print(Node('99'))
print(length(Node('99')))
| lennystudy/LeetCode | python/LinkedLists.py | LinkedLists.py | py | 499 | python | en | code | 0 | github-code | 36 |
29431099884 | def search_visitor(check_name):
with open("방명록.txt","r",encoding="UTF-8") as file:
visitor = file.read()
if visitor.find(name) == -1:
# if name in visitor:
return False
return True
name = input('이름을 입력하세요 ( 예 : 홍길동 ) : ')
is_visit = search_visitor(name)
print(is_visit)
if is_visit :
print(f'{name}님 다시 방문해 주셔서 감사합니다. 즐거운 시간 되세요.')
else:
birthday=int(input('생년월일을 입력 하세요 ( 예 : 900327 ) : '))
with open("방명록.txt",'a',encoding='UTF-8') as file:
file.write(f"\n{name} {birthday}")
print(f"{name}님 환영합니다. 아래 내용을 입력하셨습니다.")
print(f"{name} {birthday}")
| hyunjaebong/NewDataScience | PythonBasic/chapter08/추가문제/6 search_visito.py | 6 search_visito.py | py | 770 | python | ko | code | 1 | github-code | 36 |
39780727033 | import os
import sys
import numpy as np
import pickle
from matplotlib import pyplot as plt
from tqdm import tqdm
ZOOMIN_BUFFER = 1.0
def compute_epoch(result):
return result['epoch'] + result['step_within_epoch'] / result['epoch_length']
def compute_avg_acc(result, standard_or_own_domain):
d = result['zeroshot_top1_acc_as_percentage'][standard_or_own_domain + '_text_template']
return np.mean([d[k] for k in sorted(d.keys())])
#returns epoch_list, acc_list
def grab_data(results, standard_or_own_domain):
epoch_result_pairs = sorted([(compute_epoch(results[k]), results[k]) for k in sorted(results.keys())])
epoch_list, result_list = list(zip(*epoch_result_pairs)) #a pair of lists instead of a list of pairs
acc_list = [compute_avg_acc(result, standard_or_own_domain) for result in result_list]
return epoch_list, acc_list
#will make 4 plots, toggling between zoomin vs zoomout and standard-prompt vs own-domain-prompt
#will put a gold star at the highest point of all the lines, with text of its y-value
#will put a grey dotted line at the starting value of the first sequence in results_list, without any label in the legend
#will try to put the legend below the plot
#zoomout will be organic. zoomin will just change ylim[0] to be the grey-line value minus some buffer
#will always plot accuracy as percentage, averaged across all domains
#will always plot x-axis as epochs
def make_plots(results_list, color_list, marker_list, linestyle_list, label_list, plot_prefix):
os.makedirs(os.path.dirname(plot_prefix), exist_ok=True)
for standard_or_own_domain in ['standard', 'own_domain']:
plt.clf()
plt.figure(figsize=[14.4, 4.8])
best_x = None
best_y = float('-inf')
baseline_y = None
for results, color, marker, linestyle, label in zip(results_list, color_list, marker_list, linestyle_list, label_list):
epoch_list, acc_list = grab_data(results, standard_or_own_domain)
plt.plot(epoch_list, acc_list, color=color, marker=marker, linestyle=linestyle, label=label)
if baseline_y is None:
baseline_y = acc_list[0]
if max(acc_list) > best_y:
best_y = max(acc_list)
best_x = epoch_list[np.argmax(acc_list)]
plt.plot(plt.xlim(), [baseline_y, baseline_y], linestyle='dashed', color='0.5')
plt.scatter([best_x], [best_y], s=320, marker='*', color='gold')
plt.text(best_x, best_y, '%.1f%%'%(best_y))
ax = plt.gca()
box = ax.get_position()
ax.set_position([box.x0, box.y0, 0.7 * box.width, box.height])
plt.legend(framealpha=1, bbox_to_anchor=(1,0.5), loc='center left')
plt.title('(' + standard_or_own_domain + ' prompt)')
plt.xlabel('epochs')
plt.ylabel('zero-shot accuracy (%)')
plt.savefig(plot_prefix + '-' + standard_or_own_domain + '-zoomout.png')
plt.ylim((baseline_y - ZOOMIN_BUFFER, best_y + ZOOMIN_BUFFER))
plt.savefig(plot_prefix + '-' + standard_or_own_domain + '-zoomin.png')
plt.clf()
| kjmillerCURIS/vislang-domain-exploration | clip_finetuning_plot_utils.py | clip_finetuning_plot_utils.py | py | 3,093 | python | en | code | 0 | github-code | 36 |
36324286850 | import json
import smtplib, ssl
import os
from email.message import EmailMessage
import db_functions
from datetime import datetime
## Helper functions Start
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'intentName': intent_name,
'slots': slots,
'slotToElicit': slot_to_elicit,
'message': message
}
}
def close(session_attributes, fulfillment_state, message):
response = {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
'fulfillmentState': fulfillment_state,
'message': message
}
}
return response
def delegate(session_attributes, slots):
return {
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Delegate',
'slots': slots
}
}
## Helper functions End
#Function for Raising Request
def RaiseRequest(intent_request):
userEmail=intent_request['currentIntent']['slots']['userEmail']
ticketSubject=intent_request['currentIntent']['slots']['ticketSub']
ticketBody=intent_request['currentIntent']['slots']['ticketBody']
session_attributes = intent_request['sessionAttributes']
mainusername=session_attributes['mainuser']
if intent_request['invocationSource']=='DialogCodeHook':
validation_result = Slot_Validation(intent_request['currentIntent']['slots'])
if not validation_result['isValid']:
slots = intent_request['currentIntent']['slots']
slots[validation_result['violatedSlot']] = None
return elicit_slot(
session_attributes,
intent_request['currentIntent']['name'],
slots,
validation_result['violatedSlot'],
validation_result['message']
)
return delegate(session_attributes, intent_request['currentIntent']['slots'])
try:
s=sendEmail(userEmail,mainusername,ticketSubject,ticketBody)
except:
return close(
session_attributes,
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Error Raised'
}
)
return close(
session_attributes,
'Fulfilled',
{
'contentType': 'PlainText',
'content': 'Ticket #{}, has been raised and will be resolved soon. Please check your mail for further info.'.format(s[1])
}
)
def AutoWelcomeMessage(intent_request):
user_name=intent_request['currentIntent']['slots']['userName']
session_attributes = intent_request['sessionAttributes']
session_attributes['mainuser']=user_name
return delegate(session_attributes, intent_request['currentIntent']['slots'])
def Slot_Validation(slot):
user_Email=slot['userEmail']
if(user_Email == os.environ['SENDER_EMAIL']):
return build_validation_result(
False,
'userEmail',
'This email ID {} is not valid. Please provide valid email ID'.format(user_Email)
)
return {'isValid': True}
def build_validation_result(isvalid, violated_slot, message_content):
return {
'isValid': isvalid,
'violatedSlot': violated_slot,
'message': {'contentType': 'PlainText', 'content': message_content}
}
#Send Email Function
def sendEmail(r_email,r_username,ticketSubject,ticketBody):
msg=EmailMessage()
host=os.environ['SMTPHOST']
port=os.environ['SMTPPORT']
sender_email=os.environ['SENDER_EMAIL']
sender_password=os.environ['SENDER_PASSWORD']
#generaating unique id
id_tuple=db_functions.query_record('select max(id) from requests_tab',1)
if(id_tuple[0]==None):
id=0
else:
id=id_tuple[0]
id=id+1
context=ssl.create_default_context()
message_to_user="""
Hi """+r_username+""",
Thanks for writing to us. We have received your Query. One of our representative will reply to you shortly.
Your Query: """+ticketBody+"""
Thanks,
Kraftcache Team
Note: This mail is an acknowledgment for your ticket raised with our Chatbot"""
msg.set_content(message_to_user)
msg['Subject']='Ticket #'+str(id)+' - ' + ticketSubject
msg['From']=sender_email
msg['To']=r_email
msg['Bcc']=os.environ['BCC']
msg['Reply-To']=r_email
#sending mail
with smtplib.SMTP_SSL(host,port,context=context) as server:
try:
server.login(sender_email,sender_password)
db_functions.insert_records(id,r_username,r_email,ticketSubject,ticketBody,'OPEN',datetime.now())
server.send_message(msg)
server.close()
status=['True',id]
except:
status=['False',id]
return status
def lambda_handler(event, context):
intent= event['currentIntent']['name']
if intent=='RaiseRequest':
return RaiseRequest(event)
if intent=='AutoWelcomeMessage':
return AutoWelcomeMessage(event) | anilreddy864/BBot | Lex_Code/lambda_function.py | lambda_function.py | py | 4,994 | python | en | code | 0 | github-code | 36 |
75076600744 | import os
import time
import json
"""
This Script is used to gather all the data from running all the combinations of inputs to ./main
It will then write the output to a file called "data.txt" which can be processed and changed
into json format using processData.py in the reports file.
"""
data_json = {}
testAmount = 0
Generations = ["100","1000","10000"]
GridSize = ["256", "512", "1024"]
OptimiseLevel = ["all", "levelOne", "levelTwo", "levelThree"]
ImplementationTypes = ["SerialOptimized", "Parallel", "GPU"]
Patterns = [
"PatternFiles/13enginecordership.txt",
"PatternFiles/Frothing_puffer.txt",
"PatternFiles/Gosper_glider_gun.txt",
"PatternFiles/Period144Oscillator.txt"]
for a in ImplementationTypes:
data_json[a] = {}
if a == "SerialOptimized":
os.chdir("../" + a)
if a == "Parallel":
os.chdir("../" + a)
OptimiseLevel = ["para", "para1", "para2", "para3"]
if a == "GPU":
os.chdir("../" + a)
OptimiseLevel = ["gpu"]
for i in OptimiseLevel:
data_json[a][i] = {}
os.system("make " + i)
for j in Generations:
data_json[a][i][j] = {}
for k in GridSize:
data_json[a][i][j][k] = {}
for l in Patterns:
title = l.split("/")[1]
data_json[a][i][j][k][title[:-4]] = {}
for m in range(0,6):
start_time = time.time()
os.system("./main " + l + ' ' + k + ' ' +
k + ' ' + j + ' ' + 'NoVis')
end_time = time.time() - start_time
testAmount = testAmount + 1
data_json[a][i][j][k][title[:-4]][m] = str(end_time)
os.chdir("../" + a)
with open("data.json", 'w') as dataFile:
json.dump(data_json, dataFile)
dataFile.close()
print(testAmount)
| DaveR27/Game-of-Life | DataGathering/GatherData.py | GatherData.py | py | 1,945 | python | en | code | 0 | github-code | 36 |
12633123289 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
# load libraries
import numpy as np
import scipy.sparse as sp
import cplex as cp
# In[4]:
def mixed_integer_linear_programming(direction, A, senses, b, c, l, u, types):
# create an empty optimization problem
prob = cp.Cplex()
# add decision variables to the problem including their coefficients in objective and ranges
prob.variables.add(obj = c.tolist(), lb = l.tolist(), ub = u.tolist(), types = types.tolist())
# define problem type
if direction == "maximize":
prob.objective.set_sense(prob.objective.sense.maximize)
else:
prob.objective.set_sense(prob.objective.sense.minimize)
# add constraints to the problem including their directions and right-hand side values
prob.linear_constraints.add(senses = senses.tolist(), rhs = b.tolist())
# add coefficients for each constraint
row_indices, col_indices = A.nonzero()
prob.linear_constraints.set_coefficients(zip(row_indices.tolist(), col_indices.tolist(), A.data.tolist()))
# solve the problem
print(prob.write_as_string())
prob.solve()
# check the solution status
print(prob.solution.get_status())
print(prob.solution.status[prob.solution.get_status()])
# get the solution
x_star = prob.solution.get_values()
obj_star = prob.solution.get_objective_value()
return(x_star, obj_star)
# In[7]:
def coin_distribution_problem(coins_file, M):
coins = np.loadtxt(coins_file)
N = coins.shape[0] # number of coins
# number of decision variables = number of coins * number of children
E = M * N
# number of constraints = number of coins + number of children
V = M + N
# money per child = total money // number of chilren
P = np.sum(coins) / M
print(P)
c = np.repeat(1, E)
b = np.concatenate((np.repeat(P, M), np.repeat(1, N)))
l = np.repeat(0, E)
u = np.repeat(1, E)
senses = np.repeat("E", V)
types = np.repeat("B", E)
aij = np.concatenate((np.tile(coins, M), np.repeat(1, E)))
row = np.concatenate((np.repeat(range(M), N), M + np.repeat(range(N), M)))
col = np.concatenate((np.array(range(E)).reshape(N, M).T.flatten(), range(E)))
A = sp.csr_matrix((aij, (row, col)), shape = (V, E))
X_star, obj_star = mixed_integer_linear_programming("maximize", A, senses, b, c, l, u, types)
return(np.array(X_star).reshape(N, M))
# In[8]:
X_star = coin_distribution_problem("coins.txt", 2)
print(X_star)
# In[ ]:
| berdogan20/Operations-Research-Problems | TheCoinDistributionProblem/Solution.py | Solution.py | py | 2,514 | python | en | code | 0 | github-code | 36 |
17702834387 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 1 19:59:15 2023
@author: rockerzega
"""
from clases import SimpleRNN, STData, RNN
from torch.utils.data import DataLoader
from funciones import fit, generador, RSME, predict, plot_series
# preparacion de la data simulada
n_steps = 50
series = generador(10000, n_steps + 1)
X_train, y_train = series[:7000, :n_steps], series[:7000, -1]
X_valid, y_valid = series[7000:9000, :n_steps], series[7000:9000, -1]
X_test, y_test = series[9000:, :n_steps], series[9000:, -1]
# Infomracion de la data
print(X_train.shape, y_train.shape)
# y_pred = X_test[:,-1]
dataset = {
'train': STData(X_train, y_train),
'eval': STData(X_valid, y_valid),
'test': STData(X_test, y_test, train=False)
}
dataloader = {
'train': DataLoader(dataset['train'], shuffle=True, batch_size=64),
'eval': DataLoader(dataset['eval'], shuffle=False, batch_size=64),
'test': DataLoader(dataset['test'], shuffle=False, batch_size=64)
}
rnn = SimpleRNN()
fit(rnn, dataloader)
y_pred = predict(rnn, dataloader['test'])
plot_series(X_test, y_test, y_pred.cpu().numpy())
print(RSME(y_test, y_pred.cpu()))
# Parametros de la RNN Simple
print(rnn.rnn.weight_hh_l0.shape,
rnn.rnn.weight_ih_l0.shape,
rnn.rnn.bias_hh_l0.shape,
rnn.rnn.bias_ih_l0.shape)
rnn = RNN()
# Parametros de la RNN completa
print(rnn.rnn.weight_hh_l0.shape,
rnn.rnn.weight_ih_l0.shape,
rnn.rnn.bias_hh_l0.shape,
rnn.rnn.bias_ih_l0.shape,
rnn.fc.weight.shape,
rnn.fc.bias.shape)
fit(rnn, dataloader)
print(RSME(y_test, y_pred.cpu())) | rockerzega/rnn-ejemplo | src/rnn-lib.py | rnn-lib.py | py | 1,606 | python | en | code | 0 | github-code | 36 |
20948647501 | import numpy as np
from sklearn.metrics import confusion_matrix
import settings
def evaluate_conf_mat(conf_mat):
ignore_label = settings.IGNORE_LABEL
# omit ignore label row and column from confusion matrix
if ignore_label >= 0 and ignore_label < settings.NUM_CLASSES:
row_omitted = np.delete(conf_mat, ignore_label, axis=0)
conf_mat = np.delete(row_omitted, ignore_label, axis=1)
# calculate metrics
acc = np.diag(conf_mat).sum() / conf_mat.sum()
acc_cls = np.diag(conf_mat) / conf_mat.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iou = np.diag(conf_mat) / (conf_mat.sum(axis=1) + conf_mat.sum(axis=0) - np.diag(conf_mat))
mean_iou = np.nanmean(iou)
freq = conf_mat.sum(axis=1) / conf_mat.sum()
fwavacc = (freq[freq > 0] * iou[freq > 0]).sum()
return {
'pAcc' : acc,
'mAcc': acc_cls,
'fIoU': fwavacc,
'mIoU': mean_iou,
'iou' : iou,
}
| ElhamGhelichkhan/semiseggan | metric.py | metric.py | py | 950 | python | en | code | 0 | github-code | 36 |
16146047995 | # report 1 (my way)
# report headings
print(f"ACCOUNT NO CUSTOMER NAME PHONE NO")
# intitialize counters and accumulators
cust_counter = 0
# open file
f = open("Customers.dat", "r")
# process each line in file
for line in f:
line_split = line.split(", ")
cust_num = line_split[0].strip()
cust_name = line_split[1].strip()
phone_num = line_split[4].strip()
# calculations (if any)
# print detail line
print_line = f"{cust_num:<5s} {cust_name:^18s} {phone_num:>8s}"
print(print_line)
# increment any counters and accumulators as needed
cust_counter += 1
# close the file
f.close()
# print summary or footer (analytics data)
print("-" * 43)
print(f"TOTAL CUSTOMER LISTED: {cust_counter:>02d}")
| sweetboymusik/Python | Lesson 33/reports.py | reports.py | py | 761 | python | en | code | 0 | github-code | 36 |
42425916848 | import os
from dotenv import load_dotenv
DEFAULT_GUNICORN_WORKERS = 4
DEFAULT_CONFIG_PATH = ".env"
ACCESS_TOKEN_EXPIRE_MINUTES = 30 # 30 minutes
REFRESH_TOKEN_EXPIRE_MINUTES = 60 * 24 * 7 # 7 days
ALGORITHM = "HS256"
load_dotenv(DEFAULT_CONFIG_PATH)
JWT_SECRET_KEY = os.environ["JWT_SECRET_KEY"]
JWT_REFRESH_SECRET_KEY = os.environ["JWT_REFRESH_SECRET_KEY"]
| IslomK/family_budget | family_budget/core/const.py | const.py | py | 364 | python | en | code | 3 | github-code | 36 |
20427901161 | # Written by P. Xydi, Feb 2022
######################################
# Import libraries
######################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.cm as cm
color_1 = cm.get_cmap("Set2")(2) # set blue
color_2 = cm.get_cmap("Set2")(1) # set orange
from sklearn.metrics import ConfusionMatrixDisplay
######################################
def sentence_distribution(dataset, label = 'training', to_plot = False):
'''
Plots the distribution of sentences in a given dataset
INPUTS:
- dataset: list, list of samples
- label: str, used in the title of the output plot
OUTPUT:
- histogram of the distribution of sentences in dataset
- nbr_sents : list, number of sentences per sample in dataset
'''
######################################
# Create empty list
nbr_sents = []
for i in range(len(dataset)):
nbr_sents.append(len(dataset[i]))
if to_plot:
# Plot the sentence distibution
# Barplot and font specifications
barplot_specs = {"color": color_1, "alpha": 0.7, "edgecolor": "grey"}
label_specs = {"fontsize": 12}
title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.03}
plt.figure(figsize=(8,4))
plt.hist(nbr_sents, bins = 20, **barplot_specs)
plt.xlabel('Nbr of sentences per sample', **label_specs)
plt.ylabel('Nbr of samples',**label_specs)
plt.title('Distribution of sentences in {} set'.format(label),**title_specs)
plt.show()
return np.sum(nbr_sents)
######################################
def plot_token_distribution(dataset, label):
'''
Plots the distribution of tokens in the sentences of a
given dataset.
INPUTS:
- dataset: list, list of samples
- label: str, used in the title of the output plot
OUTPUT:
- histogram of the distribution of tokens in dataset
- nbr_tokens: list, number of tokens per sentence in dataset
'''
######################################
# Create empty list
nbr_tokens = []
# Count tokens in sentences and append to list
for i in range(len(dataset)):
for j in range(len(dataset[i])):
nbr_tokens.append(len(dataset[i][j]))
# Plot the sentence distibution
# Barplot and font specifications
barplot_specs = {"color": "mediumpurple", "alpha": 0.7, "edgecolor": "grey"}
label_specs = {"fontsize": 12}
title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.03}
plt.figure(figsize=(8,4))
plt.hist(nbr_tokens, bins = 20, **barplot_specs)
plt.xlabel('Nbr of tokens per sentence', **label_specs)
plt.ylabel('Nbr of sentences',**label_specs)
plt.title('Distribution of tokens in {} set'.format(label),**title_specs)
plt.show()
return nbr_tokens
######################################
def target_sample_distribution(labels):
"""
Plots the distribution of samples in target variable (categorical)
Input:
- labels : list, list of target values
"""
######################################
w = pd.value_counts(labels)
# Barplot and font specifications
barplot_specs = {"color": color_2, "alpha": 0.7, "edgecolor": "grey"}
label_specs = {"fontsize": 12}
title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.02}
plt.figure(figsize=(8,4.5))
sns.barplot(x=w.index,y=w.values, **barplot_specs);
plt.ylabel('Counts',**label_specs);
plt.xticks(rotation=45)
plt.yscale('log')
plt.title('Sample distribution in target variable',**title_specs);
######################################
def plot_loss_accuracy_curves(history):
######################################
title_specs = {"fontsize": 16}
label_specs = {"fontsize": 14}
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
# Plot loss values
ax1.set_title('Validation loss: {:.4f}'.format(history.history['val_loss'][-1]))
ax1.plot(history.history['loss'], color =color_1, label='training set')
ax1.plot(history.history['val_loss'], color =color_2, label='validation set')
ax1.set_xlabel('Epochs',**label_specs)
ax1.set_ylabel('Loss',**label_specs)
ax1.set_ylim([0,None])
ax1.legend()
# plot accuracy values
ax2.set_title('Validation accuracy: {:.2f}%'.format(history.history['val_accuracy'][-1]*100))
ax2.plot(history.history['accuracy'], color =color_1, label='training set')
ax2.plot(history.history['val_accuracy'], color =color_2, label='validation set')
ax2.set_xlabel('Epochs',**label_specs)
ax2.set_ylabel('Accuracy',**label_specs)
ax2.set_ylim([None,1])
ax2.legend()
plt.tight_layout()
######################################
def plot_confusion_matrix(y, y_pred, labels, suptitle):
######################################
# Create two subplots
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
title_specs = {"fontsize": 14, "fontweight": "bold", "y": 1.03}
plt.suptitle(suptitle,**title_specs)
# Plots the standard confusion matrix
ax1.set_title("Confusion Matrix (counts)", y= 1.02)
ConfusionMatrixDisplay.from_predictions(y,
y_pred,
display_labels=labels,
cmap=plt.cm.Blues,
values_format='d',
ax=ax1)
ax1.set_xticklabels(labels = labels, rotation=90)
# Plots the normalized confusion matrix
ax2.set_title("Confusion Matrix (ratios)", y= 1.02)
ConfusionMatrixDisplay.from_predictions(y,
y_pred,
normalize="true",
display_labels=labels,
cmap=plt.cm.Blues,
values_format='.1g',
ax=ax2)
ax2.set_xticklabels(labels = labels, rotation=90)
plt.tight_layout() | pxydi/Named-Entity-Recognition | src/tools.py | tools.py | py | 6,287 | python | en | code | 0 | github-code | 36 |
27177310215 | #region libraries
import cv2
import numpy as np
#endregion
#region process
def process(img_path,template_path): # This Function takes the path and name of basic image and template image
img_bgr = cv2.imread(img_path) # read the image by opencv(cv2)
img_gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY) # convert the color of the image to gray
img_template = cv2.imread(template_path,0) # read the template image(pattern)
h,w = img_template.shape # it will get the height and width of the template image as h,w
match = cv2.matchTemplate(img_gray, img_template, cv2.TM_CCOEFF_NORMED) # This function will compare the template image with input image to find the template image in input image by CCOEDD_NORMED Method
threshold = 0.9 # we have to define a threshold for accuracy of matching
loc = np.where(match >= threshold) # we use numpy to recognize the accuracy in found template in input image , we definded the accuracy (threshold)
for points in zip(*loc[::-1]): # we need a loop to draw a rectangle for detected template and we need to zip the 'loc' to have all arrays together
img_bgr = cv2.rectangle(img_bgr, points, (points[0]+w ,points[1]+h), (0,255,0),1) # a rectangle will be drawn around the detected template
cv2.imshow('result', img_bgr) # the result will be shown by opencv (cv2)
#endregion
# Demo: highlight every occurrence of the pattern inside the sample image,
# then wait for a key press before closing the preview window.
process('images/image.jpg', 'images/pattern.jpg') # You can call the function and enter its input arguments to perform the operation
cv2.waitKey(0)  # block until any key is pressed
cv2.destroyAllWindows()
18909907329 | from prettytable import PrettyTable
class Database:
    """MySQL-backed inventory store for a graphics-card shop.

    Creates (if needed) and selects the given database, with two tables:
    ``products`` (graphics cards) and ``admins`` (login credentials).
    All query *values* are passed as bound parameters (``%s`` placeholders)
    instead of string interpolation, closing the SQL-injection holes the
    previous version had.
    """

    def __init__(self, database_name):
        import mysql.connector as m1

        self.var = "w"  # NOTE(review): appears unused; kept for backward compatibility
        # SECURITY: credentials are hard-coded; consider loading from config/env.
        self.conn = m1.connect(host="localhost", user="root", password="utkarsh")
        self.cursor = self.conn.cursor()
        # Identifiers (database names) cannot be bound as parameters, so the
        # name is interpolated here; it must come from trusted caller code only.
        self.cursor.execute("CREATE DATABASE IF NOT EXISTS %s" % database_name)
        self.cursor.execute("USE %s" % database_name)
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS products (product_id INTEGER unsigned primary key auto_increment, product_name VARCHAR(50) not null,quantity INTEGER unsigned NOT NULL,Company enum('asus','msi','asrock','gigabyte','inno3d') not null ,gpu_company enum('nvidea','amd') not null,price INTEGER unsigned NOT NULL,vram_gb INTEGER unsigned NOT NULL)"
        )
        self.cursor.execute(
            "CREATE TABLE IF NOT EXISTS admins(admin_id integer primary key auto_increment,admin_name VARCHAR(20) unique not null,pas VARCHAR(20) not null)"
        )
        # Column headers used when pretty-printing product query results.
        self.producttabletemplate = [
            "product id",
            "product name",
            "quantity",
            "graphics card seller",
            "gpu company",
            "price",
            "vram",
        ]
        try:
            # Seed the default admin; the UNIQUE constraint makes this raise
            # (and be ignored) on every run after the first.
            self.cursor.execute(
                "INSERT INTO admins (admin_name,pas) VALUES(%s,%s)", ("admin", "1234")
            )
            self.conn.commit()
        except Exception:
            pass

    def make_database_table(self, table):
        """Print the rows currently buffered in the cursor under *table* headers."""
        table = PrettyTable(table)
        for row in self.cursor:
            table.add_row(row)
        print(table)

    def add_admin(self, adminname, password):
        """Insert a new admin; report failure (e.g. duplicate name) instead of raising."""
        try:
            self.cursor.execute(
                "INSERT INTO admins (admin_name,pas) VALUES(%s,%s)",
                (adminname, password),
            )
            self.conn.commit()
        except Exception:
            print("something wrong happend.try again,maybe user name taken already")

    def delete(self, id):
        """Remove the product with the given primary key."""
        self.cursor.execute("DELETE FROM products where product_id=%s", (id,))
        self.conn.commit()

    def insert(self):
        """Interactively add a product unless one with the same name already exists."""
        productname = input("enter product name")
        self.cursor.execute(
            "SELECT * FROM products WHERE product_name = %s", (productname,)
        )
        # fetchall() both consumes the result set (required before the next
        # execute) and tells us whether the product already exists.
        if not self.cursor.fetchall():
            quantity = int(input("enter quantity of product"))
            Company = input(
                "enter company name ('asus','msi','asrock','gigabyte','inno3d')"
            )
            gpu_company = input("enter gpu_company('nvidea','amd')")
            price = input("enter price of product")
            vram = int(input("input vram: "))
            self.cursor.execute(
                "INSERT INTO products(product_name,quantity,Company,gpu_company,price,vram_gb) values(%s,%s,%s,%s,%s,%s)",
                (productname, quantity, Company, gpu_company, price, vram),
            )
            self.conn.commit()

    def showproductsbyvram(self, vram):
        """Print all products with exactly *vram* GB of memory."""
        self.cursor.execute("select * from products where vram_gb = %s", (vram,))
        self.make_database_table(self.producttabletemplate)

    def showproductsbygpu_company(self, gpu):
        """Print products by GPU chip maker ('nvidea' or 'amd', per the table enum)."""
        if gpu not in ["nvidea", "amd"]:
            print("no such gpu company available:")
            return
        self.cursor.execute(
            "select * from products where gpu_company = %s", (gpu,)
        )
        self.make_database_table(self.producttabletemplate)

    def showproductsbyseller(self, comp):
        """Print products made by a given board partner (seller)."""
        if comp not in ("asus", "msi", "asrock", "gigabyte", "inno3d"):
            print("no such seller available")
            return
        self.cursor.execute("select * from products where Company = %s", (comp,))
        self.make_database_table(self.producttabletemplate)

    def showproductsbybudget(self, maximum_price):
        """Print products strictly cheaper than *maximum_price*."""
        self.cursor.execute(
            "select * from products where price < %s", (maximum_price,)
        )
        self.make_database_table(self.producttabletemplate)

    def showproductsrecords(self):
        """Print every product row."""
        self.cursor.execute("select * from products")
        self.make_database_table(self.producttabletemplate)

    def showproductsrecordsbyname(self, name):
        """Print products whose name matches *name* case-insensitively."""
        self.cursor.execute(
            "select * from products where LOWER(product_name) = %s", (name.lower(),)
        )
        self.make_database_table(self.producttabletemplate)

    def updateprice(self, id):
        """Prompt for a new price and store it for product *id*."""
        price = input("enter price of product")
        self.cursor.execute(
            "UPDATE products SET price=%s WHERE product_id=%s", (price, id)
        )
        self.conn.commit()

    def updatequantity(self, id):
        """Prompt for a new quantity and store it for product *id*."""
        quantity = input("enter quantity of product")
        self.cursor.execute(
            "UPDATE products SET quantity=%s WHERE product_id=%s", (quantity, id)
        )
        self.conn.commit()
# Ad-hoc manual test: runs only when the file is executed directly.
if __name__ == "__main__":
    db = Database("graphics_shop")
    db.showproductsbybudget(2000000)
    # db.showproductsrecords()
    # db.updateprice(1)
    # db.updatequantity(1)
| Codineer/shop-management-sytem | database.py | database.py | py | 5,264 | python | en | code | 0 | github-code | 36 |
39479422820 | from django.contrib.auth.models import Group, User
from datetime import datetime
from django.utils import timezone
from schedule.periods import Day
from datetime import timedelta
from apps.policies.models import SchedulePolicyRule
from apps.services.models import Service
"""def get_current_events_users(calendar):
now = timezone.make_aware(datetime.now(), timezone.get_current_timezone())
result = []
day = Day(calendar.events.all(), now)
for o in day.get_occurrences():
if o.start <= now <= o.end:
usernames = o.event.title.split(',')
print usernames
for username in usernames:
result.append(User.objects.get(username=username.strip()))
return result
"""
def get_current_events_users(calendar):
    """Return the users on duty right now according to *calendar*.

    Event titles are comma-separated names; each name may be an auth
    Group name (all members are returned, tagged with ``came_from_group``)
    or a plain username.
    """
    current_time = timezone.now()
    on_duty = []
    today = Day(calendar.events.all(), current_time)
    for occurrence in today.get_occurrences():
        if not (occurrence.start <= current_time <= occurrence.end):
            continue
        for raw_name in occurrence.event.title.split(','):
            name = raw_name.strip()
            if Group.objects.filter(name=name).exists():
                for member in User.objects.filter(groups__name=name):
                    # Remember which group brought this user in.
                    member.came_from_group = name
                    on_duty.append(member)
            else:
                on_duty.append(User.objects.get(username=name))
    return on_duty
def get_events_users_inbetween(calendar, since, until):
    """Collect the people scheduled on *calendar* between *since* and *until*.

    Walks the range one day at a time; event titles are comma-separated
    names that may be either auth Group names or usernames.  Returns the
    values of a dict: one entry per name with ``start``/``person``/``end``/
    ``email`` keys.
    """
    delta = until - since
    result = {}
    added_users = []
    for i in range(delta.days + 1):
        that_day = since + timedelta(days=i)
        # Normalize naive datetimes so the occurrence comparison below works.
        if not timezone.is_aware(that_day):
            that_day = timezone.make_aware(that_day, timezone.get_current_timezone())
        day = Day(calendar.events.all(), that_day)
        for o in day.get_occurrences():
            if o.start <= that_day <= o.end:
                items = o.event.title.split(',')
                for item in items:
                    username = item.strip()
                    if Group.objects.filter(name=username):
                        for user in User.objects.filter(groups__name=username):
                            if user not in added_users:
                                # NOTE(review): the dict is keyed by the *group*
                                # name here, so multiple members of one group
                                # overwrite each other and only the last member
                                # survives in `result` (all are still tracked in
                                # `added_users`).  Possibly intended to key by
                                # user.username -- confirm before changing.
                                result[username] = {
                                    "start": o.start,
                                    "person": user.username,
                                    "end": o.end,
                                    "email": user.email
                                }
                                added_users.append(user)
                    else:
                        if username not in result.keys():
                            user_instance = User.objects.get(username=username)
                            result[username] = {
                                "start": o.start,
                                "person": username,
                                "end": o.end,
                                "email": user_instance.email
                            }
    return result.values()
def get_escalation_for_service(service):
    """Return the ordered, de-duplicated list of users to notify for *service*.

    Yields an empty list when the service has notifications disabled.
    Each policy rule can contribute users three ways: everyone currently on
    its schedule, an explicitly assigned user, and every member of an
    assigned group.  Order of first appearance is preserved.

    (The four debug ``print`` calls the previous version emitted on every
    lookup have been removed.)
    """
    result = []
    if service.notifications_disabled:
        return result
    for rule in SchedulePolicyRule.get_rules_for_service(service):
        if rule.schedule:
            for user in get_current_events_users(rule.schedule):
                if user not in result:
                    result.append(user)
        if rule.user_id and rule.user_id not in result:
            result.append(rule.user_id)
        if rule.group_id:
            for user in rule.group_id.user_set.all():
                if user not in result:
                    result.append(user)
    return result
def services_where_user_is_on_call(user):
    """Return Services whose escalation policy can reach *user*.

    Matches either an explicit per-user rule or a schedule whose event title
    contains the user.  NOTE(review): the ``icontains`` lookup is given the
    User object itself -- this appears to rely on its string representation
    matching the name used in event titles; confirm against how titles are
    written (see get_current_events_users above, which splits titles on commas).
    """
    from django.db.models import Q
    services = Service.objects.filter(
        Q(policy__rules__user_id=user) | Q(policy__rules__schedule__event__title__icontains=user)
    )
    return services
| openduty/openduty | apps/incidents/escalation_helper.py | escalation_helper.py | py | 4,082 | python | en | code | 121 | github-code | 36 |
import argparse
import itertools
import pathlib
import sys
import urllib
import urllib.parse

import docker
import tqdm

from compose import config
version = "0.8.0"
def _resolve_name(args, service):
if args.use_service_image_name_as_filename:
return urllib.parse.quote(service["image"], safe="")
return service["name"]
def save(args, client, service, print):
    """Export the docker image backing *service* to ``<output>/<name>.tar``.

    Exits the process with status 1 when the image is missing or ambiguous;
    skips silently (with a message) if the tar already exists and
    ``--overwrite`` was not given.  *print* is the progress-bar-safe writer.
    """
    image = service["image"]
    matches = [img for img in client.images.list() if image in img.tags]
    if not matches:
        print("{}: missed (pull, build or specify precisely image name)".format(image))
        sys.exit(1)
    if len(matches) > 1:
        # Several local images carry this tag fragment -- refuse to guess.
        candidates = ", ".join(set(itertools.chain.from_iterable(img.tags for img in matches)))
        print("{}: specify image name more precisely (candidates: {})".format(image, candidates))
        sys.exit(1)
    target = args.output / "{}.tar".format(_resolve_name(args, service))
    if target.exists() and not args.overwrite:
        print("{} skip ({} already exists)".format(image, target))
        return
    print("{} saving...".format(image))
    args.output.mkdir(parents=True, exist_ok=True)
    with target.open("wb") as out_file:
        for chunk in matches[0].save():
            out_file.write(chunk)
def load(args, client, service, print):
    """Import ``<input>/<name>.tar`` into docker and tag it as the service image."""
    image = service["image"]
    print("{} loading...".format(image))
    tar_path = args.input / "{}.tar".format(_resolve_name(args, service))
    with tar_path.open("rb") as tar_file:
        # images.load may yield several images; the first is the one we saved.
        loaded, *_ = client.images.load(tar_file)
        loaded.tag(image)
def parse_args():
    """Build the command-line interface (``save`` / ``load``) and parse argv."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", default=False, action="store_true", help="show version")
    parser.add_argument("--timeout", default=60, type=int,
                        help="docker connection timeout [default: %(default)s]")
    parser.add_argument("--use-service-image-name-as-filename", default=False, action="store_true",
                        help="Support legacy naming behavior")
    parser.add_argument("-f", "--file", default=None, type=pathlib.Path,
                        help="specify an alternate compose file")
    sub_commands = parser.add_subparsers(dest="command")
    sub_commands.required = True
    save_parser = sub_commands.add_parser("save")
    save_parser.set_defaults(function=save)
    save_parser.add_argument("-o", "--output", type=pathlib.Path, default=".",
                             help="output directory [default: %(default)s]")
    save_parser.add_argument("--overwrite", action="store_true", default=False,
                             help="overwrite if exist [default: %(default)s]")
    load_parser = sub_commands.add_parser("load")
    load_parser.set_defaults(function=load)
    load_parser.add_argument("-i", "--input", type=pathlib.Path, default=".",
                             help="input directory [default: %(default)s]")
    return parser.parse_args()
def gen_services(path):
    """Yield the resolved compose services defined in the file at *path*.

    Raises RuntimeError for any service that does not declare an image,
    since there is nothing to save or load for it.
    """
    base_dir = str(path.parent)
    env = config.environment.Environment.from_env_file(base_dir)
    details = config.find(base_dir, [path.name], env)
    for service in config.load(details).services:
        if "image" not in service:
            raise RuntimeError("Service {!r} have no 'image' field".format(service["name"]))
        yield service
def main():
    """Console entry point: locate the compose file, then save or load images."""
    args = parse_args()
    if args.version:
        print(version)
        return
    # No -f given: fall back to the two conventional compose file names.
    if args.file is None:
        files = ["docker-compose.yml", "docker-compose.yaml"]
    else:
        files = [args.file]
    for file in files:
        path = pathlib.Path(file)
        if not path.exists():
            continue
        path = path.resolve()
        services = list(gen_services(path))
        break
    else:
        # for/else: the loop never hit `break`, i.e. no candidate file existed.
        raise RuntimeError("Files does not exists {!r}".format(files))
    client = docker.from_env(timeout=args.timeout)
    viewed = set()
    with tqdm.tqdm(total=len(services)) as pbar:
        services.sort(key=lambda s: s["name"])
        for service in services:
            # Process each distinct image once, even if several services share it.
            if service["image"] not in viewed:
                args.function(args, client, service, print=pbar.write)
                viewed.add(service["image"])
            pbar.update(1)
| pohmelie/docker-compose-transfer | docker_compose_transfer/__init__.py | __init__.py | py | 3,985 | python | en | code | 1 | github-code | 36 |
20220878757 | import pathlib
prj_path = str(pathlib.Path(__file__).parent.parent.parent.resolve())
from advent_of_code.lib import parse as aoc_parse
from advent_of_code.lib import aoc
@aoc.pretty_solution(1)
def part1(data):
    """Multiply total forward distance by final depth.

    *data* is a sequence of (direction, amount) pairs where direction is
    'forward', 'up' or 'down'.
    """
    horizontal = 0
    depth = 0
    for direction, amount in data:
        if direction == 'forward':
            horizontal += amount
        elif direction == 'up':
            depth -= amount
        elif direction == 'down':
            depth += amount
    return horizontal * depth
@aoc.pretty_solution(2)
def part2(data):
    """Like part1, but 'up'/'down' steer an *aim*; 'forward' moves and dives.

    Returns final horizontal position times final depth.
    """
    position = 0
    depth = 0
    aim = 0
    for direction, amount in data:
        if direction == "forward":
            position += amount
            depth += aim * amount
        elif direction == "down":
            aim += amount
        elif direction == "up":
            aim -= amount
    return position * depth
def main():
    """Read the day-2 puzzle input and run both parts."""
    def parse_line(line):
        # Each line is "<direction> <amount>".
        direction, amount = line.split()
        return direction, int(amount)

    data = aoc_parse.map_input_lines(prj_path + '/year2021/input/day02.txt', parse_line)
    return part1(data), part2(data)


if __name__ == "__main__":
    main()
| Perruccio/advent-of-code | advent_of_code/year2021/solutions/day02.py | day02.py | py | 1,006 | python | en | code | 0 | github-code | 36 |
8228541339 | from models.pointnet import PointNetDenseCls
import torch
import torch.nn as nn
import torch.nn.functional as F
import hydra
import os
from datasets import kpnet
import logging
from itertools import combinations
import numpy as np
from tqdm import tqdm
def pdist(vectors):
    """Pairwise squared Euclidean distances between the rows of *vectors*.

    Uses the identity ||a-b||^2 = ||a||^2 - 2 a.b + ||b||^2 so the whole
    (n, n) matrix comes from one matrix product plus broadcast sums.
    """
    squared_norms = vectors.pow(2).sum(dim=1)
    gram = vectors.mm(torch.t(vectors))
    return squared_norms.view(1, -1) - 2 * gram + squared_norms.view(-1, 1)
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and mean.

    Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class PairSelector:
    """
    Strategy interface for mining training pairs from a batch.

    Implementations should return indices of positive pairs and negative
    pairs that will be passed to compute Contrastive Loss:
    return positive_pairs, negative_pairs
    """

    def __init__(self):
        pass

    def get_pairs(self, embeddings, labels):
        raise NotImplementedError


# reference: https://github.com/adambielski/siamese-triplet
class HardNegativePairSelector(PairSelector):
    """
    Emits every positive pair, but keeps only the hardest negatives: the
    negative pairs with the smallest embedding distance, as many of them
    as there are positive pairs.
    """

    def __init__(self, cpu=True):
        super(HardNegativePairSelector, self).__init__()
        self.cpu = cpu  # move embeddings to host memory before mining

    def get_pairs(self, embeddings, labels):
        if self.cpu:
            embeddings = embeddings.cpu()
        distances = pdist(embeddings)

        label_array = labels.cpu().data.numpy()
        candidate_pairs = torch.LongTensor(
            np.array(list(combinations(range(len(label_array)), 2))))

        same = label_array[candidate_pairs[:, 0]] == label_array[candidate_pairs[:, 1]]
        positive_pairs = candidate_pairs[same.nonzero()]
        negative_pairs = candidate_pairs[(~same).nonzero()]

        neg_dist = distances[negative_pairs[:, 0], negative_pairs[:, 1]]
        neg_dist = neg_dist.cpu().data.numpy()
        # argpartition: indices of the len(positive_pairs) smallest distances.
        hardest = np.argpartition(neg_dist, len(positive_pairs))[:len(positive_pairs)]
        return positive_pairs, negative_pairs[torch.LongTensor(hardest)]
# reference: https://github.com/adambielski/siamese-triplet
class OnlineContrastiveLoss(nn.Module):
    """
    Online Contrastive loss.

    Takes a batch of embeddings and corresponding labels.  Pairs are mined
    by ``pair_selector`` (given embeddings and targets, it returns indices
    of positive and negative pairs).  Positives are pulled together;
    negatives are pushed apart up to ``margin`` (or up to a per-label-pair
    distance when ``mean_distance`` is supplied).
    """

    def __init__(self, margin, pair_selector, mean_distance=None):
        super(OnlineContrastiveLoss, self).__init__()
        self.margin = margin
        self.pair_selector = pair_selector
        self.mean_distance = mean_distance[0].cuda() if mean_distance is not None else None

    def forward(self, embeddings, target):
        positive_pairs, negative_pairs = self.pair_selector.get_pairs(embeddings, target)
        if embeddings.is_cuda:
            positive_pairs = positive_pairs.cuda()
            negative_pairs = negative_pairs.cuda()
        pos_a, pos_b = positive_pairs[:, 0], positive_pairs[:, 1]
        neg_a, neg_b = negative_pairs[:, 0], negative_pairs[:, 1]
        positive_loss = (embeddings[pos_a] - embeddings[pos_b]).pow(2).sum(1)
        # Label pair of each negative; used to index mean_distance when set.
        label_pair = (tuple(target[neg_a].tolist()), tuple(target[neg_b].tolist()))
        neg_dist = ((embeddings[neg_a] - embeddings[neg_b]).pow(2).sum(1) + 1e-6).sqrt()
        if self.mean_distance is not None:
            negative_loss = F.relu(self.mean_distance[label_pair] - neg_dist).pow(2)
        else:
            negative_loss = F.relu(self.margin - neg_dist).pow(2)
        return torch.cat([positive_loss, negative_loss], dim=0).mean()
@hydra.main(config_path='config', config_name='config')
def main(cfg):
    """Train PointNet keypoint embeddings with an online contrastive loss.

    For every model in a batch, the per-point embeddings at the annotated
    keypoint indices are gathered and labelled by keypoint slot; the loss
    pulls same-slot embeddings together and pushes hard negatives apart.
    Saves a checkpoint after every epoch.
    """
    logger = logging.getLogger(__name__)
    train_dataset = kpnet.KeypointDataset(cfg)
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=True)
    model = PointNetDenseCls(feature_transform=True, cfg=cfg).cuda()
    logger.info('Start training on 3D embeddings')
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=1e-3
    )
    criterion = OnlineContrastiveLoss(1., HardNegativePairSelector())
    meter = AverageMeter()
    for epoch in range(cfg.max_epoch + 1):
        train_iter = tqdm(train_dataloader)
        # Training
        meter.reset()
        model.train()
        # NOTE(review): the inner `for i in range(cfg.batch_size)` below
        # shadows this batch index `i`; harmless today (the outer i is not
        # used afterwards) but worth renaming.
        for i, (pc, kp_idxs) in enumerate(train_iter):
            pc, kp_idxs = pc.cuda(), kp_idxs.cuda()
            # Per-point embeddings, indexed below as outputs[model][point].
            outputs = model(pc.transpose(1, 2))
            embeddings = []
            labels = []
            for i in range(cfg.batch_size):
                embedding_model = outputs[i]
                keypoints = kp_idxs[i]
                for idx in range(len(keypoints)):
                    kp_idx = keypoints[idx]
                    if kp_idx < 0:
                        # Negative index marks a missing/padded keypoint.
                        continue
                    embedding_kp = embedding_model[kp_idx]
                    embeddings.append(embedding_kp)
                    labels.append(idx)  # label = keypoint slot, shared across models
            embeddings = torch.stack(embeddings)
            labels = torch.tensor(labels).cuda()
            loss = criterion(embeddings, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_iter.set_postfix(loss=loss.item())
            meter.update(loss.item())
        logger.info(
            f'Epoch: {epoch}, Average Train loss: {meter.avg}'
        )
        torch.save(model.state_dict(), f'epoch{epoch}.pth')


if __name__ == '__main__':
    main()
| qq456cvb/SemanticTransfer | train_emb.py | train_emb.py | py | 6,581 | python | en | code | 11 | github-code | 36 |
2251372103 | from typing import List
import mlflow
import pandas as pd
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from zenml.steps import BaseParameters, Output, step
class EvaluateClassifierConfig(BaseParameters):
    """Evaluation parameters consumed by the evaluate_classifier step."""
    # (height, width, channels) fed to the image data generator.
    # NOTE(review): annotated List[int] but the default is a tuple; it is
    # only ever indexed, so both work -- tighten the annotation if desired.
    input_shape: List[int] = (224, 224, 3)
    batch_size: int = 4
@step(enable_cache=False, experiment_tracker="local_mlflow_tracker")
def evaluate_classifier(
    config: EvaluateClassifierConfig, model: tf.keras.Model, test_df: pd.DataFrame
) -> Output(test_acc=float):
    """Evaluate *model* on the images listed in *test_df* and return accuracy.

    *test_df* must provide 'Filepath' and 'Label' columns.  The accuracy is
    also logged to the MLflow tracker configured on the step.
    """
    # Test data generator
    test_generator = ImageDataGenerator()
    test_images = test_generator.flow_from_dataframe(
        dataframe=test_df,
        x_col="Filepath",
        y_col="Label",
        target_size=(config.input_shape[0], config.input_shape[1]),
        color_mode="rgb",
        class_mode="categorical",
        batch_size=config.batch_size,
        shuffle=False,  # keep evaluation order deterministic
    )
    # results[1] is taken to be accuracy -- assumes the model was compiled
    # with accuracy as its first metric (TODO confirm at the training step).
    results = model.evaluate(test_images, verbose=1)
    mlflow.log_metric("Test accuracy", results[1])
    print("Model performance on Test Set:")
    print("Accuracy on Test Set: {:.2f}".format(results[1]))
    return results[1]
| thbinder/mlops_sea_animal_classification | src/domain/steps/mlflow_evaluator.py | mlflow_evaluator.py | py | 1,170 | python | en | code | 4 | github-code | 36 |
24680231703 | # Bank account examples (with data) using decimal instead of floating
# point numbers
from decimal import *
class Account(object):
    """A simple bank account holding its balance as a Decimal.

    All amounts are quantized to two decimal places using the active decimal
    context's rounding (ROUND_HALF_EVEN by default, i.e. banker's rounding),
    which avoids binary floating-point drift.

    Attributes:
        name (str): The account holder's name.

    Methods:
        deposit: Adds a positive amount to the balance; returns the
            (possibly unchanged) balance.
        withdraw: Removes a valid amount from the balance and returns it;
            returns Decimal('0.00') when the request is invalid.
        show_balance: Prints the account name and current balance.
    """

    # Quantization template: two decimal places for all monetary values.
    _qb = Decimal('0.00')

    def __init__(self, name: str, opening_balance: float = 0.0):
        """Create an account and print a confirmation with its balance."""
        self.name = name
        self._balance = Decimal(opening_balance).quantize(Account._qb)
        print("Account created for {}".format(name), end='')
        self.show_balance()

    def deposit(self, amount: float) -> Decimal:
        """Add *amount* to the balance if it is positive.

        Amounts <= 0 are silently ignored.  Returns the resulting balance
        (unchanged when the deposit was rejected).
        """
        decimal_amount = Decimal(amount).quantize(Account._qb)
        if decimal_amount > Account._qb:
            self._balance = self._balance + decimal_amount
            print("{} deposited".format(decimal_amount))
        return self._balance

    def withdraw(self, amount: float) -> Decimal:
        """Remove *amount* from the balance and return the amount withdrawn.

        The withdrawal must be positive and no larger than the current
        balance; otherwise nothing is withdrawn and Decimal('0.00') is
        returned.  (Previously a float 0.0 was returned on failure, which
        contradicted the annotated Decimal return type; Decimal('0.00')
        still compares equal to 0.0 for existing callers.)
        """
        decimal_amount = Decimal(amount).quantize(Account._qb)
        if Account._qb < decimal_amount <= self._balance:
            self._balance = self._balance - decimal_amount
            print("{} withdrawn".format(decimal_amount))
            return decimal_amount
        else:
            # Message corrected: the amount must NOT exceed the balance.
            print("The amount must be greater than zero and no more than your account balance")
            return Account._qb

    def show_balance(self) -> None:
        """Print the account instance's current name and balance."""
        print("Balance on account {} is {}".format(self.name, self._balance))
# tresting code to be run if file not run as module
if __name__ == '__main__':
scott = Account("Scott")
scott.deposit(10.1)
scott.deposit(0.1)
scott.deposit(0.1)
scott.withdraw(0.3)
scott.withdraw(0)
scott.show_balance()
print("*" * 80) | scottherold/python_refresher_8 | RollingBack/rollback2.py | rollback2.py | py | 2,587 | python | en | code | 0 | github-code | 36 |
15371935865 | from RsSmw import *
import json
# Read connection settings from config.json and open the SCPI session at
# import time; the module-level `generator` is shared by the functions below.
try:
    with open ("config.json") as config_f:
        RsSmw.assert_minimum_version('5.0.44')
        config = json.load(config_f)
        IP_ADDRESS_GENERATOR = config["IP_ADDRESS_GENERATOR"]
        PORT = config["PORT"]
        CONNECTION_TYPE = config["CONNECTION_TYPE"]
        # TRACE_FILE / MEASURE_TIME are loaded here but not used in this module.
        TRACE_FILE = config["TRACE_FILE"]
        MEASURE_TIME = config["MEASURE_TIME"]
        resource = f'TCPIP::{IP_ADDRESS_GENERATOR}::{PORT}::{CONNECTION_TYPE}' # Resource string for the device
        generator = RsSmw(resource, True, True, "SelectVisa='socket'")
    config_f.close()  # redundant: the `with` block already closed the file
except FileNotFoundError:
    # Message is Polish for "Configuration file missing."
    print("Brak pliku konfiguracyjnego.")
    exit()
def com_check():
    """Enable per-command SYST:ERR? checking and select HW instance A (RF out A)."""
    # Driver's instrument status checking ( SYST:ERR? ) after each command (default value is True):
    generator.utilities.instrument_status_checking = True
    # The generator object uses the global HW instance one - RF out A
    generator.repcap_hwInstance_set(repcap.HwInstance.InstA)
def meas_prep(set: bool, mode : enums.FreqMode, amplitude : int, freq : int):
    """Configure RF output state, frequency mode, level and CW frequency.

    Args:
        set: RF output on/off state (parameter name shadows the builtin).
        mode: frequency mode, e.g. enums.FreqMode.CW.
        amplitude: output level in dBm.
        freq: fixed frequency in Hz.

    NOTE(review): this closes the session at the end, so no further SCPI
    commands can be issued through `generator` afterwards -- confirm intended.
    """
    generator.output.state.set_value(set)
    generator.source.frequency.set_mode(mode)
    generator.source.power.level.immediate.set_amplitude(amplitude)
    generator.source.frequency.fixed.set_value(freq)
    print(f'Channel 1 PEP level: {generator.source.power.get_pep()} dBm')
    # Direct SCPI interface:
    response = generator.utilities.query_str('*IDN?')
    print(f'Direct SCPI response on *IDN?: {response}')
    generator.close()
if __name__ == "__main__":
    com_check()
    # Enable RF output in CW mode at -20 dBm, 23 GHz.
    meas_prep(True, enums.FreqMode.CW, -20, 23E9)
    exit()
8196850520 | from django.contrib import admin
from .models import User
class UserAdmin(admin.ModelAdmin):
    """Admin configuration for the custom User model."""
    # Columns shown in the change-list view.
    list_display = (
        'pk', 'role', 'username', 'email',
        'first_name', 'last_name',
    )
    # Fields matched by the admin search box.
    search_fields = ('username', 'email',)
    # Right-hand sidebar filters.
    list_filter = ('email', 'username')
admin.site.register(User, UserAdmin)
| lojiver/foodgram-project | backend/foodgram/users/admin.py | admin.py | py | 323 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.