blob_id stringlengths 40 40 | repo_name stringlengths 5 127 | path stringlengths 2 523 | length_bytes int64 22 3.06M | score float64 3.5 5.34 | int_score int64 4 5 | text stringlengths 22 3.06M |
|---|---|---|---|---|---|---|
1683cfe20acc2fdc633b4d5cdb92295c2793fa0a | codewithgauri/HacktoberFest | /python/Leetcode_1371_Find_the_Longest_Substring_Containing_Vowels_in_Even_Counts.py | 1,687 | 4.0625 | 4 | '''
Given the string s, return the size of the longest substring containing each vowel an even number of times. That is, 'a', 'e', 'i', 'o', and 'u' must appear an even number of times.
Example 1:
Input: s = "eleetminicoworoep"
Output: 13
Explanation: The longest substring is "leetminicowor" which contains two each of the vowels: e, i and o and zero of the vowels: a and u.
Example2:
Input: s = "leetcodeisgreat"
Output: 5
Explanation: The longest substring is "leetc" which contains two e's.
'''
'''
This can be solved using the concept of bitmasking:
1. To solve this question in O(n) time, realise that a 5-bit mask of the vowels ("00000") can be used. Since we only need to
check whether the count of each vowel is odd or even, we can use 1/0 to denote odd and even parity.
2. Now suppose we have curr = "00010" at index j, i.e. the count of one vowel is odd and the counts of the others are even. If we
encounter the same mask again at index i, then we can say that:
the count of every vowel in s[j+1:i] (i inclusive, j exclusive) is even.
3. Therefore the length becomes "i - j". This can be compared with max_len.
'''
class Solution:
    '''
    Time Complexity O(n)
    Space Complexity O(1) (there are only 5 vowels)
    '''
    # Bit i of the mask tracks the parity (odd/even) of the i-th vowel's count.
    # (The original used 1 << {...,"u":32} >> 1, which mapped 'u' to bit 31 —
    # it worked by accident but was inconsistent and unreadable.)
    _VOWEL_BIT = {"a": 1, "e": 2, "i": 4, "o": 8, "u": 16}

    @staticmethod
    def findTheLongestSubstring(s):
        """Return the length of the longest substring of s in which every
        vowel occurs an even number of times.

        A substring s[j+1:i+1] qualifies exactly when the parity mask is the
        same at indices j and i, so we remember the earliest index of each mask.
        """
        res = 0
        seen = {0: -1}  # parity mask -> earliest index at which it occurred
        cur = 0
        for i, c in enumerate(s):
            cur ^= Solution._VOWEL_BIT.get(c, 0)  # non-vowels leave the mask unchanged
            seen.setdefault(cur, i)
            res = max(res, i - seen[cur])
        return res
# Driver Code
if __name__ == "__main__":
s = "leetcodeisgreat"
result = Solution.findTheLongestSubstring(s)
print("length of longest Substring = ", result) |
f51ad176266865429620db22dae1090011bafe66 | codewithgauri/HacktoberFest | /python/Learning Files/31-Object Oriented Programming-Class method and Static Method.py | 2,069 | 4.09375 | 4 | # class Account(object):
# count = 0
# # Class Variable
# def __init__(self,cust_id,name,initial_balance=0):
# self.__customer_id=cust_id
# self.__name=name
# self.__balance=initial_balance
# Account.count+=1
# def get_balance(self):
# return self.__balance
# def get_id(self):
# return self.__customer_id
# def deposite(self,amount):
# self.__balance= self.__balance +amount
# def withdral(self,amount):
# if amount>self.__balance:
# print("balance insufficient")
# else:
# self.__balance-=amount
# customer1=Account("101","XYZ")
# customer2=Account("102","PQR")
# print(Account.count)
# print(customer1.count)
# Account.count+=5
# print(Account.count)
# customer1.count=100
# print(Account.count)
# print(customer1.count)
# print(customer2.count)
# if we want to modify a class variable we can only modify it through the class name,
# but if we try to update a class variable through an object Python won't raise an error —
# rather it will create a new instance attribute on customer1 that shadows the class variable
# print(Account.__dict__)
# print(customer1.__dict__)
# Class Method: If we want to work with all ur object variables
class Account(object):
    """Bank account used to demonstrate class methods versus static methods."""

    count = 0  # shared, class-level counter of accounts ever created

    @classmethod
    def incr_count(cls):
        """Bump the shared account counter by one."""
        cls.count += 1

    @classmethod
    def get_count(cls):
        """Return how many accounts have been created so far."""
        return cls.count

    @staticmethod
    def print_val():
        print("staticmethod in account class")

    def __init__(self, cust_id, name, initial_balance=0):
        """Create an account and register it in the class-wide counter."""
        self.__customer_id = cust_id
        self.__name = name
        self.__balance = initial_balance
        Account.incr_count()

    def get_balance(self):
        """Current balance of this account."""
        return self.__balance

    def get_id(self):
        """Customer id this account belongs to."""
        return self.__customer_id

    def deposite(self, amount):
        """Add amount to the balance."""
        self.__balance += amount

    def withdral(self, amount):
        """Subtract amount from the balance if funds suffice, else warn."""
        if amount > self.__balance:
            print("balance insufficient")
            return
        self.__balance -= amount
customer1=Account("101","XYZ")
customer2=Account("102","PQR")
# If we want to access the class Methods we need to access using class names
print(Account.get_count())
Account.print_val()
# In staticmethod we wont be accessing the class variable nor the init variable |
453eb80f8c7d3c8353c7288f4beea8e3f7e0c1c5 | codewithgauri/HacktoberFest | /python/Cryptography/Prime Numbers/naive_primality_test.py | 576 | 4.21875 | 4 | ##Make sure to run with Python3 . Python2 will show issues
from math import sqrt
from math import floor
def is_prime(num):
    """Naive deterministic primality test by trial division.

    BUGFIXES vs. the original:
    - `num <= 2: return False` wrongly rejected 2 (the only even prime)
    - `range(3, floor(sqrt(num)), 2)` excluded sqrt(num) itself, so perfect
      squares of odd primes (9, 25, 49, ...) were reported prime
    """
    # numbers below 2 are not prime
    if num < 2:
        return False
    # 2 is prime; every other even number is not
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # only odd divisors need checking, up to and including floor(sqrt(num))
    for i in range(3, floor(sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
if __name__ == "__main__":
print(is_prime(99194853094755497))
|
51d1cb5a523fa102734d50143a3b9eab17faf2cb | codewithgauri/HacktoberFest | /python/Learning Files/13-Dictionary Data Types , Storing and Accessing the data in dictionary , Closer look at python data types.py.py | 1,580 | 4.3125 | 4 | # dict:
# 1. mutable
# 2.unordered= no indexing and slicing
# 3.key must be unque
# 4.keys should be immutable
# 5. the only allowed data type for key is int , string , tuple
# reason mutable data type is not allowed
# for example
# d={"emp_id":101 , [10,20,30]:100,[10,20]:200}
# if we add an element into [10,20] of 30 we will be creating a duplicate key
#d={"emp_id":101, "emp_name":"Uday Kiran", "email_id":"kiranu941@gmail.com"}
# print(d)
#d["email_id"]=102
#print(d)
#d["contact_no"]=123456789
#print(d)
# d["contact_no"]=1234567898
# it will update the value
# get
# setdeafult
# get retrieves the value stored under the specified key
#print(d.get("emp_name"))
# if we specify a key which doesn't exist it won't throw an error
# it will return None
# if you want the function to return a value when the key doesn't exist
# we can specify a second parameter
#print(d.get("email","Key doesnt exist"))
# setdefault adds the element if the key doesn't exist, otherwise it retrieves the existing value
#print(d.setdefault("age"))
# since age is not present it will add the age key and the assign a value of None
# if we want to assign a value to it i inilization its self
# print(d.setdefault("age",50))
#d["email_id"]="kiranu942@gmail.com"
#print(d)
#for x in d:
# print(x)
# defaultly it will iterate over the keys
#for x in d:
# print(x,d[x])
# if we also want the values
#dic={}
#for num in range(1,11):
# dic[num]=num*num
#print(dic)
#keys
#values
#items
# print(d.keys()) it is a list of all the keys
# print(d.values()) it is a list of all the values
# print(d.items()) it returns a tuple
# for t in d.items():
# print(t) |
875a5fb9806d97f9aa2fb602246ba5ddd7b03e4d | codewithgauri/HacktoberFest | /python/interpolation_search.py | 1,423 | 4.09375 | 4 | """
Implementation of Interpolation Search
"""
def interpolation_search(sample_input, lowest, highest, item):
    """
    Search for item in the sorted list sample_input between indices lowest and highest.

    First guesses a position by linear interpolation over the value range,
    then narrows down with a standard binary search.

    :param sample_input: sorted list of numbers
    :param lowest: index of the lower bound of the search range
    :param highest: index of the upper bound of the search range
    :param item: the element to search for
    :return: True if found else False

    BUGFIXES vs. the original:
    - guards against ZeroDivisionError when all values in the range are equal
    - clamps the interpolated index so items outside the value range no
      longer produce an out-of-range estimation
    """
    value_range = sample_input[highest] - sample_input[lowest]
    if value_range == 0:
        # every value in the range is identical; interpolation is undefined
        estimation = lowest
    else:
        distance = item - sample_input[lowest]
        ratio = distance / value_range
        estimation = int(lowest + ratio * (highest - lowest))
        # keep the estimate inside the index range
        estimation = max(lowest, min(highest, estimation))
    found = False
    if sample_input[estimation] == item:
        found = True
    elif item < sample_input[estimation]:
        highest = estimation
    else:
        lowest = estimation
    # fall back to binary search on the narrowed range
    while lowest <= highest and not found:
        mid = (lowest + highest) // 2
        if sample_input[mid] == item:
            found = True
        elif sample_input[mid] < item:
            lowest = mid + 1
        else:
            highest = mid - 1
    return found
if __name__ == "__main__":
sample_input = [0, 4, 7, 9, 12, 14, 18, 25, 27, 36, 46, 50, 64, 79, 88]
item = int(input("Enter the item to search: "))
result = interpolation_search(sample_input, min(sample_input), len(sample_input) - 1, item)
if result:
print("Successfully found")
else:
print("Not Found")
|
31051c6c2ab1c3de61a06f386ec75978486f1e0f | codewithgauri/HacktoberFest | /python/Algorithms/Implementation/Modified Kaprekar Numbers.py | 412 | 3.65625 | 4 | import math
def kaprekarNumbers(p, q):
    """Print every modified Kaprekar number n with p <= n <= q, one per line.

    A d-digit n is a modified Kaprekar number when splitting n**2 into its
    rightmost d digits and the remaining left digits yields parts that sum
    back to n (e.g. 45**2 == 2025 and 20 + 25 == 45).

    BUGFIX: the original summed only the first two digits of the square,
    which is not the Kaprekar split (it missed 45, 55, 99, ...).
    """
    for i in range(p, q + 1):
        square = str(i * i)
        d = len(str(i))
        right = int(square[-d:])
        # the left part may be empty (e.g. 1**2 == 1) — treat it as 0
        left = int(square[:-d] or "0")
        if left + right == i:
            print(i)
if __name__ == '__main__':
p = int(input())
q = int(input())
kaprekarNumbers(p, q)
|
e6f197c0cf5d3c02698c8f82449cb2a50c21d34a | codewithgauri/HacktoberFest | /python/Cryptography/Ceasar's Cipher/Ceasar_crack_Brute_force.py | 549 | 3.59375 | 4 | # -*- coding: utf-8 -*-
"""
Created on Sat May 11 11:57:28 2019
@author: jmat
"""
ALPHABET = " ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def ceasar_crack(cipher_text):
    """Brute-force a Caesar cipher: print the decryption under every possible key."""
    for key in range(len(ALPHABET)):
        decoded = []
        for symbol in cipher_text:
            # shift each symbol back by the candidate key, wrapping around
            shifted = (ALPHABET.find(symbol) - key) % len(ALPHABET)
            decoded.append(ALPHABET[shifted])
        plain_text = "".join(decoded)
        print("With key " + str(key) + ", the result is " + str(plain_text))
if __name__ =="__main__":
encrypted="VJKUBKUBCBOGUUCIG"
ceasar_crack(encrypted)
|
767c1781d90744b7945f59904a953844ffcba9fa | emaquino44/Graphs | /projects/graph/src/graph.py | 2,485 | 3.75 | 4 | """
Simple graph implementation compatible with BokehGraph class.
"""
class Graph:
    """Represent a graph as a dictionary of vertices mapping labels to edges."""

    def __init__(self):
        self.vertices = {}
        self.groups = []

    def add_vertex(self, vertex):
        """Register a new vertex; refuse duplicates (returns False)."""
        if vertex in self.vertices:
            print("That vertex already exists")
            return False
        self.vertices[vertex] = set()

    def add_edge(self, startpoint, endpoint):
        """Create an undirected edge between two existing vertices."""
        if startpoint not in self.vertices or endpoint not in self.vertices:
            print("Invalid start or endpoint")
            return False
        self.vertices[startpoint].add(endpoint)
        self.vertices[endpoint].add(startpoint)

    def breadth_first_for_each(self, start):
        """Return the vertices reachable from start, in breadth-first order."""
        queue = [start]
        visited = []
        while queue:
            current = queue.pop(0)          # dequeue the oldest entry
            visited.append(current)
            for edge in self.vertices[current]:
                # enqueue neighbours not yet seen and not already queued
                if edge not in visited and edge not in queue:
                    queue.append(edge)
        return visited

    def depth_first_for_each(self, start):
        """Return the vertices reachable from start, in depth-first order.

        BUGFIX: the original built the visited list but never returned it,
        unlike its BFS counterpart.
        """
        stack = [start]
        visited = []
        while stack:
            current = stack.pop()           # take the most recent entry
            visited.append(current)
            for edge in self.vertices[current]:
                if edge not in visited and edge not in stack:
                    stack.append(edge)
        return visited
# print(visited)
# vertices = {
# 0: {1,3},
# 1: {0,3,4},
# 2: {4},
# 3: {1,0,5},
# 4: {1,2}
# 5: {3}
# }
graph = Graph() # Instantiate your graph
graph.add_vertex('0')
graph.add_vertex('1')
graph.add_vertex('2')
graph.add_vertex('3')
graph.add_vertex('4')
graph.add_vertex('5')
graph.add_vertex('6')
graph.add_vertex('7')
graph.add_edge('0', '1')
graph.add_edge('0', '3')
graph.add_edge('4', '2')
graph.add_edge('1', '4')
graph.add_edge('5', '3')
# print('vertices',graph.vertices)
# print('values',graph.vertices.values())
graph.breadth_first_for_each('0')
graph.depth_first_for_each('0')
|
6ecd3e7ab69721bc7e0183560f7610176e0dd941 | madskinner/bibterm2dict | /bibterm2dict/myclasses/multi_column_listbox.py | 5,651 | 3.625 | 4 | # -*- coding: utf-8 -*-
'''
Here the TreeView widget is configured as a multi-column listbox
with adjustable column width and column-header-click sorting in Python3.
'''
from tkinter.ttk import Treeview, Scrollbar, Label, Frame
from tkinter.font import Font
from tkinter import messagebox
class MultiColumnListbox(object):
    """use a ttk.TreeView as a multicolumn ListBox

    Wraps a ttk.Treeview (headings-only mode) in a fixed-size Frame with a
    vertical and a horizontal scrollbar, sizing each column to fit its
    header and cell contents.
    """
    # NOTE(review): aheader/items are mutable default arguments shared
    # across calls — safe only if callers never mutate them; confirm.
    def __init__(self, parent=None, aheader=['a', 'b',], items=[['','',],], \
                 _column=0, _row=0, _columnspan=12, _rowspan=20):
        # parent: the Tk container widget; _column/_row/_columnspan/_rowspan
        # are only passed through to _setup_widgets (grid layout is commented out)
        self.parent = parent
        self.tree = None
        self._setup_widgets(aheader, items, _column, _row, _columnspan, _rowspan)
        self._build_tree(aheader, items)
    def _setup_widgets(self, aheader, items, _column, _row, _columnspan, _rowspan):
        """\click on header to sort by that column
        to change width of column drag boundary
        """
        # (earlier grid-based layout kept below for reference)
        # self.tree = Treeview(self.f3, selectmode="extended", height=8)
        # self.tree.grid(column=0, row=0, \
        # columnspan=12, rowspan=20, sticky='news', padx=5)
        # ysb = Scrollbar(self.f3, orient='vertical', command=self.tree.yview)
        # xsb = Scrollbar(self.f3, orient='horizontal', command=self.tree.xview)
        # self.tree.configure(yscroll=ysb.set, xscroll=xsb.set)
        # ysb.grid(row=0, column=11, rowspan=20, padx=5, sticky='nse')
        # xsb.grid(row=20, column=0, columnspan=12, padx=5, sticky='ews')
        # msg = Label(wraplength="4i", justify="left", anchor="n")
        # msg.grid(column=0, row=0, padx=5, pady=5, sticky='news' text=s)
        container = Frame(self.parent, width=1000, height=400)
        # container.grid(column=_column, row=_row, columnspan=_columnspan, \
        # rowspan=_rowspan, padx=5, pady=5, sticky='news' )
        # create a treeview with dual scrollbars
        self.tree = Treeview(container, selectmode="extended", height=8, show="headings")
        # self.tree.grid(column=_column, row=_row, \
        # columnspan=_columnspan, rowspan=_rowspan, \
        # padx=5,pady=5,sticky='news')
        vsb = Scrollbar(self.parent, orient='vertical', command=self.tree.yview)
        hsb = Scrollbar(self.parent, orient='horizontal', command=self.tree.xview)
        self.tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
        # vsb.grid(column=(_column + _columnspan-1), row=0, rowspan=_rowspan, sticky='nse') #, in_=container)
        # hsb.grid(column=0, row=(_row + _rowspan), columnspan=_columnspan, sticky='ews') #, in_=container)
        # container.grid_columnconfigure(0, weight=1)
        # container.grid_rowconfigure(0, weight=1)
        # keep the container at its requested 1000x400 size
        container.pack_propagate(0)
        hsb.pack(side="bottom", fill="x")
        # NOTE(review): fill="x" on a vertical scrollbar looks wrong —
        # probably fill="y" was intended; confirm visually.
        vsb.pack(side="right", fill="x")
        self.tree.pack(side="top", fill="both", expand=True)
    def _build_tree(self, aheader, items):
        # Configure the columns from the header list and insert all rows.
        self.tree['columns'] = aheader
        self.tree["displaycolumns"] = aheader
        # hide the implicit tree column "#0" (headings-only display)
        self.tree.column("#0", minwidth=0, width=0, stretch=False)
        # self.tree.heading('#0', text=LOCALIZED_TEXT[lang]['#0'])
        # start each column at its header's pixel width ...
        widths = [Font().measure(col) for col in aheader]
        # ... then widen to the widest cell value in that column
        for item in items:
            for i in range(0,len(item)):
                if widths[i] < Font().measure(item[i]):
                    widths[i] = Font().measure(item[i])
        for i in range(0,len(aheader)):
            self.tree.column(aheader[i], minwidth=100, width=widths[i], \
                             anchor='center', stretch=True)
            # print(self.tree['columns'], col)
            # self.tree.column(col, minwidth=100, width=400, stretch=False)
            self.tree.heading(aheader[i], text=aheader[i])
        ## for col in aheader:
        # for col in range( 0, len(aheader)):
        ## self.tree.heading(col, text=col.title(),
        ## command=lambda c=col: sortby(self.tree, c, 0))
        # print('#{}'.format(col), aheader[col])
        # self.tree.heading('#{}'.format(col), text=aheader[col])
        # # adjust the column's width to the header string
        ## self.tree.column(col, width= Font().measure(aheader[col]))
        for item in items:
            self.tree.insert('', 'end', values=item)
            # adjust column's width if necessary to fit each value
            # for ix, val in enumerate(item):
            # col_w = Font().measure(val)
            # if self.tree.column(aheader[ix], width=None) < col_w:
            # self.tree.column(aheader[ix], width=col_w)
    def delete_selected_row(self):
        """deletes selected row, if no row selected gives warning message"""
        # NOTE(review): not implemented yet — currently a no-op.
        pass
    def add_row(self, _values):
        """adds a row at the bottom of table, but if the only row has zero
        length contents it will be replaced"""
        # NOTE(review): _values is ignored and the replace-empty-row logic
        # described above is not implemented; the row is inserted empty.
        return(self.tree.insert('', index='end', open=True))
        pass  # unreachable (after return)
#def getKey(item):
# return item[0]
#
#def sortby(tree, col, descending):
# """sort tree contents when a column header is clicked on"""
# # grab values to sort
# data = [(tree.set(child, col), child) \
# for child in tree.get_children('')]
# # if the data to be sorted is numeric change to float
# #data = change_numeric(data)
# # now sort the data in place
# data.sort(key=getKey, reverse=descending)
# #for ix, item in enumerate(data):
# for ix in range(0, len(data)):
# tree.move(data[ix][1], '', ix)
# # switch the heading so it will sort in the opposite direction
# tree.heading(col, command=lambda col=col: sortby(tree, col, \
# int(not descending)))
|
a232e1136f5e328525317a30c0d8a24dacb3d175 | Art83/python_bioinformatics | /patternMatch.py | 708 | 3.5625 | 4 | # Finding all occurences of a ppatern in a string
def PatternMatching(Pattern, Genome):
    """Return the start index of every (possibly overlapping) occurrence of Pattern in Genome."""
    positions = []
    start = Genome.find(Pattern)
    while start != -1:
        positions.append(start)
        # continue the search one past the current hit so overlaps are found
        start = Genome.find(Pattern, start + 1)
    return positions
#Alternative way
'''
def PatternMatching(Pattern,Genome):
positions = []
for i in range(len(Genome)-len(Pattern)+1):
if Pattern == Genome[i:i+len(Pattern)]:
positions.append(i)
return positions
'''
def main():
    """Read the genome file and print every position of the 9-mer CTTGATCAT."""
    # NOTE(review): 'input' shadows the builtin of the same name inside this scope
    with open('Vibrio_cholerae.txt') as input:
        Genome = input.read()
    print(PatternMatching("CTTGATCAT",Genome))
if __name__ == "__main__":
main()
|
def fizzbuzz(n: int) -> None:
    """Solves the FizzBuzz problem.
    >>> fizzbuzz(10)
    1
    2
    fizz
    4
    buzz
    6
    7
    8
    fizz
    buzz
    """
    for i in range(1, n + 1):
        s = ''
        if i % 3 == 0:
            s += 'fizz'
        if i % 5 == 0:
            s += 'buzz'  # BUGFIX: was 'fuzz', contradicting the doctest above
        if s == '':
            s += str(i)
        print(s)
|
2725b85849ce224e97685919f148cc9807e60d83 | bdngo/math-algs | /python/checksum.py | 1,155 | 4.21875 | 4 | from typing import List
def digit_root(n: int, base: int=10) -> int:
"""Returns the digital root for an integer N."""
assert type(n) == 'int'
total = 0
while n:
total += n % base
n //= base
return digit_root(total) if total >= base else total
def int_to_list(n: int, base: int = 10) -> List[int]:
    """Return the digits of n, most significant first, in the given base.

    BUGFIX: the original returned [] for n == 0; zero has one digit.
    """
    if n == 0:
        return [0]
    digit_list = []
    while n:
        digit_list.append(n % base)
        n //= base
    # digits were collected least-significant first
    return list(reversed(digit_list))
def check_sum(n: int) -> bool:
    """Checks if N is a valid bank card (Luhn-style check, doubling from the left)."""
    doubled = [
        digit_root(d * 2) if i % 2 == 0 else d
        for i, d in enumerate(int_to_list(n))
    ]
    return sum(doubled) % 10 == 0
def vat_check(n: int) -> bool:
    """Checks if N satisfies the old HMRC VAT number check."""
    all_digits = int_to_list(n)
    leading, last_two = all_digits[:-2], all_digits[-2:]
    # weight the leading digits 8, 7, 6, ... from the left
    weighted = sum(d * w for d, w in zip(leading, range(8, 8 - len(leading), -1)))
    check_digit = weighted + (last_two[0] * 10 + last_two[1]) + 55
    return check_digit % 97 == 0
|
67fa4b28d9bc8d0df0f2f784d94a0ba6eb9dd889 | anatoliis/ProjectEuler | /problems/Problem50.py | 387 | 3.6875 | 4 | #!/usr/bin/env python2
from primes import primes_list, prime
def find():
    """Project Euler 50: the prime below 10**6 that is the sum of the most
    consecutive primes.

    Tries window lengths from 1000 downward and slides each window over the
    prime list; the first prime window-sum found wins.
    (Python 2 code: uses xrange and the project-local `primes` helpers.)
    """
    primes = primes_list(10**4)
    for length in xrange(1000, 0, -1):
        first = 0
        while True:
            summ = sum(primes[first:first+length])
            # print summ
            # once the window sum exceeds the bound, longer offsets only grow it
            if summ >= 10**6: break
            if prime(summ): return summ
            first += 1
print 'result: %s' % find()
|
154ee4f1f1ba4c3c2575c65d20419fe4af2b50f5 | jingruhou/Python-2 | /python100/day02/variable3.py | 727 | 3.96875 | 4 | #coding=utf-8
# Use type() to inspect the type of each variable
a = 100
b = 12.345
c = 1 + 5j
d = 'hello, world'
e = True
print(type(a))
print(type(b))
print(type(c))
print(type(d))
print(type(e))
# Type conversions can be done with Python's built-in functions (strictly speaking,
# the names below are constructors of the corresponding types rather than plain functions):
# int():   convert a number or string to an integer; a base may be specified
# float(): convert a string to a floating point number
# str():   convert the given object to its string form; an encoding may be specified
# chr():   convert an integer code point to the corresponding character (a 1-char string)
# ord():将字符串(一个字符)转换成对应的编码(整数) |
e59e8ab7f5309cb1f0b20db374f2e8850619c765 | JasonTGuerrero/UCLA-PIC-16A | /hw3.py | 2,686 | 3.828125 | 4 | import copy
class Node:
    """A single cell of a singly linked list: a payload plus a link to the next cell."""

    def __init__(self, data):
        self.data = data
        self.next = None  # successor node; None marks the tail

    def __str__(self):
        return str(self.data)

    def __repr__(self):
        return repr(self.data)
# class LinkedList:
# def __init__(self, data):
# self.node = Node(data)
# self.first = self.node
# self.last = self.node
# self.n = 1
# def append(self, data):
# newNode = Node(data)
# self.last.next = newNode
# self.last = newNode
# self.n += 1
# def __iter__(self):
# self.node = self.first
# return self
# def __next__(self):
# if self.node == None:
# raise StopIteration
# node = self.node
# self.node = self.node.next
# return node.data
class LinkedList:
    """Singly linked list built on Node, with list-like conveniences.

    BUGFIXES vs. the original:
    - __add__ copied Node objects (not their data) into the new list,
      producing nodes that wrapped other nodes
    - __setitem__ replaced an entry in the internal index list only, so
      iteration over the chain still yielded the old value
    - the local variable `copy` in __add__ shadowed the imported module
    """

    def __init__(self, data):
        self.node = Node(data)
        self.first = self.node       # head of the chain
        self.last = self.node        # tail, kept for O(1) appends
        self.n = 1                   # element count
        self.__values = [self.node]  # node index for O(1) subscripting

    def append(self, data):
        """Append a new value at the tail."""
        new_node = Node(data)
        self.last.next = new_node
        self.last = new_node
        self.n += 1
        self.__values.append(new_node)

    def __str__(self):
        # e.g. "[0->1->2->]"
        return "[" + "".join(str(node) + "->" for node in self.__values) + "]"

    def __repr__(self):
        # the original wrapped the string form in single quotes
        return "'" + self.__str__() + "'"

    def __add__(self, value):
        """Return a NEW list holding self's data followed by value; self is unchanged."""
        result = LinkedList(self.first.data)
        node = self.first.next
        while node is not None:
            result.append(node.data)
            node = node.next
        result.append(value)
        return result

    def __len__(self):
        return self.n

    def __iter__(self):
        return self.generator()

    def generator(self):
        """Yield each element's data from head to tail."""
        node = self.first
        while node is not None:
            yield node.data
            node = node.next

    def _check_index(self, index):
        # negative indices are deliberately unsupported, matching the original
        if index < 0 or index >= self.n:
            raise IndexError("list index out of range")

    def __getitem__(self, index):
        """Return the Node at index (not its data, matching the original API)."""
        self._check_index(index)
        return self.__values[index]

    def __setitem__(self, index, value):
        """Replace the element's data at index, keeping chain and index in sync."""
        self._check_index(index)
        self.__values[index].data = value
# a = LinkedList(0)
# a.append(1)
# a.append(2)
# a + 1
# print(str(a))
# a = a + 1
# print(str(a))
|
e64687b9baaa75ae481ea65ed9e2cd26a203e41a | kimoror/python-practice | /practice1_extra/main.py | 2,722 | 4.25 | 4 | # Ответы на теоретические вопросы находятся в файле AnswersOnQuestions.md
"""
Попробуйте составить код для решения следующих задач. Из арифметических операций можно использовать только явно
указанные и в указанном количестве. Входным аргументом является переменная x.
"""
def no_multiply(x):
    """Reproduce selected multiplication results using only sums/differences.

    Returns None for any x outside the handled set {12, 16, 15, 29}.
    """
    if x == 12:
        return 3 + 3 + 3 + 3
    if x == 16:
        return 4 + 4 + 4 + 4
    if x == 15:
        return 6 + 6 + 6 - 2 - 1
    if x == 29:
        return 5 + 5 + 5 + 5 + 5 + 5 - 1
print(f'Умножение 1 на {no_multiply(29)}: {no_multiply(29)}')
"""
# Некто попытался реализовать "наивную" функцию умножения с помощью сложений. К сожалению, в коде много ошибок.
# Сможете ли вы их исправить?
"""
def naive_mul(x, y):
    """Multiply x by non-negative y using repeated addition only."""
    total = 0
    for _ in range(y):
        total += x
    return total
def naive_mul_test():
    """Exhaustively check naive_mul against the * operator for 0..100."""
    for x in range(101):
        for y in range(101):
            assert naive_mul(x, y) == x * y
    print("naive_mul_test is passed")
naive_mul_test()
"""
# Реализуйте функцию fast_mul в соответствии с алгоритмом двоичного умножения в столбик.
# Добавьте автоматическое тестирование,как в случае с naive_mul.
"""
def fast_mul(x, y):
    """Multiply x (non-negative) by y with the binary ("Russian peasant")
    method: repeatedly halve x while doubling y, adding y whenever x is odd.
    """
    total = 0
    while x >= 1:
        if y == 0:
            return 0
        if x % 2 != 0:
            total += y
        y *= 2
        x //= 2
    return total
def fast_mul_test():
    """Exhaustively check fast_mul against the * operator for 0..100."""
    for x in range(101):
        for y in range(101):
            assert fast_mul(x, y) == x * y
    # NOTE(review): message says "fast_mull" — typo in the runtime string, left as-is
    print("fast_mull_test is passed")
fast_mul_test()
# Реализуйте аналогичную функцию для возведения в степень
def fast_pow(base, degree, mul=1):
    """Return mul * base**degree via iterative binary exponentiation.

    mul is the running accumulator the original recursion threaded through;
    it is kept for backward compatibility.

    BUGFIX: the original returned 1 whenever base reached 1 at any recursion
    depth, discarding the accumulator — e.g. fast_pow(-1, 3) returned 1
    instead of -1 (because (-1)**2 == 1 mid-recursion).
    """
    if base == 0 and degree == 0:
        return 1  # match Python's convention: 0 ** 0 == 1
    result = mul
    while degree > 0:
        if degree % 2:
            result *= base
        base *= base
        degree //= 2
    return result
def fast_pow_test():
    """Exhaustively check fast_pow against the ** operator for 0..100."""
    for x in range(101):
        for y in range(101):
            assert fast_pow(x, y) == x ** y
    # NOTE(review): success message was copy-pasted from fast_mul_test; left as-is
    print("fast_mull_test is passed")
fast_pow_test()
|
b7b944e5d5dd1cd1b41952c55bc13c348f905024 | Oyekunle-Mark/Graphs | /projects/ancestor/ancestor.py | 1,360 | 3.671875 | 4 | from graph import Graph
from util import Stack
def earliest_ancestor(ancestors, starting_node):
    """Return the earliest known ancestor of starting_node, or -1 if it has no parents.

    ancestors is an iterable of (parent, child) pairs. Edges are stored
    child -> parent, so the depth-first walk below moves *up* the family
    tree; the last vertex popped from the stack is reported. The sorted()
    on neighbour pushes fixes the traversal order, which is what breaks
    ties between ancestors at the same depth — do not reorder.
    """
    # FIRST REPRESENT THE INPUT ANCESTORS AS A GRAPH
    # create a graph instance
    graph = Graph()
    # loop through ancestors and add every number as a vertex
    for parent, child in ancestors:
        # add the parent as a vertex
        graph.add_vertex(parent)
        # add the child as a vertex as well
        graph.add_vertex(child)
    # # loop through ancestors and build the connections
    for parent, child in ancestors:
        # connect the parent to the child
        # the connection is reversed because dft transverses downward
        graph.add_edge(child, parent)
    # if starting node has no parents (no outgoing child->parent edges)
    if not graph.vertices[starting_node]:
        # return -1
        return -1
    # create a stack to hold the vertices
    s = Stack()
    # add the starting_node to the stack
    s.push(starting_node)
    # set earliest_anc to -1
    earliest_anc = -1
    # loop while stack is not empty
    while s.size() > 0:
        # pop the stack
        vertex = s.pop()
        # set the earliest_anc to vertex (the last one popped is the answer)
        earliest_anc = vertex
        # add all its connected vertices to the stack
        # sort the vertices to maintain a deterministic order
        for v in sorted(graph.vertices[vertex]):
            s.push(v)
    return earliest_anc
|
613b5d072a397ca353476abcac2d66495e96b4d9 | CrazyPants9527/py-tasks | /guessnumber/GuessNum.py | 2,273 | 3.671875 | 4 | import random
# 返回一个元素0-9,随机且不重复,长度为4的list
def random_number():
    """Return a list of 4 distinct random digits (0-9) in random order.

    BUGFIX: the original accumulated random digits and round-tripped them
    through set(), which for small ints iterates in ascending order in
    CPython — so the "secret" number came out (near-)sorted every game.
    random.sample draws 4 distinct digits and keeps their random order.
    """
    return random.sample(range(10), 4)
# 不改变原来list元素顺序情况下去除重复元素
def unlike(before_list):
    """Remove duplicates from a list while preserving first-seen order.

    Non-list input yields the (Chinese) error string, matching the original.
    """
    if not isinstance(before_list, list):
        return "数据类型不是list"
    # dict keys preserve insertion order, so this de-duplicates in place order
    return list(dict.fromkeys(before_list))
# 输入一串字符,如果该字符是4个不一样的正整数,如0123,则返回list = [0, 1, 2, 3]
# 否则一直返回该函数
# Read a string from stdin; if it encodes 4 distinct digits (e.g. "0123")
# return them as a list like [0, 1, 2, 3]; otherwise prompt again recursively.
def myinput():
    print("输入4个不同的数字")
    nums = input(">")
    # make sure the input parses as an integer
    try:
        int_nums = int(nums)
    except ValueError as e:
        print("输入的不是整数!")
        return myinput()
    # split the value into its 4 digits (duplicates still possible here);
    # note: one leading zero like "0123" works (int 123 still passes the
    # range check), but two leading zeros like "0012" are rejected
    temp_num = int(nums)
    if temp_num < 10000 and temp_num > 99:
        a = temp_num//1000
        b = temp_num//100-a*10
        c = temp_num//10-a*100-b*10
        d = temp_num % 10
        temp_list = [a,b,c,d]
    else:
        print("你输入的数字不是4个数!")
        return myinput()
    list2 = unlike(temp_list) # the digit list with duplicates removed
    # after de-duplication there must still be 4 digits
    if len(list2) == 4:
        return list2
    else:
        print("不一样的数字得有4个才能玩")
        return myinput()
# 和猜的数字对比
def comparison(list1,list2):
    """Score a guess against the secret, Bulls-and-Cows style.

    Returns the (Chinese) win string when the lists match exactly, otherwise
    a tuple (exact, "A", misplaced, "B").
    """
    if list1 == list2:
        return "猜对了!"
    exact = 0      # same digit at the same position
    misplaced = 0  # same digit at a different position
    for i in range(4):
        for j in range(4):
            if list1[i] == list2[j]:
                if i == j:
                    exact += 1
                else:
                    misplaced += 1
    return exact, "A", misplaced, "B"
# 主方法
list1 = random_number()
while True:
list2 = myinput()
print("你的正确率是-->:",comparison(list1,list2))
if comparison(list1,list2) == "猜对了!":
break
|
3e0c85bb6321ca933c71fb484ef0a520e3ad2f87 | chinski99/minilogia | /2008/etap 2/gwiazdki2007.py | 582 | 3.59375 | 4 | from turtle import *
from math import sqrt
def gwiazdki():
    """Draw a cluster of star shapes: one centre star plus five more placed
    around it, rotated 72 degrees apart."""
    l=50
    shape(l)
    for _ in range(5):
        x=pos()        # remember position and heading so each arm
        y=heading()    # starts from the same anchor point
        fd(l)
        lt(18)
        fd(l)
        lt(18)
        shape(l)
        setpos(x)
        setheading(y)
        rt(72)
def triangle(a):
    """Draw a right isosceles triangle: two legs of length a and a hypotenuse a*sqrt(2)."""
    fd(a)
    lt(135)
    fd(a*sqrt(2))
    lt(135)
    fd(a)
    lt(90)
def shape(l): # 18 54
    """Draw one green filled star made of 5 triangles rotated 72 degrees apart."""
    lt(45+180-9)   # orient before filling
    pendown()
    fillcolor("green")
    begin_fill()
    for _ in range(5):
        triangle(l)
        rt(72)
    end_fill()
    penup()
    lt(45+180)     # turn back after drawing (small net rotation remains)
speed(0)
gwiazdki()
done() |
3659c9dd6a3993115d9b610d6d6b9c5e0b93c6fa | chinski99/minilogia | /2014/etap 3/plecionka.py | 735 | 3.515625 | 4 |
from turtle import *
def simple(a,col):
    """Draw one filled strip of the braid in colour col (unit size a).

    The path is a closed zig-zag polygon; presumably one interwoven strand —
    confirm visually.
    """
    pendown()
    fillcolor(col)
    begin_fill()
    lt(30)
    fd(3*a)
    lt(120)
    fd(a)
    lt(60)
    fd(2*a)
    rt(60)
    fd(2*a)
    lt(60)
    fd(a)
    lt(120)
    fd(3*a)
    end_fill()
    lt(30)
    penup()
def compo(a):
    """Draw one three-colour braid element: three strips rotated 120 degrees apart."""
    c=["brown","orange","yellow"]
    for i in range(3):
        simple(a,c[i])
        rt(120)
def pile(a,n):
    """Draw a column of n braid elements, stepping 4*a between elements."""
    for _ in range(n):
        compo(a)
        lt(90)
        bk(4*a)
        rt(90)
def plecionka(n):
    """Draw the full braid figure: n columns of decreasing height (n down to 1)."""
    penup()
    bk(200)
    lt(90)
    fd(200)
    rt(90)
    a=480/(4*n+1)  # element size chosen so the figure fits a 480-unit span
    for i in reversed(range(n)):
        pile(a,i+1)
        lt(90)
        fd(4*a*(i+1))  # return to the base line of this column
        rt(120)
        fd(4*a)        # step to the next column's start
        lt(30)
plecionka(6)
done()
|
f183bcf992758994b4e6f873fbde493e0b5529c5 | chinski99/minilogia | /2012/etap 3/dywan.py | 528 | 3.53125 | 4 | from turtle import *
def element(level, k):
    """Draw a red filled k-sized square; while level > 1, recurse with
    half-sized elements on three of its sides."""
    fillcolor("red")
    begin_fill()
    for _ in range(4):
        fd(k)
        lt(90)
    end_fill()
    if level>1:
        for _ in range(3):
            fd(k)
            rt(90)
            element(level - 1, k/2)
            lt(180)
            fd(k)
            lt(90)
def dywan(n):
    """Draw the 'carpet' fractal: a central 160-unit square with depth-n
    recursive square elements attached on each of its four sides."""
    penup()
    bk(80)          # move to the square's corner so the figure is centred
    rt(90)
    fd(80)
    lt(90)
    element(0, 160) # central square only (level 0 draws no children)
    for _ in range(4):
        fd(160)
        rt(90)
        element(n, 80)
        lt(180)
speed(0)
dywan(3)
done()
|
5cde8d9cd2ad809629b64393c33d2935f8db6c2c | chinski99/minilogia | /2015/etap 2/reg.py | 1,142 | 3.59375 | 4 | __author__ = 'apple'
from turtle import *
from random import randint
K = 20
def reg(szer, n): # width of the bookcase, number of shelves
    """Draw a bookcase of width szer with n shelves of randomly coloured books."""
    start_position(szer, n)
    regal(szer, n)
    fd(K)
    for _ in range(n):
        polka(szer // K - 2)  # shelf interior width in K-units, minus side walls
        up(7)                 # each shelf level is 7 K-units tall
def polka(k):
    """Draw one shelf: a white background k*K wide, then k books of
    random colour and height (3K/4K/5K)."""
    rect(k * K, 6*K, "white")
    pendown()
    for _ in range(k):
        r = randint(1, 3)
        if r == 1:
            rect(K, 3 * K, "red")
        elif r == 2:
            rect(K, 4 * K, "green")
        elif r == 3:
            rect(K, 5 * K, "darkblue")
        fd(K)
    penup()
    bk(k * K)  # return to the shelf's left edge
def rect(a, b, col):
    """Draw a filled a-by-b rectangle in colour col, ending back at the start pose."""
    fillcolor(col)
    begin_fill()
    for _ in range(2):
        fd(a)
        lt(90)
        fd(b)
        lt(90)
    end_fill()
def border(szer):
    """Draw the two brown K-square end blocks at both ends of the bookcase."""
    rect(K, K, "sienna")
    fd(szer - K)
    rect(K, K, "sienna")
    bk(szer - K)
def up(k):
    """Move the turtle k*K units upward without changing its heading."""
    lt(90)
    fd(k * K)
    rt(90)
def start_position(szer, n):
    """Move (pen up) to the bottom-left corner so the finished bookcase is centred."""
    penup()
    bk(szer / 2)
    up(-(n * 7 + 3) / 2)  # half the total height: n shelves of 7 K-units plus frame
def regal(szer, n):
    """Draw the bookcase frame: bottom border, brown back panel, top border."""
    border(szer)
    up(1)
    rect(szer, n * 7 * K + K, "sienna")
    up(n * 7 + 1)
    border(szer)
    up(-n * 7)  # return to the first shelf's level
#speed(0)
reg(300, 4)
done()
|
65069818b7113570eaa349f69cb5336c6005745a | carolinesalves/projeto-simples | /Exercicio_Python/Exerc11.py | 385 | 4.09375 | 4 | N1 = int(input("Digite o primeiro número inteiro: "))
N2 = int(input("Digite o segundo número inteiro: "))
R = float(input("Digite um número real: "))
produto = (2 * N1) * (N2/2)
print("O produto entre os números é: ", round(produto,1))
soma = (3*N1) + R
print("A soma dos números é: ", round(soma,1))
potencia = R**3
print("A potência do número é: ", round(potencia,1))
|
c9454251b1907aa3ea2d92a02dc891fe862e7e63 | Seth-R/WEB_STANFORD | /CURSO_08_FLASK/app3.py | 780 | 3.734375 | 4 | #CODIGO PARA MULTIPLES ROUTES EN FLASK PERSONALIZADAS SEGUN RUTA INGRESADA
#Nota: flask NO viene en librerias por defecto, debemos instalarla adicionalmente (pip install flask)
from flask import Flask
#Creamos web application llamada "app" (basada en Flask), con nombre del archivo actual (__name__)
app = Flask(__name__)
# Handler bound to the default route "/" (no extra path after the host)
@app.route("/")
def index():
    return( "PRIMERA PAGINA WEB CON FLASK!!!!!")
#Creamos ahora una ruta PERSONALIZADA donde se puede ingresar LO QUE SEA!!!!
#Ahora, el servidor nos muestra un saludo, seguno lo ingresado respectivamente
@app.route("/<string:nombre>")
def saludar(nombre):
nombre = nombre.upper()
return("BUENOS DIAS, " + nombre + " <3") |
0f812ca5e58d16447763e00bb60c6b34e025d5c1 | jancoufal/pyshards | /overloaded-operators.py | 1,085 | 3.734375 | 4 | class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return '[%.2f;%.2f]' % (self.x, self.y)
def __neg__(self):
return Point(-self.x, -self.y)
def __add__(self, point):
return Point(self.x + point.x, self.y + point.y)
def __sub__(self, point):
return Point(self.x - point.x, self.y - point.y)
def __mul__(self, factor):
return Point(self.x * factor, self.y * factor)
def __rmul__(self, factor):
return Point(self.x * factor, self.y * factor)
def __call__(self, *args, **kwargs):
print('oh, wow. calling a point...', *args, **kwargs)
def main():
    """Exercise Point's overloaded operators, printing each result.

    Note: += / -= fall back to __add__/__sub__ since Point defines no
    in-place variants, so they rebind p3 to a new Point.
    """
    p1 = Point(1, 1)
    p2 = Point(2, 2)
    p3 = p1 + p2  # add
    print(p1, '+', p2, '=', p3)
    p3 = p1 - p2  # sub
    print(p1, '-', p2, '=', p3)
    p3 = p2 - p1  # sub
    print(p2, '-', p1, '=', p3)
    p3 += p1  # add
    print('(iadd) +', p1, '=', p3)
    p3 -= p1  # add
    print('(iadd) -', p1, '=', p3)
    p3 = -p1  # neg
    print('(unary)-', p1, '=', p3)
    p3 = p2 * 3  # mul
    print(p2, '* 3 =', p3)
    p3 = 3 * p2  # rmul
    print('3 *', p2, '=', p3)
if __name__ == '__main__':
main()
|
a215bb43b5e7e913c09874141bc6381e7048a69e | prihoda/bgc-pipeline | /bgc_detection/evaluation/lco_split.py | 10,110 | 3.5625 | 4 | #!/usr/bin/env python
# David Prihoda
# Create Leave-Class-Out splits from set of negative and positive samples
# Produces a folder with (train, test) files for each class.
import merge_split_samples
import argparse
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import KFold, ShuffleSplit
NEG_CLASS_NAME = '_neg_'
class LeaveClassOutSplitter:
    """
    Splitter that splits Series of samples in Leave-Class-Out fashion.
    """
    def __init__(self, unique_classes, neg_class: str, neg_test_size, pos_test_count=None, random_state=0):
        """
        :param unique_classes: Unique positive classes that will be present when splitting
        :param neg_class: Class label to handle as negative class
        :param neg_test_size: Fraction of negative samples to use for testing. The rest is used for training.
        :param pos_test_count: Upsample positive samples to given number. If pos_test_count is lower than the number
        of samples in given class, sampling without replacement is used. If it is higher, we first select all of the samples of given class
        and sample the remainder with replacement.
        :param random_state: Random state to use for sampling.
        """
        self.unique_classes = unique_classes
        self.neg_class = neg_class
        if neg_test_size < 0 or neg_test_size > 1:
            raise AttributeError('Negative test size has to be a fraction between 0.0 and 1.0')
        self.neg_splitter = ShuffleSplit(test_size=neg_test_size, random_state=random_state)
        self.pos_test_count = pos_test_count
    def split(self, samples, classes):
        """
        Split Series of samples in Leave-Class-Out fashion.
        Returns a generator that produces a (train, test) tuple for each positive class.
        The train split contains a random subset of negative samples (marked as self.neg_class) and all positive samples except given class.
        The test split contains the remaining samples from the negative set and all samples from given class.
        Positive samples can be upsampled by using pos_test_count. If pos_test_count is lower than the number
        of samples in given class, sampling without replacement is used. If it is higher, we first select all of the samples of given class
        and sample the remainder with replacement.
        :param samples: Pandas Series of DataFrames (samples)
        :param classes: Series or list of classes for each sample
        :return: Generator of (train, test) split indexes, will generate one tuple for each positive class
        """
        if len(samples) != len(classes):
            raise AttributeError("Samples and classes have to be the same length")
        # Positions of all negative samples within `samples`.
        neg_idx = np.where(classes == self.neg_class)[0]
        if not len(neg_idx):
            raise AttributeError("No negative samples. Add samples with class = {}.".format(self.neg_class))
        neg_samples = samples[neg_idx]
        # One shuffle-split of the negatives, reused for every class's split below.
        neg_train_idx, neg_test_idx = next(self.neg_splitter.split(neg_samples))
        # Map positions within the negative subset back to positions in `samples`.
        neg_train_idx = neg_idx[neg_train_idx]
        neg_test_idx = neg_idx[neg_test_idx]
        for klass in self.unique_classes:
            # Train on all other classes except negative
            pos_train_idx = np.where((classes != klass) & (classes != self.neg_class))[0]
            # Test on given class
            pos_test_idx = np.where(classes == klass)[0]
            num_class_samples = len(pos_test_idx)
            if not num_class_samples:
                print('No samples of class {} found'.format(klass))
            if self.pos_test_count:
                if num_class_samples > self.pos_test_count:
                    # we have more samples than we are sampling, choose without replacement
                    pos_test_idx = np.random.choice(pos_test_idx, self.pos_test_count, replace=False)
                else:
                    # we have less samples than we are sampling, use all indexes + a sampled remainder
                    num_remaining = self.pos_test_count - num_class_samples
                    remaining_choice = np.random.choice(pos_test_idx, num_remaining, replace=True)
                    pos_test_idx = np.concatenate([pos_test_idx, remaining_choice])
            print('Train: {} pos, {} neg. Test: {} pos, {} neg'.format(len(pos_train_idx), len(neg_train_idx), len(pos_test_idx), len(neg_test_idx)))
            # Return unions of negative and positive splits
            yield np.concatenate([pos_train_idx, neg_train_idx]), np.concatenate([pos_test_idx, neg_test_idx])
def filter_lco_samples(pos_samples, pos_classes, neg_samples):
    """Drop hybrid/ambiguous BGC classes and combine the rest with negatives.

    :param pos_samples: list of positive samples (DataFrames)
    :param pos_classes: class label for each positive sample
    :param neg_samples: list of negative samples
    :return: (sorted list of kept classes, combined sample list, combined class array)
    """
    # Hybrids (';'), 'Other', unknown ('?') and 'Nucleoside' classes are excluded.
    selected_classes = sorted({c for c in set(pos_classes)
                               if ';' not in c and 'Other' not in c
                               and '?' not in c and 'Nucleoside' not in c})
    print('Selected classes:', selected_classes)
    # Keep only the positive samples whose class survived the filter.
    sel_pos_samples = []
    sel_pos_classes = []
    for sample, klass in zip(pos_samples, pos_classes):
        if klass in selected_classes:
            sel_pos_samples.append(sample)
            sel_pos_classes.append(klass)
    print('{} non-hybrid of {} total BGCs remained'.format(len(sel_pos_samples), len(pos_samples)))
    # Merge positives and negatives; negatives all get the sentinel class label.
    lco_samples = sel_pos_samples + neg_samples
    lco_classes = np.concatenate([sel_pos_classes, np.array([NEG_CLASS_NAME] * len(neg_samples))])
    print('{} total samples'.format(len(lco_samples)))
    return selected_classes, lco_samples, lco_classes
def lco_samples(selected_classes, val_samples, val_classes, result_path, random_seeds, pos_test_count=300):
    """Create and save Leave-Class-Out train/test CSV splits.

    For every random seed and every positive class, writes
    `<class>.seed<seed>.train.csv` / `.test.csv` into `result_path`, plus a
    `splits.csv` metadata file listing label/name/group for each split.
    """
    meta = []
    for random_seed in random_seeds:
        print('Random seed', random_seed)
        lco_splitter = LeaveClassOutSplitter(
            unique_classes=selected_classes,
            neg_class=NEG_CLASS_NAME,
            neg_test_size=0.33,
            pos_test_count=pos_test_count,
            random_state=random_seed
        )
        # Human-readable label for each class, including its upsampling factor.
        labels = ['{}\n({} BGCs sampled {}x)'.format(class_name, sum(val_classes == class_name), pos_test_count) for class_name in selected_classes]
        np.random.seed(random_seed)
        merged_splits = merge_split_samples.merged_split(
            val_samples,
            lco_splitter,
            shuffle_train=False,
            shuffle_test=True,
            split_params={'classes': val_classes}
        )
        merged_splits = list(merged_splits)
        for split_domains, class_name, label in zip(merged_splits, selected_classes, labels):
            split_name = '{}.seed{}'.format(class_name, random_seed)
            train_domains, test_domains = split_domains
            train_csv_path = os.path.join(result_path, split_name+'.train.csv')
            train_domains.to_csv(train_csv_path, index=False)
            print('Saved LCO {} train sequence to: {}'.format(split_name, train_csv_path))
            test_csv_path = os.path.join(result_path, split_name+'.test.csv')
            test_domains.to_csv(test_csv_path, index=False)
            print('Saved LCO {} test sequence to: {}'.format(split_name, test_csv_path))
            meta.append({
                'label': label,
                'name': split_name,
                'group': class_name
            })
    # One metadata row per (class, seed) split.
    meta_csv_path = os.path.join(result_path, 'splits.csv')
    meta = pd.DataFrame(meta)
    meta.to_csv(meta_csv_path, index=False)
    print('Saved splits meta file to: {}'.format(meta_csv_path))
if __name__ == '__main__':
    # Command-line entry point: filter samples by e-value and generate splits.
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--maxevalue", dest="maxevalue", required=True, type=float,
                        help="Maximum domain independent e-value.", metavar="FLOAT")
    parser.add_argument("-p", "--positive", dest="positive", required=True,
                        help="Path to positive samples file.", metavar="FILE")
    parser.add_argument("-c", "--classes", dest="classes", required=True,
                        help="Path to csv file containing classes of positive samples.", metavar="FILE")
    parser.add_argument("--classes-column", dest="classes_column", default="classes",
                        help="Class column in classes file.", metavar="STRING")
    parser.add_argument("--pos-test-count", dest="pos_test_count", required=False, default=300, type=int,
                        help="Number of positive test samples (use sampling with replacement).", metavar="INT")
    parser.add_argument("--random-seed", dest="random_seed", required=False, default=[], type=int, action='append',
                        help="Random seed used to shuffle the samples.", metavar="INT")
    parser.add_argument("-n", "--negative", dest="negative", required=True,
                        help="Path to negative samples file.", metavar="FILE")
    parser.add_argument("-o", "--output", dest="output", required=True,
                        help="Output samples folder path.", metavar="FILE")
    options = parser.parse_args()
    # Load positives, keep rows under the e-value cutoff, group by contig.
    pos_domains = pd.read_csv(options.positive)
    pos_domains = pos_domains[pos_domains['evalue'] < options.maxevalue]
    pos_samples = [s for i, s in pos_domains.groupby('contig_id')]
    pos_ids = np.array([sample['contig_id'].iloc[0] for sample in pos_samples])
    # Same filtering/grouping for the negative set.
    neg_domains = pd.read_csv(options.negative)
    neg_domains = neg_domains[neg_domains['evalue'] < options.maxevalue]
    neg_samples = [s for i, s in neg_domains.groupby('contig_id')]
    # Look up each positive contig's class from the classes CSV.
    properties = pd.read_csv(options.classes).set_index('contig_id')
    pos_classes = properties[options.classes_column][pos_ids]
    selected_classes, val_samples, val_classes = filter_lco_samples(
        pos_samples=pos_samples,
        pos_classes=pos_classes,
        neg_samples=neg_samples
    )
    print('Output will be saved to {}/'.format(os.path.abspath(options.output)))
    # Fails if the output directory already exists (no exist_ok).
    os.makedirs(options.output)
    if not options.random_seed:
        options.random_seed = [0]
    lco_samples(
        selected_classes=selected_classes,
        val_samples=val_samples,
        val_classes=val_classes,
        result_path=options.output,
        pos_test_count=options.pos_test_count,
        random_seeds=options.random_seed
    )
|
0dab75e2db748b4934d10038634d4d7f438ad02b | prihoda/bgc-pipeline | /bgc_detection/evaluation/merge_split_samples.py | 2,431 | 3.640625 | 4 | #!/usr/bin/env python
# David Prihoda
# Functions for shuffling and merging sequence samples (DataFrames) into a long sequence
# Used for fake genome generation from positive and negative BGC samples
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
def merge_samples(samples_series, idx, shuffle=True):
    """Concatenate the DataFrames selected from `samples_series` by `idx`.

    Used to build an artificial genome (one long DataFrame) out of many
    short sample DataFrames.

    :param samples_series: Series of DataFrames
    :param idx: array of positions selecting which DataFrames to merge
    :param shuffle: shuffle the selection order first (NOTE: shuffles `idx`
        in place, mutating the caller's array)
    :return: a single DataFrame built from the selected samples
    """
    if shuffle:
        np.random.shuffle(idx)
    selected = list(samples_series[idx])
    return pd.concat(selected)
def merged_split(samples_list, splitter, shuffle_train=True, shuffle_test=True, split_params=None):
    """Yield (train, test) DataFrame pairs built from shuffled, merged samples.

    Each yielded element is a pair of single DataFrames produced by
    `merge_samples` from the train and test index sets of one split.

    :param samples_list: list of DataFrames to repeatedly split and merge
    :param splitter: number of KFold splits, or an object with a
        split(samples, **split_params) method
    :param shuffle_train: shuffle train samples before merging
    :param shuffle_test: shuffle test samples before merging
    :param split_params: extra keyword arguments forwarded to splitter.split
    :return: generator of (train_df, test_df) pairs
    """
    if split_params is None:
        split_params = {}
    # An integer is shorthand for a shuffled KFold with that many folds.
    if isinstance(splitter, int):
        splitter = KFold(n_splits=splitter, shuffle=True)
    samples = pd.Series(samples_list)
    for train_idx, test_idx in splitter.split(samples, **split_params):
        yield (merge_samples(samples, train_idx, shuffle=shuffle_train),
               merge_samples(samples, test_idx, shuffle=shuffle_test))
|
d29ead2b058d80104f5a2912cf49efa0b572fd1b | OniDaito/PythonCourse | /bomberman/part3/level.py | 666 | 3.9375 | 4 | import pyglet
from square import *
class Level():
    ''' A class that represents a typical level '''

    # NOTE: these are class attributes, so `grid` is shared by all instances.
    grid = []          # 2D list of squares (rows of columns)
    block_size = 30    # pixel size of one grid square
    grid_width = 20
    grid_height = 20

    def __init__(self):
        ''' FOR YOU: For each row, create an array and then, for each column,
        add a new Blank Square to that array '''
        pass

    def draw(self):
        ''' Draw the level by asking every square in every row to draw itself. '''
        for squares in self.grid:
            for square in squares:
                square.draw()
56e828677722595a316d4711f683aba86f02b0bf | sianmck/price_comparison | /02_no_numbers.py | 244 | 3.765625 | 4 | name=input("Product Name:")
error="This cannot have numbers"
has_errors=""
for letter in name:
if letter.isdigit()==True:
print(error)
has_errors="yes"
break
if has_errors!="yes":
print("Continue")
|
04ea9ee089582a05a37c7bf5aa39312e17970016 | paprockiw/lookup_tools | /revised_lookup.py | 5,646 | 3.828125 | 4 | import csv
class LookupBase(object):
    """Shared compare/merge/diff/write behaviour for keyed CSV lookups.

    Subclasses populate `mapped` (tuple-of-key-values -> row dict) and
    `key_fields`. NOTE: written for Python 2 (csv files opened in 'wb' mode).
    """
    def __init__(self):
        self.key_fields = []
        self.mapped = {}
    def _comparable(self, other):
        '''Helper function for building comparision methods in this class. It
        checks to see if the object that is to be compared to the base object
        is an instance of the same class, and has an equal number of fields in
        its keys for comparison.'''
        # NOTE: assert statements are stripped when Python runs with -O.
        assert isinstance(other, self.__class__), \
            'Argument must be instance of %s.' % self.__class__
        assert len(self.key_fields) == len(other.key_fields), \
            'Other object must have same number of key_fields'
    def _assign_attr(self, name, d):
        '''Helper function for assigning new attribute to object.'''
        # Results are wrapped so they support further match/merge/diff calls.
        r = Result(d)
        setattr(self, name, r)
    def match(self, other, attr_name):
        '''Takes another lookup object and an attribute name as input. Then it
        takes each item stored in the 'mapped' attribute on this object and
        checks to see if that item's key is present in the 'mapped' attribute
        of the other object. If the key is present in the other object, the
        record from the other object is added to '''
        self._comparable(other)
        d = {}
        for key in self.mapped:
            if key in other.mapped:
                # Keep the *other* object's record for matching keys.
                d.update({key:other.mapped[key]})
        self._assign_attr(attr_name, d)
    def merge(self, other, attr_name, *merge_fields):
        '''Works like a merge, but takes an arbitrary number of args that are
        the names of fields on the object being passed to this function. When
        a match is found between this and the other object, the data in the
        fields on the other object are merged into the matching rows on this
        object. The results are stored on an attribute you specify, as with
        the match method.'''
        self._comparable(other)
        d = {}
        for key in self.mapped:
            if key in other.mapped:
                # get values to map to merge data fields
                field_values = [other.mapped[key][field] for field in merge_fields]
                # make dict to merge
                new_items = dict(zip(merge_fields, field_values))
                # Copy first so this object's stored rows are not mutated.
                records = self.mapped[key].copy()
                records.update(new_items)
                d.update({key:records})
        self._assign_attr(attr_name, d)
    def diff(self, other, attr_name):
        '''The opposite of the match method, this takes another lookup object
        and and an attribute name. It goes through each item in this object,
        and looks for each item's key in the other object. If there is no
        match, the item on this object is saved to the results, which are
        stored on the attribute specified as an argument. '''
        self._comparable(other)
        d = {}
        for key in self.mapped:
            if key not in other.mapped:
                d.update({key:self.mapped[key]})
        self._assign_attr(attr_name, d)
    def write(self, title):
        ''' Takes a title as input and writes the values stored in the
        'mapped' attribute to a new csv file with the title specified as an
        argument.'''
        # NOTE(review): only key_fields are passed as fieldnames; if the stored
        # rows contain additional columns, DictWriter.writerow raises
        # ValueError — confirm key_fields always covers every column here.
        fieldnames = self.key_fields
        with open(title, 'wb') as output:
            writer = csv.DictWriter(output, fieldnames)
            writer.writeheader()
            for row in self.mapped.values():
                writer.writerow(row)
class Result(LookupBase):
    '''Object for storing results of comparison methods such as match or diff.
    This can be used for further comparisons since it inherits from the base
    object. '''
    def __init__(self, d):
        self.mapped = d
        # Derive key_fields from an arbitrary entry. The original used
        # d.keys()[0], which raises TypeError on Python 3 (dict views are not
        # indexable) and IndexError on an empty dict; next(iter(d)) works on
        # both Python 2 and 3, and an empty result simply has no key fields.
        if d:
            self.key_fields = list(d[next(iter(d))].keys())
        else:
            self.key_fields = []
class LookupMap(LookupBase):
    '''Object used for taking a csv file, and mapping its contents based on
    the column names from the csv file. Each row of the csv file is entered
    into a dictionary that has a tuple of the data in the specified rows as a
    key, and the row itself (as a dictionary) as the associated value. This is
    used for comparing data between this and other similar objects. '''
    def __init__(self, filename, *args):
        #super(LookupMap, self)
        self.mapped = {}
        self.filename = filename
        # Key columns; empty means "use every column" (see _get_contents).
        self.key_fields = args
        self.loss = []
        self._get_contents()
    def _get_contents(self):
        '''Loads the contents of the file into the lookup map structure.'''
        # NOTE: 'rb' csv reading is Python 2 style; `input` shadows a builtin.
        with open(self.filename, 'rb') as input:
            reader = csv.DictReader(input)
            # handle empty fields as a tuple of fieldnames from csv object
            if len(self.key_fields) == 0:
                self.key_fields = tuple(reader.fieldnames)
            for record in reader:
                key = tuple([record[k] for k in self.key_fields])
                if key in self.mapped:
                    # Adds items already in dict to 'loss' attr, so that items
                    # in dict are not overwritten and repeat data are preserved
                    self.loss.append(record)
                else: self.mapped.update({key:record})
    @property
    def loss_count(self):
        # Number of duplicate-key rows that were set aside, not stored.
        return len(self.loss)
if __name__ == '__main__':
    # Ad-hoc demo: merge fields from test2.csv into test1.csv and write out.
    a = LookupMap('test1.csv', 'animal', 'number')
    b = LookupMap('test2.csv', 'creature', 'num')
    # a.match(b, 'b_match')
    # for row in a.b_match.mapped:
    #     print row, a.b_match.mapped[row]
    #
    # a.b_match.write('bmatch.csv')
    a.merge(b, 'b_merge', 'chemical', 'num')
    a.b_merge.write('merge_test.csv')
|
66dd5532e8b4d3d17b240455b0d69ab11d5f1e3d | miea/bravo | /bravo/utilities/automatic.py | 1,013 | 3.609375 | 4 | from itertools import product
def naive_scan(automaton, chunk):
    """
    Utility function which can be used to implement a naive, slow, but
    thorough chunk scan for automatons.

    Feeds the automaton the (x, y, z) coordinates of every block in the chunk
    whose type is in `automaton.blocks`. Designed to be used directly on
    automaton classes to provide the `scan()` interface.
    """
    interesting = automaton.blocks
    for index, block in enumerate(chunk.blocks):
        if block in interesting:
            # Decode the flat array index into (x, y, z) chunk coordinates.
            automaton.feed((index >> 11, (index >> 7) & 0xf, index & 0x7f))
def column_scan(automaton, chunk):
    """
    Chunk scanner which only examines the tallest block of each column, for
    automatons that only care about sunlit or elevated areas.

    Feeds the automaton world coordinates (chunk-relative x/z offset by the
    chunk position times 16). Usable directly in automaton classes as `scan()`.
    """
    base_x = chunk.x * 16
    base_z = chunk.z * 16
    for x in range(16):
        for z in range(16):
            y = chunk.height_at(x, z)
            if chunk.get_block((x, y, z)) in automaton.blocks:
                automaton.feed((x + base_x, y, z + base_z))
|
e4b5deaf617944350733bb7c11a4da31eb2843a5 | sandeepjrs/python-prac | /__lt__.py | 501 | 3.90625 | 4 | class Saving_account():
'''this is the saving account PIN and balance'''
def __init__(self, name, pin, balance = 0.0):
self._name= name;
self._pin = pin
self._balance = balance
    def __lt__(self, other):
        # Accounts order alphabetically by owner name.
        return self._name < other._name
    def __eq__(self, other):
        # Equality is based on the PIN only, not the name or balance.
        return self._pin == other._pin
sa1 = Saving_account("sandeep",1234,26.3)
sa2 = Saving_account("sandeep",12234,26.3)
# sa2 = Saving_account("Sandeep",5689,59.6)
print sa2 == sa1
|
0c28b3bf79060065060af91d7de1316b80c39ce8 | eksalkeld/data-scientist-exercise02 | /analytics/modeling_fns.py | 4,486 | 3.65625 | 4 | # -*- coding: utf-8 -*-
"""
Functions to train a logistic regression model
Created on Sun Sep 6 15:57:19 2020
@author: eksalkeld
"""
from constants import *
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, roc_curve
import numpy as np
def pd_to_np(df, model_cols):
    """
    Convert a pandas DataFrame into numpy arrays for modeling.

    df: DataFrame containing predictor columns and a 'target' column
    model_cols: list of predictive column names
    return: X array of predictor values, y array of target values
    """
    predictors = np.array(df[model_cols])
    target = np.array(df['target'])
    return predictors, target
def ttv_split(X,y):
    """
    Split the data into train, test, and val sets
    X: matrix like data with the predictor vars
    y: array like data with the known outcome
    return: matrices of the predictive data for train test and val, arrays of data with the target variable for train test and val
    """
    # train_proportion, val_proportion and seed come from `constants` (star import).
    #Adjust the ratios for how the algorithm understands them
    test_size1=1-train_proportion
    test_size2=test_size1-val_proportion
    # NOTE(review): test_size2 is applied as a fraction of the *holdout* set,
    # not of the full data — verify the intended test/val proportions.
    #Pull off the train set
    X_train, X_hold, y_train, y_hold = train_test_split(X, y,stratify=y,test_size=test_size1,random_state=seed)
    #Pull off the test and val sets
    X_test, X_val, y_test, y_val = train_test_split(X_hold, y_hold,stratify=y_hold,test_size=test_size2,random_state=seed)
    return X_train, y_train, X_test, y_test, X_val, y_val
def tt_split(X,y):
    """
    Split the data into just train and test sets
    X: matrix like data with the predictor vars
    y: array like data with the known outcome
    return: matrices of the predictive data for train and test, arrays of data with the target variable for train and test
    """
    # train_proportion and seed come from `constants` (star import).
    # Stratified split keeps the class balance in both pieces.
    #Adjust the ratios for how the algorithm understands them
    test_size1=1-train_proportion
    #Pull off the train set
    X_train, X_test, y_train, y_test = train_test_split(X, y,stratify=y,test_size=test_size1,random_state=seed)
    return X_train, y_train, X_test, y_test
def model_train(X,y):
    """
    Train a logistic regression model, finding the best parameters
    X: matrix like data to use in training
    y: array like data with the known outcome
    return: the grid search model, the logistic regression that was the best fit, the c param, the penalty, the balancing param, the model performance
    """
    # c, penalty, classweight, kfold and seed come from `constants` (star import).
    #Logistic regression
    lr =LogisticRegression(random_state=seed,max_iter=5000)
    #Hyper params for the grid to evaluate
    random_grid = dict(C=c, penalty=penalty,class_weight=classweight)
    #Define the grid search
    # F1 scoring: the grid is tuned for the positive class, not accuracy.
    cvlr = GridSearchCV(estimator=lr,param_grid=random_grid, scoring='f1', cv= kfold)
    #Fit to the data
    model=cvlr.fit(X,y)
    #Obtain the selected parameters
    chosenmodel=model.best_estimator_
    modelc=model.best_params_['C']
    modelpenalty=model.best_params_['penalty']
    modelweight=model.best_params_['class_weight']
    #Performance
    modelperf=model.best_score_
    return model, chosenmodel, modelc, modelpenalty, modelweight, modelperf
def model_predict(X, model):
    """
    Score new data with a fitted classifier.

    X: matrix-like data to score
    model: fitted model exposing predict_proba/predict
    return: array of positive-class probabilities, array of 0/1 classifications
    """
    # Column 1 of predict_proba is the probability of the positive class.
    probabilities = model.predict_proba(X)[:, 1]
    classifications = model.predict(X)
    return probabilities, classifications
def performance(class_pred,y):
    """
    Find the performance metrics for a model applied to a dataset
    class_pred: array with the predicted classifications
    y: array with the known outcomes
    return: precision, recall, f1, confusion matrix, true negative, false positives, false negative, true positives
    """
    #Pull the performance scores for the injury class
    # NOTE(review): the '1.0' key assumes float-typed labels; with int labels
    # the report key would be '1' — confirm the label dtype used upstream.
    scores=classification_report(y,class_pred,output_dict=True)['1.0']
    precision=scores['precision']
    recall=scores['recall']
    f1=scores['f1-score']
    #Create the confusion matrix, pull off the quadrants
    cm=confusion_matrix(y,class_pred)
    TN=cm[0][0]
    FP=cm[0][1]
    FN=cm[1][0]
    TP=cm[1][1]
    return precision, recall, f1, cm, TN, FP, FN, TP
|
db73ba125994c18470008cb985d63f6204e854d5 | leaferickson/Journalism_NLP | /data_collection/tweet_gatherer.py | 2,144 | 3.6875 | 4 | # -*- coding: utf-8 -*-
import pandas as pd
import tweepy
###Below funtion based on code from Yanofksy at https://gist.github.com/yanofsky/5436496
def get_all_tweets(screen_name, number_to_grab = 50):
    """Gather a user's last 3240 tweets (the most that twitter will allow).
    To switch to a different number of tweets comment out while lop and switch
    count = 200 to count - number_to_grab."""
    # NOTE(review): API credentials are empty placeholders — fill in before use.
    consumer_key = ""
    consumer_secret = ""
    access_key = ""
    access_secret = ""
    #authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    #initialize a list to hold all the tweepy Tweets
    alltweets = []
    #make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name = screen_name,count=200) #switch this to number_to_grab if you want < 200
    #save most recent tweets
    alltweets.extend(new_tweets)
    #save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    #keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        #all subsiquent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
        #save most recent tweets
        alltweets.extend(new_tweets)
        #update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        # print ("...%s tweets downloaded so far") % (len(alltweets))
    #transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]
    #write the csv
    # NOTE(review): csv_out is built but never written to disk or returned —
    # the raw list of rows is returned instead. Confirm whether saving the
    # CSV was intended here.
    csv_out = pd.DataFrame(outtweets)
    csv_out.columns = ["id","created_at","text"]
    return outtweets
def processTweet(tweet, source):
    """Convert one raw tweet row ([id, created_at, text]) into a dictionary."""
    return {
        'datetime': tweet[1],
        'tweet': str(tweet[2]),
        'source': str(source)
    }
|
f08e0ffcbe3ce6168714de18e8a936ecf935c761 | dword0/Python_projects | /Password_generator.py | 455 | 4 | 4 | #PASSWORD GENERATOR
import random

print("PASSWORD GENERATOR")
print("==================")

# Pool of characters a password may contain.
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()_.,?0123456789'

number = int(input("Number of passwords to be generated : "))
length = int(input("Password Length : "))

print("Here are your passwords : ")
for _ in range(number):
    # Each password is `length` independent random picks from the pool.
    print(''.join(random.choice(chars) for _ in range(length)))
|
17c45e7f86ead12c1f87669be88f0eb456dd145b | DistantThunder/learn-python | /ex5.py | 988 | 4.0625 | 4 | my_name = 'Darkeox'
my_age = 35 # is it?
my_height = 185 # centimeters
my_weight = 75 # kilos
my_eyes = 'Dark Brown'
my_teeth = 'White'
my_hair = 'Black'
print("Let's talk about %s." % my_name)
print("He's %d centimeters tall." % my_height)
print("He's %d kilos heavy." % my_weight)
print("Actually, that's not too heavy.")
print("He's got %s eyes and %s hair." % (my_eyes, my_hair))
# this line is tricky
print("If I add %d, %d, and %d I get %d." % (my_age, my_height, my_weight, my_age + my_height + my_weight))
# Drill 4. The only one I found interesting
# Convert centimeters and kilos into inches and pounds
# value of 1 pound in kilograms
pound = 0.45359237 # kilo
# value of 1 inch in centimeters
inch = 2.54 # centimeters
my_height_inches = my_height / inch
my_weight_pounds = my_weight / pound
print("Using the imperial customary system of measurement, "
      "we can also say that he is %d inches tall and %d pounds heavy." % (my_height_inches, my_weight_pounds))
|
52c44bf0aa15ba0bfcc1abda81fffefba6be075c | DistantThunder/learn-python | /ex33.py | 450 | 4.125 | 4 | numbers = []
# while i < 6:
# print("At the top i is {}".format(i))
# numbers.append(i)
#
# i = i + 1
# print("Numbers now: ", numbers)
# print("At the bottom i is {}\n{}".format(i, '-'))
def count_numbers(count):
    """Append 0..count (inclusive) to the module-level `numbers` list."""
    count += 1  # make the upper bound inclusive for range()
    for i in range(0, count):
        numbers.append(i)
    return 0  # NOTE(review): return value is never used by the caller
# Ask the user for the upper bound, fill `numbers`, then print each value.
count_numbers(int(input("Enter the number you want to count to: ")))
print("The numbers: ")
for num in numbers:
    print(num)
|
bf2a57052391e74eb1a01c6350cbd8a89a64ff54 | ralphtatt-IW/BrIW | /src/classes.py | 4,246 | 3.578125 | 4 | class Person:
# name
    def __init__(self, person_id, first_name, second_name, team, preference):
        """Create a person with identity, team and favourite-drink preference."""
        self.person_id = person_id
        self.first_name = first_name
        self.second_name = second_name
        self.full_name = first_name + " " + second_name  # cached display name
        self.preference = preference
        self.team = team
    def __str__(self):
        # Display as "Full Name (Team)".
        return f"{self.full_name} ({self.team})"
    def __eq__(self, other):
        # Structural equality: all attributes must match.
        return self.__dict__ == other.__dict__
    def get_name(self):
        """Return the person's full display name."""
        return self.full_name
    def get_preference(self):
        """Return the person's favourite drink."""
        return self.preference
    def get_team(self):
        """Return the person's team."""
        return self.team
    def get_rounds_made(self):
        # NOTE(review): self.rounds_made is never assigned anywhere in this
        # class, so calling this raises AttributeError — confirm where the
        # attribute is supposed to be set.
        return self.rounds_made
    def get_table_headers(self):
        # Column headers aligned with get_details().
        return ["Name", "Team Name", "Fav. Drink"]
    def get_details(self):
        # Row values aligned with get_table_headers().
        return [
            self.get_name(),
            self.get_team(),
            self.get_preference()
        ]
    def set_preference(self, preference):
        """Replace the person's favourite drink."""
        self.preference = preference
    def to_json(self):
        # Flat DB-row shape: nested objects are reduced to their ids.
        return {
            "people_id": self.person_id,
            "first_name": self.first_name,
            "second_name": self.second_name,
            "preference_id": self.preference.drink_id,
            "team_id": self.team.team_id
        }
class Drink:
    """A drink that can be ordered, identified by id and name."""

    def __init__(self, drink_id, name):
        self.drink_id = drink_id
        self.name = name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Structural equality over all attributes.
        return self.__dict__ == other.__dict__

    def get_table_headers(self):
        """Column headers for tabular display."""
        return ["Name"]

    def get_details(self):
        """Row values matching get_table_headers()."""
        return [self.name]

    def to_json(self):
        # NOTE: exposes the live attribute dict, not a copy.
        return self.__dict__
class Team:
    """A team of people working from a single location."""

    def __init__(self, team_id, name, location):
        self.team_id = team_id
        self.name = name
        self.location = location

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Structural equality over all attributes.
        return self.__dict__ == other.__dict__

    def get_table_headers(self):
        """Column headers for tabular display."""
        return ["Name", "Location"]

    def get_details(self):
        """Row values matching get_table_headers()."""
        return [self.name, self.location]

    def to_json(self):
        # NOTE: exposes the live attribute dict, not a copy.
        return self.__dict__
# A team's order is added to a round
class Round:
    """A single drinks round: one maker collecting orders for a team."""

    def __init__(self, round_id, maker, active, team):
        self.round_id = round_id
        self.maker = maker
        self.orders = {}  # order_id -> Order
        self.active = active
        self.team = team

    def __str__(self):
        return f"Team:{self.team} - Maker:{self.maker.get_name()}"

    def __eq__(self, other):
        # Structural equality over all attributes.
        return self.__dict__ == other.__dict__

    def get_maker(self):
        return self.maker

    def get_team(self):
        return self.team

    def add_order(self, order):
        """Register an order in this round, keyed by its id."""
        self.orders[order.order_id] = order

    def get_order_size(self):
        return len(self.orders)

    def finish_round(self):
        """Mark the round as no longer accepting orders."""
        self.active = False

    def get_table_headers(self):
        """Column headers for tabular display."""
        return ["Team Name", "Maker", "Active"]

    def get_details(self):
        """Row values matching get_table_headers()."""
        return [self.get_team(), self.get_maker(), self.active]

    def to_json(self):
        return {
            "round_id": self.round_id,
            "maker": self.maker.to_json(),
            "orders": [order.to_json() for order in self.orders.values()],
            "active": self.active,
            "team_id": self.team.team_id
        }
# User makes an order for a drink
class Order:
    """One person's request for a drink within a round."""

    def __init__(self, order_id, drink, person, round_id, notes):
        self.order_id = order_id
        self.person = person
        self.drink = drink
        self.notes = notes
        self.round_id = round_id

    def __eq__(self, other):
        # Structural equality over all attributes.
        return self.__dict__ == other.__dict__

    def get_notes(self):
        return self.notes

    def get_table_headers(self):
        """Column headers for tabular display."""
        return ["Person", "Drink", "Notes"]

    def get_details(self):
        """Row values matching get_table_headers()."""
        return [self.person, self.drink, self.get_notes()]

    def to_json(self):
        return {
            "order_id": self.order_id,
            "person": self.person.to_json(),
            "drink": self.drink.to_json(),
            "notes": self.notes,
            "round_id": self.round_id
        }
|
05e1c4a7d673effc7d67b5c62674aae381ebf97b | saylorstarks/madlib-generator | /madlib generator.py | 1,677 | 3.96875 | 4 | import random
# User input: how many of each word type to pick.
verbs = input('Number of verbs: ')
adverbs = input('Number of adverbs: ')
nouns = input('Number of nouns: ')
adjk = input('Number of adjectives: ')

def pick_words(filename, count):
    """Return `count` words chosen at random from `filename` (one word per line).

    Bug fixes vs. the original four copy-pasted stanzas: words no longer keep
    a trailing newline, the last line of each file is no longer skipped
    (range(0, len(m)-1) was off by one), and the candidate list no longer
    grows with every outer-loop pass, which skewed the random choice.
    """
    with open(filename, "r") as word_file:
        words = [line.strip() for line in word_file if line.strip()]
    return [random.choice(words) for _ in range(count)]

# One pass per word category; the file is opened and closed by the helper.
n = pick_words("adj.txt", int(adjk))
print("Adjectives are: " + str(n))

n = pick_words("adv.txt", int(adverbs))
print("Adverbs are: " + str(n))

n = pick_words("noun.txt", int(nouns))
print("Nouns are: " + str(n))

n = pick_words("verb.txt", int(verbs))
print("Verbs are: " + str(n))
|
601a03ddfc39a516ca623b9f75323f7e00676019 | ghostghost664/CP3-Chinnawat-Pitisuwannarat | /Excercise73_Chinnawat_P.py | 628 | 3.9375 | 4 | menu1={"ข้าวไข่ข้น":35,"ข้าวไก่ย่างเทอริยากิ":45,"ยากิโซบะ":65,"ซาชิมิ":89}
# Collected orders: each entry is a [name, price] pair.
menuList = []
print(menu1)
# Keep taking orders until the user types "exit".
while True :
    menuName = input("please Enter to Menu")
    if(menuName.lower() == "exit"):
        break
    else:
        # Raises KeyError if the name is not on the menu — input must match exactly.
        menuList.append([menuName,menu1[menuName]])
def showBill():
    """Print each ordered item with its price and the running total."""
    print("-------MY FOOD-------")
    total = 0
    for item_name, price in menuList:
        print(item_name, price)
        total += int(price)
    print('total price :', total)
showBill()
|
826f00c9d48592bc7403b597ef7f5eda59b611ec | AidysM/Skillfactory-learning-mongush | /B2/pythonProject1/format.py | 190 | 3.8125 | 4 | day = 14
month = 2
year = 2012
# Old-style %-formatting; %02d zero-pads the month to two digits.
print("%d.%02d.%d" % (day, month, year))
# 14.02.2012
print("%d-%02d-%d" % (year, month, day))
# 2012-02-14
print("%d/%d/%d" % (year, day, month))
# 2012/14/2
1fe48d3656b9437f43b79afa4ba5d9f2ffe13c2f | adamfitzhugh/python | /kirk-byers/Scripts/Week 1/exercise3.py | 942 | 4.375 | 4 | #!/usr/bin/env python
"""Create three different variables: the first variable should use all lower case characters with
underscore ( _ ) as the word separator. The second variable should use all upper case characters
with underscore as the word separator. The third variable should use numbers, letters, and
underscores, but still be a valid variable Python variable name.
Make all three variables be strings that refer to IPv6 addresses.
Use the from future technique so that any string literals in Python2 are unicode.
compare if variable1 equals variable2
compare if variable1 is not equal to variable3
"""
from __future__ import print_function
# Three naming styles for the same kind of value: lower_snake, UPPER_SNAKE,
# and a mix of letters, digits and underscores.
ipv_six_addr_1 = "2001:db8:1234::1"
IPV_SIX_ADDR_2 = "2001:db8:1234::2"
ipv_6_addr_3 = "2001:db8:1234::3"

first_equals_second = ipv_six_addr_1 == IPV_SIX_ADDR_2
first_differs_third = ipv_six_addr_1 != ipv_6_addr_3
print("")
print("Is var1 == var2: {}".format(first_equals_second))
print("Is var1 != var3: {}".format(first_differs_third))
print("")
|
d925d4b637199ad159b36b33dcb0438ccca0f95a | adamfitzhugh/python | /kirk-byers/Scripts/Week 5/exercise3.py | 1,788 | 4.15625 | 4 | """
Similar to lesson3, exercise4 write a function that normalizes a MAC address to the following
format:
01:23:45:67:89:AB
This function should handle the lower-case to upper-case conversion.
It should also handle converting from '0000.aaaa.bbbb' and from '00-00-aa-aa-bb-bb' formats.
The function should have one parameter, the mac_address. It should return the normalized MAC address
Single digit bytes should be zero-padded to two digits. In other words, this:
a:b:c:d:e:f
should be converted to:
0A:0B:0C:0D:0E:0F
Write several test cases for your function and verify it is working properly.
"""
from __future__ import print_function, unicode_literals
import re
def normal_mac_addr(mac_address):
    """Normalize a MAC address to the '01:23:45:67:89:AB' format.

    Accepts colon-, dash-, or dot-separated input (e.g. 'a:b:c:d:e:f',
    '00-00-aa-aa-bb-bb', '0000.aaaa.bbbb'), upper-cases it, and zero-pads
    every byte to two hex digits.

    Raises:
        ValueError: if a dotted address does not have exactly three groups.
    """
    # BUG FIX: '.upper' was referenced without calling it, which rebound
    # mac_address to a bound method instead of an upper-cased string.
    mac_address = mac_address.upper()
    octets = []
    if ':' in mac_address or '-' in mac_address:
        # One byte per separator-delimited field; pad 'A' -> '0A'.
        # BUG FIX: this branch built a list but never returned a value.
        for octet in re.split(r"[-:]", mac_address):
            octets.append(octet.zfill(2))
    elif '.' in mac_address:
        words = mac_address.split('.')
        if len(words) != 3:
            raise ValueError("This went wrong")
        for word in words:
            # Each dotted group holds two bytes; pad '123' -> '0123'.
            word = word.zfill(4)
            # BUG FIX: the original appended only the first byte of each
            # group (and appended it to an undefined list `new`).
            octets.append(word[:2])
            octets.append(word[2:])
    return ":".join(octets)
# Some tests
assert "01:23:02:34:04:56" == normal_mac_addr('123.234.456')
assert "AA:BB:CC:DD:EE:FF" == normal_mac_addr('aabb.ccdd.eeff')
assert "0A:0B:0C:0D:0E:0F" == normal_mac_addr('a:b:c:d:e:f')
assert "01:02:0A:0B:03:44" == normal_mac_addr('1:2:a:b:3:44')
assert "0A:0B:0C:0D:0E:0F" == normal_mac_addr('a-b-c-d-e-f')
assert "01:02:0A:0B:03:44" == normal_mac_addr('1-2-a-b-3-44')
print("Tests passed")
|
d6c95ea4b6f65152328bf1b0e70846fbc9a38e07 | adamfitzhugh/python | /kirk-byers/Scripts/Week 4/week_4_lab.py | 7,463 | 3.65625 | 4 | Exercises
Reference code for these exercises is posted on GitHub at:
https://github.com/ktbyers/pynet/tree/master/learning_python/lesson4
1. Create a dictionary representing a network device. The dictionary should have key-value pairs representing the 'ip_addr', 'vendor', 'username', and 'password' fields.
Print out the 'ip_addr' key from the dictionary.
If the 'vendor' key is 'cisco', then set the 'platform' to 'ios'. If the 'vendor' key is 'juniper', then set the 'platform' to 'junos'.
Create a second dictionary named 'bgp_fields'. The 'bgp_fields' dictionary should have a keys for 'bgp_as', 'peer_as', and 'peer_ip'.
Using the .update() method add all of the 'bgp_fields' dictionary key-value pairs to the network device dictionary.
Using a for-loop, iterate over the dictionary and print out all of the dictionary keys.
Using a single for-loop, iterate over the dictionary and print out all of the dictionary keys and values.
from __future__ import print_function, unicode_literals
# Network-device record; 'platform' is derived from 'vendor' below.
devices = {
    'ip': '10.10.10.10',
    'vendor': 'cisco',
    'username': 'cisco',
    'password': 'cisco',
}

print()
print(devices['ip'])
print()

# BUG FIX: the elif line was missing its trailing colon (SyntaxError).
if devices['vendor'].lower() == 'cisco':
    devices['platform'] = 'ios'
elif devices['vendor'].lower() == 'juniper':
    devices['platform'] = 'junos'

# BGP attributes to merge into the device record.
bgp = {
    'as': 42,
    'peer': 100,
    'peer_ip': '172.16.1.1'
}
devices.update(bgp)

# All keys, right-aligned.
print('-' * 80)
for key in devices:
    print("{:>15}".format(key))
print('-' * 80)
print()

# Keys and values in a single pass.
print('-' * 80)
# BUG FIX: the format string names the field {val} but the keyword argument
# was passed as value=...; that raised KeyError('val') at run time.
for key, val in devices.items():
    print("{key:>15} ---> {val:>15}".format(key=key, val=val))
print('-' * 80)
print()
2. Create three separate lists of IP addresses. The first list should be the IP addresses of the Houston data center routers, and it should have over ten RFC1918 IP addresses in it (including some duplicate IP addresses).
The second list should be the IP addresses of the Atlanta data center routers, and it should have at least eight RFC1918 IP addresses (including some addresses that overlap with the Houston data center).
The third list should be the IP addresses of the Chicago data center routers, and it should have at least eight RFC1918 IP addresses. The Chicago IP addresses should have some overlap with both the IP addresses in Houston and Atlanta.
Convert each of these three lists to a set.
Using a set operation, find the IP addresses that are duplicated between Houston and Atlanta.
Using set operations, find the IP addresses that are duplicated in all three sites.
Using set operations, find the IP addresses that are entirely unique in Chicago.
from __future__ import unicode_literals, print_function
# Houston routers: 11 RFC1918 addresses, deliberately including duplicates.
houston_ips = [
    '10.10.10.1',
    '10.10.20.1',
    '10.10.30.1',
    '10.10.40.1',
    '10.10.50.1',
    '10.10.60.1',
    '10.10.70.1',
    '10.10.80.1',
    '10.10.10.1',
    '10.10.20.1',
    '10.10.70.1',
]
# Atlanta routers: 8 addresses, three overlapping with Houston.
atlanta_ips = [
    '10.10.10.1',
    '10.10.20.1',
    '10.10.30.1',
    '10.10.140.1',
    '10.10.150.1',
    '10.10.160.1',
    '10.10.170.1',
    '10.10.180.1',
]
# Chicago routers: 8 addresses, overlapping with both other sites.
chicago_ips = [
    '10.10.10.1',
    '10.10.20.1',
    '10.10.140.1',
    '10.10.150.1',
    '10.10.210.1',
    '10.10.220.1',
    '10.10.230.1',
    '10.10.240.1',
]
# Sets collapse the duplicates and enable the intersection/difference math.
houston_ips = set(houston_ips)
atlanta_ips = set(atlanta_ips)
chicago_ips = set(chicago_ips)
print()
print("-" * 80)
# Intersection of two sites.
print("Duplicate IPs at Houston and Atlanta sites:\n\n{}".format(houston_ips & atlanta_ips))
print("-" * 80)
print()
print("-" * 80)
# Intersection of all three sites.
print("Duplicate IPs at all three sites:\n\n{}".format(houston_ips & atlanta_ips & chicago_ips))
print("-" * 80)
print()
print("-" * 80)
# Addresses found only in Chicago (difference against both other sites).
print("Chicago unique IP addresses:\n\n{}".format(
    chicago_ips.difference(houston_ips).difference(atlanta_ips)))
print("-" * 80)
print()
3. Read in the 'show_version.txt' file. From this file, use regular expressions to extract the OS version, serial number, and configuration register values.
Your output should look as follows:
OS Version: 15.4(2)T1
Serial Number: FTX0000038X
Config Register: 0x2102
from __future__ import print_function, unicode_literals
import re
with open("show_version.txt") as f:
show_ver = f.read()
match = re.search(r"^Cisco IOS Software,.* Version (.*),", show_ver, flags=re.M)
if match:
os = match.group(1)
match = re.search(r"^Processor board ID (.*)\s\$", show_ver, flags=re.M)
if match:
sn = match.group(1)
match = re.search(r"^Configuration register is (.*)\s*$", show_ver, flags=re.M)
if match:
conf_reg = match.group(1)
print()
print("{:>20}: {:15}".format("OS Version", os))
print("{:>20}: {:15}".format("Serial Number", sn))
print("{:>20}: {:15}".format("Configuration Register", conf_reg))
print()
4. Using a named regular expression (?P<name>), extract the model from the below string:
show_version = '''
Cisco 881 (MPC8300) processor (revision 1.0) with 236544K/25600K bytes of memory.
Processor board ID FTX0000038X
5 FastEthernet interfaces
1 Virtual Private Network (VPN) Module
256K bytes of non-volatile configuration memory.
126000K bytes of ATA CompactFlash (Read/Write)
'''
Note that, in this example, '881' is the relevant model. Your regular expression should not, however, include '881' in its search pattern since this number changes across devices.
Using a named regular expression, also extract the '236544K/25600K' memory string.
Once again, none of the actual digits of the memory on this device should be used in the regular expression search pattern.
Print both the model number and the memory string to the screen.
from __future__ import print_function, unicode_literals
import re
show_ver = """
Cisco 881 (MPC8300) processor (revision 1.0) with 236544K/25600K bytes of memory.
Processor board ID FTX0000038X
5 FastEthernet interfaces
1 Virtual Private Network (VPN) Module
256K bytes of non-volatile configuration memory.
126000K bytes of ATA CompactFlash (Read/Write)
"""
match = re.search(r"^Cisco (?P<model>\S+).* with (?P<memory>\S+) bytes of memory",
show_ver, flags=re.M)
model = match.groupdict()['model']
memory = match.groupdict()['memory']
print()
print('-' * 80)
print("Model: {}".format(model))
print("Memory: {}".format(memory))
print('-' * 80)
print()
5. Read the 'show_ipv6_intf.txt' file.
From this file, use Python regular expressions to extract the two IPv6 addresses.
The two relevant IPv6 addresses you need to extract are:
2001:11:2233::a1/24
2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64
Try to use re.DOTALL flag as part of your search. Your search pattern should not include any of the literal characters in the IPv6 address.
From this, create a list of IPv6 addresses that includes only the above two addresses.
Print this list to the screen.
from __future__ import print_function, unicode_literals
import re
with open("show_ipv6_intf.txt") as f:
output = f.read()
match = re.search(r"IPv6 address:\s+(.*)\s+IPv6 subnet:", output, flags=re.DOTALL)
ipv6_address = match.group(1).strip()
ipv6_list = ipv6_address.splitlines()
for i in enumerate(ipv6_list[:]):
addr = re.sub(r"\[VALID\]", "", addr)
ipv6_list[i] = addr.strip()
print()
print('-' * 80)
print(ipv6_list)
print('-' * 80)
print()
|
2c9a6858ef76026d57e96ce85724e7c062e657d5 | nileshmahale03/Python | /Python/PythonProject/5 Dictionary.py | 1,487 | 4.21875 | 4 |
"""
Dictionary:
1. Normal variable holds 1 value; dictionary holds collection of key-value pairs; all keys must be distinct but values may be repeated
2. {} - curly bracket
3. Unordered
4. Mutable
5. uses Hashing internally
6. Functions:
1. dict[] : returns value at specified index
2. len() : returns length of dictionary
min() : returns min value in dictionary
max() : returns max value in dictionary
sum() : returns sum of values in dictionary
3. dict.reverse() : 'dict' object has no attribute 'reverse'
4. dict.sort() : 'dict' object has no attribute 'sort'
5. in : operator returns bool stating if specified value present in dictionary or not
6. dict[key] = value : add value with specified key
7. dict[key] : get value from dict with specified key
       dict.get(key) returns None if the key doesn't exist
11. dict.pop(key) : dict.pop()
dict.popitem() pop() will remove last value
12. del dict[key] : delete
"""
# Dictionary demo.  The original bound the name `dict`, shadowing the
# builtin type; `d` is used instead (printed output is unchanged).
d = {10: "abc", 20: "xyz", 30: "pqr"}
print(d)
print(type(d))
print(d[10])
print(d, len(d), min(d), max(d), sum(d))  # min/max/sum operate on the keys
d[40] = "def"
print(d)
print(d[30], d.get(30))
print(d.get(50), d.get(60, "Not Available"))  # get() never raises KeyError
# dicts have no reverse()/sort() methods -- kept from the original as notes:
# d.reverse()
# d.sort()
print(20 in d, 80 in d)  # membership tests check the keys
d.popitem()  # removes the most recently inserted pair (40)
print(d)
d.pop(10)
print(d)
del d[30]
print(d)
51f13de4475c9679caadaf953d2ff8f5401b2996 | Aitous/CS229 | /ps2/ps2/src/stability/stability_modified.py | 3,463 | 4.03125 | 4 | # Important note: you do not have to modify this file for your homework.
import util
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def calc_grad(X, Y, theta):
    """Gradient of the logistic log-likelihood with respect to theta."""
    num_examples, _ = X.shape
    predictions = 1. / (1 + np.exp(-X.dot(theta)))
    return (Y - predictions).dot(X)
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); element-wise on numpy arrays."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def logistic_regression(X, Y):
    """Train a logistic regression model.

    Full-batch gradient ascent on the log-likelihood with a fixed step
    size.  Returns the list of every theta iterate visited (one entry per
    iteration) so callers can inspect the optimisation trajectory.
    """
    theta = np.zeros(X.shape[1])
    learning_rate = 0.1
    thetas =[]
    i = 0
    # cost = []
    while True:
        i += 1
        prev_theta = theta
        grad = calc_grad(X, Y, theta)
        # Ascent step (calc_grad returns the log-likelihood gradient).
        theta = theta + learning_rate * grad
        if i % 10000 == 0:
            print('Finished %d iterations' % i)
        # cost.append(-1/X.shape[0]*(Y@np.log(sigmoid(X.dot(theta))) + (1-Y)@np.log(1 - sigmoid(X.dot(theta)))) \
        #             + 0.005*np.linalg.norm(theta, 2))
        thetas.append(theta)
        # Stop on (near) convergence or after a hard cap of 30000 steps.
        if np.linalg.norm(prev_theta - theta) < 1e-15 or i == 30000:
            print('Converged in %d iterations' % i)
            break
        # if i % 5000 == 0:
        #     plt.plot([i for i in range(i//100)], cost, '--r', label='cost function')
        #     plt.xlabel('iterations (x100)')
        #     plt.ylabel('cost value')
        #     plt.title("cost function vs iterations")
        #     plt.legend()
        #     plt.grid()
        #     plt.savefig('cost_function_vs_time')
        #     plt.show()
    return thetas
def J(th0, th1, X, Y):
    """Un-normalised log-likelihood over a grid of (theta0, theta1) values.

    th0/th1 are np.meshgrid outputs; X is expected to carry exactly two
    feature columns (the caller strips the intercept column).  Returns an
    array the same shape as th0.
    """
    # J = np.zeros((len(th0), len(th1), len(th2)))
    J = np.zeros_like(th0)
    # NOTE(review): with np.meshgrid, th0 varies along axis 1 and th1 along
    # axis 0, so filling J[i, j] from th0[0, :] x th1[0, :] appears to
    # transpose the surface relative to contour3D(th0, th1, J); it looks
    # harmless only because both grids span the same range -- confirm.
    for i,x in enumerate(th0[0,:]):
        for j,y in enumerate(th1[0,:]):
            theta = [x,y]
            J[i,j] = (Y@np.log(sigmoid(X.dot(theta))) + (1-Y)@np.log(1 - sigmoid(X.dot(theta))))
    # for i in range(len(th0)):
    #     for j in range(len(th1)):
    #         for k in range(len(th2)):
    #             theta = [th0[i], th1[j], th2[k]]
    #             J[i, j, k] = (Y@np.log(sigmoid(X.dot(theta))) + (1-Y)@np.log(1 - sigmoid(X.dot(theta))))
    return J
def plot_cost(X, Y):
    """Render the negative average log-likelihood as a 3-D contour surface
    over theta0/theta1 in [-100, 100]."""
    theta0 = np.linspace(-100, 100, 100)
    theta1 = np.linspace(-100, 100, 100)
    # theta2 = np.linspace(-100, 100, 100)
    th0, th1 = np.meshgrid(theta0, theta1)
    # X[:, 1:] drops the intercept column added when the data was loaded.
    J_theta = -1/X.shape[0]*J(th0, th1, X[:,1:], Y)
    # J_theta = -1/X.shape[0]*J(theta0, theta1, theta2, X, Y)
    fig = plt.figure(figsize=(8,8))
    ax = plt.axes(projection='3d')
    ax.contour3D(th0, th1, J_theta, 50, cmap='RdBu')
    # ax.scatter(theta0, theta1, theta2, c=J_theta, cmap=plt.hot())
    ax.set_xlabel('theta0')
    ax.set_ylabel('theta1')
    # NOTE(review): the z axis holds the cost value, not a third theta --
    # the 'theta2' label looks like a leftover from the 3-parameter variant.
    ax.set_zlabel('theta2');
    ax.view_init(0, 0)
def main():
    """Show the cost surface and train logistic regression on data sets A
    and B (loaded from CSV via the project-local `util` module)."""
    print('==== Training model on data set A ====')
    Xa, Ya = util.load_csv('ds1_a.csv', add_intercept=True)
    plot_cost(Xa, Ya)
    # util.plot_points(Xa[:,1:], Ya, theta)
    plt.show()
    thetas = logistic_regression(Xa, Ya)
    # util.plot_points(Xa[:,1:], Ya, thetas)
    print('\n==== Training model on data set B ====')
    Xb, Yb = util.load_csv('ds1_b.csv', add_intercept=True)
    plot_cost(Xb, Yb)
    # Xb += np.random.normal(scale=0.03, size=Xb.shape)
    # util.plot_points(Xb[:,1:], Yb)
    plt.show()
    thetas = logistic_regression(Xb, Yb)
    # util.plot_points(Xb[:,1:], Yb, thetas)
if __name__ == '__main__':
main()
|
1c3d6f236781497b59ec7c71caa09c4fd0041b0c | Dextro1597/Caesar-Cipher | /caeser.py | 709 | 4.0625 | 4 | #code by Ahad Patel
# Caesar cipher demo: shift each letter of the input by `key` positions,
# then shift back to verify the round trip.
plaintext = input('Enter the plain text:-')
alphabets = 'abcdefghijklmnopqrstuvwxyz'  # lower-case alphabet
alphabetc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'  # upper-case alphabet
cipher = ''    # accumulated encrypted text
dicipher = ''  # accumulated decrypted (round-tripped) text
key = input('Enter the key Size:-')  # shift amount, kept as a string
# Encrypt: rotate within the matching alphabet, wrapping with modulo.
for c in plaintext:
    if c in alphabets:
        cipher += alphabets[(alphabets.index(c)+int(key))%(len(alphabets))]
    else:
        # NOTE(review): any character that is not an ASCII letter (space,
        # digit, punctuation) reaches this branch and makes
        # alphabetc.index(c) raise ValueError -- confirm inputs are letters.
        cipher += alphabetc[(alphabetc.index(c)+int(key))%(len(alphabetc))]
# Decrypt by shifting in the opposite direction.
for c in cipher:
    if c in alphabets:
        dicipher += alphabets[(alphabets.index(c)-int(key))%(len(alphabets))]
    else:
        dicipher += alphabetc[(alphabetc.index(c)-int(key))%(len(alphabetc))]
print('The key size is:-' +str(key))
print('The cipher text generated is:-' +cipher)
print('The dicipher text is:-' +dicipher)
|
bf0707a538786f1cb851b7033e84375253eb7149 | Miguel-Evans-Yikes/Python-projects | /treinamento_aleatório_python/DATABASE_MANAGER/database_manager.py | 960 | 3.671875 | 4 | import sqlite3
from sqlite3 import Error
import os
path = os.path.dirname(__file__)
sql_insert_query = """INSERT INTO users(NOME,SENHA,IDADE,ENDEREÇO,ESTATUS,CONTRATAÇÃO) VALUES (?,?,?,?,?,?)"""
sql_read_query = """SELECT * FROM users WHERE NOME==? AND SENHA==?"""
def sql_update_query():
    # TODO: placeholder -- should return an UPDATE statement; currently a
    # blank SQL string.
    return """ """
def sql_delete_query():
    # TODO: placeholder -- should return a DELETE statement; currently a
    # blank SQL string.
    return """ """
#inicia a conexão com o banco de dados
def db_conection(_path):
    """Open a SQLite connection to the database at `_path`.

    Returns the connection object, or None when the connect fails (the
    error is printed).  BUG FIX: the original returned `con` outside the
    try block, so a failed connect raised NameError and masked the real
    sqlite3 error.
    """
    try:
        return sqlite3.connect(_path)
    except Error as ex:
        print(ex)
        return None
#insere o registro no banco de dados recebe db_conection e
# create_sql_query
def insert_data(conection, sql, values):
    """Execute a parameterised INSERT on `conection` and commit it.

    Prints a confirmation and returns True on success; on sqlite3.Error the
    exception is printed and False is returned.
    """
    try:
        cursor = conection.cursor()
        cursor.execute(sql, values)
        conection.commit()
    except Error as ex:
        print(ex)
        return False
    print('Novo registro inserido!')
    return True
def read():
    # TODO: fetch records (see sql_read_query); not implemented yet.
    pass
def update():
    # TODO: update records (see sql_update_query()); not implemented yet.
    pass
def delete():
    # TODO: delete records (see sql_delete_query()); not implemented yet.
    pass
|
09b9dc627bd61e2fce91e204fcdbc2dd572479c8 | tedrickzhu/Algrithm_test | /jianzhioffer/testMovingCount.py | 1,443 | 3.5625 | 4 | #encoding=utf-8
#author:Tedrick
#software:Pycharm
#file:testMovingCount.py
#time:2019/3/21 下午8:37
# -*- coding:utf-8 -*-
class Solution:
    """Robot-movement counting problem.

    A robot starts at (0, 0) on a rows x cols grid and may step between
    4-neighbour cells; a cell (i, j) is allowed only when the sum of the
    decimal digits of i and j does not exceed `threshold`.  movingCount
    returns how many cells are reachable from the origin.
    """

    def movingCount(self, threshold, rows, cols):
        """Return the number of reachable cells (0 when threshold < 0).

        Iterative depth-first search: `stack` holds discovered but not yet
        expanded cells, `visited` every discovered cell.  BUG FIX: the
        original used Python-2-only tuple parameter unpacking and integer
        `/` division in the digit-sum helper; rewritten for Python 3.  The
        per-cell debug prints were dropped, and `visited` is a set so the
        membership test is O(1) instead of O(n).
        """
        if threshold < 0:
            return 0
        i = 0
        j = 0
        visited = {(0, 0)}
        stack = [(0, 0)]
        count = 1  # (0, 0) itself is always reachable when threshold >= 0
        while len(stack) > 0:
            # Probe the four neighbours of the current cell.
            for nxt in [(i, j + 1), (i, j - 1), (i - 1, j), (i + 1, j)]:
                if (nxt not in visited
                        and self.get_ij_sum(nxt) <= threshold
                        and 0 <= nxt[0] < rows and 0 <= nxt[1] < cols):
                    stack.append(nxt)
                    visited.add(nxt)
                    count = count + 1
            # Continue the search from the most recently discovered cell.
            i, j = stack.pop()
        return count

    def get_ij_sum(self, pos):
        """Return the sum of the decimal digits of both coordinates of `pos`."""
        value_i, value_j = pos
        total = 0
        while value_i > 0:
            total = total + value_i % 10
            value_i = value_i // 10
        while value_j > 0:
            total = total + value_j % 10
            value_j = value_j // 10
        return total
if __name__ == '__main__':
sol = Solution()
sol.movingCount(5,10,10) |
ed1f3aa858237333a0cb6315d3ef34eed3996a60 | tedrickzhu/Algrithm_test | /SortAlgrithm/ChooseSort.py | 1,066 | 3.828125 | 4 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 18-8-22
# @Author : zhengyi
# @File : ChooseSort.py
# @Software: PyCharm
'''
算法描述
n个记录的直接选择排序可经过n-1趟直接选择排序得到有序结果。具体算法描述如下:
初始状态:无序区为R[1..n],有序区为空;
第i趟排序(i=1,2,3…n-1)开始时,当前有序区和无序区分别为R[1..i-1]和R(i..n)。
该趟排序从当前无序区中-选出关键字最小的记录 R[k],将它与无序区的第1个记录R交换,
使R[1..i]和R[i+1..n)分别变为记录个数增加1个的新有序区和记录个数减少1个的新无序区;
n-1趟结束,数组有序化了。
核心思想:每次选择一个最小的放到已排好序的数组的尾部,依次循环
'''
def choose_sort(array):
    """Sort `array` in place by selection sort and return it.

    Each pass selects the smallest element of the unsorted suffix and swaps
    it into position i, so the prefix array[:i+1] is always sorted.
    (A dead `pass` statement after the return was removed.)
    """
    for i in range(len(array)):
        # Index of the smallest element in array[i:].
        min_index = i
        for j in range(i + 1, len(array)):
            if array[j] < array[min_index]:
                min_index = j
        array[i], array[min_index] = array[min_index], array[i]
    return array
data =[9,4,6,2,7,3,8,5,2,1]
print(choose_sort(data))
|
b37fac91f3f75eaaa6cb47eb988686623d35f041 | ilyxych96/Python-basics-and-applications | /3_2.py | 963 | 3.828125 | 4 | '''
3.2.1  Input: three strings s, a, b of lowercase Latin letters.
In one operation you may replace every occurrence of string a inside s
with string b.  Print how many operations it takes until a no longer
occurs in s, or "Impossible" if 1000 operations are not enough.
'''
s = input()
a = input()
b = input()
i = 0  # number of replacement passes performed so far
# Keep substituting until `a` disappears; give up after 1000 passes since
# the process may never terminate (e.g. a='a', b='aa').
while s.count(a) != 0:
    if i < 1000:
        s = s.replace(a,b)
        i += 1
    else:
        i = 'Impossible'
        break
print(i)
'''
3.2.2  Input: two strings s and t of lowercase Latin letters.
Print the number of occurrences of t in s.
'''
s = input()
t = input()
pos = 0
count = 0
# Test every start position; startswith counts overlapping matches, which a
# plain str.count would miss.
while pos < len(s):
    if s[pos:].startswith(t):
        count += 1
    pos += 1
print(count)
|
8cac3e2dcad66b5914692ec582dc22b9654fc560 | dylanhudson/notes | /hashid.py | 19,231 | 3.53125 | 4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 21:46:49 2019
@author: dylan
"""
# -*- coding: utf-8 -*-
"""
extract-hashes.py: Extracts hashes from a text file.
Version 0.3 - Nov/2014
Author: Daniel Marques (@0xc0da)
daniel _at_ codalabs _dot_ net - http://codalabs.net
Modified by Dylan in Nov 2019 to incorporate more regexs.
The script reads a file and tries to extract hashes from it using regex.
Results are stored in separate files named as 'format-original_filename.txt'.
WARNING: Use carefully. It might return garbage or miss some hashes.
"""
import re
import sys
from os import path
def extract_hashes(source_file):
# TODO - modify so all possibilities are printed, not just the first match.
regex_list = {
'CRC-16' : '^[a-f0-9]{4}$',
'CRC-16-CCITT' : '^[a-f0-9]{4}$',
'FCS-16' : '^[a-f0-9]{4}$',
'Adler-32' : '^[a-f0-9]{8}$',
'CRC-32B' : '^[a-f0-9]{8}$',
'FCS-32' : '^[a-f0-9]{8}$',
'GHash-32-3' : '^[a-f0-9]{8}$',
'GHash-32-5' : '^[a-f0-9]{8}$',
'FNV-132' : '^[a-f0-9]{8}$',
'Fletcher-32' : '^[a-f0-9]{8}$',
'Joaat' : '^[a-f0-9]{8}$',
'ELF-32' : '^[a-f0-9]{8}$',
'XOR-32' : '^[a-f0-9]{8}$',
'CRC-24' : '^[a-f0-9]{6}$',
'CRC-32' : '^(\$crc32\$[a-f0-9]{8}.)?[a-f0-9]{8}$',
'Eggdrop IRC Bot' : '^\+[a-z0-9\/.]{12}$',
'DES(Unix)' : '^[a-z0-9\/.]{13}$',
'Traditional DES' : '^[a-z0-9\/.]{13}$',
'DEScrypt' : '^[a-z0-9\/.]{13}$',
'MySQL323' : '^[a-f0-9]{16}$',
'DES(Oracle)' : '^[a-f0-9]{16}$',
'Half MD5' : '^[a-f0-9]{16}$',
'Oracle 7-10g' : '^[a-f0-9]{16}$',
'FNV-164' : '^[a-f0-9]{16}$',
'CRC-64' : '^[a-f0-9]{16}$',
'Cisco-PIX(MD5)' : '^[a-z0-9\/.]{16}$',
'Lotus Notes/Domino 6' : '^\([a-z0-9\/+]{20}\)$',
'BSDi Crypt' : '^_[a-z0-9\/.]{19}$',
'CRC-96(ZIP)' : '^[a-f0-9]{24}$',
'Crypt16' : '^[a-z0-9\/.]{24}$',
'MD2' : '^(\$md2\$)?[a-f0-9]{32}$',
'MD5' : '^[a-f0-9]{32}(:.+)?$',
'MD4' : '^[a-f0-9]{32}(:.+)?$',
'Double MD5' : '^[a-f0-9]{32}(:.+)?$',
'LM' : '^[a-f0-9]{32}(:.+)?$',
'RIPEMD-128' : '^[a-f0-9]{32}(:.+)?$',
'Haval-128' : '^[a-f0-9]{32}(:.+)?$',
'Tiger-128' : '^[a-f0-9]{32}(:.+)?$',
'Skein-256(128)' : '^[a-f0-9]{32}(:.+)?$',
'Skein-512(128)' : '^[a-f0-9]{32}(:.+)?$',
'Lotus Notes/Domino 5' : '^[a-f0-9]{32}(:.+)?$',
'Skype' : '^[a-f0-9]{32}(:.+)?$',
'ZipMonster' : '^[a-f0-9]{32}(:.+)?$',
'PrestaShop' : '^[a-f0-9]{32}(:.+)?$',
'md5(md5(md5($pass)))' : '^[a-f0-9]{32}(:.+)?$',
'md5(strtoupper(md5($pass)))' : '^[a-f0-9]{32}(:.+)?$',
'md5(sha1($pass))' : '^[a-f0-9]{32}(:.+)?$',
'md5($pass.$salt)' : '^[a-f0-9]{32}(:.+)?$',
'md5($salt.$pass)' : '^[a-f0-9]{32}(:.+)?$',
'md5(unicode($pass).$salt)' : '^[a-f0-9]{32}(:.+)?$',
'md5($salt.unicode($pass))' : '^[a-f0-9]{32}(:.+)?$',
'HMAC-MD5 (key = $pass)' : '^[a-f0-9]{32}(:.+)?$',
'HMAC-MD5 (key = $salt)' : '^[a-f0-9]{32}(:.+)?$',
'md5(md5($salt).$pass)' : '^[a-f0-9]{32}(:.+)?$',
'md5($salt.md5($pass))' : '^[a-f0-9]{32}(:.+)?$',
'md5($pass.md5($salt))' : '^[a-f0-9]{32}(:.+)?$',
'md5($salt.$pass.$salt)' : '^[a-f0-9]{32}(:.+)?$',
'md5(md5($pass).md5($salt))' : '^[a-f0-9]{32}(:.+)?$',
'md5($salt.md5($salt.$pass))' : '^[a-f0-9]{32}(:.+)?$',
'md5($salt.md5($pass.$salt))' : '^[a-f0-9]{32}(:.+)?$',
'md5($username.0.$pass)' : '^[a-f0-9]{32}(:.+)?$',
'Snefru-128' : '^(\$snefru\$)?[a-f0-9]{32}$',
'NTLM' : '^(\$NT\$)?[a-f0-9]{32}$',
'Domain Cached Credentials' : '^([^\/:*?"<>|]{1,20}:)?[a-f0-9]{32}(:[^\/:*?"<>|]{1,20})?$',
'Domain Cached Credentials 2' : '^([^\/:*?"<>|]{1,20}:)?(\$DCC2\$10240#[^\/:*?"<>|]{1,20}#)?[a-f0-9]{32}$',
'SHA-1(Base64)' : '^{SHA}[a-z0-9\/+]{27}=$',
'Netscape LDAP SHA' : '^{SHA}[a-z0-9\/+]{27}=$',
'MD5 Crypt' : '^\$1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}(:.*)?$',
'Cisco-IOS(MD5)' : '^\$1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}(:.*)?$',
'FreeBSD MD5' : '^\$1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}(:.*)?$',
'Lineage II C4' : '^0x[a-f0-9]{32}$',
'phpBB v3.x' : '^\$H\$[a-z0-9\/.]{31}$',
'Wordpress v2.6.0/2.6.1' : '^\$H\$[a-z0-9\/.]{31}$',
'Wordpress ≥ v2.6.2' : '^\$P\$[a-z0-9\/.]{31}$',
'Joomla ≥ v2.5.18' : '^\$P\$[a-z0-9\/.]{31}$',
'osCommerce' : '^[a-f0-9]{32}:[a-z0-9]{2}$',
'xt:Commerce' : '^[a-f0-9]{32}:[a-z0-9]{2}$',
'MD5(APR)' : '^\$apr1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}$',
'Apache MD5' : '^\$apr1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}$',
'md5apr1' : '^\$apr1\$[a-z0-9\/.]{0,8}\$[a-z0-9\/.]{22}$',
'AIX(smd5)' : '^{smd5}[a-z0-9$\/.]{31}$',
'WebEdition CMS' : '^[a-f0-9]{32}:[a-f0-9]{32}$',
'IP.Board ≥ v2+' : '^[a-f0-9]{32}:.{5}$',
'MyBB ≥ v1.2+' : '^[a-f0-9]{32}:.{8}$',
'CryptoCurrency(Adress)' : '^[a-z0-9]{34}$',
'SHA-1' : '^[a-f0-9]{40}(:.+)?$',
'Double SHA-1' : '^[a-f0-9]{40}(:.+)?$',
'RIPEMD-160' : '^[a-f0-9]{40}(:.+)?$',
'Haval-160' : '^[a-f0-9]{40}(:.+)?$',
'Tiger-160' : '^[a-f0-9]{40}(:.+)?$',
'HAS-160' : '^[a-f0-9]{40}(:.+)?$',
'LinkedIn' : '^[a-f0-9]{40}(:.+)?$',
'Skein-256(160)' : '^[a-f0-9]{40}(:.+)?$',
'Skein-512(160)' : '^[a-f0-9]{40}(:.+)?$',
'MangosWeb Enhanced CMS' : '^[a-f0-9]{40}(:.+)?$',
'sha1(sha1(sha1($pass)))' : '^[a-f0-9]{40}(:.+)?$',
'sha1(md5($pass))' : '^[a-f0-9]{40}(:.+)?$',
'sha1($pass.$salt)' : '^[a-f0-9]{40}(:.+)?$',
'sha1($salt.$pass)' : '^[a-f0-9]{40}(:.+)?$',
'sha1(unicode($pass).$salt)' : '^[a-f0-9]{40}(:.+)?$',
'sha1($salt.unicode($pass))' : '^[a-f0-9]{40}(:.+)?$',
'HMAC-SHA1 (key = $pass)' : '^[a-f0-9]{40}(:.+)?$',
'HMAC-SHA1 (key = $salt)' : '^[a-f0-9]{40}(:.+)?$',
'sha1($salt.$pass.$salt)' : '^[a-f0-9]{40}(:.+)?$',
'MySQL5.x' : '^\*[a-f0-9]{40}$',
'MySQL4.1' : '^\*[a-f0-9]{40}$',
'Cisco-IOS(SHA-256)' : '^[a-z0-9]{43}$',
'SSHA-1(Base64)' : '^{SSHA}[a-z0-9\/+]{38}==$',
'Netscape LDAP SSHA' : '^{SSHA}[a-z0-9\/+]{38}==$',
'nsldaps' : '^{SSHA}[a-z0-9\/+]{38}==$',
'Fortigate(FortiOS)' : '^[a-z0-9=]{47}$',
'Haval-192' : '^[a-f0-9]{48}$',
'Tiger-192' : '^[a-f0-9]{48}$',
'SHA-1(Oracle)' : '^[a-f0-9]{48}$',
'OSX v10.4' : '^[a-f0-9]{48}$',
'OSX v10.5' : '^[a-f0-9]{48}$',
'OSX v10.6' : '^[a-f0-9]{48}$',
'Palshop CMS' : '^[a-f0-9]{51}$',
'CryptoCurrency(PrivateKey)' : '^[a-z0-9]{51}$',
'AIX(ssha1)' : '^{ssha1}[0-9]{2}\$[a-z0-9$\/.]{44}$',
'MSSQL(2005)' : '^0x0100[a-f0-9]{48}$',
'MSSQL(2008)' : '^0x0100[a-f0-9]{48}$',
'Sun MD5 Crypt' : '^(\$md5,rounds=[0-9]+\$|\$md5\$rounds=[0-9]+\$|\$md5\$)[a-z0-9\/.]{0,16}(\$|\$\$)[a-z0-9\/.]{22}$',
'SHA-224' : '^[a-f0-9]{56}$',
'Haval-224' : '^[a-f0-9]{56}$',
'SHA3-224' : '^[a-f0-9]{56}$',
'Skein-256(224)' : '^[a-f0-9]{56}$',
'Skein-512(224)' : '^[a-f0-9]{56}$',
'Blowfish(OpenBSD)' : '^(\$2[axy]|\$2)\$[0-9]{2}\$[a-z0-9\/.]{53}$',
'Woltlab Burning Board 4.x' : '^(\$2[axy]|\$2)\$[0-9]{2}\$[a-z0-9\/.]{53}$',
'bcrypt' : '^(\$2[axy]|\$2)\$[0-9]{2}\$[a-z0-9\/.]{53}$',
'Android PIN' : '^[a-f0-9]{40}:[a-f0-9]{16}$',
'Oracle 11g/12c' : '^(S:)?[a-f0-9]{40}(:)?[a-f0-9]{20}$',
'bcrypt(SHA-256)' : '^\$bcrypt-sha256\$(2[axy]|2)\,[0-9]+\$[a-z0-9\/.]{22}\$[a-z0-9\/.]{31}$',
'vBulletin < v3.8.5' : '^[a-f0-9]{32}:.{3}$',
'vBulletin ≥ v3.8.5' : '^[a-f0-9]{32}:.{30}$',
'Snefru-256' : '^(\$snefru\$)?[a-f0-9]{64}$',
'SHA-256' : '^[a-f0-9]{64}(:.+)?$',
'RIPEMD-256' : '^[a-f0-9]{64}(:.+)?$',
'Haval-256' : '^[a-f0-9]{64}(:.+)?$',
'GOST R 34.11-94' : '^[a-f0-9]{64}(:.+)?$',
'GOST CryptoPro S-Box' : '^[a-f0-9]{64}(:.+)?$',
'SHA3-256' : '^[a-f0-9]{64}(:.+)?$',
'Skein-256' : '^[a-f0-9]{64}(:.+)?$',
'Skein-512(256)' : '^[a-f0-9]{64}(:.+)?$',
'Ventrilo' : '^[a-f0-9]{64}(:.+)?$',
'sha256($pass.$salt)' : '^[a-f0-9]{64}(:.+)?$',
'sha256($salt.$pass)' : '^[a-f0-9]{64}(:.+)?$',
'sha256(unicode($pass).$salt)' : '^[a-f0-9]{64}(:.+)?$',
'sha256($salt.unicode($pass))' : '^[a-f0-9]{64}(:.+)?$',
'HMAC-SHA256 (key = $pass)' : '^[a-f0-9]{64}(:.+)?$',
'HMAC-SHA256 (key = $salt)' : '^[a-f0-9]{64}(:.+)?$',
'Joomla < v2.5.18' : '^[a-f0-9]{32}:[a-z0-9]{32}$',
'SAM(LM_Hash:NT_Hash)' : '^[a-f-0-9]{32}:[a-f-0-9]{32}$',
'MD5(Chap)' : '^(\$chap\$0\*)?[a-f0-9]{32}[\*:][a-f0-9]{32}(:[0-9]{2})?$',
'iSCSI CHAP Authentication' : '^(\$chap\$0\*)?[a-f0-9]{32}[\*:][a-f0-9]{32}(:[0-9]{2})?$',
'EPiServer 6.x < v4' : '^\$episerver\$\*0\*[a-z0-9\/=+]+\*[a-z0-9\/=+]{27,28}$',
'AIX(ssha256)' : '^{ssha256}[0-9]{2}\$[a-z0-9$\/.]{60}$',
'RIPEMD-320' : '^[a-f0-9]{80}$',
'EPiServer 6.x ≥ v4' : '^\$episerver\$\*1\*[a-z0-9\/=+]+\*[a-z0-9\/=+]{42,43}$',
'MSSQL(2000)' : '^0x0100[a-f0-9]{88}$',
'SHA-384' : '^[a-f0-9]{96}$',
'SHA3-384' : '^[a-f0-9]{96}$',
'Skein-512(384)' : '^[a-f0-9]{96}$',
'Skein-1024(384)' : '^[a-f0-9]{96}$',
'SSHA-512(Base64)' : '^{SSHA512}[a-z0-9\/+]{96}$',
'LDAP(SSHA-512)' : '^{SSHA512}[a-z0-9\/+]{96}$',
'AIX(ssha512)' : '^{ssha512}[0-9]{2}\$[a-z0-9\/.]{16,48}\$[a-z0-9\/.]{86}$',
'SHA-512' : '^[a-f0-9]{128}(:.+)?$',
'Whirlpool' : '^[a-f0-9]{128}(:.+)?$',
'Salsa10' : '^[a-f0-9]{128}(:.+)?$',
'Salsa20' : '^[a-f0-9]{128}(:.+)?$',
'SHA3-512' : '^[a-f0-9]{128}(:.+)?$',
'Skein-512' : '^[a-f0-9]{128}(:.+)?$',
'Skein-1024(512)' : '^[a-f0-9]{128}(:.+)?$',
'sha512($pass.$salt)' : '^[a-f0-9]{128}(:.+)?$',
'sha512($salt.$pass)' : '^[a-f0-9]{128}(:.+)?$',
'sha512(unicode($pass).$salt)' : '^[a-f0-9]{128}(:.+)?$',
'sha512($salt.unicode($pass))' : '^[a-f0-9]{128}(:.+)?$',
'HMAC-SHA512 (key = $pass)' : '^[a-f0-9]{128}(:.+)?$',
'HMAC-SHA512 (key = $salt)' : '^[a-f0-9]{128}(:.+)?$',
'OSX v10.7' : '^[a-f0-9]{136}$',
'MSSQL(2012)' : '^0x0200[a-f0-9]{136}$',
'MSSQL(2014)' : '^0x0200[a-f0-9]{136}$',
'OSX v10.8' : '^\$ml\$[0-9]+\$[a-f0-9]{64}\$[a-f0-9]{128}$',
'OSX v10.9' : '^\$ml\$[0-9]+\$[a-f0-9]{64}\$[a-f0-9]{128}$',
'Skein-1024' : '^[a-f0-9]{256}$',
'GRUB 2' : '^grub\.pbkdf2\.sha512\.[0-9]+\.([a-f0-9]{128,2048}\.|[0-9]+\.)?[a-f0-9]{128}$',
'Django(SHA-1)' : '^sha1\$[a-z0-9]+\$[a-f0-9]{40}$',
'Citrix Netscaler' : '^[a-f0-9]{49}$',
'Drupal > v7.x' : '^\$S\$[a-z0-9\/.]{52}$',
'SHA-256 Crypt' : '^\$5\$(rounds=[0-9]+\$)?[a-z0-9\/.]{0,16}\$[a-z0-9\/.]{43}$',
'Sybase ASE' : '^0x[a-f0-9]{4}[a-f0-9]{16}[a-f0-9]{64}$',
'SHA-512 Crypt' : '^\$6\$(rounds=[0-9]+\$)?[a-z0-9\/.]{0,16}\$[a-z0-9\/.]{86}$',
'Minecraft(AuthMe Reloaded)' : '^\$sha\$[a-z0-9]{1,16}\$([a-f0-9]{32}|[a-f0-9]{40}|[a-f0-9]{64}|[a-f0-9]{128}|[a-f0-9]{140})$',
'Django(SHA-256)' : '^sha256\$[a-z0-9]+\$[a-f0-9]{64}$',
'Django(SHA-384)' : '^sha384\$[a-z0-9]+\$[a-f0-9]{96}$',
'Clavister Secure Gateway' : '^crypt1:[a-z0-9+=]{12}:[a-z0-9+=]{12}$',
'Cisco VPN Client(PCF-File)' : '^[a-f0-9]{112}$',
'Microsoft MSTSC(RDP-File)' : '^[a-f0-9]{1329}$',
'NetNTLMv1-VANILLA / NetNTLMv1+ESS' : '^[^\/:*?"<>|]{1,20}[:]{2,3}([^\/:*?"<>|]{1,20})?:[a-f0-9]{48}:[a-f0-9]{48}:[a-f0-9]{16}$',
'NetNTLMv2' : '^([^\/:*?"<>|]{1,20}\)?[^\/:*?"<>|]{1,20}[:]{2,3}([^\/:*?"<>|]{1,20}:)?[^\/:*?"<>|]{1,20}:[a-f0-9]{32}:[a-f0-9]+$',
'Kerberos 5 AS-REQ Pre-Auth' : '^\$(krb5pa|mskrb5)\$([0-9]{2})?\$.+\$[a-f0-9]{1,}$',
'SCRAM Hash' : '^\$scram\$[0-9]+\$[a-z0-9\/.]{16}\$sha-1=[a-z0-9\/.]{27},sha-256=[a-z0-9\/.]{43},sha-512=[a-z0-9\/.]{86}$',
'Redmine Project Management Web App' : '^[a-f0-9]{40}:[a-f0-9]{0,32}$',
'SAP CODVN B (BCODE)' : '^(.+)?\$[a-f0-9]{16}$',
'SAP CODVN F/G (PASSCODE)' : '^(.+)?\$[a-f0-9]{40}$',
'Juniper Netscreen/SSG(ScreenOS)' : '^(.+\$)?[a-z0-9\/.+]{30}(:.+)?$',
'EPi' : '^0x[a-f0-9]{60}\s0x[a-f0-9]{40}$',
'SMF ≥ v1.1' : '^[a-f0-9]{40}:[^*]{1,25}$',
'Woltlab Burning Board 3.x' : '^(\$wbb3\$\*1\*)?[a-f0-9]{40}[:*][a-f0-9]{40}$',
'IPMI2 RAKP HMAC-SHA1' : '^[a-f0-9]{130}(:[a-f0-9]{40})?$',
'Lastpass' : '^[a-f0-9]{32}:[0-9]+:[a-z0-9_.+-]+@[a-z0-9-]+\.[a-z0-9-.]+$',
'Cisco-ASA(MD5)' : '^[a-z0-9\/.]{16}([:$].{1,})?$',
'VNC' : '^\$vnc\$\*[a-f0-9]{32}\*[a-f0-9]{32}$',
'DNSSEC(NSEC3)' : '^[a-z0-9]{32}(:([a-z0-9-]+\.)?[a-z0-9-.]+\.[a-z]{2,7}:.+:[0-9]+)?$',
'RACF' : '^(user-.+:)?\$racf\$\*.+\*[a-f0-9]{16}$',
'NTHash(FreeBSD Variant)' : '^\$3\$\$[a-f0-9]{32}$',
'SHA-1 Crypt' : '^\$sha1\$[0-9]+\$[a-z0-9\/.]{0,64}\$[a-z0-9\/.]{28}$',
'hMailServer' : '^[a-f0-9]{70}$',
'MediaWiki' : '^[:\$][AB][:\$]([a-f0-9]{1,8}[:\$])?[a-f0-9]{32}$',
'Minecraft(xAuth)' : '^[a-f0-9]{140}$',
'PBKDF2-SHA1(Generic)' : '^\$pbkdf2(-sha1)?\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{27}$',
'PBKDF2-SHA256(Generic)' : '^\$pbkdf2-sha256\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{43}$',
'PBKDF2-SHA512(Generic)' : '^\$pbkdf2-sha512\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{86}$',
'PBKDF2(Cryptacular)' : '^\$p5k2\$[0-9]+\$[a-z0-9\/+=-]+\$[a-z0-9\/+-]{27}=$',
'PBKDF2(Dwayne Litzenberger)' : '^\$p5k2\$[0-9]+\$[a-z0-9\/.]+\$[a-z0-9\/.]{32}$',
'Fairly Secure Hashed Password' : '^{FSHP[0123]\|[0-9]+\|[0-9]+}[a-z0-9\/+=]+$',
'PHPS' : '^\$PHPS\$.+\$[a-f0-9]{32}$',
'1Password(Agile Keychain)' : '^[0-9]{4}:[a-f0-9]{16}:[a-f0-9]{2080}$',
'1Password(Cloud Keychain)' : '^[a-f0-9]{64}:[a-f0-9]{32}:[0-9]{5}:[a-f0-9]{608}$',
'IKE-PSK MD5' : '^[a-f0-9]{256}:[a-f0-9]{256}:[a-f0-9]{16}:[a-f0-9]{16}:[a-f0-9]{320}:[a-f0-9]{16}:[a-f0-9]{40}:[a-f0-9]{40}:[a-f0-9]{32}$',
'IKE-PSK SHA1' : '^[a-f0-9]{256}:[a-f0-9]{256}:[a-f0-9]{16}:[a-f0-9]{16}:[a-f0-9]{320}:[a-f0-9]{16}:[a-f0-9]{40}:[a-f0-9]{40}:[a-f0-9]{40}$',
'PeopleSoft' : '^[a-z0-9\/+]{27}=$',
'Django(DES Crypt Wrapper)' : '^crypt\$[a-f0-9]{5}\$[a-z0-9\/.]{13}$',
'Django(PBKDF2-HMAC-SHA256)' : '^(\$django\$\*1\*)?pbkdf2_sha256\$[0-9]+\$[a-z0-9]+\$[a-z0-9\/+=]{44}$',
'Django(PBKDF2-HMAC-SHA1)' : '^pbkdf2_sha1\$[0-9]+\$[a-z0-9]+\$[a-z0-9\/+=]{28}$',
'Django(bcrypt)' : '^bcrypt(\$2[axy]|\$2)\$[0-9]{2}\$[a-z0-9\/.]{53}$',
'Django(MD5)' : '^md5\$[a-f0-9]+\$[a-f0-9]{32}$',
'PBKDF2(Atlassian)' : '^\{PKCS5S2\}[a-z0-9\/+]{64}$',
'PostgreSQL MD5' : '^md5[a-f0-9]{32}$',
'Lotus Notes/Domino 8' : '^\([a-z0-9\/+]{49}\)$',
'scrypt' : '^SCRYPT:[0-9]{1,}:[0-9]{1}:[0-9]{1}:[a-z0-9:\/+=]{1,}$',
'Cisco Type 8' : '^\$8\$[a-z0-9\/.]{14}\$[a-z0-9\/.]{43}$',
'Cisco Type 9' : '^\$9\$[a-z0-9\/.]{14}\$[a-z0-9\/.]{43}$',
'Microsoft Office 2007' : '^\$office\$\*2007\*[0-9]{2}\*[0-9]{3}\*[0-9]{2}\*[a-z0-9]{32}\*[a-z0-9]{32}\*[a-z0-9]{40}$',
'Microsoft Office 2010' : '^\$office\$\*2010\*[0-9]{6}\*[0-9]{3}\*[0-9]{2}\*[a-z0-9]{32}\*[a-z0-9]{32}\*[a-z0-9]{64}$',
'Microsoft Office 2013' : '^\$office\$\*2013\*[0-9]{6}\*[0-9]{3}\*[0-9]{2}\*[a-z0-9]{32}\*[a-z0-9]{32}\*[a-z0-9]{64}$',
'Android FDE ≤ 4.3' : '^\$fde\$[0-9]{2}\$[a-f0-9]{32}\$[0-9]{2}\$[a-f0-9]{32}\$[a-f0-9]{3072}$',
'Microsoft Office ≤ 2003 (MD5+RC4)' : '^\$oldoffice\$[01]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{32}$',
'Microsoft Office ≤ 2003 (MD5+RC4) collider-mode #1' : '^\$oldoffice\$[01]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{32}$',
'Microsoft Office ≤ 2003 (MD5+RC4) collider-mode #2' : '^\$oldoffice\$[01]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{32}$',
'Microsoft Office ≤ 2003 (SHA1+RC4)' : '^\$oldoffice\$[34]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{40}$',
'Microsoft Office ≤ 2003 (SHA1+RC4) collider-mode #1' : '^\$oldoffice\$[34]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{40}$',
'Microsoft Office ≤ 2003 (SHA1+RC4) collider-mode #2' : '^\$oldoffice\$[34]\*[a-f0-9]{32}\*[a-f0-9]{32}\*[a-f0-9]{40}$',
'RAdmin v2.x' : '^(\$radmin2\$)?[a-f0-9]{32}$',
'SAP CODVN H (PWDSALTEDHASH) iSSHA-1' : '^{x-issha,\s[0-9]{4}}[a-z0-9\/+=]+$',
'CRAM-MD5' : '^\$cram_md5\$[a-z0-9\/+=-]+\$[a-z0-9\/+=-]{52}$',
'SipHash' : '^[a-f0-9]{16}:2:4:[a-f0-9]{32}$',
'Cisco Type 7' : '^[a-f0-9]{4,}$',
'BigCrypt' : '^[a-z0-9\/.]{13,}$',
'Cisco Type 4' : '^(\$cisco4\$)?[a-z0-9\/.]{43}$',
'Django(bcrypt-SHA256)' : '^bcrypt_sha256\$\$(2[axy]|2)\$[0-9]+\$[a-z0-9\/.]{53}$',
'PostgreSQL Challenge-Response Authentication (MD5)' : '^\$postgres\$.[^\*]+[*:][a-f0-9]{1,32}[*:][a-f0-9]{32}$',
'Siemens-S7' : '^\$siemens-s7\$[0-9]{1}\$[a-f0-9]{40}\$[a-f0-9]{40}$',
'Microsoft Outlook PST' : '^(\$pst\$)?[a-f0-9]{8}$',
'PBKDF2-HMAC-SHA256(PHP)' : '^sha256[:$][0-9]+[:$][a-z0-9\/+]+[:$][a-z0-9\/+]{32,128}$',
'Dahua' : '^(\$dahua\$)?[a-z0-9]{8}$',
'MySQL Challenge-Response Authentication (SHA1)' : '^\$mysqlna\$[a-f0-9]{40}[:*][a-f0-9]{40}$',
'PDF 1.4 - 1.6 (Acrobat 5 - 8)' : '^\$pdf\$[24]\*[34]\*128\*[0-9-]{1,5}\*1\*(16|32)\*[a-f0-9]{32,64}\*32\*[a-f0-9]{64}\*(8|16|32)\*[a-f0-9]{16,64}$'
}
result = {}
fh = open(source_file, 'r')
source_file_contents = fh.read()
fh.close()
for format in regex_list.keys():
hashes = []
regex = re.compile(regex_list[format])
hashes = regex.findall(source_file_contents)
if hashes:
result[format] = hashes
return result
def hashes_to_files(hashes, original_file):
    """Write each hash format's matches to '<format>-<input-stem>.txt' in the cwd."""
    # The stem of the scanned file is the same for every output file,
    # so compute it once instead of per format.
    prefix = path.splitext(path.basename(original_file))[0]
    for fmt, found in hashes.items():
        out_name = '%s-%s.txt' % (fmt, prefix)
        with open(out_name, 'w') as out:
            out.write(''.join('%s\n' % h for h in found))
def main():
    """CLI entry point: extract hashes from the file named in argv[1] and write them out."""
    # Bug fix: the original tested `len(sys.argv) != 3` (two arguments) and left a
    # debug `print(len(sys.argv))` in place, but the usage message and the code
    # below use exactly one argument: argv = [script, filename].
    if len(sys.argv) != 2:
        print("Missing input file.")
        print('Use: %s <filename>' % sys.argv[0])
        sys.exit(1)
    if not path.exists(sys.argv[1]):
        print('File %s does not exists.' % sys.argv[1])
        sys.exit(1)
    extracted_hashes = extract_hashes(sys.argv[1])
    if extracted_hashes:
        hashes_to_files(extracted_hashes, sys.argv[1])
        print('\nExtracted hashes:')
        # Report how many hashes of each format were found.
        for format in extracted_hashes.keys():
            print('\t%s: %s' % (format, len(extracted_hashes[format])))

if __name__ == '__main__':
    main()
|
061dc408953410b0152b5fb0baa11132a8ba240d | Hinks/beaver_coffee_manager_app | /src/cru/stock.py | 5,262 | 3.59375 | 4 | from datetime import datetime
from toolz.dicttoolz import assoc
def read(db, shop_id):
    """Return the shop's stock quantities, or an error string when the shop is unknown."""
    shop = fetch_shop(db, shop_id)
    if not shop:
        return 'error, no shop with that id exists.'
    return shop.get('stock_quantities')
def update(db, shop_id):
    """Interactively ask for a date, sku and delta, then apply and log the stock change."""
    raw_date = input('Enter the date of today, format: yyyy-mm-dd: ')
    when = convert_to_datetime(raw_date)
    sku = input('Enter product sku to update: ')
    delta = input('Enter product qty to increment by(possitive or negative)')
    update_shopstock_and_stockhistory(db, shop_id, when, sku, int(delta))
    return f'Stock qty for {sku} has been updated by {delta} and logged on {raw_date}.'
def update_shopstock_and_stockhistory(db, shop_id, date, sku, qty):
    """Apply a stock change to the shop and record it in the stock_history collection.

    Guarantees a history document exists for `date`, seeding its initial
    quantities so the day's log stays consistent with the shop inventory.
    """
    # Case 1: a history document for this date already exists (the usual case
    # after the day's first change) -- increment both the shop and the log.
    if fetch_historical_shop_stock(db, shop_id, date):
        update_shop_stock(db, shop_id, sku, qty)
        update_stock_history(db, shop_id, date, sku, qty)
        return
    # Case 2: no document for this date yet, but earlier history exists.
    # Seed today's document from the most recent logged quantities with this
    # change already applied (e.g. yesterday qty 100, first sale of 5 today
    # means today's document starts at 95).
    latest = fetch_latest_historical_shop_stock(db, shop_id)
    if latest:
        update_shop_stock(db, shop_id, sku, qty)
        seeded = update_stock(latest.get('stock_quantities'), sku, qty)
        insert_new_history_doc(db, shop_id, date, seeded)
    else:
        # Case 3: the shop has no history at all; seed today's document from
        # the shop's current inventory, read *after* applying the change.
        update_shop_stock(db, shop_id, sku, qty)
        shop = fetch_shop(db, shop_id)
        insert_new_history_doc(db, shop_id, date, shop.get('stock_quantities'))
def fetch_shop(db, shop_id):
    """Look up a single shop document by its primary key, or None if absent."""
    query = {'_id': shop_id}
    return db.shops.find_one(query)
def fetch_historical_shop_stock(db, shop_id, date):
    """Return the shop's stock-history document for `date`, if it exists."""
    query = {'shop_id': shop_id, 'date': date}
    return db.stock_history.find_one(query)
def fetch_latest_historical_shop_stock(db, shop_id):
    """Return the most recent stock-history document for the shop.

    Prints a notice and returns None when the shop has no history at all.
    """
    try:
        # Sort newest-first and take a single document.
        newest_first = db.stock_history.find({'shop_id': shop_id}).sort('date', -1).limit(1)
        return newest_first[0]
    except IndexError:
        print('No historical stock quantities exist for this shop.')
        return None
def update_shop_stock(db, shop_id, sku, qty):
    """Increment one product's quantity in the shop document via a $inc update."""
    increment = {'$inc': {f'stock_quantities.{sku}': qty}}
    return db.shops.update({'_id': shop_id}, increment)
def update_stock_history(db, shop_id, date, sku, qty):
    """Increment one product's quantity in the shop's history document for `date`."""
    increment = {'$inc': {f'stock_quantities.{sku}': qty}}
    return db.stock_history.update({'shop_id': shop_id, 'date': date}, increment)
def insert_new_history_doc(db, shop_id, date, stock):
    """Insert the shop's first stock-history document for `date`.

    Called on the very first purchase or stock change of a day, seeding the
    day's log with the given initial quantities.
    """
    document = {
        'date': date,
        'shop_id': shop_id,
        'stock_quantities': stock,
    }
    return db.stock_history.insert_one(document)
def update_stock(stock, sku, qty):
    """Return a copy of ``stock`` where the quantity for ``sku`` is changed by ``qty``.

    The input mapping is not mutated.

    >>> update_stock({'1001': 1000, '1002': 1250}, '1001', 50)
    {'1001': 1050, '1002': 1250}
    >>> update_stock({'1001': 1000, '1002': 1250}, '1001', -50)
    {'1001': 950, '1002': 1250}
    """
    # Plain dict unpacking gives the same "new dict with one key changed"
    # semantics as toolz.dicttoolz.assoc without the third-party dependency.
    return {**stock, sku: stock[sku] + qty}
def convert_to_datetime(date_str):
    """Parse a 'yyyy-mm-dd' string into a datetime at midnight."""
    year, month, day = (int(part) for part in date_str.split('-'))
    return datetime(year, month, day)
|
def is_pangram(sentence):
    """Return True if `sentence` contains every letter a-z at least once."""
    # Bug fix: the original alphabet literal ended '...wxys' (a stray 's'
    # instead of 'z'), so sentences lacking 'z' were wrongly accepted.
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    if len(set(alphabet) - set(sentence.lower())) == 0:
        return True
    else:
        return False
|
def is_plalindrome(string):
    """Return True if the string reads the same forwards and backwards."""
    chars = list(string)
    # Compare mirrored positions from both ends toward the middle.
    for offset in range(len(chars) // 2):
        if chars[offset] != chars[-1 - offset]:
            return False
    return True
a1a7e5faad35847f22301b117952e223857d951a | nestorsgarzonc/leetcode_problems | /6.zigzag_convertion.py | 1,459 | 4.375 | 4 | """
The string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)
P A H N
A P L S I I G
Y I R
And then read line by line: "PAHNAPLSIIGYIR"
Write the code that will take a string and make this conversion given a number of rows:
string convert(string s, int numRows);
Example 1:
Input: s = "PAYPALISHIRING", numRows = 3
Output: "PAHNAPLSIIGYIR"
Example 2:
Input: s = "PAYPALISHIRING", numRows = 4
Output: "PINALSIGYAHRPI"
Explanation:
P I N
A L S I G
Y A H R
P I
"""
"""
1158 / 1158 test cases passed.
Status: Accepted
Runtime: 88 ms
Memory Usage: 13.9 MB
"""
# Solution brute force
class Solution:
    """LeetCode 6: read a string written in zigzag over `numRows` rows.

    Simulates writing characters down and back up across the rows, then
    concatenates the rows left to right.
    """

    def convert(self, s: str, numRows: int) -> str:
        """Return the zigzag reading of `s` over `numRows` rows."""
        if numRows == 1:
            return s
        rows = [[] for _ in range(numRows)]
        counter = 0          # index of the row currently being written
        last = numRows - 1   # index of the bottom row
        reverse = False      # True while moving upward
        for ch in s:
            # Flip direction at the bottom and top rows.
            if counter == last:
                reverse = True
            if counter == 0:
                reverse = False
            if counter < last and not reverse:
                rows[counter].append(ch)
                counter += 1
            elif counter > 0 and reverse:
                rows[counter].append(ch)
                counter -= 1
        # Bug fix: the original did `myResult.join(i)` and discarded the
        # return value (str.join does not mutate), so it always returned ''.
        return ''.join(''.join(row) for row in rows)
|
26a980f33d77005abdc68ba2b089b6bfa95b722e | ajboloor/python-simple | /simple-weather/simple-weather.py | 1,478 | 3.765625 | 4 |
def get_weather(location_name):
    """Look up current conditions for `location_name`, print them, and plot the forecast.

    Checks internet connectivity first; prints an error and does nothing if offline.
    Requires the third-party `weather-api` and `matplotlib` packages.
    """
    internet_status = 0
    try:
        import httplib  # Python 2
    except ImportError:
        import http.client as httplib  # Python 3
    conn = httplib.HTTPConnection("www.google.com", timeout=5)
    try:
        conn.request("HEAD", "/")
        conn.close()
        internet_status = 1
    except Exception:
        # Any network failure means the weather service is unreachable too.
        conn.close()
        print("ERROR: Internet access not available.")
    if internet_status == 1:
        from weather import Weather
        import matplotlib.pyplot as plt
        weather = Weather(unit='c')  # Change c to f for Fahrenheit
        location = weather.lookup_by_location(location_name)
        condition = location.condition
        # Bug fix: these were Python 2 `print x` statements mixed with the
        # print() calls above -- a syntax error on Python 3.
        print("Weather in " + location.location.city + ", " + location.location.country)
        print("The condition is " + condition.text)
        print("The temperature is " + condition.temp + " C")
        highs = []
        lows = []
        dates = []
        for forecast in location.forecast:
            highs.append(int(forecast.high))
            lows.append(int(forecast.low))
            dates.append(str(forecast.date))
        plt.plot(dates, highs, color='red', label='High')
        plt.plot(dates, lows, color='blue', label='Low')
        plt.legend()
        plt.title("Weather Forecast for " + location_name)
        plt.ylabel("Temperature in Celsius")
        plt.show()
get_weather('St. Louis')
|
# Ask for the user's name and age, then report voting eligibility.
name = input("What is your name?")
age = int(input(f"How old are you, {name}"))
if age < 18:
    print(f"Come back in {18 - age} years")
else:
    print("You can vote")
# Demo of Python string slicing, repetition and membership tests.
text = "This is awesome."
# Substrings via slicing.
print(text[8:])    # from index 8 to the end
print(text[:4])    # first four characters
# Slices with a step: text[start:end:step].
print(text[:14:2])
print(text[::2])
# A step of -1 walks the string backwards, reversing it.
print(text[::-1])
# The * operator repeats a string; + concatenates.
print("Hello " * 10)
print("Hello " * 10 + "World!")
# `in` tests substring membership.
print("awe" in text)
013d498ccae9199fd92b4dd4b3c45a01f6808f19 | DuncanHu/Machine-learning-practice-notes | /MachineLearning_notes/KNN/kNN.py | 1,514 | 3.5625 | 4 | '''
机器学习实战——KNN基础代码实现
'''
from numpy import *
import numpy as np
import operator
def createDataSet():
    """Return the toy training set: a 4x2 feature matrix and its class labels."""
    features = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])  # feature matrix
    class_labels = ['A', 'A', 'B', 'B']                           # label vector
    return features, class_labels
# 参数:inX:测试数据 dataSet:特征矩阵 labels:label向量 k:k个近邻
def classify0(inX, dataSet, labels, k):
dataSetSize = dataSet.shape[0] #获取特征矩阵的行数
# 计算欧式距离(两种方法)
# 方法一
# diffMat = tile(inX,(dataSetSize,1)) - dataSet
# sqDiffMat = diffMat**2
# sqDistances = sqDiffMat.sum(axis=1)
# distances = sqDistances ** 0.5
# 方法二
diffMat = inX - dataSet # 矩阵的广播机制
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis=1)
distances = sqDistances ** 0.5
# 对计算出的距离进行排序,以得到最近的点
sortedDistIndicies = distances.argsort()
classCount={}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1 # get(value,default) value:返回此键的值,default:如果此键不存在返回0
# 根据字典的值对字典进行排序
sortedClassCount = sorted(classCount.items(), key = lambda item:item[1], reverse = True)
print(sortedClassCount)
return sortedClassCount[0][0]
# Driver: classify one query point against the toy data set and print the label.
group, labels = createDataSet()
prediction = classify0(np.array([[10, 0]]), group, labels, 3)
print(prediction)
b0f6c27a6fa3c99c5612d321f34b67ba5201a22b | Jamie-Matthews-AU/MILPE | /interpreter_brainfuck/Brainfuck_Classes.py | 6,748 | 3.5 | 4 | import io
import queue
class Brainfuck:
    """Base Brainfuck machine: program text, tape state and host I/O queues.

    Subclasses implement the tape policy (`append_tape`, `current_tape_val`)
    and the instruction semantics (`next_instruction`).
    """

    def __init__(self, instructions, tape=None, tape_pointer=0, behavior=None, instruction_pointer=0):
        """
        :type tape: list[int]
            the tape of the program
        :type instructions: list["str"]
            queue of instructions, left to right
        :type behavior: list[int]
            what to fill new tape slots with, based on (1-indexed) index of tape modulo length of behavior
        """
        self.instructions = instructions
        self.tape = tape
        self.tape_pointer = tape_pointer
        self.behavior = behavior
        self.instruction_pointer = instruction_pointer
        self.behavior_pointer = 0
        self.io_in_queue = queue.Queue(0)   # host -> program input (ordinals)
        self.io_out_queue = queue.Queue(0)  # program -> host output
        self.while_locations = None

    def read(self):
        """Pop one item of program output, or None when nothing is buffered."""
        try:
            return str(self.io_out_queue.get(block=False))
        except queue.Empty:
            return None

    def can_read(self):
        """True when the program has produced unread output."""
        return self.io_out_queue.qsize() > 0

    def write(self, item: str):
        """Queue each character of `item` (as its ordinal) for the program to read."""
        for character in item:
            self.io_in_queue.put(ord(character), block=False)

    def append_tape(self, left_append=False):
        """Extend the tape by one cell; the base machine has no tape policy."""
        pass

    def next_instruction(self):
        """Execute one instruction; the base machine is always finished."""
        return True

    def current_tape_val(self):
        """Value of the cell under the tape pointer (base machine: always 0)."""
        return 0

    def find_one_after_matching_right_square_bracket(self):
        """Return the index one past the ']' matching the '[' just consumed.

        Scans forward from instruction_pointer; raises if brackets are unbalanced.
        """
        depth = 1
        scan = self.instruction_pointer  # first instruction after the '['
        while depth > 0:
            if scan >= len(self.instructions):
                raise Exception("No matching right square bracket for square bracket at "
                                + str(self.instruction_pointer))
            token = self.instructions[scan]
            scan += 1
            if token == "]":
                depth -= 1
            elif token == "[":
                depth += 1
        return scan
class One_Dimensional_Brainfuck(Brainfuck):
    # Concrete interpreter over a single tape that grows on demand in both
    # directions.  New cells are filled from `behavior`, indexed cyclically by
    # tape position; `behavior_pointer` tracks how far the tape has been
    # extended to the left (which behavior index maps to tape index 0).
    def __init__(self, instructions, tape=None, tape_pointer=0, behavior=None, instruction_pointer=0):
        """
        :type tape: list[int]
            the tape of the program
        :type instructions: list["str"]
            queue of instructions, left to right
        :type behavior: list[int]
            what to fill new tape slots with, based on (1-indexed) index of tape modulo length of behavior
        """
        self.instructions = instructions
        if behavior is None:
            self.behavior = [0]  # default: new cells start at zero
        else:
            self.behavior = behavior
        self.behavior_pointer = 0  # Behavior pointer is the index in behavior list which corresponds to index 0 in tape
        if tape is None:
            self.tape = [self.behavior[0]]
        else:
            self.tape = tape
        self.tape_pointer = tape_pointer
        self.io_in_queue = queue.Queue(0)   # host -> program input (ordinals)
        self.io_out_queue = queue.Queue(0)  # program -> host output (characters)
        self.while_locations = []           # stack of '[' indices for loop back-jumps
        self.instruction_pointer = instruction_pointer
        # ENFORCE pointer must begin within tape by extending tape
        # NOTE(review): '>' still allows tape_pointer == len(tape), one past the
        # last cell; looks like this should be '>=' -- confirm.
        while self.tape_pointer > len(self.tape):
            self.append_tape()
        while self.tape_pointer < 0:
            self.append_tape(True)
            self.tape_pointer += 1
    def read(self):
        # Pop one item of program output, or None when nothing is buffered.
        try:
            return str(self.io_out_queue.get(block=False))
        except queue.Empty:
            return None
    def can_read(self):
        # True when the program has produced unread output.
        return self.io_out_queue.qsize() > 0
    def write(self, item: str):
        # Queue each character of `item` (as its ordinal) for ',' to consume.
        for c in item:
            self.io_in_queue.put(ord(c), block=False)
    def append_tape(self, left_append=False):
        # Grow the tape by one cell, filled from `behavior` cyclically.
        if left_append:
            # A negative behavior_pointer indexes `behavior` from its end.
            self.behavior_pointer = self.behavior_pointer - 1
            next_item = self.behavior[self.behavior_pointer]
            self.tape.insert(0, next_item)
            if len(self.behavior) + self.behavior_pointer == 0:
                self.behavior_pointer = 0  # Wraparound has occurred
        else:
            # Right extension: pick the behavior slot for the new last index,
            # offset by how far the tape has been shifted left.
            ind = (len(self.tape) % len(self.behavior)) - self.behavior_pointer
            self.tape.append(self.behavior[ind])
    def next_instruction(self):
        # Execute a single instruction; returns True when the program is done.
        if self.instruction_pointer >= len(self.instructions):
            return True  # Finished = True
        instruction = self.instructions[self.instruction_pointer]
        self.instruction_pointer += 1
        if instruction == "<":
            # Move left; grow the tape instead of going below index 0.
            if self.tape_pointer == 0:
                self.append_tape(True)
            else:
                self.tape_pointer -= 1
        elif instruction == ">":
            # Move right; grow the tape on demand.
            if self.tape_pointer == len(self.tape) - 1:
                self.append_tape()
            self.tape_pointer += 1
        elif instruction == "+":
            # Cells behave as bytes: arithmetic wraps modulo 256.
            self.tape[self.tape_pointer] = (self.current_tape_val() + 1) % 256
        elif instruction == "-":
            self.tape[self.tape_pointer] = (self.current_tape_val() - 1) % 256
        elif instruction == ".":
            # Emit the current cell as a character.
            self.io_out_queue.put(chr(self.current_tape_val()))
        elif instruction == ",":
            # Consume one queued input ordinal into the current cell.
            if self.io_in_queue.qsize() > 0:
                read_in = self.io_in_queue.get(block=False)
                self.tape[self.tape_pointer] = read_in
            else:
                raise BufferError("No items to read in")
        elif instruction == "[":
            if self.current_tape_val() == 0:
                # Zero cell: skip past the matching ']'.
                matching = self.find_one_after_matching_right_square_bracket()
                self.instruction_pointer = matching
            else:
                # Non-zero cell: remember this '[' so ']' can jump back to it.
                self.while_locations.append(self.instruction_pointer - 1)
        elif instruction == "]":
            if len(self.while_locations) > 0:
                back = self.while_locations.pop()
                if self.current_tape_val() != 0:
                    self.instruction_pointer = back
            else:
                raise Exception("Attempted to break from while loop that does not have entry point at "
                                + str(self.instruction_pointer))
        if self.instruction_pointer >= len(self.instructions):
            return True
        else:
            return False
    def current_tape_val(self):
        # Value of the cell under the tape pointer.
        return self.tape[self.tape_pointer]
    def find_one_after_matching_right_square_bracket(self):
        # Scan forward from instruction_pointer (just past a '[') and return
        # the index one after the matching ']'; raises if brackets unbalanced.
        depth = 1
        ip = self.instruction_pointer  # Begin at point after left square bracket
        while depth > 0:
            if ip >= len(self.instructions):
                raise Exception("No matching right square bracket for square bracket at "
                                + str(self.instruction_pointer))
            instruction = self.instructions[ip]
            ip += 1
            if instruction == "]":
                depth -= 1
            elif instruction == "[":
                depth += 1
        return ip
21f46522996d73fbe2665ee02ad39ddae390cebb | omeinsotelo/holbertonschool-higher_level_programming | /0x0A-python-inheritance/4-inherits_from.py | 221 | 3.625 | 4 | #!/usr/bin/python3
def inherits_from(obj, a_class):
    """Return True if obj is an instance of a strict subclass of a_class."""
    # isinstance also accepts subclasses; exclude exact instances of a_class.
    return isinstance(obj, a_class) and type(obj) != a_class
|
45af3c549687103a8990f13431304a62dbd1e2e4 | omeinsotelo/holbertonschool-higher_level_programming | /0x07-python-test_driven_development/tests/6-max_integer_test.py | 1,079 | 3.78125 | 4 | #!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
    """Unit tests for the 6-max_integer module's max_integer() function."""

    def testing_max_integer(self):
        """Check max_integer over empty, positive, negative and float inputs."""
        # Same assertions as before, expressed as (input, expected) pairs and
        # checked in the original order.
        cases = [
            ([], None),
            ([1, 2, 3, 4, 5, 6, 7, 8], 8),
            ([-80, 78, 500, 230], 500),
            ([-3300, 7], 7),
            ([0, 0, 0, 0], 0),
            ([-3, -10, -9, -20, -8], -3),
            ([9], 9),
            ([-5], -5),
            ([-7.5, -9.3], -7.5),
            ([2.6, 2.1, 9.4, 1.4], 9.4),
            ([1.1, 1.2, 1.3, float(1)], float(1.3)),
            ([1.1, 1, 2.2, 2], 2.2),
            ([3/2, 1, 0.4, 1-0], 3/2),
        ]
        for values, expected in cases:
            self.assertEqual(max_integer(values), expected)
|
8d2a90308ce64f945bf74ae562a32596722b4fb1 | omeinsotelo/holbertonschool-higher_level_programming | /0x01-python-if_else_loops_functions/8-uppercase.py | 229 | 3.953125 | 4 | #!/usr/bin/python3
def uppercase(str):
    """Print the string upper-cased, followed by a newline, using ord/chr math."""
    for character in str:
        code = ord(character)
        # Codes above 95 are treated as lower-case; subtracting 32 upper-cases.
        print("{:c}".format(code - 32 if code > 95 else code), end="")
    print("{:s}".format("\n"), end="")
|
4a2238f4157019e743a17d11394c038650ac306e | ragona/cryptopals_python3 | /c36.py | 3,174 | 3.5 | 4 | from pals.srp import SRPClient, SRPServer, SRPSession
I = 'me@here.com'
P = 'p@$$W0RD'
#create client and server
server = SRPServer()
client = SRPClient(I, P)
#create a user on the server
server.add_user(I, P)
#create a session
session = SRPSession(client, server)
#do the initial handshake
session.handshake()
#validate the session
session.validate()
'''
SIMPLIFIED:
0. create user on server
1. start session - public ephemeral key exchange
2. validate session key
Implement Secure Remote Password (SRP)
To understand SRP, look at how you generate an AES key from DH;
now, just observe you can do the "opposite" operation and generate
a numeric parameter from a hash. Then:
Replace A and B with C and S (client & server)
C & S
INITIAL CONSTANTS
Agree on N=[NIST Prime], g=2, k=3, I (email), P (password)
GENERATE SALT AND VALIDATOR FOR USER
S
Generate salt as random integer
Generate string xH=SHA256(salt|password)
Convert xH to integer x somehow (put 0x on hexdigest)
Generate v=g**x % N
Save everything but x, xH
CLIENT SEND EMAIL AND EPHEMERAL KEY
C->S
Send I, A=g**a % N (a la Diffie Hellman)
SERVER SEND SALT AND EPHEMERAL KEY
S->C
Send salt, B=kv + g**b % N
BOTH GENERATE SCRAMBLING PARAMETER
S, C
Compute string uH = SHA256(A|B), u = integer of uH
CLIENT GENERATES SESSION KEY
C
Generate string xH=SHA256(salt|password)
Convert xH to integer x somehow (put 0x on hexdigest)
Generate S = (B - k * g**x)**(a + u * x) % N
Generate K = SHA256(S)
SERVER GENERATES SESSION KEY
S
Generate S = (A * v**u) ** b % N
Generate K = SHA256(S)
CLIENT SENDS PROOF OF SESSION KEY
C->S
Send HMAC-SHA256(K, salt)
SERVER VALIDATES SESSION KEY
S->C
Send "OK" if HMAC-SHA256(K, salt) validates
You're going to want to do this at a REPL of some sort; it may
take a couple tries.
It doesn't matter how you go from integer to string or string to
integer (where things are going in or out of SHA256) as long as
you do it consistently. I tested by using the ASCII decimal
representation of integers as input to SHA256, and by converting
the hexdigest to an integer when processing its output.
This is basically Diffie Hellman with a tweak of mixing the password
into the public keys. The server also takes an extra step to avoid
storing an easily crackable password-equivalent.
'''
'''
Carol Steve
1. C --> (lookup s, v)
2. x = H(s, P) <-- s
3. A = g^a A -->
4. <-- B, u B = v + g^b
5. S = (B - g^x)^(a + ux) S = (A · v^u)^b
6. K = H(S) K = H(S)
7. M[1] = H(A, B, K) M[1] --> (verify M[1])
8. (verify M[2]) <-- M[2] M[2] = H(A, M[1], K)
Table 4: The Secure Remote Password Protocol
n A large prime number. All computations are performed modulo n.
g A primitive root modulo n (often called a generator)
s A random string used as the user's salt
P The user's password
x A private key derived from the password and salt
v The host's password verifier
u Random scrambling parameter, publicly revealed
a,b Ephemeral private keys, generated randomly and not publicly revealed
A,B Corresponding public keys
H() One-way hash function
m,n The two quantities (strings) m and n concatenated
K Session key
'''
|
2ed5d0bd93ebf60d5eb1944a5b5a777865370fa3 | ragona/cryptopals_python3 | /c40.py | 2,530 | 3.8125 | 4 | '''
why in god's name does this work
I gotta go back to math class
what is a modular inverse even
I need to go redo c39 and focus on egcd + invmod
edit 6/12:
Oh, this works because with an exponent of 3 RSA
is just encryption is just cubing a number mod
the public encryption modulus:
c = m ** 3 % n
... and then some CRT shit happens and I don't
quite get it again.
'''
from Crypto.Util.number import inverse
from pals.RSA import RSA
from pals.utils import cbrt, int_to_bytes
# The plaintext "broadcast": encrypted under three different e=3 public keys.
msg = b'some other secret message'
# three separate key pairs; keep only the public halves
# in theory only the public key would be available
pub_0 = RSA.generate_keys(1024, 3)[0]
pub_1 = RSA.generate_keys(1024, 3)[0]
pub_2 = RSA.generate_keys(1024, 3)[0]
# the three ciphertexts of the same message
c_0 = RSA.encrypt(msg, pub_0)
c_1 = RSA.encrypt(msg, pub_1)
c_2 = RSA.encrypt(msg, pub_2)
# the public moduli from each key
n_0 = pub_0[1]
n_1 = pub_1[1]
n_2 = pub_2[1]
# Chinese Remainder Theorem step (per the challenge text below): combine the
# residues c_i mod n_i into one value mod N = n_0*n_1*n_2.
# m_s_i is the product of the moduli EXCEPT n_i.
m_s_0 = n_1 * n_2
m_s_1 = n_0 * n_2
m_s_2 = n_0 * n_1
N = n_0 * n_1 * n_2
# Each CRT term: c_i * m_s_i * invmod(m_s_i, n_i).
r_0 = c_0 * m_s_0 * inverse(m_s_0, n_0)
r_1 = c_1 * m_s_1 * inverse(m_s_1, n_1)
r_2 = c_2 * m_s_2 * inverse(m_s_2, n_2)
result = (r_0 + r_1 + r_2) % N
# Per the challenge notes, skipping the final modulus and taking an integer
# cube root recovers the plaintext for e=3.
print(int_to_bytes(cbrt(result)))
'''
Implement an E=3 RSA Broadcast attack
Assume you're a Javascript programmer. That is, you're using a naive
handrolled RSA to encrypt without padding.
Assume you can be coerced into encrypting the same plaintext three
times, under three different public keys. You can; it's happened.
Then an attacker can trivially decrypt your message, by:
Capturing any 3 of the ciphertexts and their corresponding pubkeys
Using the CRT to solve for the number represented by the three
ciphertexts (which are residues mod their respective pubkeys)
Taking the cube root of the resulting number
The CRT says you can take any number and represent it as the combination
of a series of residues mod a series of moduli. In the three-residue
case, you have:
result =
(c_0 * m_s_0 * invmod(m_s_0, n_0)) +
(c_1 * m_s_1 * invmod(m_s_1, n_1)) +
(c_2 * m_s_2 * invmod(m_s_2, n_2)) mod N_012
where:
c_0, c_1, c_2 are the three respective residues mod
n_0, n_1, n_2
m_s_n (for n in 0, 1, 2) are the product of the moduli
EXCEPT n_n --- ie, m_s_1 is n_0 * n_2
N_012 is the product of all three moduli
To decrypt RSA using a simple cube root, leave off the final modulus
operation; just take the raw accumulated result and cube-root it.
''' |
# Read n integers from stdin and print the bitwise OR of all of them.
n = int(input("enter n"))
a = []
for _ in range(0, n):
    a.append(int(input()))
# Seed with the first element; OR-ing it with itself again is harmless.
s = a[0]
for value in a:
    s = s | value
print(s)
|
20c1c6d822336fab9a8b7c36f326ecbbd8b7d016 | vigneshsadasivam/player | /74.py | 104 | 3.796875 | 4 |
# Print "yes" if any character occurs more than once in the input, else "no".
n = input("Enter the n:")
for character in n:
    if n.count(character) > 1:
        print("yes")
        break
else:
    # for/else: runs only when no duplicate triggered the break.
    print("no")
|
9f084c12bb463631ecdf20e2a97a4ec33595ea82 | Vadim91200/BlackJack | /ISN - Black Jack.py | 5,234 | 3.59375 | 4 | # -*- coding: utf-8 -*-
from random import *
# The deck: four aces (value 11).  Mutated in place by ajout_carte as cards
# are drawn.
jeu = [11, 11, 11, 11]
def ajout_carte(main):
    """Draw a random card from the global deck `jeu` into `main` and return `main`."""
    carte = choice(jeu)   # pick a random card from the deck
    main.append(carte)    # add it to the hand
    jeu.remove(carte)     # remove it from the deck so it cannot be drawn twice
    return main
def tirage():
    """Deal a two-card hand as a list of one-element lists, e.g. [[11], [10]].

    If both cards are aces (11), the first is demoted to 1 so the starting
    hand is not an automatic bust.
    """
    cartes = []
    for _ in range(2):
        cartes.append(ajout_carte([]))  # each draw comes back as a one-element list
        print(cartes)
    # Bug fix: the elements are lists like [11], so the original comparison
    # `cartes[0] == 11` was never true and the double-ace rule never applied;
    # compare (and replace) the contained values instead.
    if cartes[0][0] == 11 and cartes[1][0] == 11:
        cartes[0][0] = 1
    return cartes
def total(main):
    """Return the hand's total; demote one ace (11 -> 1) in place on a bust."""
    somme = sum(main)
    # Over 21 with a soft ace: count that ace as 1 and re-total.
    if somme > 21 and 11 in main:
        main[main.index(11)] = 1
        somme = sum(main)
    return somme
def jouer():
    """Play one round of Blackjack against the computer on the console (French UI)."""
    print("Bonjour et bienvenue au jeu de Black Jack!")
    k = input("Souhaitez vous débuter une partie? OUI ou NON:")
    # Keep asking until the player answers OUI.
    while k != "OUI":
        k = input("Souhaitez vous débuter une partie? OUI ou NON:")
    if k == "OUI":
        x = tirage()  # player's two cards (list of one-element lists)
        y = tirage()  # computer's two cards
        j = []        # player's cards flattened to ints
        o = []        # computer's cards flattened to ints
        t = 0         # player's total
        d = 0         # computer's total
        for k in y:
            o.append(k[0])
        for k in x:
            j.append(k[0])
        d = total(o)
        t = total(j)
        print("Vos cartes sont", j, "Le total est de:", t)
        print("La première cartes de l'ordinateur est", o[0])
        s = input("Souhaitez vous une nouvelle carte? Tapez N pour une nouvelle carte; P pour ne pas avoir de nouvelle carte:")
        # Player's turn: keep drawing while they answer N.
        while s == "N":
            x.append(ajout_carte([]))
            a = x.pop()  # the freshly drawn one-element list
            for i in a:
                v = i    # 'v': the drawn card as an int
            j.append(v)
            print("Vos cartes sont", j)
            t = total(j)
            if t > 21:
                print("Vous avez perdu la partie, votre score total est de: ", t)
                # Bug fix: the original wrote `s=="P"` -- a no-op comparison --
                # so a busted player was stuck drawing forever; assign instead.
                s = "P"
            else:
                print("Votre score total est de:", t)
                s = input("Voulez vous une carte supplémentaire ?")
        # Dealer's turn: the computer draws until reaching at least 17.
        while d < 17:
            y.append(ajout_carte([]))
            a = y.pop()
            for i in a:
                v = i
            o.append(v)
            d = total(o)
        # Outcome.
        # NOTE(review): a busted player (t > 21) can still "win" below when
        # d < t -- confirm whether that is intended.
        if d > 21:
            print("L'ordinateur a dépassé 21 vous avez gagné")
        elif d < t:
            print("Vous avez gagné la partie, votre score est de:", t, "Le score de l'ordinateur est de:", d, ".")
        elif t < d:
            print("Vous avez perdu, votre score est de:", t, "Le score de l'ordinateur est de:", d, ".")
        elif t == d:
            print("Il y a égalité")

jouer()
|
def get_min_max(ints):
    """
    Return a tuple(min, max) out of list of unsorted integers.

    Args:
       ints(list): list of integers containing one or more integers
    """
    if not ints:
        return (None, None)
    # Single O(n) pass tracking both extremes at once.
    smallest = largest = ints[0]
    for value in ints[1:]:
        if value > largest:
            largest = value
        elif value < smallest:
            smallest = value
    return (smallest, largest)
## Example Test Case of Ten Integers
import random

l = [i for i in range(0, 10)]  # a list containing 0 - 9
random.shuffle(l)

# Same checks as before, printing "Pass"/"Fail" in the same order.
for values, expected in [
    (l, (0, 9)),
    ([], (None, None)),
    ([0], (0, 0)),
    ([1], (1, 1)),
    ([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], (0, 0)),
]:
    print("Pass" if expected == get_min_max(values) else "Fail")
6dd2295e2778737e6139619527dc0135893b880b | alinadjar/SparkRepo | /Chapter-9/Chapter-9.py | 5,822 | 4.125 | 4 | # Code for Chapter 9
# Example 9-5
from pyspark.sql import SparkSession
# SparkSession is the entry point for DataFrame/SQL work; getOrCreate() reuses
# an already-running session instead of starting a second one.
spark = SparkSession \
    .builder \
    .appName("Python Spark SQL basic example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
# Example 9-8
from pyspark.sql import Row, SQLContext
from pyspark import SparkConf, SparkContext  # bug fix: these were used but never imported

config = SparkConf().setAppName("My Spark SQL app")
# Bug fix: SparkContext's first positional parameter is `master`, so the
# configuration object must be passed by keyword.
sc = SparkContext(conf=config)
sqlContext = SQLContext(sc)
rowsRDD = sc.textFile("path/to/employees.csv")
dataFileSplitIntoColumns = rowsRDD.map(lambda l: l.split(","))
Records = dataFileSplitIntoColumns.map(
    lambda cols: Row(name=cols[0], age=cols[1], gender=cols[2]))
# Creating a DataFrame
df = sqlContext.createDataFrame(Records)
# Perform usual DataFrame operations
df.show(5)
# Example 9-9
from pyspark.sql import Row
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
# Row objects carry named fields; createDataFrame infers the schema from them.
people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
# Infer the schema, and register the DataFrame as a table.
schemaPeople = spark.createDataFrame(people)
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
# Example 9-11
# Reading a JSON file as a DataFrame
from pyspark.sql import SparkSession
callDetailsDF = spark.read.json("/home/spark/sampledata/json/cdrs.json")
# Write the DataFrame out as a Parquet File
callDetailsDF.write.parquet("cdrs.parquet")
# Loading the Parquet File as a DataFrame
callDetailsParquetDF = spark.read.parquet("cdrs.parquet")
# Standard DataFrame data manipulation
callDetailsParquetDF.createOrReplaceTempView("calldetails")
# Bug fix: the SQL text was split across two source lines inside one
# single-quoted string (a syntax error); keep it as one (implicitly
# concatenated) literal.
topCallLocsDF = spark.sql("select Origin,Dest, count(*) as cnt from "
                          "calldetails group by Origin,Dest order by cnt desc")
# Bug fix: the filter expression used typographic quotes; Python string
# literals need ASCII quotes.
callDetailsParquetDF.filter("OriginatingNum = 797303107 ").agg({"CallCharge": "sum"}).show()
# Example 9-16
# Creating a Spark session with Hive support
customSparkSession = SparkSession.builder \
    .appName("Ptyhon Sparl SQL and Hive Integration ") \
    .config("spark.sql.warehouse.dir", "spark-warehouse") \
    .enableHiveSupport() \
    .getOrCreate()
# Creating a table.  Bug fix: the SQL text spanned several source lines inside
# a single-quoted string, which is a syntax error; use triple-quoted literals.
customSparkSession.sql("""CREATE TABLE IF NOT EXISTS cdrs
    (callingNumber STRING, calledNumber String, origin String, Dest
    String,CallDtTm String, callCharge Int)
    ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' """)
# Loading data into the Hive cdrs table
customSparkSession.sql("LOAD DATA LOCAL INPATH '/home/spark/sampledata/cdrs.csv' INTO table cdrs")
# Viewing the top 5 origin/destination pairs
customSparkSession.sql(""" SELECT origin, dest, count(*) as cnt from cdrs
    group by origin, dest order by cnt desc LIMIT 5""").show()
# Example 9-19
from pyspark.sql.types import *
sc = spark.sparkContext
# Build an explicit schema: one nullable StringType column per field name.
schemaString = "name age gender"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
userSchema = StructType(fields)
# Apply the schema while reading instead of letting Spark infer one.
userDF = spark.read.schema(userSchema).json("path/to/user.json")
userDF.createOrReplaceTempView("user")
cntByGenderDF = spark.sql("SELECT gender, count(1) as cnt FROM user GROUP BY gender ORDER BY cnt")
# Example 9-21
// save a DataFrame in JSON format
customerDF.write
.format("org.apache.spark.sql.json")
.save("path/to/output-directory")
// save a DataFrame in Parquet format
homeDF.write
.format("org.apache.spark.sql.parquet")
.partitionBy("city")
.save("path/to/output-directory")
// save a DataFrame in ORC file format
homeDF.write
.format("orc")
.partitionBy("city")
.save("path/to/output-directory")
// save a DataFrame as a Postgres database table
df.write
.format("org.apache.spark.sql.jdbc")
.options(Map("url" -> "jdbc:postgresql://host:port/database?user=<USER>&password=<PASS>",
"dbtable" -> "schema-name.table-name")).save()
// save a DataFrame to a Hive table
df.write.saveAsTable("hive-table-name")
df = spark.read.load("examples/src/main/resources/people.json", format="json")
df.select("name", "age").write.save("namesAndAges.parquet", format="parquet")
df = spark.read.load("examples/src/main/resources/people.csv",
format="csv", sep=":", inferSchema="true", header="true")
# Example 9-25
jdbcDF.write \
.format("jdbc") \
.option("url", "jdbc:postgresql:dbserver") \
.option("dbtable", "schema.tablename") \
.option("user", "username") \
.option("password", "password") \
.save()
jdbcDF2.write \
.jdbc("jdbc:postgresql:dbserver", "schema.tablename",
properties={"user": "username", "password": "password"})
# Example 9-28
df = sqlContext.read.json("temperatures.json")
df.registerTempTable("citytemps")
# Register the UDF with our SQLContext
sqlContext.registerFunction("CTOF", lambda degreesCelsius: ((degreesCelsius * 9.0 / 5.0) + 32.0))
sqlContext.sql("SELECT city, CTOF(avgLow) AS avgLowF, CTOF(avgHigh) AS avgHighF FROM citytemps").show()
|
2690856645451099474cbed49d688a0fecd653f4 | KaviyaMadheswaran/laser | /infytq prev question.py | 408 | 4.15625 | 4 | Ex 20)
1:special string reverse
Input Format:
b@rd
Output Format:
d@rb
Explanation:
We should reverse the alphabets of the string by
keeping the special characters in the same position
s = input()
alp = []   # alphabetic characters, in original order
ind = []   # indices of the special (non-alphabetic) characters
for i in range(0, len(s)):
    if s[i].isalpha():
        alp.append(s[i])
    else:
        ind.append(i)
# Reverse only the letters, then re-insert each special character at
# its original index so it stays in place.
rev = alp[::-1]
for i in ind:
    rev.insert(i, s[i])
# Bug fix: the original printed the list itself (e.g. ['d', '@', 'r', 'b']);
# the expected output format is the joined string, e.g. "d@rb".
print(''.join(rev))
|
50e3bc5493956708bf1897a74600cd9639777bf8 | KaviyaMadheswaran/laser | /w3 resource.py | 403 | 4.1875 | 4 | Write a Python program to split a given list into two parts where the length of the first part of the list is given. Go to the editor
Original list:
[1, 1, 2, 3, 4, 4, 5, 1]
Length of the first part of the list: 3
Split the said list into two parts:
([1, 1, 2], [3, 4, 4, 5, 1])
n = int(input())
l = list(map(int, input().split()))
# Cut the list after the first n items and show both halves as a tuple.
first, second = l[:n], l[n:]
print((first, second))
|
fbcf1c36b34a8565e0153b403fbcb185782890ba | KaviyaMadheswaran/laser | /w3 resource6.py | 392 | 4.15625 | 4 | Write a Python program to flatten a given nested list structure. Go to the editor
Original list: [0, 10, [20, 30], 40, 50, [60, 70, 80], [90, 100, 110, 120]]
Flatten list:
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]
l = [0, 10, [20, 30], 40, 50, [60, 70, 80], [90, 100, 110, 120]]
l1 = []
# Flatten exactly one level of nesting, preserving element order.
for item in l:
    if isinstance(item, list):
        l1.extend(item)
    else:
        l1.append(item)
print(l1)
|
5539d4170fa7ecc1a9a97ba4aa3ed2f23650bd1a | KaviyaMadheswaran/laser | /Birthday Cake candle(Hackerrank).py | 490 | 4.125 | 4 | Output Format
Return the number of candles that can be blown out on a new line.
Sample Input 0
4
3 2 1 3
Sample Output 0
2
Explanation 0
We have one candle of height 1, one candle of height 2, and two candles of height 3. Your niece only blows out the tallest candles,
meaning the candles where height = 3. Because there are 2 such candles, we print 2 on a new line.
n = int(input())
heights = list(map(int, input().split()))
# Only the tallest candles can be blown out; print how many there are.
tallest = max(heights)
print(heights.count(tallest))
|
5f8af1379485f30152ed43f8c180373a4e9982c5 | KaviyaMadheswaran/laser | /Fibonacci.py | 92 | 3.78125 | 4 | n=int(input("enter num"))
a, b = 0, 1
# Print the first n Fibonacci numbers, starting from 0.
# (n is read from stdin on the line above.)
for _ in range(n):
    print(a)
    a, b = b, a + b
|
f21306c8b839de0d69c1e259b37b8026bfc79e8c | andreagibb93/MSc | /python/LibrarySystems/library_member.py | 1,901 | 3.765625 | 4 | class LibraryMember:
    def __init__(self, name, user_id, messages):
        """Create a member with a name, user id and initial messages."""
        self.name = name
        self.user_id = user_id
        self.borrowed = []  # books currently on loan to this member
        self.messages = messages
    def get_name(self):
        """Return the member's name."""
        return self.name
    def set_name(self, name):
        """Replace the member's name."""
        self.name = name
    def get_user_id(self):
        """Return the member's user id."""
        return self.user_id
    def set_user_id(self, user_id):
        """Replace the member's user id."""
        self.user_id = user_id
    def get_borrowed(self):
        """Return the list of books borrowed by this member."""
        return self.borrowed
    def set_borrowed(self, borrowed):
        """Replace the whole borrowed-books list."""
        self.borrowed = borrowed
    def get_messages(self):
        """Return the messages received so far."""
        return self.messages
    def set_messages(self, messages):
        # NOTE: despite the 'set_' name this *accumulates* (+=) rather
        # than replaces -- callers rely on the append behaviour.
        self.messages += messages
    def print_user_details(self):
        """Print a human-readable summary of the member."""
        print("The library member name: " + str(self.name))
        print("The library user ID is: " + str(self.user_id))
        print("The library member book(s) borrowed: " + str(self.borrowed))
        print("Messages received: " + str(self.messages))
    def print_messages(self):
        """Print everything the member has received, in one go."""
        print(self.messages)
    def add_book(self, book):
        """Record one more borrowed book."""
        self.borrowed.append(book)
    def print_books(self):
        # Each entry is expected to provide print_book_details()
        # (see the Book class elsewhere in this project).
        for book in self.borrowed:
            book.print_book_details()
    def get_number_borrowed(self):
        """Return how many books are currently borrowed."""
        return len(self.borrowed)
|
229f0b4c27d60e5e4ef6f542f449d82a2f0062a2 | andreagibb93/MSc | /python/pairsTask.py | 1,199 | 3.765625 | 4 | class Assignment:
def _init_(self, assignment1, assignment2):
self.assignment = assignment1
self.assignment = assignment2
class Module:
    """A simple record of three module values."""

    def __init__(self, module1, module2, module3):
        # Keep each supplied value on the instance under the same name.
        self.module1, self.module2, self.module3 = module1, module2, module3
class Student:
    """A student with a name, student number and three module marks.

    Bug fix: the initializer was misspelled ``_init_``, so Python never
    called it and ``Student(...)`` with arguments raised a TypeError.
    """

    def __init__(self, name, stNo, maths, english, computing):
        self.name = name
        self.stNo = stNo
        self.maths = maths
        self.english = english
        self.computing = computing

    def setName(self, name):
        self.name = name

    def setStudentNo(self, stNo):
        self.stNo = stNo

    def setMaths(self, module1):
        self.maths = module1

    def setEnglish(self, module2):
        self.english = module2

    def setComputing(self, module3):
        self.computing = module3

    def getName(self):
        return self.name

    def getStudentNo(self):
        return self.stNo

    def getMaths(self):
        return self.maths

    def getEnglish(self):
        return self.english

    def getComputing(self):
        return self.computing
def test():
    """Smoke test for the classes above.

    NOTE(review): the argument counts below do not match the class
    definitions -- Module's __init__ expects three values, and Student's
    initializer is misspelled ``_init_`` so Student(...) with arguments
    raises TypeError. Confirm the intended usage before relying on this.
    """
    m = Module(10)
    st = Student("andrea", "1234", m)
|
81d89825a99c76cf2fc1e2d6a2c29846a8081e55 | andreagibb93/MSc | /python/LibrarySystems/book.py | 1,755 | 4.03125 | 4 | class Book:
    def __init__(self, isbn, library_member, book_damages, title, author):
        """A single library book.

        library_member is the member currently borrowing the book, or
        None when the book is available on the shelf.
        """
        self.isbn = isbn
        self.library_member = library_member
        self.book_damages = book_damages
        self.title = title
        self.author = author
    def get_isbn(self):
        """Return the book's ISBN."""
        return self.isbn
    def set_isbn(self, isbn):
        """Replace the book's ISBN."""
        self.isbn = isbn
    def get_library_member(self):
        """Return the current borrower (or None)."""
        return self.library_member
    def set_library_member(self, library_member):
        """Record who is borrowing the book (None = returned)."""
        self.library_member = library_member
    def get_damages(self):
        """Return the recorded damage."""
        return self.book_damages
    def set_damages(self, damages):
        # NOTE: accumulates (+=) onto the existing damage record rather
        # than replacing it, despite the 'set_' name.
        self.book_damages += damages
    def get_title(self):
        """Return the book's title."""
        return self.title
    def set_title(self, title):
        """Replace the book's title."""
        self.title = title
    def get_author(self):
        """Return the book's author."""
        return self.author
    def set_author(self, author):
        """Replace the book's author."""
        self.author = author
    def check_availability(self):
        """Return True when nobody is currently borrowing the book."""
        if self.library_member is None:
            return True
        else:
            return False
    def print_book_details(self):
        # NOTE(review): the string concatenations assume book_damages and
        # library_member are strings; other types raise TypeError --
        # confirm the expected attribute types. Title/author are not shown.
        print("ISBN: " + self.isbn)
        print("Damages " + self.book_damages)
        if self.library_member is not None:
            print("Borrowed by: " + self.library_member)
ff8506ea8c03e47441af4d47771104d1b9ddf850 | adrianyi/FacialFilter | /util.py | 6,990 | 3.890625 | 4 | import numpy as np
import cv2 # OpenCV 3
#from PIL import Image
def load_image(image_path):
    '''
    Load an image from file.

    Reads the file at image_path with OpenCV and returns it twice: once
    in BGR colour and once converted to grayscale.

    Args:
        image_path: path to an image file
            (i.e. '/example_images/test_image_1.jpg')

    Returns:
        (image_BGR, image_gray)
        image_BGR: numpy.ndarray with dtype 'uint8' and shape (h, w, 3);
            BGR channel order (OpenCV's default), each pixel in [0, 255]
        image_gray: numpy.ndarray with dtype 'uint8' and shape (h, w),
            each pixel in [0, 255]
    '''
    image_BGR = cv2.imread(image_path)
    # NOTE(review): cv2.imread returns None for unreadable paths, which
    # makes cvtColor raise -- confirm callers always pass valid images.
    image_gray = cv2.cvtColor(image_BGR, cv2.COLOR_BGR2GRAY)
    return image_BGR, image_gray
def detect_cascade(image_gray, cascadeClassifier,
                   scaleFactor = 1.2,
                   minNeighbors = 6):
    '''
    Detects objects in an image.

    Runs the given cascade classifier over image_gray and returns
    (x, y, w, h) for every object detected.

    Args:
        image_gray: 2-dimensional numpy.ndarray
        cascadeClassifier: a cv2.CascadeClassifier instance (NOT a path
            string -- the caller must construct it)
        scaleFactor (optional, default 1.2): float (>1) scaling factor
            between detection scales
        minNeighbors (optional, default 6): integer
        For a good explanation of scaleFactor and minNeighbors, see
        http://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_Image_Object_Detection_Face_Detection_Haar_Cascade_Classifiers.php

    Returns:
        detected: list of (x,y,w,h) for all detected objects
            x,y are position coordinates
            w,h are width (in x-direction) and height (in y-direction)
    '''
    detected = cascadeClassifier.detectMultiScale(image_gray, scaleFactor, minNeighbors)
    return detected
def detect_facial_features(image_gray, faces, model):
    '''
    Predict 15 facial keypoints for each detected face.

    Args:
        image_gray: 2-dimensional np.ndarray (grayscale image)
        faces: list of (x,y,w,h) face bounding boxes
        model: Keras model instance (NOT a file path); expects 96x96x1
            inputs with pixel values in [0, 1]

    Returns:
        features_list: per-face vectors (f1x,f1y,f2x,f2y,...,f15x,f15y);
            coordinates appear to be in the model's normalized space,
            presumably [-1, 1] (the other helpers in this file map them
            back with (f + 1) * size / 2) -- TODO confirm.
    '''
    faces_array = np.zeros(((len(faces),96,96,1)))
    for i, (x,y,w,h) in enumerate(faces):
        face = image_gray[y:y+h,x:x+w]
        # Scale pixels to [0, 1] and resize to the model's input size.
        face = cv2.resize(face/255, (96,96))
        face = np.reshape(face, (96,96,1))
        faces_array[i] = face
    features_list = model.predict(faces_array)
    return features_list
def add_sunglasses(image_BGR, faces, list_facialFeatures, sg_image='images/sunglasses.png'):
    '''
    Overlays sunglasses on faces.

    Args:
        image_BGR: 3-channel BGR image
        faces: list of (x,y,w,h) face bounding boxes
        list_facialFeatures: one feature vector per face
            (see detect_facial_features)
        sg_image: string path to a sunglasses image
            (4-channel, where 4th dim is opacity)

    Returns:
        a copy of image_BGR with the sunglasses alpha-blended onto each face
    '''
    image_sg = np.copy(image_BGR)
    # IMREAD_UNCHANGED keeps the alpha channel; scale to [0, 1] for blending.
    sunglasses = cv2.imread(sg_image, cv2.IMREAD_UNCHANGED)/255.
    for face, facialFeatures in zip(faces,list_facialFeatures):
        x, y, w, h = face
        # Warp the sunglasses so its anchor points land on eyes/nose.
        transform = get_sunglasses_transform(face, facialFeatures)
        sg = cv2.warpAffine(sunglasses, transform, (w,h))
        # Replicate the alpha channel across the three colour channels.
        mask = sg[:,:,[3,3,3]]
        # Alpha-blend: sunglasses where opaque, original image elsewhere.
        image_sg[y:y+h,x:x+w] = np.multiply(sg[:,:,:3],mask) + \
                                np.multiply(image_sg[y:y+h,x:x+w],1-mask)

        # Earlier axis-aligned placement, kept for reference:
        # (xmax, xmin, ymax, ymin) = extent_sunglasses(face, facialFeatures)
        # sg = cv2.resize(sunglasses, (xmax-xmin,ymax-ymin))
        # mask = sg[:,:,[3,3,3]]
        # image_sg[ymin:ymax,xmin:xmax] = np.multiply(sg[:,:,:3],mask) + \
        #     np.multiply(image_sg[ymin:ymax,xmin:xmax],mask_inv)
    return image_sg
def get_sunglasses_transform(face, facialFeatures):
    '''
    Returns the affine transformation matrix for the sunglasses image.

    Maps three fixed anchor points in the sunglasses image -- presumably
    the left lens, right lens and bridge of images/sunglasses.png (TODO
    confirm against that file's dimensions) -- onto the detected left
    eye, right eye and nose positions inside the face box.
    '''
    # (f + 1) * size / 2 converts normalized feature coords to pixels
    # within the face box (consistent with extent_sunglasses below).
    left_eye = (facialFeatures[6:8] + 1.0) * face[2:] / 2.0
    right_eye = (facialFeatures[10:12] + 1.0) * face[2:] / 2.0
    nose = (facialFeatures[20:22] + 1.0) * face[2:] / 2.0
    src = np.float32([[37, 30], [147, 30], [92, 65]])
    dst = np.float32([left_eye, right_eye, nose])
    return cv2.getAffineTransform(src,dst)
def extent_sunglasses(face, facialFeatures):
    '''
    Calculates extent (xmax, xmin, ymax, ymin) for sunglasses given
    a face and its facialFeatures.

    Args:
        face: (x, y, w, h) bounding box of the face
        facialFeatures: indexable array of 22+ normalized coordinates
            (numpy fancy indexing is used, so a numpy array is required)

    Returns:
        (xmax, xmin, ymax, ymin): pixel extent as plain ints
    '''
    # Feature layout (x, y interleaved):
    # Right eyebrow (18, 19)
    # Left eyebrow (14, 15)
    # Outer point of right eye (6, 7)
    # Outer point of left eye (10, 11)
    # Tip of the nose (20, 21)
    x, y, w, h = face
    # The 1.3 factor widens the x-extent slightly beyond the feature points.
    brow_rx, brow_lx, eye_rx, eye_lx = \
        (1.3*facialFeatures[[18,14,6,10]] + 1) * w/2 + x
    brow_ry, brow_ly, eye_ry, eye_ly, nose_y = \
        (facialFeatures[[19,15,7,11,21]] + 1) * h/2 + y
    # Bug fix: np.int(...) was an alias for the builtin that was removed
    # in NumPy 1.24; plain int() is equivalent here.
    xmin = int(min(brow_rx, eye_rx))
    xmax = int(max(brow_lx, eye_lx))
    ymin = int(min(brow_ly, brow_ry))
    ymax = int(nose_y)
    return (xmax, xmin, ymax, ymin)
def drawRects(image_BGR, objList, color_BGR=(0,0,255), thickness=3):
    '''
    Draws rectangles for all objects.

    Given list of coordinates (x,y,w,h) in objList, draws rectangles with
    vertices (x,y), (x+w,y), (x,y+h), (x+w,y+h).

    Args:
        image_BGR: 3-dimensional numpy.ndarray (BGR is OpenCV's default)
        objList: list of (x,y,w,h) for all objects to draw
        color_BGR (optional, default (0,0,255)): BGR color, tuple of 3 uint8
            (i.e. (0,0,255) is red)
        thickness (optional, default 3): pixel thickness for lines

    Returns:
        image_with_rects: 3-dimensional numpy.ndarray (BGR)
            with all the rectangles drawn in; the input image is NOT
            modified (a copy is drawn on).
    '''
    image_with_rects = np.copy(image_BGR)
    for x, y, w, h in objList:
        cv2.rectangle(image_with_rects, (x,y), (x+w,y+h), color_BGR, thickness)
    return image_with_rects
def plot_features(image_ndarray, coords, color_BGR=[0,255,0], thickness=2):
    '''
    Draws a dot for every feature coordinate.

    Given normalized interleaved coordinates (x1,y1,x2,y2,...) in coords,
    draws circles of radius (thickness+1)//2 at the corresponding pixels.

    Args:
        image_ndarray: numpy.ndarray (grayscale or BGR)
        coords: flat sequence of normalized feature coordinates
        color_BGR (optional, default (0,255,0)): BGR color, tuple of 3 uint8
        thickness (optional, default 2): pixel thickness for lines

    Returns:
        a BGR copy of the input with all the dots drawn in
    '''
    image = np.copy(image_ndarray)
    # NOTE(review): shape[:2] is (rows, cols) == (height, width), so this
    # assignment names them backwards; for non-square images the x/y
    # scaling below may be swapped -- confirm with a rectangular input.
    w,h = image.shape[:2]
    if len(image.shape) == 2:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    elif len(image.shape) == 3:
        if image.shape[2] == 1:
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        elif image.shape[2] == 3:
            pass
        else:
            raise TypeError("Input must be either a grayscale or BGR image")
    #undo normalization
    x_features, y_features = coords[0::2]*w/2+w/2, coords[1::2]*h/2+h/2
    # NOTE(review): cv2.circle requires integer centre coordinates on
    # newer OpenCV builds; these are floats -- confirm this still runs.
    for coord in zip(x_features,y_features):
        cv2.circle(image, coord, (thickness+1)//2, color_BGR, thickness)
    return image
|
d2e92863159f613bf175485cfb7a9f624f8962ff | ClauMaj/pyHomework | /printSquare1and2.py | 117 | 3.859375 | 4 | # Ex. 9
# Ex. 9: print a fixed 5x5 square of asterisks.
i = 1
while (i <=5):
    print("*****")
    i +=1

# Ex. 10: the original read the size but never drew anything; actually
# print an i-by-i square of asterisks.
i = int(input("How big is the square? "))
for _ in range(i):
    print("*" * i)
|
6c37d6800d0df00d5c38e1c319a462f810628a74 | ClauMaj/pyHomework | /coins.py | 172 | 4.0625 | 4 |
coins = 0
print(f"You have {coins} coins")
# Hand out one coin per round until the user answers anything but "yes".
# (The first round always runs, matching the primed-loop original.)
while True:
    coins += 1
    print(f"You have {coins} coins")
    if input("Do you want another (yes/no)? ") != "yes":
        break
|
84341edeb49283cc802b891920ba2fa76a8a264c | ClauMaj/pyHomework | /celToFahr.py | 218 | 3.9375 | 4 |
# week 1 Tuesday Homework
# Ex. 6
temp = float(input('Temperature in C? '))
fahr = temp * 9 / 5 + 32
# Show whole-number results without a trailing ".0", otherwise show the
# full float. Bug fix: the original had these two branches swapped (it
# printed the float when the value was whole and truncated otherwise).
if fahr == int(fahr):
    print(f'Temperature in F is: {int(fahr)}')
else:
    print(f'Temperature in F is: {fahr}')
|
ddfc5bca9db935f7efff39b4597726dd9de208ec | ClauMaj/pyHomework | /Fibonacci.py | 591 | 4.03125 | 4 |
# 1. Fibonacci recursive
# def recur_fibo(n):
# if n <= 0:
# print("Please enter a number > 0")
# exit()
# elif n == 1:
# return 0
# elif n == 2:
# return 1
# else:
# return(recur_fibo(n-1) + recur_fibo(n-2))
# print(recur_fibo(10))
# 2. Fibonacci if
def fibonacci(n):
a = 0
b = 1
if n <= 0:
print("Please enter a number > 0")
exit()
elif n == 1:
return b
else:
for i in range(2,n):
c = a + b
a = b
b = c
return c
print(fibonacci(10))
|
3898fed3bff0899f6e6354470a5dd6ff967546db | bradleybossard/python-computer-vision | /pcv_1_3_invert_image.py | 290 | 3.765625 | 4 | """
Example for inverting an image. PCV 1.3, Graylevel transforms.
"""
from PIL import Image
from numpy import *
# Read image into numpy array (convert('L') yields grayscale uint8)
im = array(Image.open('empire.jpg').convert('L'))
# Invert: graylevel transform f(x) = 255 - x, applied element-wise
im2 = 255 - im #invert image
pil_im = Image.fromarray(im2)
pil_im.save('empire_inverted.jpg')
|
a5ed73ac78f673fa965b551bef169860cd38a658 | timclaussen/Python-Examples | /OOPexample.py | 441 | 4.25 | 4 | #OOP Example
#From the simple critter example, but with dogs
class Dog(object):
    """A virtual Dog.

    Bug fixes: the constructor was misspelled ``_init_`` (never invoked),
    a dangling ``def`` line made the file a SyntaxError, and the demo
    called ``Dog()`` without the required name.
    """
    total = 0  # count of Dog instances created so far

    def __init__(self, name):
        print("A new floof approaches!")
        Dog.total += 1  # each new instance adds 1 to the class attribute
        self.name = name

    def speak(self):
        print("Woof, I am dog.")

#main
pup = Dog("Pup")
pup.speak()
|
bcf588a8a3e17d73390dd4e637c91780df1f0ff8 | simonet85/Python-training | /LCM/pgcd.py | 131 | 3.5 | 4 | # Algorithme d'Euclide pour le pgcd
# Euclid's algorithm for the greatest common divisor.
def pgcd(a, b):
    """Return gcd(a, b) by repeated remainder-taking (assumes b != 0)."""
    while a % b:
        a, b = b, a % b
    return b

print(pgcd(120, 36))
6a41079dfde72348c4db89b1ab7270d3b5509b30 | gkotian/easy-countdown | /easy-countdown/easy-countdown.py | 5,205 | 4 | 4 | #!/usr/bin/env python
################################################################################
#
# Description:
# ------------
# A simple yet flexible countdown timer for the Linux command line.
#
# Usage:
# ------
# $> easy-countdown <time/time-duration>
#
################################################################################
import argparse
import re
import subprocess
import sys
from datetime import datetime, time, timedelta
from time import sleep
################################################################################
#
# Gets the number of seconds from the current time until the given time. If
# the given time has already passed on the current day, then it is assumed
# that the user wants to countdown until the given time on the next day.
#
# Params:
# time_str = the given time value in HH:MM or HH:MM:SS format
#
# Returns:
# the number of seconds from the current time until the given time, or -1
# if an error occurred
#
################################################################################
def seconds_until(time_str):
    # Parse "HH:MM" or "HH:MM:SS" into a datetime.time; -1 on bad input.
    try:
        target_time = time(*(int(piece) for piece in time_str.split(':')))
    except ValueError:
        return -1

    target = datetime.combine(datetime.today(), target_time)
    delta = int((target - datetime.now()).total_seconds())
    if delta < 0:
        # Already past that time today: count down to the same time
        # tomorrow instead.
        delta += 86400
    return delta
################################################################################
#
# Gets the number of seconds from the current time after which the given time
# duration would have elapsed.
#
# Params:
# time_str = the given time duration in h, m, s notation (e.g. 10s, 15m,
# 5m30s, 3h, 4h45m, 6h25m45s and so on)
#
# Returns:
# the number of seconds from the current time after which the given time
# duration would have elapsed, or -1 if an error occurred
#
################################################################################
def seconds_in(time_str):
    # Return the number of seconds in a duration written in h/m/s
    # notation (e.g. 10s, 15m, 5m30s, 3h, 4h45m, 6h25m45s), or -1 on
    # unparseable input. '0s' is special-cased because the zero-check
    # below would otherwise reject it.
    if time_str == '0s':
        return 0
    # This regular expression should be improved. It currently allows
    # erroneous input like '24hf' to get through.
    regex = re.compile(
        r'((?P<hours>\d+?)h)?((?P<minutes>\d+?)m)?((?P<seconds>\d+?)s)?')
    time_parts = regex.match(time_str)
    if not time_parts:
        return -1
    time_parts = time_parts.groupdict()
    # If time_str was initially `2h3m50s`, then time_parts will now be
    # `{'hours': '2', 'seconds': '50', 'minutes': '3'}`
    time_params = {}
    # Bug fix: .iteritems() is Python-2-only and raises AttributeError
    # under Python 3; .items() behaves identically on both.
    for (name, param) in time_parts.items():
        if param:
            time_params[name] = int(param)
    seconds = int(timedelta(**time_params).total_seconds())
    # An erroneous input results in seconds being zero here. This should
    # probably be caught earlier (i.e. when parsing the regular expression
    # itself).
    if seconds == 0:
        return -1
    return seconds
################################################################################
#
# Gets the number of seconds from the current time until which the countdown
# should run.
#
# Params:
# time_str = the given time value or time duration
# time value is expressed in HH:MM or HH:MM:SS format, whereas time
# duration is expressed in h, m, s notation
#
# Returns:
# the number of seconds from the current time until which the countdown
# should run
#
################################################################################
def calculate_seconds(time_str):
    # Dispatch on the input format: a bare integer is taken as seconds,
    # "HH:MM[:SS]" counts down *until* that clock time, and anything
    # else is parsed as an h/m/s duration. Returns -1 on parse failure.
    # Plain integers are assumed to be seconds
    try:
        seconds = int(time_str)
        return seconds
    except ValueError:
        pass
    if ':' in time_str:
        seconds = seconds_until(time_str)
    else:
        seconds = seconds_in(time_str)
    if seconds > 0:
        # Python 2 print statement: announces the absolute end time.
        print "Counting down '{}' seconds until '{}'".format(seconds,
            (datetime.now() + timedelta(seconds=seconds))
            .strftime('%Y-%m-%d %H:%M:%S'))
    return seconds
# Set up command line arguments
parser = argparse.ArgumentParser(usage='%(prog)s [ARGUMENTS]',
    description='Start a countdown for the given time duration')
parser.add_argument('time',
    help='time until or time-duration for which the countdown should run')
parser.add_argument('message', nargs='?', default='Countdown complete!',
    help='message to be displayed when the countdown completes')
args = vars(parser.parse_args())
seconds = calculate_seconds(args['time'])
if seconds < 0:
    print "Unable to parse time duration '{}'".format(args['time'])
    sys.exit(1)
# Redraw HH:MM:SS in place once per second ("\r\033[K" returns the
# cursor and clears the line) until the countdown expires.
while seconds > 0:
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    sys.stdout.write("%02d:%02d:%02d" % (h, m, s))
    sys.stdout.flush()
    sleep(1.0)
    sys.stdout.write("\r\033[K")
    seconds -= 1
# NOTE(review): the terminal message is hardcoded while the xmessage
# popup honours args['message'] -- confirm whether both should use it.
print "Countdown complete!"
subprocess.call(['xmessage', args['message']])
3919094c9ab53a108ed200729d9baabf00282ff0 | rengokantai/orintmpyprogpj1 | /longestpalin.py | 183 | 3.859375 | 4 | __author__ = 'Hernan Y.Ke'
import scrabble
longest = ""
# Track the longest palindromic word seen in the scrabble word list.
for word in scrabble.wordlist:
    if word == word[::-1] and len(word) > len(longest):
        longest = word
print(longest)
58157c920d37f6ea9a66702030b3dc56b116361d | aswin191993/python_think | /dictionary/homophone.py | 467 | 3.578125 | 4 | def homophone(filename):
wlist=open(filename).read().split()
d=pronounce.read_dictionary('c06d')
for word in wlist:
if len(word) == 5 and word in d:
mword=modify(word,0)
if mword in d and d[mword] == d[word]:
mword=modify(word,1)
if mword in d and d[mword] == d[word]:
print word
def modify(word,i):
new=''
for index in range(len(word)):
if index == i:
continue
new+=word[index]
return new
import pronounce
# Requires 'words.txt' (word list) and the CMU pronouncing dictionary
# file 'c06d' to be present in the working directory.
homophone('words.txt')
|
ba138b4f7e98e96adf4ab635d3999b07c6ad29f1 | aswin191993/python_think | /string/ex4.py | 113 | 3.625 | 4 | name='banana'
def fnctn(name):
    # Count and print how many characters of `name` are the letter 'a'.
    total = 0
    for ch in name:
        if 'a' in ch:
            total += 1
    print(total)

fnctn(name)
|
0ec961356ccb99cebe36f3cc97e06a1c484409a6 | aswin191993/python_think | /cls_methods/3.py | 215 | 4.03125 | 4 | class Point(object):
""" representation of a point in 2D space """
def __init__(self,x=0,y=0):
self.x=x
self.y=y
def __str__(self):
return '(%d,%d)'%(self.x,self.y)
p=Point()
print p  # "(0,0)" -- Python 2 print statement
q=Point(2)
print q  # "(2,0)": y falls back to its default
|
967cf0fe121fb19daf257eab1587929b3230d74a | aswin191993/python_think | /list/remove_dup.py | 207 | 3.84375 | 4 | a='aswin','ravi','appu','arun','aswin','ravi'
def remove_dup(a):
s=list(a)
l=len(s)-1
r=range(l)
p=[]
for i in r:
if s[i] not in p:
p.append(s[i])
print "orginal list:",a
print p
remove_dup(a)
|
b98b7599e5ceaa89394233ee6b18cefa1af37770 | owenyi/Algorithm | /mergeSort.py | 1,812 | 3.859375 | 4 | def mergeSort(a, l, r):
if r > l:
m = int((r+l)/2)
mergeSort(a, l, m)
mergeSort(a, m + 1, r)
merge(a, l, m, r)
def merge(a, l, m, r):
    """Merge the sorted slices a[l..m] and a[m+1..r] in place.

    Uses the module-level list ``b`` as scratch space, so ``b`` must be
    at least as long as ``a`` (the benchmark below sets b = a.copy()).
    """
    i, j, k = l, m + 1, l
    # Take the smaller head element from either slice into b.
    while i <= m and j <= r:
        if a[i] < a[j]:
            b[k] = a[i]
            k += 1
            i += 1
        else:
            b[k] = a[j]
            k += 1
            j += 1
    # Copy whichever slice still has elements left.
    if i > m:
        for p in range(j, r + 1):
            b[k] = a[p]
            k += 1
    else:
        for p in range(i, m + 1):
            b[k] = a[p]
            k += 1
    # Write the merged run back into a.
    for p in range(l, r + 1):
        a[p] = b[p]
import random, time

# Benchmark harness. a[0] = -1 is a sentinel because the sort is
# 1-indexed; b = a.copy() provides the module-global scratch buffer that
# merge() writes into. '실행시간' means "execution time".
print('100000(random)')
a = [random.randint(1, 100000) for _ in range(100000)]
a.insert(0, -1)
b = a.copy()
start = time.time()
mergeSort(a, 1, len(a)-1)
print('실행시간 :', time.time() - start)
print('200000(random)')
a = [random.randint(1, 200000) for _ in range(200000)]
a.insert(0, -1)
b = a.copy()
start = time.time()
mergeSort(a, 1, len(a)-1)
print('실행시간 :', time.time() - start)
print('1000000(random)')
a = [random.randint(1, 1000000) for _ in range(1000000)]
a.insert(0, -1)
b = a.copy()
start = time.time()
mergeSort(a, 1, len(a)-1)
print('실행시간 :', time.time() - start)
#민감도 ("sensitivity": how input ordering affects the running time)
print('100000(in order)')
a = [i for i in range(100000)]
a.insert(0, -1)
b = a.copy()
start = time.time()
mergeSort(a, 1, len(a)-1)
print('실행시간 :', time.time() - start)
print('100000(random)')
a = [random.randint(1, 100000) for _ in range(100000)]
a.insert(0, -1)
b = a.copy()
start = time.time()
mergeSort(a, 1, len(a)-1)
print('실행시간 :', time.time() - start)
print('100000(reverse)')
a = [i for i in range(100000, 0, -1)]
a.insert(0, -1)
b = a.copy()
start = time.time()
mergeSort(a, 1, len(a)-1)
print('실행시간 :', time.time() - start)
2ba56012cdfecea7cd1776477948628ec75b87e2 | owenyi/Algorithm | /radixSort.py | 1,159 | 3.828125 | 4 | def radixSort(a, n, m, Q):
for k in range(1, m + 1):
for i in range(1, n + 1):
kd = digit(a[i], k)
enqueue(Q[kd], a[i])
p = 0
for i in range(10):
while Q[i] != []:
p += 1
a[p] = dequeue(Q[i])
def digit(data, k):
    """Return the k-th decimal digit of data (k=1 is the ones digit)."""
    # Pure integer arithmetic; the original computed
    # int(data % 10**k / 10**(k-1)) with float division, which loses
    # precision for very large values.
    return (data // 10 ** (k - 1)) % 10
def enqueue(queue, data):
    """Append data to the back of the (list-based) queue."""
    queue.append(data)
def dequeue(queue):
    """Pop and return the oldest element; warn and return -1 when empty."""
    if not queue:
        print('큐가 공백임')
        return -1
    return queue.pop(0)
# a = [-1, 832, 690, 152, 5, 950, 965, 369, 241, 577, 875]
# n = 10
# m = 3
# radixSort(a, n, m)
# print(a)
import random, time

# Ten bucket queues, one per decimal digit; radixSort drains them fully
# each pass, so the same Q can be reused across both benchmark runs.
Q = [[] for _ in range(10)]
print('100000(random)')
M = 5  # number of decimal digits in the largest key (99999)
N = 100000
a = [random.randint(1, 99999) for _ in range(100000)]
a.insert(0, -1)  # sentinel: the sort is 1-indexed
start = time.time()
radixSort(a, N, M, Q)
print('실행시간 :', time.time() - start)  # "execution time"
print('200000(random)')
M = 5
N = 200000
a = [random.randint(1, 99999) for _ in range(200000)]
a.insert(0, -1)
start = time.time()
radixSort(a, N, M, Q)
print('실행시간 :', time.time() - start)
c36c2c55eb41eb988a49b183578ba550a9dba4ac | jsacoba/pai789_finalproject | /script5_analyze/5.1 analysis_data/analysis_data.py | 1,159 | 3.734375 | 4 | # 1. Import 'pandas' module.
import pandas as pd
# 2. Read input file.
analyze = pd.read_csv('combined_clean.csv')
# ***A. Extracting countries with the same Risk Exposure Index.***
# 3. Flag duplicated 'Exposure' values. keep=False marks *every*
# occurrence of a duplicated value, and the notna() guard stops missing
# values from being treated as duplicates of each other.
analyze['dup'] = analyze.Exposure.duplicated(keep=False) & analyze.Exposure.notna()
# 4. Create new data frame for the selected countries with the same risk exposure.
same_exposure = analyze[analyze['dup'] == True]
# 5. Save results.
same_exposure.to_csv('same_exposure.csv')
# ***B. Extracting major advanced economies.***
# 6. Create new data frame for the major advanced economies.
advanced = analyze[analyze['Economic Development Status'] == 'Major Advanced Economy']
# 7. Save results.
advanced.to_csv('advanced.csv')
# ***C. Select the top 10 richest and top 10 poorest countries by GDP.***
# 8. Select top 10 and bottom 10 GDPs.
rich = analyze.nlargest(10, ['GDP'])
poor = analyze.nsmallest(10, ['GDP'])
# 9. Stack the two selections vertically (rich rows first).
extremes = pd.concat([rich, poor], axis = 0)
# 10. Save results.
extremes.to_csv('extremes.csv')
165d874a6d06feecf910d718f91eb77ef9501884 | snyderks/advent-solutions | /Day6.py | 909 | 3.953125 | 4 | # Day 6: http://adventofcode.com/2016/day/6
# Problem: Given many lines of the same length, determine the message
# by finding the most frequent letter in each column.
from collections import Counter
f = open("inputs/Day6.txt", "r")
columns = []
for line in f:
if len(columns) is 0:
for char in line:
if char is not '\n':
columns.append([char])
else:
for index, char in enumerate(line):
if char is not '\n':
columns[index].append(char)
def mostCommonLetter(chars):
    """Return the most frequent character in `chars`."""
    (letter, _count), = Counter(chars).most_common(1)
    return letter
# Rebuild the message from the most common letter of each column.
mostCommon = map(mostCommonLetter, columns)
print("".join(mostCommon))
# Part 2:
# Get the least common letter now!
def leastCommonLetter(chars):
    """Return the least frequent character in `chars`."""
    ranked = Counter(chars).most_common()
    return ranked[-1][0]
# Same reconstruction, but with the rarest letter of each column.
leastCommon = map(leastCommonLetter, columns)
print("".join(leastCommon))
b8f1b4efb1ccf1b41ebb087b207acae1663ac4d4 | snyderks/advent-solutions | /Day18.py | 1,506 | 3.5 | 4 | from collections import Counter
startRow = ".^.^..^......^^^^^...^^^...^...^....^^.^...^.^^^^....^...^^.^^^...^^^^.^^.^.^^..^.^^^..^^^^^^.^^^..^"
rowLength = len(startRow)
trap = "^"
safe = "."
def check(left, center, right):
if center is trap:
if left is trap and right is safe:
return trap
elif right is trap and left is safe:
return trap
else:
return safe
elif left is trap and center is safe and right is safe:
return trap
elif right is trap and center is safe and left is safe:
return trap
else:
return safe
def checkAny(row, column, rows):
    """Tile at (row, column) bred from the three parents above it.

    Positions past either end of the row are treated as safe. Bug fix:
    the original compared the column index with `is`, which only worked
    through CPython's small-integer caching; use == for value equality.
    """
    if column == rowLength - 1:
        return check(rows[row-1][column-1], rows[row-1][column], safe)
    if column == 0:
        return check(safe, rows[row-1][column], rows[row-1][column+1])
    return check(rows[row-1][column-1], rows[row-1][column], rows[row-1][column+1])
def getRow(row, allRows):
    """Build row number `row` from its predecessor allRows[row-1].

    Relies on the module-level rowLength for the row width.
    """
    newRow = ""
    for i in range(0, rowLength):
        newRow += checkAny(row, i, allRows)
    return newRow
def countSafe(rows):
    """Return the total number of safe ('.') tiles across all rows."""
    return sum(row.count(safe) for row in rows)
# Part 1: materialise 40 rows, print them, and count the safe tiles.
rows = [startRow]
for i in range(1, 40):
    rows.append(getRow(i, rows))
for row in rows:
    print(row)
print("Safe tiles: " + str(countSafe(rows)))
# Part 2
# Stream 400000 rows one at a time so memory stays constant: count the
# current (single-element) row, then derive the next row from it.
rows = [startRow]
safes = 0
for i in range(0, 400000):
    safes += countSafe(rows)
    rows = [getRow(1, rows)]
print("Safe tiles: " + str(safes))
479c6b3b45c6ef1abac43a25ae74616a9708b807 | snyderks/advent-solutions | /Day16.py | 711 | 3.828125 | 4 | import copy
def swap(char):
    """Flip a bit character: '1' -> '0', anything else -> '1'.

    Bug fix: the original used `char is "1"`, an identity comparison
    that only worked via CPython string interning (and warns on 3.8+).
    """
    if char == "1":
        return "0"
    else:
        return "1"
def dragon(data, length):
    """Grow `data` by the dragon-curve rule until it reaches `length`."""
    a = data
    while len(a) < length:
        # Append "0" plus the reversed, bit-flipped copy of a.
        flipped = "".join(swap(c) for c in reversed(a))
        a = a + "0" + flipped
    return a
def check(data):
    """Collapse adjacent pairs: an equal pair yields '1', unequal '0'."""
    return "".join(
        "1" if data[i] == data[i + 1] else "0"
        for i in range(0, len(data) - 1, 2)
    )
def checksum(data):
    """Repeatedly pair-collapse `data` until the result has odd length.

    Bug fix: the loop condition used `... % 2 is not 1`, an identity
    comparison of ints that only worked via CPython's small-integer
    caching; `!=` is the correct value comparison.
    """
    result = check(data)
    while len(result) % 2 != 1:
        result = check(result)
    return result
data = "01111010110010011"
length = 35651584
data = dragon(data, length)[:length+1]
print(checksum(data))
|
5f705c652f90bf17c55d1958b5760213e99cc3a6 | nikkisquared/OpenMPT-Music-Maker | /config.py | 1,450 | 3.75 | 4 | #!/usr/bin/env python
from __future__ import print_function
"""Handles parsing and correction of config file for the program"""
import ConfigParser
def check_boolean(section, variable, value, default=False):
    """Check that a config variable is a proper boolean.

    Accepts 0/1 and the strings "True"/"False"; anything else prints a
    warning and falls back to `default`. Bug fix: the original returned
    bool(value), so the non-empty string "False" -- which is truthy --
    was reported as True.
    """
    if value in (1, "True"):
        return True
    if value in (0, "False"):
        return False
    print("Boolean value (0/True or 1/False) was expected in "
        "%s %s but %s was found." % (section, variable, value))
    return default
def check_integer(section, variable, value):
    """Check that a config variable is a proper (non-negative) integer.

    Returns int(value) when `value` is a string of digits, otherwise
    prints a warning and returns 0. Note: negative numbers are rejected
    because str.isdigit() is False for strings like '-5'.
    """
    if value.isdigit():
        return int(value)
    else:
        # Fixed user-facing typo: "Interger" -> "Integer".
        print("Integer value was expected in %s %s but %s was found." % (
            section, variable, value))
        return 0
def init_config_file():
    """Parse config.ini and return (dbConfig, production) dicts.

    The 'overwrite' and 'lines' entries are validated/coerced via
    check_boolean/check_integer above. NOTE(review): uses the Python 2
    ``ConfigParser`` module name (``configparser`` on Python 3), and the
    missing-file branch is an unimplemented ``pass`` -- config.items()
    then raises NoSectionError; confirm the intended fallback.
    """
    config = ConfigParser.ConfigParser()
    if not config.read("config.ini"):
        # make ini from scratch
        pass
    production = dict(config.items("Production"))
    production["overwrite"] = check_boolean("Production", "overwrite",
        production["overwrite"])
    production["lines"] = check_integer("Production", "lines",
        production["lines"])
    dbConfig = dict(config.items("Database"))
    dbConfig["overwrite"] = check_boolean("Database", "overwrite",
        dbConfig["overwrite"])
    return dbConfig, production
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.