blob_id stringlengths 40 40 | repo_name stringlengths 11 42 | path stringlengths 6 141 | length_bytes int64 327 5.33k | score float64 3.52 4.16 | int_score int64 4 4 | content stringlengths 327 5.33k |
|---|---|---|---|---|---|---|
b7b944e5d5dd1cd1b41952c55bc13c348f905024 | Oyekunle-Mark/Graphs | /projects/ancestor/ancestor.py | 1,360 | 3.671875 | 4 | from graph import Graph
from util import Stack
def earliest_ancestor(ancestors, starting_node):
    """Return the most distant ancestor of ``starting_node``.

    ancestors: iterable of (parent, child) pairs describing a family tree.
    Returns -1 when the starting node has no parents at all; otherwise
    returns the vertex where the DFS below terminates (the deepest ancestor).
    NOTE(review): when several ancestors are equally deep, the winner is
    decided by the sorted push order below — confirm this matches the
    required tie-breaking rule (lowest numeric ID) before changing it.
    """
    # FIRST REPRESENT THE INPUT ANCESTORS AS A GRAPH
    # create a graph instance
    graph = Graph()
    # loop through ancestors and add every number as a vertex
    for parent, child in ancestors:
        # add the parent as a vertex
        graph.add_vertex(parent)
        # add the child as a vertex as well
        graph.add_vertex(child)
    # loop through ancestors and build the connections
    for parent, child in ancestors:
        # connect child -> parent so the traversal walks *up* the tree
        graph.add_edge(child, parent)
    # edges point child -> parent, so an empty edge set means no ancestors
    if not graph.vertices[starting_node]:
        # the problem defines "no ancestors" as -1
        return -1
    # create a stack to hold the vertices (iterative DFS)
    s = Stack()
    # add the starting_node to the stack
    s.push(starting_node)
    # set earliest_anc to -1 (sentinel, overwritten on the first pop)
    earliest_anc = -1
    # loop while stack is not empty
    while s.size() > 0:
        # pop the stack
        vertex = s.pop()
        # the last vertex popped overall is the deepest ancestor reached
        earliest_anc = vertex
        # push all parents of this vertex;
        # sorting keeps the traversal (and tie-breaking) deterministic
        for v in sorted(graph.vertices[vertex]):
            s.push(v)
    return earliest_anc
|
150d0efefb3c712edc14a5ff039ef2082c43152b | syurskyi/Python_Topics | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/111_Minimum_Depth_of_Binary_Tree.py | 1,281 | 4.03125 | 4 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
c_ Solution o..
# def minDepth(self, root):
# """
# :type root: TreeNode
# :rtype: int
# """
# # Recursion
# if root is None:
# return 0
# ld = self.minDepth(root.left)
# rd = self.minDepth(root.right)
# if ld != 0 and rd != 0:
# # handle 0 case!
# return 1 + min(ld, rd)
# return 1 + ld +rd
___ minDepth root
# BFS
__ root is N..:
r_ 0
queue = [root]
depth, rightMost = 1, root
w.. l.. queue) > 0:
node = queue.pop(0)
__ node.left is N.. a.. node.right is N..:
______
__ node.left is n.. N..:
queue.a.. node.left)
__ node.right is n.. N..:
queue.a.. node.right)
__ node __ rightMost:
# reach the current level end
depth += 1
__ node.right is n.. N..:
rightMost = node.right
____
rightMost = node.left
r_ depth
|
039dd3727cdd229548d94108ee220efa4a5b4838 | mertdemirlicakmak/project_euler | /problem_5.py | 1,144 | 4.0625 | 4 | """"2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest positive number that is evenly
divisible by all of the numbers from 1 to 20?"""
# This function returns the smallest number that is divisible to
# 1 to num
def find_smallest_divisible(num):
    """Return the smallest positive integer evenly divisible by 2..num.

    Computes the LCM of 1..num by tracking, for every prime, the highest
    power required by any value in the range, and folding each newly
    required power into the running product.
    Returns 1 for num < 2 (empty range).
    """
    max_power = {}  # prime -> largest exponent required so far
    result = 1
    # NOTE: the original shadowed the `num` parameter with its loop
    # variable; `value` keeps the parameter intact.
    for value in range(2, num + 1):
        prime_list = find_prime_factors(value)
        for prime in set(prime_list):
            needed = prime_list.count(prime)
            have = max_power.get(prime, 0)
            if needed > have:
                # raise the stored power; multiply in only the difference
                result *= prime ** (needed - have)
                max_power[prime] = needed
    return result
# This functions returns the prime factors of num
def find_prime_factors(num):
    """Return the prime factorization of num as an ascending list.

    Each prime appears once per power (e.g. 12 -> [2, 2, 3]).
    Returns [] for num < 2.
    """
    factors = []
    remaining = num
    divisor = 2
    while remaining > 1:
        # Divide each factor out fully before advancing, instead of
        # restarting trial division from 2 after every single hit as the
        # original did (which rescanned 2..remaining on each pass).
        while remaining % divisor == 0:
            factors.append(divisor)
            remaining //= divisor
        divisor += 1
    return factors
if __name__ == '__main__':
    # Project Euler #5: smallest number evenly divisible by 1..20.
    print(find_smallest_divisible(20))
|
0437daed01bce0f5a3046616917a2c29a1ed15d0 | zhouyuhangnju/freshLeetcode | /Combination Sum.py | 1,252 | 3.71875 | 4 | def combinationSum(candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
res = []
candidates = sorted(candidates)
def combinationRemain(remain, curr_res):
if remain == 0:
res.append(curr_res)
return
for c in candidates:
if c > remain:
break
if curr_res and c < curr_res[-1]:
continue
combinationRemain(remain - c, curr_res + [c])
combinationRemain(target, [])
return res
def combinationSum2(candidates, target):
    """
    :type candidates: List[int]
    :type target: int
    :rtype: List[List[int]]
    """
    ordered = sorted(candidates)
    results = []

    def search(idx, remaining, chosen):
        # Exact hit: record the combination built so far.
        if remaining == 0:
            results.append(chosen)
            return
        # Overshot the target or exhausted the candidate list.
        if remaining < 0 or idx >= len(ordered):
            return
        # Branch 1: take ordered[idx] again (reuse is allowed).
        search(idx, remaining - ordered[idx], chosen + [ordered[idx]])
        # Branch 2: skip ordered[idx] for good.
        search(idx + 1, remaining, chosen)

    search(0, target, [])
    return results
if __name__ == '__main__':
print combinationSum2([2, 3, 6, 7], 7) |
246130cc6d8d3b7c3fee372774aa2f93018f4e35 | alexleversen/AdventOfCode | /2020/08-2/main.py | 1,119 | 3.625 | 4 | import re
file = open('input.txt', 'r')
lines = list(file.readlines())
def testTermination(swapLine):
    """Run the handheld program with line `swapLine` swapped (jmp<->nop)
    and return the accumulator value if the program terminates.

    Returns False when the swap target is an `acc` line (never swapped)
    or when the program enters an infinite loop.
    NOTE(review): a run that terminates with accumulator 0 is
    indistinguishable from failure under `==`/`!=` because 0 == False in
    Python; callers should compare with `is not False`.
    """
    # `acc` instructions are never the corrupted one in this puzzle.
    if(lines[swapLine][0:3] == 'acc'):
        return False
    instructionIndex = 0
    instructionsVisited = []
    acc = 0
    while(True):
        # Ran off the end of the program: normal termination.
        if(instructionIndex == len(lines)):
            return acc
        # Revisiting an instruction means we are in an infinite loop.
        if(instructionIndex in instructionsVisited):
            return False
        instructionsVisited.append(instructionIndex)
        # e.g. "jmp +4" -> ('jmp', '+4')
        match = re.match('(\w{3}) ([+-]\d+)', lines[instructionIndex])
        instruction, value = match.group(1, 2)
        # Apply the single jmp<->nop swap under test.
        if(instructionIndex == swapLine):
            if(instruction == 'jmp'):
                instruction = 'nop'
            elif(instruction == 'nop'):
                instruction = 'jmp'
        if(instruction == 'acc'):
            acc += int(value)
            instructionIndex += 1
        elif(instruction == 'jmp'):
            instructionIndex += int(value)
        else:
            instructionIndex += 1
# Try swapping each line in turn and report any run that terminates.
for i in range(len(lines)):
    terminationValue = testTermination(i)
    # Identity check on the False sentinel: a legitimate accumulator of 0
    # would compare equal to False under `!=` and hide a valid answer.
    if(terminationValue is not False):
        print(terminationValue)
|
f9af5624fe12b3c6bd0c359e5162b7f9f48234e7 | Yarin78/yal | /python/yal/fenwick.py | 1,025 | 3.765625 | 4 | # Datastructure for storing and updating integer values in an array in log(n) time
# and answering queries "what is the sum of all value in the array between 0 and x?" in log(n) time
#
# Also called Binary Indexed Tree (BIT). See http://codeforces.com/blog/entry/619
class FenwickTree:
    # Stored as an implicit perfect binary tree: level i occupies
    # t[(1<<i) .. (1<<(i+1))-1], with the leaves at level exp. Each cell
    # holds the sum of the leaves in its subtree.
    def __init__(self, exp):
        '''Creates a FenwickTree with range 0..(2^exp)-1'''
        self.exp = exp
        # One slot per node of a complete binary tree with 2^exp leaves.
        self.t = [0] * 2 ** (exp+1)
    def query_range(self, x, y):
        '''Gets the sum of the values in the range [x, y)'''
        return self.query(y) - self.query(x)
    def query(self, x, i=-1):
        '''Gets the sum of the values in the range [0, x).

        i is internal: the tree level being inspected (defaults to the
        leaf level). At each level, when x is odd the left sibling's cell
        (1<<i)+x-1 covers a slice that lies entirely inside [0, x), so
        its sum is added; then x is halved to ascend one level.
        '''
        if i < 0:
            i = self.exp
        return (x&1) * self.t[(1<<i)+x-1] + self.query(x//2, i-1) if x else 0
    def insert(self, x, v, i=-1):
        '''Adds the value v to the position x

        Walks from leaf (1<<exp)+x up to the root adding v to each cell
        on the path. NOTE(review): the return value (sum of the updated
        cells along the root path) has no caller in this file — confirm
        it is intentional before relying on it.
        '''
        if i < 0:
            i = self.exp
        self.t[(1<<i)+x] += v
        return self.t[(1<<i)+x] + (self.insert(x//2, v, i-1) if i > 0 else 0)
|
25ce186e86fc56201f52b12615caa13f98044d99 | travisoneill/project-euler | /python/007.py | 327 | 3.953125 | 4 | from math import sqrt
def is_prime(n):
    """Return True when n is a prime number (trial division up to sqrt(n))."""
    if n < 2:
        return False
    limit = int(sqrt(n))
    for divisor in range(2, limit + 1):
        if n % divisor == 0:
            return False
    return True
def nth_prime(n):
    """Print the n-th prime number (1-indexed).

    The search starts at 3 and steps by 2 (even numbers > 2 are never
    prime), so primes 2 and 3 are pre-counted. The original printed 3
    for n == 1; that case is now handled explicitly.
    """
    if n == 1:
        print(2)
        return
    count = 2  # primes 2 and 3 are already accounted for
    i = 3
    while count < n:
        i += 2  # only test odd candidates
        if is_prime(i):
            count += 1
    print(i)
nth_prime(10001)  # Project Euler #7: print the 10001st prime
|
5bb4299f898d7a3957d4a0fd1ed4eb151ab44b47 | efeacer/EPFL_ML_Labs | /Lab04/template/least_squares.py | 482 | 3.5625 | 4 | # -*- coding: utf-8 -*-
"""Exercise 3.
Least Square
"""
import numpy as np
def compute_error_vector(y, tx, w):
    """Residuals of the linear model: e = y - tx @ w."""
    prediction = tx @ w
    return y - prediction
def compute_mse(error_vector):
    """Half the mean of the squared residuals (MSE / 2)."""
    return np.mean(np.square(error_vector)) / 2
def least_squares(y, tx):
    """Solve the normal equations (tx^T tx) w = tx^T y and return
    (w, loss), where loss is half the mean squared error of the fit."""
    # Gram matrix A = X^T X and right-hand side b = X^T y.
    coefficient_matrix = tx.T.dot(tx)
    constant_vector = tx.T.dot(y)
    # Solving A w = b directly is numerically safer than inverting A.
    w = np.linalg.solve(coefficient_matrix, constant_vector)
    error_vector = compute_error_vector(y, tx, w)
    loss = compute_mse(error_vector)
    return w, loss |
0a186e9f92527a8509f7082f2f4065d3b366e957 | vinnyatanasov/naive-bayes-classifier | /nb.py | 3,403 | 3.6875 | 4 | """
Naive Bayes classifier
- gets reviews as input
- counts how many times words appear in pos/neg
- adds one to each (to not have 0 probabilities)
- computes likelihood and multiply by prior (of review being pos/neg) to get the posterior probability
- in a balanced dataset, prior is the same for both, so we ignore it here
- chooses highest probability to be prediction
"""
import math
def test(words, probs, priors, file_name):
    """Classify every review in file_name and return accuracy as a percent.

    The true label is inferred from the file name ("pos" -> 1, else -1).
    """
    label = 1 if "pos" in file_name else -1
    total = 0
    hits = 0
    with open(file_name) as handle:
        for review in handle:
            # Each score starts from the (log) prior of its class.
            log_pos = priors[0]
            log_neg = priors[1]
            # Sum log-likelihoods rather than multiplying tiny numbers.
            for token in review.strip().split():
                # Tokens unseen during training carry no evidence: skip.
                if token in words:
                    log_pos += math.log(probs[token][0])
                    log_neg += math.log(probs[token][1])
            # Ties go to the positive class.
            prediction = 1 if log_pos >= log_neg else -1
            total += 1
            if prediction == label:
                hits += 1
    return 100 * (hits / float(total))
def main():
# count number of occurances of each word in pos/neg reviews
# we'll use a dict containing a two item list [pos count, neg count]
words = {}
w_count = 0 # words
p_count = 0 # positive instances
n_count = 0 # negative instances
# count positive occurrences
with open("data/train.positive") as file:
for line in file:
for word in line.strip().split():
try:
words[word][0] += 1
except:
words[word] = [1, 0]
w_count += 1
p_count += 1
# count negative occurrences
with open("data/train.negative") as file:
for line in file:
for word in line.strip().split():
try:
words[word][1] += 1
except:
words[word] = [0, 1]
w_count += 1
n_count += 1
# calculate probabilities of each word
corpus = len(words)
probs = {}
for key, value in words.iteritems():
# smooth values (add one to each)
value[0]+=1
value[1]+=1
# prob = count / total count + number of words (for smoothing)
p_pos = value[0] / float(w_count + corpus)
p_neg = value[1] / float(w_count + corpus)
probs[key] = [p_pos, p_neg]
# compute priors based on frequency of reviews
priors = []
priors.append(math.log(p_count / float(p_count + n_count)))
priors.append(math.log(n_count / float(p_count + n_count)))
# test naive bayes
pos_result = test(words, probs, priors, "data/test.positive")
neg_result = test(words, probs, priors, "data/test.negative")
print "Accuracy(%)"
print "Positive:", pos_result
print "Negative:", neg_result
print "Combined:", (pos_result+neg_result)/float(2)
if __name__ == "__main__":
    # Python 2 entry point.
    print "-- Naive Bayes classifier --\n"
    main()
|
ceca83f8d1a6d0dbc027ad04a7632bb8853bc17f | harshablast/numpy_NN | /nn.py | 2,183 | 3.734375 | 4 | import numpy as np
def sigmoid(x):
    """Logistic activation: maps any real input into (0, 1)."""
    return 1 / (np.exp(-x) + 1)
def d_sigmoid(x):
    """Sigmoid derivative, expressed in terms of the sigmoid output x."""
    complement = 1 - x
    return x * complement
def relu(x):
    """Rectified linear unit: passes positives through, zeroes the rest."""
    positive_mask = x > 0
    return x * positive_mask
def d_relu(x):
    """ReLU derivative: 1 for positive inputs, 0 elsewhere."""
    positive_mask = x > 0
    return positive_mask * 1
class neural_network:
    """Fully-connected 4-layer MLP (input, two hidden, output) with
    sigmoid activations, trained by full-batch gradient descent."""
    def __init__(self, nodes):
        # nodes: sequence of 4 layer sizes [input, hidden1, hidden2, output].
        self.input_dim = nodes[0]
        self.HL01_dim = nodes[1]
        self.HL02_dim = nodes[2]
        self.output_dim = nodes[3]
        # NOTE(review): 2 * (rand - 1) yields weights in (-2, 0), and the
        # biases below land in (0, 2); the conventional symmetric init is
        # 2 * rand - 1, i.e. (-1, 1) — confirm this is intentional.
        self.W1 = 2 * (np.random.rand(self.input_dim, self.HL01_dim) -1)
        self.W2 = 2 * (np.random.rand(self.HL01_dim, self.HL02_dim) -1)
        self.W3 = 2 * (np.random.rand(self.HL02_dim, self.output_dim) -1)
        self.B1 = 2 * (np.random.rand(1, self.HL01_dim))
        self.B2 = 2 * (np.random.rand(1, self.HL02_dim))
        self.B3 = 2 * (np.random.rand(1, self.output_dim))
    def forward_pass(self, input):
        # Two sigmoid hidden layers then a sigmoid output layer; layer
        # activations are cached on self for use by backward_pass.
        self.HL01_out = sigmoid(np.add(np.matmul(input, self.W1), self.B1))
        self.HL02_out = sigmoid(np.add(np.matmul(self.HL01_out, self.W2), self.B2))
        self.output = sigmoid(np.add(np.matmul(self.HL02_out, self.W3), self.B3))
        return self.output
    def backward_pass(self, train_data_X, train_data_Y, iterations, learning_rate):
        # Full-batch gradient descent on the sum-of-squares error.
        for j in range(iterations):
            self.forward_pass(train_data_X)
            error = np.sum(np.square(self.output - train_data_Y))
            print(error)  # training-loss trace, one line per iteration
            # Output layer: delta = dE/dy * sigmoid'(y).
            output_error = self.output - train_data_Y
            output_deltas = output_error * d_sigmoid(self.output)
            self.W3 -= np.dot(self.HL02_out.T, output_deltas) * learning_rate
            self.B3 -= np.sum(output_deltas, axis=0, keepdims=True) * learning_rate
            # Backpropagate through hidden layer 2.
            # NOTE(review): HL02_error uses W3 *after* its update above
            # (likewise W2 for HL01_error); textbook backprop uses the
            # pre-update weights — confirm this is deliberate.
            HL02_error = np.dot(output_deltas, self.W3.T)
            HL02_deltas = HL02_error * d_sigmoid(self.HL02_out)
            self.W2 -= np.dot(self.HL01_out.T, HL02_deltas) * learning_rate
            self.B2 -= np.sum(HL02_deltas, axis=0, keepdims=True) * learning_rate
            # Backpropagate through hidden layer 1.
            HL01_error = np.dot(HL02_deltas, self.W2.T)
            HL01_deltas = HL01_error * d_sigmoid(self.HL01_out)
            self.W1 -= np.dot(train_data_X.T, HL01_deltas) * learning_rate
            self.B1 -= np.sum(HL01_deltas, axis=0, keepdims=True) * learning_rate
|
235fee728f7853aa65b05a101deeb1dfeb5ebf8f | syurskyi/Python_Topics | /125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/DynamicProgramming/198_HouseRobber.py | 469 | 3.609375 | 4 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
class Solution(object):
    """Dynamic-programming solution to House Robber (LeetCode 198)."""

    def rob(self, nums):
        """Return the maximum loot obtainable from non-adjacent houses."""
        if not nums:
            return 0
        # robbed:  best total when the previous house was robbed.
        # skipped: best total when the previous house was skipped.
        robbed, skipped = 0, 0
        for value in nums:
            robbed, skipped = skipped + value, max(robbed, skipped)
        return max(robbed, skipped)
"""
[]
[1,2]
[12, 1,1,12,1]
"""
|
0d0892bf443e39c3c5ef078f2cb846370b7852e9 | JakobLybarger/Graph-Pathfinding-Algorithms | /dijkstras.py | 1,297 | 4.15625 | 4 | import math
import heapq
def dijkstras(graph, start):
    """Single-source shortest paths over a weighted digraph.

    graph: dict mapping vertex -> list of (neighbor, edge_weight) pairs.
    Returns a dict of minimum distances from `start`; unreachable
    vertices keep math.inf. Robustness fix: vertices that appear only as
    neighbors (no adjacency entry of their own) no longer raise KeyError.
    """
    # Every known vertex starts infinitely far away...
    distances = {vertex: math.inf for vertex in graph}
    # ...except the source itself.
    distances[start] = 0
    vertices_to_explore = [(0, start)]
    while vertices_to_explore:
        distance, vertex = heapq.heappop(vertices_to_explore)
        # Skip stale heap entries: a shorter path was already settled.
        if distance > distances.get(vertex, math.inf):
            continue
        # .get guards vertices that only appear on the right-hand side.
        for neighbor, e_weight in graph.get(vertex, []):
            new_distance = distance + e_weight
            # Relax the edge if it improves on the best known distance.
            if new_distance < distances.get(neighbor, math.inf):
                distances[neighbor] = new_distance
                heapq.heappush(vertices_to_explore, (new_distance, neighbor))
    return distances
graph = {
'A': [('B', 10), ('C', 3)],
'C': [('D', 2)],
'D': [('E', 10)],
'E': [('A', 7)],
'B': [('C', 3), ('D', 2)]
}
print(dijkstras(graph, "A")) |
c3626ea1efb1c930337e261be165d048d842d15a | Razorro/Leetcode | /72. Edit Distance.py | 3,999 | 3.515625 | 4 | """
Given two words word1 and word2, find the minimum number of operations required to convert word1 to word2.
You have the following 3 operations permitted on a word:
Insert a character
Delete a character
Replace a character
Example 1:
Input: word1 = "horse", word2 = "ros"
Output: 3
Explanation:
horse -> rorse (replace 'h' with 'r')
rorse -> rose (remove 'r')
rose -> ros (remove 'e')
Example 2:
Input: word1 = "intention", word2 = "execution"
Output: 5
Explanation:
intention -> inention (remove 't')
inention -> enention (replace 'i' with 'e')
enention -> exention (replace 'n' with 'x')
exention -> exection (replace 'n' with 'c')
exection -> execution (insert 'u')
A good question! I thought it was similar to the dynamic-programming example in CLRS;
it turns out the skeletons share a little similarity, but the core idea does not carry
over directly to this question.
This metric is called the Levenshtein distance.
Mathematically, the Levenshtein distance between two strings a, b (of length |a| and |b|
respectively) is given by lev_{a,b}(|a|, |b|), where
| --- max(i, j) if min(i, j) = 0
lev(i, j) = | min --- lev(i-1, j) + 1
| --- lev(i, j-1) + 1
| --- lev(i-1, j-1) + 1
Computing the Levenshtein distance is based on the observation that if we reserve a matrix to hold the Levenshtein distances
between all prefixes of the first string and all prefixes of the second, then we can compute the values in the matrix in
a dynamic programming fashion, and thus find the distance between the two full strings as the last value computed.
This algorithm, an example of bottom-up dynamic programming, is discussed, with variants, in the 1974 article The
String-to-string correction problem by Robert A. Wagner and Michael J. Fischer.[4]
"""
class Solution:
    def minDistance(self, word1: 'str', word2: 'str') -> 'int':
        """Return the Levenshtein (edit) distance between word1 and word2.

        Fixed: the original computed an LCS point list and then fell
        through without returning anything; it now delegates to the
        dynamic-programming solution.
        """
        return self.standardAnswer(word1, word2)

    def findBiggestCommon(self, source, target):
        """LCS helper: return matched index pairs (row_in_target,
        col_in_source), deepest match first."""
        # path[j]: LCS length of target[..i-1] vs source[..j] (rolling row).
        path = [0] * len(source)
        directions = []
        for i in range(len(target)):
            current = [0] * len(source)
            d = []
            for j in range(len(source)):
                if target[i] == source[j]:
                    current[j] = path[j-1] + 1 if j-1 >= 0 else 1
                    d.append('=')  # characters matched here
                else:
                    left = current[j-1] if j-1 >= 0 else 0
                    if left > path[j]:
                        d.append('l')  # best value came from the left
                    else:
                        d.append('u')  # best value came from above
                    current[j] = max(left, path[j])
            path = current
            directions.append(d)
        # Walk the direction matrix backwards to recover the match points.
        x_y = []
        row, col = len(target)-1, len(source)-1
        while row >= 0 and col >= 0:
            if directions[row][col] == '=':
                x_y.append((row, col))
                row -= 1
                col -= 1
            elif directions[row][col] == 'u':
                row -= 1
            else:
                col -= 1
        return x_y

    def standardAnswer(self, word1, word2):
        """Classic O(m*n) edit-distance dynamic program."""
        m = len(word1) + 1
        n = len(word2) + 1
        det = [[0 for _ in range(n)] for _ in range(m)]
        # Transforming to/from the empty prefix costs its length.
        for i in range(m):
            det[i][0] = i
        for i in range(n):
            det[0][i] = i
        for i in range(1, m):
            for j in range(1, n):
                # Fixed: the substitution cost must be parenthesized.
                # Previously `a + 0 if cond else 1` parsed as
                # `(a + 0) if cond else 1`, collapsing the mismatch cost
                # to a constant 1 and under-reporting distances.
                det[i][j] = min(det[i][j - 1] + 1,
                                det[i - 1][j] + 1,
                                det[i - 1][j - 1] + (0 if word1[i - 1] == word2[j - 1] else 1))
        return det[m - 1][n - 1]
if __name__ == '__main__':
s = Solution()
distance = s.findBiggestCommon('horse', 'ros')
distance = sorted(distance, key=lambda e: e[1])
c = 0
trans = 0
for left, right in distance:
trans += abs(right - left) + left-c
c = left + 1
print(s.findBiggestCommon('horse', 'ros')) |
b3a6e632568dd13f128eda2cba96293e2bd0d3cd | sniperswang/dev | /leetcode/L265/test.py | 1,452 | 3.640625 | 4 | """
There are a row of n houses, each house can be painted with one of the k colors.
The cost of painting each house with a certain color is different. You have to paint all the houses such that no two adjacent houses have the same color.
The cost of painting each house with a certain color is represented by a n x k cost matrix.
For example, costs[0][0] is the cost of painting house 0 with color 0; costs[1][2] is the cost of painting house 1 with color 2, and so on... Find the minimum cost to paint all houses.
Note:
All costs are positive integers.
Follow up:
Could you solve it in O(nk) runtime?
"""
class Solution(object):
    def minCostII(self, costs):
        """Minimum total cost to paint every house so that no two
        adjacent houses share a colour.

        :type costs: List[List[int]]  (costs[house][color]; mutated in place)
        :rtype: int
        """
        if not costs:
            return 0
        houses = len(costs)
        colors = len(costs[0])
        # Fold each row into a running minimum: after processing house h,
        # costs[h][c] is the cheapest way to paint houses 0..h ending in c.
        for h in range(1, houses):
            prev = costs[h - 1]
            # Colour 0 must avoid colour 0 on the previous house.
            costs[h][0] += min(prev[1:])
            if colors > 1:
                # The last colour avoids itself symmetrically.
                costs[h][colors - 1] += min(prev[:colors - 1])
            for c in range(1, colors - 1):
                # Interior colours take the cheaper of both sides.
                costs[h][c] += min(min(prev[:c]), min(prev[c + 1:]))
        return min(costs[-1])
# Build the 3-house / 3-colour example from the docstring and run it
# (Python 2 script: note the `print` statement below).
costa = [1,2,4]
costb = [3,1,0]
costc = [1,2,1]
costs = []
costs.append(costa)
costs.append(costb)
costs.append(costc)
s = Solution()
print s.minCostII(costs)
|
a1b41adcda2d3b3522744e954cf8ae2f901c6b01 | drunkwater/leetcode | /medium/python3/c0099_209_minimum-size-subarray-sum/00_leetcode_0099.py | 798 | 3.546875 | 4 | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#209. Minimum Size Subarray Sum
#Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray of which the sum ≥ s. If there isn't one, return 0 instead.
#For example, given the array [2,3,1,2,4,3] and s = 7,
#the subarray [4,3] has the minimal length under the problem constraint.
#click to show more practice.
#Credits:
#Special thanks to @Freezen for adding this problem and creating all test cases.
#class Solution:
# def minSubArrayLen(self, s, nums):
# """
# :type s: int
# :type nums: List[int]
# :rtype: int
# """
# Time Is Money |
40a908c9b3cf99674e66b75f56809d485f0a81f9 | Gborgman05/algs | /py/populate_right_pointers.py | 1,019 | 3.859375 | 4 | """
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
def connect(self, root: 'Optional[Node]') -> 'Optional[Node]':
saved = root
levels = []
l = [root]
n = []
while l:
n = []
for node in l:
if node:
if node.left:
n.append(node.left)
if node.right:
n.append(node.right)
levels.append(l)
l = n
for level in levels:
for i in range(len(level)):
if level[i] == None:
continue
if i < len(level) - 1:
level[i].next = level[i+1]
else:
level[i].next = None
return root
|
c2f622f51bbddc54b0199c4e0e2982bc2ebfa030 | qdm12/courses | /Fundamental Algorithms/Lesson-03/algorithms.py | 2,479 | 3.875 | 4 | from operator import itemgetter
from math import floor
def radix_sort_alpha(words):
    """LSD radix sort for equal-length words (Python 2: prints each pass).

    Sorts stably on the last character first, then each column moving
    left; returns the string repr of the final sorted list (the value
    printed on the last pass).
    """
    l = len(words[0])
    # LSD radix sort only works when all keys have equal length.
    for w in words:
        if len(w) != l:
            raise Exception("All words should be of same length")
    # One stable sorting pass per character column, right to left.
    for i in range(l, 0, -1):
        words = sorted(words, key=itemgetter(i - 1))
        words_str = str([''.join(w) for w in words])
        print "PASS "+str(l - i + 1)+": "+words_str
    return words_str
def bucket_sort(A):
    """Bucket sort for values in [0, 1) (Python 2: prints each stage)."""
    print "Initial input array A: "+str(A)
    n = len(A)
    # All inputs must fall in [0, 1) so the bucket index below is valid.
    for i in range(n):
        assert(A[i] >= 0 and A[i] < 1)
    B = [[] for _ in range(n)]
    print "Initial output buckets array B: "+str(B)
    # Scatter: value v lands in bucket floor(v * n).
    for i in range(n):
        place = int(floor(A[i] * n))
        B[place].append(A[i])
    print "Output buckets array B with elements in buckets: "+str(B)
    # Sort each bucket individually.
    for j in range(n):
        B[j].sort()
    print "Output buckets array B with elements sorted in buckets: "+str(B)
    # Gather: concatenating the sorted buckets yields the sorted array.
    B_final = []
    for bucket in B:
        B_final += bucket
    print "Final output array B: "+str(B_final)
    return B_final
class MergeSort(object):
    """Textbook top-down merge sort (Python 2: run() prints the result)."""
    def merge(self, A, l, q, r):
        # Merge the already-sorted halves A[l..q] and A[q+1..r] in place.
        n1 = q - l + 1
        n2 = r - q
        # Temporary copies of the two halves.
        L = [A[l + i] for i in range(n1)]
        R = [A[q + 1 + i] for i in range(n2)]
        i = j = 0 # Initial index of first and second subarrays
        k = l # Initial index of merged subarray
        while i < n1 and j < n2:
            # <= keeps the merge stable (ties taken from the left half).
            if L[i] <= R[j]:
                A[k] = L[i]
                i += 1
            else:
                A[k] = R[j]
                j += 1
            k += 1
        # Copy the remaining elements of L[], if there are any
        while i < n1:
            A[k] = L[i]
            i += 1
            k += 1
        # Copy the remaining elements of R[], if there are any
        while j < n2:
            A[k] = R[j]
            j += 1
            k += 1
    def mergeSort(self, A, l, r):
        # Recursively sort A[l..r] (inclusive bounds).
        if l < r:
            q = int(floor((l+r)/2))
            self.mergeSort(A, l, q)
            self.mergeSort(A, q+1, r)
            self.merge(A, l, q, r)
    def run(self):
        # Demo: sort a fixed array and print it (Python 2 print).
        A = [54,26,93,17,77,31,44,55,20]
        self.mergeSort(A, 0, len(A) - 1)
        print A
if __name__ == "__main__":
radix_sort_alpha(["COW", "DOG", "SEA", "RUG", "ROW", "MOB", "BOX", "TAB", "BAR", "EAR", "TAR", "DIG", "BIG", "TEA", "NOW", "FOX"])
bucket_sort([.79,.13,.16,.64,.39,.20,.89,.53,.71,.43])
m = MergeSort()
m.run() |
baa6b0b8905dfc9e832125196f3503f271557273 | syurskyi/Python_Topics | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/Array/SlidingWindowMaximum.py | 2,656 | 4.03125 | 4 | """
Given an array nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position. Return the max sliding window.
Example:
Input: nums = [1,3,-1,-3,5,3,6,7], and k = 3
Output: [3,3,5,5,6,7]
Explanation:
Window position Max
--------------- -----
[1 3 -1] -3 5 3 6 7 3
1 [3 -1 -3] 5 3 6 7 3
1 3 [-1 -3 5] 3 6 7 5
1 3 -1 [-3 5 3] 6 7 5
1 3 -1 -3 [5 3 6] 7 6
1 3 -1 -3 5 [3 6 7] 7
Note:
You may assume k is always valid, 1 ≤ k ≤ input array's size for non-empty array.
Follow up:
Could you solve it in linear time?
这个我的思路是:
1. 先把前 k 个数取出来,然后排序一组,不排序一组。
2. 排序的一组作为查找使用。 不排序的一组作为删除增加会用。
3. 这里也可以使用堆代替排序,红黑树应该最好不过了。
4. 这里使用排序过的列表是为了能够使用二分法,从而达到 log n 级别的查找和后续添加。
但同时因为即使在 log n级别查找到要添加删除的位置,进行列表的添加和删除仍然是一个 O(n) 级别的事情...
所以使用堆或者红黑树是最好的,添加和删除都是 log n 级别的。
5. sorted list 主要是进行获取最大与删除冗余,这里使用二分法来删除冗余。
6. unsorted list 用于知道要删除和添加的都是哪一个。
beat 31% 176ms.
测试地址:
https://leetcode.com/problems/sliding-window-maximum/description/
"""
from collections import deque
import bisect
c.. Solution o..
___ find_bi nums, target
lo = 0
hi = l..(nums)
_____ lo < hi:
mid = (lo + hi) // 2
__ nums[mid] __ target:
r_ mid
__ nums[mid] < target:
lo = mid + 1
____
hi = mid
___ maxSlidingWindow nums, k
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
__ n.. nums:
r_ []
x = nums[:k]
y = s..(x)
x = deque(x)
maxes = m..(x)
result = [maxes]
___ i __ nums[k:]:
pop = x.popleft()
x.a.. i)
index = self.find_bi(y, pop)
y.pop(index)
bisect.insort_left(y, i)
result.a.. y[-1])
r_ result
|
8496596aefa39873f8321a61d361bf209e54dcbd | syurskyi/Python_Topics | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/108_Convert_Sorted_Array_to_Binary_Search_Tree.py | 977 | 3.859375 | 4 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
c_ Solution o..
# def sortedArrayToBST(self, nums):
# """
# :type nums: List[int]
# :rtype: TreeNode
# """
# # Recursion with slicing
# if not nums:
# return None
# mid = len(nums) / 2
# root = TreeNode(nums[mid])
# root.left = self.sortedArrayToBST(nums[:mid])
# root.right = self.sortedArrayToBST(nums[mid + 1:])
# return root
___ sortedArrayToBST nums
# Recursion with index
r_ getHelper(nums, 0, l.. nums) - 1)
___ getHelper nums, start, end
__ start > end:
r_ N..
mid = (start + end) / 2
node = TreeNode(nums[mid])
node.left = getHelper(nums, start, mid - 1)
node.right = getHelper(nums, mid + 1, end)
r_ node |
0be537def5f8cc9ba9218267bf774b28ee44d4c7 | SoumyaMalgonde/AlgoBook | /python/graph_algorithms/Dijkstra's_Shortest_Path_Implementation_using_Adjacency_List.py | 2,933 | 3.921875 | 4 | class Node_Distance :
    def __init__(self, name, dist) :
        # name: vertex id of the neighbour; dist: weight of the edge to it.
        self.name = name
        self.dist = dist
class Graph :
    """Weighted digraph over integer vertices 0..node_count-1 with a
    print-only Dijkstra shortest-path routine."""
    def __init__(self, node_count) :
        self.adjlist = {}
        self.node_count = node_count

    def Add_Into_Adjlist(self, src, node_dist) :
        """Append a Node_Distance-like edge record to src's adjacency list."""
        if src not in self.adjlist :
            self.adjlist[src] = []
        self.adjlist[src].append(node_dist)

    def Dijkstras_Shortest_Path(self, source) :
        """Print the shortest distance from `source` to every vertex."""
        # Initialize the distance of all the nodes from source to infinity
        distance = [999999999999] * self.node_count
        # Distance of source node to itself is 0
        distance[source] = 0
        # Create a dictionary of { node, distance_from_source }
        dict_node_length = {source: 0}
        while dict_node_length :
            # Get the key for the smallest value in the dictionary
            # i.e Get the node with the shortest distance from the source
            source_node = min(dict_node_length, key = lambda k: dict_node_length[k])
            del dict_node_length[source_node]
            # Robustness fix: vertices with no outgoing edges used to
            # raise KeyError here; treat a missing entry as "no edges".
            for node_dist in self.adjlist.get(source_node, []) :
                adjnode = node_dist.name
                length_to_adjnode = node_dist.dist
                # Edge relaxation
                if distance[adjnode] > distance[source_node] + length_to_adjnode :
                    distance[adjnode] = distance[source_node] + length_to_adjnode
                    dict_node_length[adjnode] = distance[adjnode]
        for i in range(self.node_count) :
            print("Source Node ("+str(source)+") -> Destination Node(" + str(i) + ") : " + str(distance[i]))
def main() :
    """Build the 6-vertex sample graph and print shortest paths from 0 and 5."""
    g = Graph(6)
    # Node 0: <1,5> <2,1> <3,4>
    g.Add_Into_Adjlist(0, Node_Distance(1, 5))
    g.Add_Into_Adjlist(0, Node_Distance(2, 1))
    g.Add_Into_Adjlist(0, Node_Distance(3, 4))
    # Node 1: <0,5> <2,3> <4,8>
    g.Add_Into_Adjlist(1, Node_Distance(0, 5))
    g.Add_Into_Adjlist(1, Node_Distance(2, 3))
    g.Add_Into_Adjlist(1, Node_Distance(4, 8))
    # Node 2: <0,1> <1,3> <3,2> <4,1>
    g.Add_Into_Adjlist(2, Node_Distance(0, 1))
    g.Add_Into_Adjlist(2, Node_Distance(1, 3))
    g.Add_Into_Adjlist(2, Node_Distance(3, 2))
    g.Add_Into_Adjlist(2, Node_Distance(4, 1))
    # Node 3: <0,4> <2,2> <4,2> <5,1>
    g.Add_Into_Adjlist(3, Node_Distance(0, 4))
    g.Add_Into_Adjlist(3, Node_Distance(2, 2))
    g.Add_Into_Adjlist(3, Node_Distance(4, 2))
    g.Add_Into_Adjlist(3, Node_Distance(5, 1))
    # Node 4: <1,8> <2,1> <3,2> <5,3>
    g.Add_Into_Adjlist(4, Node_Distance(1, 8))
    g.Add_Into_Adjlist(4, Node_Distance(2, 1))
    g.Add_Into_Adjlist(4, Node_Distance(3, 2))
    g.Add_Into_Adjlist(4, Node_Distance(5, 3))
    # Node 5: <3,1> <4,3>
    g.Add_Into_Adjlist(5, Node_Distance(3, 1))
    g.Add_Into_Adjlist(5, Node_Distance(4, 3))
    g.Dijkstras_Shortest_Path(0)
    print("\n")
    g.Dijkstras_Shortest_Path(5)
if __name__ == "__main__" :
main() |
8130b1edf4df29a9ab76784289a22d5fb90863e7 | ridhishguhan/faceattractivenesslearner | /Classify.py | 1,158 | 3.6875 | 4 | import numpy as np
import Utils
class Classifier:
training = None
train_arr = None
classes = None
def __init__(self, training, train_arr, CLASSES = 3):
self.training = training
self.train_arr = train_arr
self.classes = CLASSES
#KNN Classification method
def OneNNClassify(self, test_set, K):
# KNN Method
# for each test sample t
# for each training sample tr
# compute norm |t - tr|
# choose top norm
# class which it belongs to is classification
[tr,tc] = test_set.shape
[trr,trc] = self.train_arr.shape
result = np.array(np.zeros([tc]))
i = 0
#print "KNN : with K = ",K
while i < tc:
x = test_set[:,i]
xmat = np.tile(x,(1,trc))
xmat = xmat - self.train_arr
norms = Utils.ComputeNorm(xmat)
closest_train = np.argmin(norms)
which_train = self.training[closest_train]
attr = which_train.attractiveness
result[i] = attr
#print "Class : ",result[i]
i += 1
return result |
fa0a2e8e0ec8251c6d735b02dfa1d7a94e09c6b2 | paul0920/leetcode | /question_leetcode/1488_2.py | 1,538 | 3.984375 | 4 | import collections
import heapq
# LeetCode 1488 (Avoid Flood in The City) scratch walkthrough.
# Python 2 script: uses `print` statements and dumps heap state per day.
# NOTE(review): the second `rains` assignment below deliberately replaces
# the first example input.
rains = [1, 2, 0, 0, 2, 1]
# 0    1  2  3  4  5
rains = [10, 20, 20, 0, 20, 10]
# min heap to track the days when flooding would happen (if lake not dried)
nearest = []
# dict to store all rainy days
# use case: to push the subsequent rainy days into the heap for wet lakes
locs = collections.defaultdict(collections.deque)
# result - assume all days are rainy
res = [-1] * len(rains)
# pre-processing - {K: lake, V: list of rainy days}
for i, lake in enumerate(rains):
    locs[lake].append(i)
for i, lake in enumerate(rains):
    print "nearest wet day:", nearest
    # check whether the day, i, is a flooded day
    # the nearest lake got flooded (termination case)
    if nearest and nearest[0] == i:
        print []
        exit()
    # lake got wet
    if lake != 0:
        # pop the wet day. time complexity: O(1)
        locs[lake].popleft()
        # prioritize the next rainy day of this lake
        if locs[lake]:
            nxt = locs[lake][0]
            heapq.heappush(nearest, nxt)
            print "nearest wet day:", nearest
    # a dry day
    else:
        # no wet lake, append an arbitrary value
        if not nearest:
            res[i] = 1
        else:
            # dry the lake that has the highest priority
            # since that lake will be flooded in nearest future otherwise (greedy property)
            next_wet_day = heapq.heappop(nearest)
            wet_lake = rains[next_wet_day]
            res[i] = wet_lake
    print ""
print res
|
ef0440b8ce5c5303d75b1d297e323a1d8b92d619 | AndreiBoris/sample-problems | /python/0200-numbers-of-islands/number-of-islands.py | 5,325 | 3.84375 | 4 | from typing import List
LAND = '1'
WATER = '0'
# TODO: Review a superior solutions
def overlaps(min1, max1, min2, max2):
    """Whether the closed intervals [min1, max1] and [min2, max2] touch
    or overlap (shared endpoints count as overlapping).

    Simplification: the original's three branches (positive open overlap,
    equal endpoints, strict containment) are together equivalent to the
    standard closed-interval intersection test below.
    """
    return max(min1, min2) <= min(max1, max2)
print(overlaps(0, 2, 1, 1))
# Definition for a Bucket.
class Bucket:
    """Mergeable set of island identifiers.

    Combining two buckets creates a fresh bucket holding the union and
    leaves forwarding pointers behind, so stale references still resolve
    to the merged result through getDestination().
    """

    def __init__(self, identifiers: List[int]):
        self.destination = None  # forwarding pointer, set by combine()
        self.identifiers = set(identifiers)

    def hasDestination(self) -> bool:
        """True once this bucket has been merged into another."""
        return self.destination is not None

    def getDestination(self):
        """Follow forwarding pointers to the live representative bucket."""
        node = self
        while node.hasDestination():
            node = node.destination
        return node

    def combine(self, bucket):
        """Merge this bucket with `bucket`; return the new representative."""
        mine = self.getDestination()
        theirs = bucket.getDestination()
        merged = Bucket(mine.identifiers | theirs.identifiers)
        mine.destination = merged
        theirs.destination = merged
        return merged

    def contains(self, identifier: int) -> bool:
        """Membership test against the fully merged identifier set."""
        return identifier in self.getDestination().identifiers
class Solution:
    '''
    Given a 2d grid map of '1's (land) and '0's (water), count the number of islands.
    An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.
    You may assume all four edges of the grid are all surrounded by water.
    '''
    def numIslands(self, grid: List[List[str]]) -> int:
        """Count connected components of land cells with an iterative flood fill.

        Accepts either a list of strings or a list of lists of single
        characters; the grid is never mutated (visited cells are tracked in
        a set).  Runs in O(rows * cols) time and space, replacing the former
        row-island/group/bucket merging scheme flagged for review.
        """
        if not grid or not grid[0]:
            return 0

        rows, cols = len(grid), len(grid[0])
        visited = set()  # (row, col) cells already assigned to an island
        islands = 0

        for row in range(rows):
            for col in range(cols):
                if grid[row][col] != '1' or (row, col) in visited:
                    continue
                # Fresh, unvisited land cell: flood-fill its entire island.
                islands += 1
                stack = [(row, col)]
                visited.add((row, col))
                while stack:
                    x, y = stack.pop()
                    # 4-directional neighbours only (no diagonals).
                    for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
                        if (0 <= nx < rows and 0 <= ny < cols
                                and grid[nx][ny] == '1'
                                and (nx, ny) not in visited):
                            visited.add((nx, ny))
                            stack.append((nx, ny))
        return islands
solver = Solution()

# Sample grids (previously kept as commented-out alternatives) paired with
# the island count each is expected to produce; every case is now exercised.
SAMPLE_GRIDS = [
    (['11110', '11010', '11000', '00000'], 1),
    (['11000', '11000', '00100', '00011'], 3),
    (['11011', '10001', '10001', '11111'], 1),
    (['101', '010', '101'], 5),
    (['111', '010', '010'], 1),
]

for sample_grid, expected_count in SAMPLE_GRIDS:
    actual_count = solver.numIslands(sample_grid)
    status = 'ok' if actual_count == expected_count else 'MISMATCH'
    print(f'{actual_count} (expected {expected_count}): {status}')
227925521077e04140edcb13d50808695efd39a5 | erikseulean/machine_learning | /python/linear_regression/multivariable.py | 1,042 | 3.671875 | 4 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Default gradient-descent hyperparameters.
iterations = 35  # number of update steps
alpha = 0.1  # learning rate
def read_data():
    """Load the housing data set and return (features, targets).

    The file holds comma-separated rows whose first two columns are the
    features and whose third column is the target, returned as an (m, 1)
    column vector.
    """
    data = np.loadtxt('data/housing_prices.in', delimiter=',')
    features = data[:, :2]
    targets = data[:, 2].reshape(-1, 1)
    return features, targets
def normalize(X):
    """Return X with every feature column standardised to zero mean, unit std."""
    column_means = X.mean(0)
    column_stds = X.std(0)
    return (X - column_means) / column_stds
def add_xzero(X):
    """Prepend the intercept column x0 = 1 to the design matrix."""
    bias_column = np.ones((X.shape[0], 1))
    return np.concatenate((bias_column, X), axis=1)
def gradient_descent(X, y, learning_rate=None, num_iterations=None):
    """Fit linear-regression parameters with batch gradient descent.

    Parameters
    ----------
    X : ndarray of shape (m, n)
        Design matrix (expected to already include the bias column).
    y : ndarray of shape (m, 1)
        Target column vector.
    learning_rate : float, optional
        Step size; defaults to the module-level ``alpha``.
    num_iterations : int, optional
        Number of update steps; defaults to the module-level ``iterations``.

    Returns
    -------
    (theta, cost) : tuple
        ``theta`` is the (n, 1) parameter vector; ``cost`` is the history of
        the squared-error cost, evaluated after each update.
    """
    if learning_rate is None:
        learning_rate = alpha
    if num_iterations is None:
        num_iterations = iterations

    m, n = X.shape
    theta = np.zeros((n, 1))
    cost = []
    X_transpose = X.T  # loop-invariant: hoisted out of the update loop
    for _ in range(num_iterations):
        gradient_step = (learning_rate / m) * np.dot(X_transpose, np.dot(X, theta) - y)
        theta = theta - gradient_step
        # Cost is recorded *after* the update, matching the original behaviour.
        cost.append(np.sum(np.square(np.dot(X, theta) - y)) / (2 * m))
    return theta, cost
def plot_cost_function(cost):
    """Visualise how the cost evolves across gradient-descent iterations.

    Opens an interactive matplotlib window (blocks until it is closed).
    """
    plt.plot(cost)
    plt.ylabel("Cost function")
    plt.xlabel("Iterations")
    plt.show()
# Load the training set, standardise the features, and prepend the bias column.
X, y = read_data()
X = add_xzero(normalize(X))
# Fit theta with batch gradient descent and visualise the cost history.
theta, cost = gradient_descent(X, y)
plot_cost_function(cost)
|
c7b567bde9e143c404c3670793576644a26f6142 | AhmadQasim/Battleships-AI | /gym-battleship/gym_battleship/envs/battleship_env.py | 4,760 | 3.53125 | 4 | import gym
import numpy as np
from abc import ABC
from gym import spaces
from typing import Tuple
from copy import deepcopy
from collections import namedtuple
# Ship extents on the board; max_x / max_y are EXCLUSIVE (used as slice ends).
Ship = namedtuple('Ship', ['min_x', 'max_x', 'min_y', 'max_y'])
# A shot fired at board cell (x, y).
Action = namedtuple('Action', ['x', 'y'])
# Extension: Add info for when the ship is sunk
class BattleshipEnv(gym.Env, ABC):
    """Single-player Battleship as an OpenAI Gym environment.

    The hidden board stores 0 for water and a positive ship index for each
    ship cell.  The observation is a pair of binary planes of the board's
    shape: plane 0 marks confirmed hits, plane 1 marks misses plus the full
    extent of any ship that has been sunk.
    """

    def __init__(self, board_size: Tuple = None, ship_sizes: dict = None, episode_steps: int = 100):
        """Configure fleet, board shape and episode length.

        ``ship_sizes`` maps ship length -> number of ships of that length.
        """
        self.ship_sizes = ship_sizes or {5: 1, 4: 1, 3: 2, 2: 1}
        self.board_size = board_size or (10, 10)
        self.board = None            # hidden state, rebuilt by reset()
        self.board_generated = None  # pristine copy of the generated board
        self.observation = None
        self.done = None
        self.step_count = None
        self.episode_steps = episode_steps

        self.action_space = spaces.Discrete(self.board_size[0] * self.board_size[1])
        # MultiBinary is a binary space array
        self.observation_space = spaces.MultiBinary([2, self.board_size[0], self.board_size[1]])

        # dict mapping ship index -> Ship extents, filled by place_ship()
        self.ship_dict = {}

    def step(self, raw_action: int) -> Tuple[np.ndarray, int, bool, dict]:
        """Fire at the cell encoded by ``raw_action`` (flat row-major index).

        Returns (observation, reward, done, info).  Rewards: +1 for a hit,
        +100 for the hit that sinks the last ship (ends the episode),
        -1 for shooting a cell already tried, 0 for a plain miss.
        """
        assert (raw_action < self.board_size[0]*self.board_size[1]),\
            "Invalid action (Superior than size_board[0]*size_board[1])"
        # Decode the flat row-major index.  Dividing by the number of COLUMNS
        # (board_size[1]) is required for non-square boards; dividing by
        # board_size[0] as before only worked when the board was square.
        action = Action(x=raw_action // self.board_size[1], y=raw_action % self.board_size[1])

        self.step_count += 1
        if self.step_count >= self.episode_steps:
            self.done = True

        # A ship occupies the target cell (non-zero ship index).
        if self.board[action.x, action.y] != 0:
            # If this is the last remaining cell of that ship, reveal the
            # whole ship on the miss/sunk plane of the observation.
            if self.board[self.board == self.board[action.x, action.y]].shape[0] == 1:
                ship = self.ship_dict[self.board[action.x, action.y]]
                self.observation[1, ship.min_x:ship.max_x, ship.min_y:ship.max_y] = 1

            self.board[action.x, action.y] = 0
            self.observation[0, action.x, action.y] = 1

            # Every ship cell cleared: terminal reward.
            if not self.board.any():
                self.done = True
                return self.observation, 100, self.done, {}
            return self.observation, 1, self.done, {}

        # Cell was already tried (recorded as a hit or a miss): penalise.
        elif self.observation[0, action.x, action.y] == 1 or self.observation[1, action.x, action.y] == 1:
            return self.observation, -1, self.done, {}

        # Fresh shot into open water: record the miss.
        else:
            self.observation[1, action.x, action.y] = 1
            return self.observation, 0, self.done, {}

    def reset(self):
        """Generate a fresh board and return the initial all-zero observation."""
        self.ship_dict = {}  # drop ships from any previous episode
        self.set_board()
        # maintain an original copy of the board generated in the start
        self.board_generated = deepcopy(self.board)
        self.observation = np.zeros((2, *self.board_size), dtype=np.float32)
        self.step_count = 0
        # Bug fix: `done` was never cleared here, so every step of a new
        # episode after a finished one immediately reported done=True.
        self.done = False
        return self.observation

    def set_board(self):
        """Create an empty board and place the whole fleet on it."""
        self.board = np.zeros(self.board_size, dtype=np.float32)
        k = 1  # ship indices start at 1 so that 0 can mean water
        for ship_size, ship_count in self.ship_sizes.items():
            for _ in range(ship_count):
                self.place_ship(ship_size, k)
                k += 1

    def place_ship(self, ship_size, ship_index):
        """Sample random placements until the ship lands on empty cells only."""
        can_place_ship = False
        while not can_place_ship:
            ship = self.get_ship(ship_size, self.board_size)
            can_place_ship = self.is_place_empty(ship)
        # write the ship's index into every cell it covers
        self.board[ship.min_x:ship.max_x, ship.min_y:ship.max_y] = ship_index
        self.ship_dict.update({ship_index: ship})

    @staticmethod
    def get_ship(ship_size, board_size) -> Ship:
        """Sample a random horizontal or vertical ship lying fully on the board.

        np.random.randint samples the half-open range [low, high), so `high`
        must be one past the largest valid start.  The previous
        ``board_size - 1 - ship_size`` bounds made the last row/column
        unreachable and raised ValueError when ship_size == dimension - 1.
        """
        if np.random.choice(('Horizontal', 'Vertical')) == 'Horizontal':
            # largest valid start keeps min_x + ship_size <= board_size[0]
            min_x = np.random.randint(0, board_size[0] - ship_size + 1)
            min_y = np.random.randint(0, board_size[1])
            return Ship(min_x=min_x, max_x=min_x + ship_size, min_y=min_y, max_y=min_y + 1)
        else:
            min_x = np.random.randint(0, board_size[0])
            min_y = np.random.randint(0, board_size[1] - ship_size + 1)
            return Ship(min_x=min_x, max_x=min_x + 1, min_y=min_y, max_y=min_y + ship_size)

    def is_place_empty(self, ship):
        """Return True if every cell the ship would cover is still water."""
        # count_nonzero over the candidate slice: 0 means no overlap with a ship
        return np.count_nonzero(self.board[ship.min_x:ship.max_x, ship.min_y:ship.max_y]) == 0

    def get_board(self):
        """Return the current hidden board (mutated in place as ships are hit)."""
        return self.board
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.