repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/neural_network/activation_functions/exponential_linear_unit.py | neural_network/activation_functions/exponential_linear_unit.py | """
Implements the Exponential Linear Unit or ELU function.
The function takes a vector of K real numbers and a real number alpha as
input and then applies the ELU function to each element of the vector.
Script inspired by its corresponding Wikipedia article
https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
"""
Implements the ELU activation function.
    Parameters:
        vector: the array containing the inputs of the ELU activation
        alpha: hyper-parameter controlling the saturation for negative inputs
    Returns:
        elu (np.ndarray): the input numpy array after applying ELU.
    Mathematically, f(x) = x for x > 0, and f(x) = alpha * (e^x - 1) for x <= 0, with alpha >= 0
Examples:
>>> exponential_linear_unit(vector=np.array([2.3,0.6,-2,-3.8]), alpha=0.3)
array([ 2.3 , 0.6 , -0.25939942, -0.29328877])
>>> exponential_linear_unit(vector=np.array([-9.2,-0.3,0.45,-4.56]), alpha=0.067)
array([-0.06699323, -0.01736518, 0.45 , -0.06629904])
"""
return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/neural_network/activation_functions/__init__.py | neural_network/activation_functions/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/neural_network/activation_functions/swish.py | neural_network/activation_functions/swish.py | """
This script demonstrates the implementation of the Sigmoid Linear Unit (SiLU)
or swish function.
* https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
* https://en.wikipedia.org/wiki/Swish_function
The function takes a vector x of K real numbers as input and returns x * sigmoid(x).
Swish is a smooth, non-monotonic function defined as f(x) = x * sigmoid(x).
Extensive experiments show that Swish consistently matches or outperforms ReLU
on deep networks applied to a variety of challenging domains such as
image classification and machine translation.
This script is inspired by a corresponding research paper.
* https://arxiv.org/abs/1710.05941
* https://blog.paperspace.com/swish-activation-function/
"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
"""
Mathematical function sigmoid takes a vector x of K real numbers as input and
returns 1/ (1 + e^-x).
https://en.wikipedia.org/wiki/Sigmoid_function
>>> sigmoid(np.array([-1.0, 1.0, 2.0]))
array([0.26894142, 0.73105858, 0.88079708])
"""
return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
"""
Implements the Sigmoid Linear Unit (SiLU) or swish function
Parameters:
vector (np.ndarray): A numpy array consisting of real values
Returns:
swish_vec (np.ndarray): The input numpy array, after applying swish
Examples:
>>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
array([-0.26894142, 0.73105858, 1.76159416])
>>> sigmoid_linear_unit(np.array([-2]))
array([-0.23840584])
"""
return vector * sigmoid(vector)
def swish(vector: np.ndarray, trainable_parameter: int) -> np.ndarray:
"""
Parameters:
vector (np.ndarray): A numpy array consisting of real values
        trainable_parameter: used to implement various Swish activation functions
Returns:
swish_vec (np.ndarray): The input numpy array, after applying swish
Examples:
>>> swish(np.array([-1.0, 1.0, 2.0]), 2)
array([-0.11920292, 0.88079708, 1.96402758])
>>> swish(np.array([-2]), 1)
array([-0.23840584])
"""
return vector * sigmoid(trainable_parameter * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/neural_network/activation_functions/rectified_linear_unit.py | neural_network/activation_functions/rectified_linear_unit.py | """
This script demonstrates the implementation of the ReLU function.
It's an activation function defined as the positive part of its argument in the
context of neural networks.
The function takes a vector of K real numbers as input and applies max(x, 0) element-wise.
After passing through ReLU, each element of the vector is either 0 or a positive real number.
Script inspired by its corresponding Wikipedia article
https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
"""
Implements the relu function
Parameters:
vector (np.array,list,tuple): A numpy array of shape (1,n)
consisting of real values or a similar list,tuple
Returns:
relu_vec (np.array): The input numpy array, after applying
relu.
>>> vec = np.array([-1, 0, 5])
>>> relu(vec)
array([0, 0, 5])
"""
# compare two arrays and then return element-wise maxima.
return np.maximum(0, vector)
if __name__ == "__main__":
    print(relu([-1, 0, 5]))  # --> [0, 0, 5]
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/neural_network/activation_functions/leaky_rectified_linear_unit.py | neural_network/activation_functions/leaky_rectified_linear_unit.py | """
Leaky Rectified Linear Unit (Leaky ReLU)
Use Case: Leaky ReLU addresses the problem of the vanishing gradient.
For more detailed information, you can refer to the following link:
https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Leaky_ReLU
"""
import numpy as np
def leaky_rectified_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
"""
Implements the LeakyReLU activation function.
Parameters:
vector (np.ndarray): The input array for LeakyReLU activation.
alpha (float): The slope for negative values.
Returns:
np.ndarray: The input array after applying the LeakyReLU activation.
    Formula: f(x) = x if x > 0 else alpha * x
Examples:
>>> leaky_rectified_linear_unit(vector=np.array([2.3,0.6,-2,-3.8]), alpha=0.3)
array([ 2.3 , 0.6 , -0.6 , -1.14])
>>> leaky_rectified_linear_unit(np.array([-9.2, -0.3, 0.45, -4.56]), alpha=0.067)
array([-0.6164 , -0.0201 , 0.45 , -0.30552])
"""
return np.where(vector > 0, vector, alpha * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/tribonacci.py | dynamic_programming/tribonacci.py | # Tribonacci sequence using Dynamic Programming
def tribonacci(num: int) -> list[int]:
"""
    Given a number n, return a list of the first n Tribonacci numbers.
>>> tribonacci(5)
[0, 0, 1, 1, 2]
>>> tribonacci(8)
[0, 0, 1, 1, 2, 4, 7, 13]
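    >>> tribonacci(1)
    [0]
    >>> tribonacci(2)
    [0, 0]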
"""
    dp = [0] * num
    if num > 2:
        dp[2] = 1
for i in range(3, num):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/wildcard_matching.py | dynamic_programming/wildcard_matching.py | """
Author : ilyas dahhou
Date : Oct 7, 2023
Task:
Given an input string and a pattern, implement wildcard pattern matching with support
for '?' and '*' where:
'?' matches any single character.
'*' matches any sequence of characters (including the empty sequence).
The matching should cover the entire input string (not partial).
Runtime complexity: O(m * n)
The implementation was tested on the
leetcode: https://leetcode.com/problems/wildcard-matching/
"""
def is_match(string: str, pattern: str) -> bool:
"""
>>> is_match("", "")
True
>>> is_match("aa", "a")
False
>>> is_match("abc", "abc")
True
>>> is_match("abc", "*c")
True
>>> is_match("abc", "a*")
True
>>> is_match("abc", "*a*")
True
>>> is_match("abc", "?b?")
True
>>> is_match("abc", "*?")
True
>>> is_match("abc", "a*d")
False
>>> is_match("abc", "a*c?")
False
>>> is_match('baaabab','*****ba*****ba')
False
>>> is_match('baaabab','*****ba*****ab')
True
>>> is_match('aa','*')
True
"""
    dp = [[False] * (len(pattern) + 1) for _ in range(len(string) + 1)]
dp[0][0] = True
# Fill in the first row
for j, char in enumerate(pattern, 1):
if char == "*":
dp[0][j] = dp[0][j - 1]
# Fill in the rest of the DP table
for i, s_char in enumerate(string, 1):
for j, p_char in enumerate(pattern, 1):
if p_char in (s_char, "?"):
dp[i][j] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
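                # '*' either consumes one more character of the string
                # (dp[i - 1][j]) or matches the empty sequence (dp[i][j - 1])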
dp[i][j] = dp[i - 1][j] or dp[i][j - 1]
return dp[len(string)][len(pattern)]
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{is_match('baaabab','*****ba*****ab') = }")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/longest_common_substring.py | dynamic_programming/longest_common_substring.py | """
Longest Common Substring Problem Statement:
Given two sequences, find the
longest common substring present in both of them. A substring is
necessarily continuous.
Example:
``abcdef`` and ``xabded`` have two longest common substrings, ``ab`` or ``de``.
Therefore, the algorithm should return any one of them.
"""
def longest_common_substring(text1: str, text2: str) -> str:
"""
Finds the longest common substring between two strings.
>>> longest_common_substring("", "")
''
>>> longest_common_substring("a","")
''
>>> longest_common_substring("", "a")
''
>>> longest_common_substring("a", "a")
'a'
>>> longest_common_substring("abcdef", "bcd")
'bcd'
>>> longest_common_substring("abcdef", "xabded")
'ab'
>>> longest_common_substring("GeeksforGeeks", "GeeksQuiz")
'Geeks'
>>> longest_common_substring("abcdxyz", "xyzabcd")
'abcd'
>>> longest_common_substring("zxabcdezy", "yzabcdezx")
'abcdez'
>>> longest_common_substring("OldSite:GeeksforGeeks.org", "NewSite:GeeksQuiz.com")
'Site:Geeks'
>>> longest_common_substring(1, 1)
Traceback (most recent call last):
...
ValueError: longest_common_substring() takes two strings for inputs
"""
if not (isinstance(text1, str) and isinstance(text2, str)):
raise ValueError("longest_common_substring() takes two strings for inputs")
if not text1 or not text2:
return ""
text1_length = len(text1)
text2_length = len(text2)
dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
end_pos = 0
max_length = 0
for i in range(1, text1_length + 1):
for j in range(1, text2_length + 1):
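            # a character match extends the common suffix ending at i - 1 and j - 1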
if text1[i - 1] == text2[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
if dp[i][j] > max_length:
end_pos = i
max_length = dp[i][j]
return text1[end_pos - max_length : end_pos]
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/abbreviation.py | dynamic_programming/abbreviation.py | """
https://www.hackerrank.com/challenges/abbr/problem
You can perform the following operations on string a:
1. Capitalize zero or more of a's lowercase letters at some index i
(i.e., make them uppercase).
2. Delete all of the remaining lowercase letters in a.
Given two strings a and b, determine whether it is possible to make a equal to b.
Example:
a = "daBcd" and b = "ABC"
daBcd -> capitalize a and c (dABCd) -> delete d (ABC)
"""
def abbr(a: str, b: str) -> bool:
"""
>>> abbr("daBcd", "ABC")
True
>>> abbr("dBcd", "ABC")
False
"""
n = len(a)
m = len(b)
dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
dp[0][0] = True
for i in range(n):
for j in range(m + 1):
if dp[i][j]:
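                # Two transitions from a reachable state: capitalize a[i] to match
                # b[j] (advance both indices), or delete a[i] if it is lowercase
                # (advance i only)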
if j < m and a[i].upper() == b[j]:
dp[i + 1][j + 1] = True
if a[i].islower():
dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/optimal_binary_search_tree.py | dynamic_programming/optimal_binary_search_tree.py | #!/usr/bin/env python3
# This Python program implements an optimal binary search tree (abbreviated BST)
# building dynamic programming algorithm that delivers O(n^2) performance.
#
# The goal of the optimal BST problem is to build a low-cost BST for a
# given set of nodes, each with its own key and frequency. The frequency
# of the node is defined as how many times the node is searched.
# The search cost of a binary search tree is given by this formula:
#
# cost(1, n) = sum{i = 1 to n}((depth(node_i) + 1) * node_i_freq)
#
# where n is number of nodes in the BST. The characteristic of low-cost
# BSTs is having a faster overall search time than other implementations.
# The reason for their fast search time is that the nodes with high
# frequencies will be placed near the root of the tree while the nodes
# with low frequencies will be placed near the leaves of the tree thus
# reducing search time in the most frequent instances.
import sys
from random import randint
class Node:
"""Binary Search Tree Node"""
def __init__(self, key, freq):
self.key = key
self.freq = freq
def __str__(self):
"""
>>> str(Node(1, 2))
'Node(key=1, freq=2)'
"""
return f"Node(key={self.key}, freq={self.freq})"
def print_binary_search_tree(root, key, i, j, parent, is_left):
"""
Recursive function to print a BST from a root table.
>>> key = [3, 8, 9, 10, 17, 21]
>>> root = [[0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 3], [0, 0, 2, 3, 3, 3], \
[0, 0, 0, 3, 3, 3], [0, 0, 0, 0, 4, 5], [0, 0, 0, 0, 0, 5]]
>>> print_binary_search_tree(root, key, 0, 5, -1, False)
8 is the root of the binary search tree.
3 is the left child of key 8.
10 is the right child of key 8.
9 is the left child of key 10.
21 is the right child of key 10.
17 is the left child of key 21.
"""
if i > j or i < 0 or j > len(root) - 1:
return
node = root[i][j]
if parent == -1: # root does not have a parent
print(f"{key[node]} is the root of the binary search tree.")
elif is_left:
print(f"{key[node]} is the left child of key {parent}.")
else:
print(f"{key[node]} is the right child of key {parent}.")
print_binary_search_tree(root, key, i, node - 1, key[node], True)
print_binary_search_tree(root, key, node + 1, j, key[node], False)
def find_optimal_binary_search_tree(nodes):
"""
This function calculates and prints the optimal binary search tree.
The dynamic programming algorithm below runs in O(n^2) time.
Implemented from CLRS (Introduction to Algorithms) book.
https://en.wikipedia.org/wiki/Introduction_to_Algorithms
>>> find_optimal_binary_search_tree([Node(12, 8), Node(10, 34), Node(20, 50), \
Node(42, 3), Node(25, 40), Node(37, 30)])
Binary search tree nodes:
Node(key=10, freq=34)
Node(key=12, freq=8)
Node(key=20, freq=50)
Node(key=25, freq=40)
Node(key=37, freq=30)
Node(key=42, freq=3)
<BLANKLINE>
The cost of optimal BST for given tree nodes is 324.
20 is the root of the binary search tree.
10 is the left child of key 20.
12 is the right child of key 10.
25 is the right child of key 20.
37 is the right child of key 25.
42 is the right child of key 37.
"""
# Tree nodes must be sorted first, the code below sorts the keys in
# increasing order and rearrange its frequencies accordingly.
nodes.sort(key=lambda node: node.key)
n = len(nodes)
keys = [nodes[i].key for i in range(n)]
freqs = [nodes[i].freq for i in range(n)]
    # This 2D array stores the overall tree cost (minimized as much as possible);
# for a single key, cost is equal to frequency of the key.
dp = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
    # total[i][j] stores the sum of key frequencies between i and j inclusive
    # in the nodes array
total = [[freqs[i] if i == j else 0 for j in range(n)] for i in range(n)]
# stores tree roots that will be used later for constructing binary search tree
root = [[i if i == j else 0 for j in range(n)] for i in range(n)]
for interval_length in range(2, n + 1):
for i in range(n - interval_length + 1):
j = i + interval_length - 1
dp[i][j] = sys.maxsize # set the value to "infinity"
total[i][j] = total[i][j - 1] + freqs[j]
# Apply Knuth's optimization
# Loop without optimization: for r in range(i, j + 1):
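            # Knuth's bound root[i][j - 1] <= root[i][j] <= root[i + 1][j] shrinks
            # the candidate range for r, reducing the total runtime to O(n^2)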
            for r in range(root[i][j - 1], root[i + 1][j] + 1):  # r is a temporary root
left = dp[i][r - 1] if r != i else 0 # optimal cost for left subtree
right = dp[r + 1][j] if r != j else 0 # optimal cost for right subtree
cost = left + total[i][j] + right
if dp[i][j] > cost:
dp[i][j] = cost
root[i][j] = r
print("Binary search tree nodes:")
for node in nodes:
print(node)
print(f"\nThe cost of optimal BST for given tree nodes is {dp[0][n - 1]}.")
print_binary_search_tree(root, keys, 0, n - 1, -1, False)
def main():
# A sample binary search tree
nodes = [Node(i, randint(1, 50)) for i in range(10, 0, -1)]
find_optimal_binary_search_tree(nodes)
if __name__ == "__main__":
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/word_break.py | dynamic_programming/word_break.py | """
Author : Alexander Pantyukhin
Date : December 12, 2022
Task:
Given a string and a list of words, return true if the string can be
segmented into a space-separated sequence of one or more words.
Note that the same word may be reused
multiple times in the segmentation.
Implementation notes: Trie + top-down dynamic programming.
The Trie will be used to store the words. It will be useful for scanning
available words for the current position in the string.
Leetcode:
https://leetcode.com/problems/word-break/description/
Runtime: O(n * n)
Space: O(n)
"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
"""
    Return True if the string can be segmented into a sequence of words from the list, False otherwise.
>>> word_break("applepenapple", ["apple","pen"])
True
>>> word_break("catsandog", ["cats","dog","sand","and","cat"])
False
>>> word_break("cars", ["car","ca","rs"])
True
>>> word_break('abc', [])
False
>>> word_break(123, ['a'])
Traceback (most recent call last):
...
ValueError: the string should be not empty string
>>> word_break('', ['a'])
Traceback (most recent call last):
...
ValueError: the string should be not empty string
>>> word_break('abc', [123])
Traceback (most recent call last):
...
ValueError: the words should be a list of non-empty strings
>>> word_break('abc', [''])
Traceback (most recent call last):
...
ValueError: the words should be a list of non-empty strings
"""
# Validation
if not isinstance(string, str) or len(string) == 0:
raise ValueError("the string should be not empty string")
if not isinstance(words, list) or not all(
isinstance(item, str) and len(item) > 0 for item in words
):
raise ValueError("the words should be a list of non-empty strings")
# Build trie
trie: dict[str, Any] = {}
word_keeper_key = "WORD_KEEPER"
for word in words:
trie_node = trie
for c in word:
if c not in trie_node:
trie_node[c] = {}
trie_node = trie_node[c]
trie_node[word_keeper_key] = True
len_string = len(string)
# Dynamic programming method
@functools.cache
def is_breakable(index: int) -> bool:
"""
>>> string = 'a'
>>> is_breakable(1)
True
"""
if index == len_string:
return True
trie_node: Any = trie
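        # Walk the trie from the current index; whenever a stored word ends at
        # position i, check whether the rest of the string is also breakable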
for i in range(index, len_string):
trie_node = trie_node.get(string[i], None)
if trie_node is None:
return False
if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
return True
return False
return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/regex_match.py | dynamic_programming/regex_match.py | """
Regex matching check if a text matches pattern or not.
Pattern:
1. ``.`` Matches any single character.
2. ``*`` Matches zero or more of the preceding element.
More info:
https://medium.com/trick-the-interviwer/regular-expression-matching-9972eb74c03
"""
def recursive_match(text: str, pattern: str) -> bool:
r"""
Recursive matching algorithm.
| Time complexity: O(2^(\|text\| + \|pattern\|))
| Space complexity: Recursion depth is O(\|text\| + \|pattern\|).
:param text: Text to match.
:param pattern: Pattern to match.
:return: ``True`` if `text` matches `pattern`, ``False`` otherwise.
>>> recursive_match('abc', 'a.c')
True
>>> recursive_match('abc', 'af*.c')
True
>>> recursive_match('abc', 'a.c*')
True
>>> recursive_match('abc', 'a.c*d')
False
>>> recursive_match('aa', '.*')
True
"""
if not pattern:
return not text
if not text:
return pattern[-1] == "*" and recursive_match(text, pattern[:-2])
if text[-1] == pattern[-1] or pattern[-1] == ".":
return recursive_match(text[:-1], pattern[:-1])
if pattern[-1] == "*":
return recursive_match(text[:-1], pattern) or recursive_match(
text, pattern[:-2]
)
return False
def dp_match(text: str, pattern: str) -> bool:
r"""
Dynamic programming matching algorithm.
| Time complexity: O(\|text\| * \|pattern\|)
| Space complexity: O(\|text\| * \|pattern\|)
:param text: Text to match.
:param pattern: Pattern to match.
:return: ``True`` if `text` matches `pattern`, ``False`` otherwise.
>>> dp_match('abc', 'a.c')
True
>>> dp_match('abc', 'af*.c')
True
>>> dp_match('abc', 'a.c*')
True
>>> dp_match('abc', 'a.c*d')
False
>>> dp_match('aa', '.*')
True
"""
m = len(text)
n = len(pattern)
dp = [[False for _ in range(n + 1)] for _ in range(m + 1)]
dp[0][0] = True
for j in range(1, n + 1):
dp[0][j] = pattern[j - 1] == "*" and dp[0][j - 2]
for i in range(1, m + 1):
for j in range(1, n + 1):
if pattern[j - 1] in {".", text[i - 1]}:
dp[i][j] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
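                # '*' matches zero occurrences of the preceding element
                # (dp[i][j - 2]) or, if that element also matches text[i - 1],
                # one more occurrence of it (dp[i - 1][j])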
dp[i][j] = dp[i][j - 2]
if pattern[j - 2] in {".", text[i - 1]}:
dp[i][j] |= dp[i - 1][j]
else:
dp[i][j] = False
return dp[m][n]
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/rod_cutting.py | dynamic_programming/rod_cutting.py | """
This module provides two implementations for the rod-cutting problem:
1. A naive recursive implementation which has an exponential runtime
2. Two dynamic programming implementations which have quadratic runtime
The rod-cutting problem is the problem of finding the maximum possible revenue
obtainable from a rod of length ``n`` given a list of prices for each integral piece
of the rod. The maximum revenue can thus be obtained by cutting the rod and selling the
pieces separately or not cutting it at all if the price of it is the maximum obtainable.
"""
def naive_cut_rod_recursive(n: int, prices: list):
"""
    Solves the rod-cutting problem naively, without the benefit of dynamic
    programming. As a result, the same sub-problems are solved several times,
    leading to an exponential runtime
Runtime: O(2^n)
Arguments
---------
* `n`: int, the length of the rod
    * `prices`: list, the prices for each piece of rod. ``p[i-1]`` is the
price for a rod of length ``i``
Returns
-------
The maximum revenue obtainable for a rod of length `n` given the list of prices
for each piece.
Examples
--------
>>> naive_cut_rod_recursive(4, [1, 5, 8, 9])
10
>>> naive_cut_rod_recursive(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
30
"""
_enforce_args(n, prices)
if n == 0:
return 0
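    # Recurrence: revenue(n) = max over 1 <= i <= n of prices[i - 1] + revenue(n - i)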
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue
def top_down_cut_rod(n: int, prices: list):
"""
Constructs a top-down dynamic programming solution for the rod-cutting
problem via memoization. This function serves as a wrapper for
``_top_down_cut_rod_recursive``
Runtime: O(n^2)
Arguments
---------
* `n`: int, the length of the rod
    * `prices`: list, the prices for each piece of rod. ``p[i-1]`` is the
price for a rod of length ``i``
.. note::
        For convenience and because Python's lists use ``0``-indexing, ``length(max_rev)
= n + 1``, to accommodate for the revenue obtainable from a rod of length ``0``.
Returns
-------
The maximum revenue obtainable for a rod of length `n` given the list of prices
for each piece.
Examples
--------
>>> top_down_cut_rod(4, [1, 5, 8, 9])
10
>>> top_down_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
30
"""
_enforce_args(n, prices)
max_rev = [float("-inf") for _ in range(n + 1)]
return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
"""
Constructs a top-down dynamic programming solution for the rod-cutting problem
via memoization.
Runtime: O(n^2)
Arguments
---------
* `n`: int, the length of the rod
    * `prices`: list, the prices for each piece of rod. ``p[i-1]`` is the
price for a rod of length ``i``
* `max_rev`: list, the computed maximum revenue for a piece of rod.
``max_rev[i]`` is the maximum revenue obtainable for a rod of length ``i``
Returns
-------
The maximum revenue obtainable for a rod of length `n` given the list of prices
for each piece.
"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
max_revenue = float("-inf")
for i in range(1, n + 1):
max_revenue = max(
max_revenue,
prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
)
max_rev[n] = max_revenue
return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
"""
Constructs a bottom-up dynamic programming solution for the rod-cutting problem
Runtime: O(n^2)
Arguments
---------
* `n`: int, the maximum length of the rod.
    * `prices`: list, the prices for each piece of rod. ``p[i-1]`` is the
price for a rod of length ``i``
Returns
-------
The maximum revenue obtainable from cutting a rod of length `n` given
the prices for each piece of rod p.
Examples
--------
>>> bottom_up_cut_rod(4, [1, 5, 8, 9])
10
>>> bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30])
30
"""
_enforce_args(n, prices)
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
max_rev = [float("-inf") for _ in range(n + 1)]
max_rev[0] = 0
for i in range(1, n + 1):
max_revenue_i = max_rev[i]
for j in range(1, i + 1):
max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
max_rev[i] = max_revenue_i
return max_rev[n]
def _enforce_args(n: int, prices: list):
"""
Basic checks on the arguments to the rod-cutting algorithms
* `n`: int, the length of the rod
* `prices`: list, the price list for each piece of rod.
Throws ``ValueError``:
if `n` is negative or there are fewer items in the price list than the length of
the rod
"""
if n < 0:
msg = f"n must be greater than or equal to 0. Got n = {n}"
raise ValueError(msg)
if n > len(prices):
msg = (
"Each integral piece of rod must have a corresponding price. "
f"Got n = {n} but length of prices = {len(prices)}"
)
raise ValueError(msg)
def main():
prices = [6, 10, 12, 15, 20, 23]
n = len(prices)
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
expected_max_revenue = 36
max_rev_top_down = top_down_cut_rod(n, prices)
max_rev_bottom_up = bottom_up_cut_rod(n, prices)
max_rev_naive = naive_cut_rod_recursive(n, prices)
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/fast_fibonacci.py | dynamic_programming/fast_fibonacci.py | #!/usr/bin/env python3
"""
This program calculates the nth Fibonacci number in O(log(n)).
It's possible to calculate F(1_000_000) in less than a second.
"""
from __future__ import annotations
import sys
def fibonacci(n: int) -> int:
"""
return F(n)
>>> [fibonacci(i) for i in range(13)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
"""
if n < 0:
raise ValueError("Negative arguments are not supported")
return _fib(n)[0]
# returns (F(n), F(n-1))
def _fib(n: int) -> tuple[int, int]:
if n == 0: # (F(0), F(1))
return (0, 1)
# F(2n) = F(n)[2F(n+1) - F(n)]
# F(2n+1) = F(n+1)^2+F(n)^2
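    # _fib(n // 2) yields (F(k), F(k + 1)) for k = n // 2, so a = F(k) and b = F(k + 1)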
a, b = _fib(n // 2)
c = a * (b * 2 - a)
d = a * a + b * b
return (d, c + d) if n % 2 else (c, d)
if __name__ == "__main__":
n = int(sys.argv[1])
print(f"fibonacci({n}) is {fibonacci(n)}")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/longest_increasing_subsequence.py | dynamic_programming/longest_increasing_subsequence.py | """
Author : Mehdi ALAOUI
This is a pure Python implementation of Dynamic Programming solution to the longest
increasing subsequence of a given sequence.
The problem is:
Given an array, find the longest increasing subsequence in that given array and
return it.
Example:
``[10, 22, 9, 33, 21, 50, 41, 60, 80]`` as input will return
``[10, 22, 33, 41, 60, 80]`` as output
"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]: # This function is recursive
"""
Some examples
>>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
[10, 22, 33, 41, 60, 80]
>>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9])
[1, 2, 3, 9]
>>> longest_subsequence([28, 26, 12, 23, 35, 39])
[12, 23, 35, 39]
>>> longest_subsequence([9, 8, 7, 6, 5, 7])
[5, 7]
>>> longest_subsequence([1, 1, 1])
[1, 1, 1]
>>> longest_subsequence([])
[]
"""
array_length = len(array)
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
pivot = array[0]
is_found = False
i = 1
longest_subseq: list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
is_found = True
temp_array = array[i:]
temp_array = longest_subsequence(temp_array)
if len(temp_array) > len(longest_subseq):
longest_subseq = temp_array
else:
i += 1
temp_array = [element for element in array[1:] if element >= pivot]
temp_array = [pivot, *longest_subsequence(temp_array)]
if len(temp_array) > len(longest_subseq):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/viterbi.py | dynamic_programming/viterbi.py | from typing import Any
def viterbi(
observations_space: list,
states_space: list,
initial_probabilities: dict,
transition_probabilities: dict,
emission_probabilities: dict,
) -> list:
"""
    Viterbi Algorithm: finds the most likely sequence of hidden states
    given the sequence of observations.
https://en.wikipedia.org/wiki/Viterbi_algorithm
Wikipedia example
>>> observations = ["normal", "cold", "dizzy"]
>>> states = ["Healthy", "Fever"]
>>> start_p = {"Healthy": 0.6, "Fever": 0.4}
>>> trans_p = {
... "Healthy": {"Healthy": 0.7, "Fever": 0.3},
... "Fever": {"Healthy": 0.4, "Fever": 0.6},
... }
>>> emit_p = {
... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
... }
>>> viterbi(observations, states, start_p, trans_p, emit_p)
['Healthy', 'Healthy', 'Fever']
>>> viterbi((), states, start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: There's an empty parameter
>>> viterbi(observations, (), start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: There's an empty parameter
>>> viterbi(observations, states, {}, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: There's an empty parameter
>>> viterbi(observations, states, start_p, {}, emit_p)
Traceback (most recent call last):
...
ValueError: There's an empty parameter
>>> viterbi(observations, states, start_p, trans_p, {})
Traceback (most recent call last):
...
ValueError: There's an empty parameter
>>> viterbi("invalid", states, start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: observations_space must be a list
>>> viterbi(["valid", 123], states, start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: observations_space must be a list of strings
>>> viterbi(observations, "invalid", start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: states_space must be a list
>>> viterbi(observations, ["valid", 123], start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: states_space must be a list of strings
>>> viterbi(observations, states, "invalid", trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: initial_probabilities must be a dict
>>> viterbi(observations, states, {2:2}, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: initial_probabilities all keys must be strings
>>> viterbi(observations, states, {"a":2}, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: initial_probabilities all values must be float
>>> viterbi(observations, states, start_p, "invalid", emit_p)
Traceback (most recent call last):
...
ValueError: transition_probabilities must be a dict
>>> viterbi(observations, states, start_p, {"a":2}, emit_p)
Traceback (most recent call last):
...
ValueError: transition_probabilities all values must be dict
>>> viterbi(observations, states, start_p, {2:{2:2}}, emit_p)
Traceback (most recent call last):
...
ValueError: transition_probabilities all keys must be strings
>>> viterbi(observations, states, start_p, {"a":{2:2}}, emit_p)
Traceback (most recent call last):
...
ValueError: transition_probabilities all keys must be strings
>>> viterbi(observations, states, start_p, {"a":{"b":2}}, emit_p)
Traceback (most recent call last):
...
ValueError: transition_probabilities nested dictionary all values must be float
>>> viterbi(observations, states, start_p, trans_p, "invalid")
Traceback (most recent call last):
...
ValueError: emission_probabilities must be a dict
>>> viterbi(observations, states, start_p, trans_p, None)
Traceback (most recent call last):
...
ValueError: There's an empty parameter
"""
_validation(
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
)
    # Create data structures and fill in the initial step
probabilities: dict = {}
pointers: dict = {}
for state in states_space:
observation = observations_space[0]
probabilities[(state, observation)] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
pointers[(state, observation)] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
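    # Viterbi recurrence: prob(state, t) = max over k of prob(k, t - 1)
    # * transition_probabilities[k][state] * emission_probabilities[state][observation]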
for o in range(1, len(observations_space)):
observation = observations_space[o]
prior_observation = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
arg_max = ""
max_probability = -1
for k_state in states_space:
probability = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
max_probability = probability
arg_max = k_state
# Update probabilities and pointers dicts
probabilities[(state, observation)] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
pointers[(state, observation)] = arg_max
# The final observation
final_observation = observations_space[len(observations_space) - 1]
# argmax for given final observation
arg_max = ""
max_probability = -1
for k_state in states_space:
probability = probabilities[(k_state, final_observation)]
if probability > max_probability:
max_probability = probability
arg_max = k_state
last_state = arg_max
# Process pointers backwards
previous = last_state
result = []
for o in range(len(observations_space) - 1, -1, -1):
result.append(previous)
previous = pointers[previous, observations_space[o]]
result.reverse()
return result
def _validation(
observations_space: Any,
states_space: Any,
initial_probabilities: Any,
transition_probabilities: Any,
emission_probabilities: Any,
) -> None:
"""
>>> observations = ["normal", "cold", "dizzy"]
>>> states = ["Healthy", "Fever"]
>>> start_p = {"Healthy": 0.6, "Fever": 0.4}
>>> trans_p = {
... "Healthy": {"Healthy": 0.7, "Fever": 0.3},
... "Fever": {"Healthy": 0.4, "Fever": 0.6},
... }
>>> emit_p = {
... "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
... "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
... }
>>> _validation(observations, states, start_p, trans_p, emit_p)
>>> _validation([], states, start_p, trans_p, emit_p)
Traceback (most recent call last):
...
ValueError: There's an empty parameter
"""
_validate_not_empty(
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
)
_validate_lists(observations_space, states_space)
_validate_dicts(
initial_probabilities, transition_probabilities, emission_probabilities
)
def _validate_not_empty(
observations_space: Any,
states_space: Any,
initial_probabilities: Any,
transition_probabilities: Any,
emission_probabilities: Any,
) -> None:
"""
>>> _validate_not_empty(["a"], ["b"], {"c":0.5},
... {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
>>> _validate_not_empty(["a"], ["b"], {"c":0.5}, {}, {"f": {"g": 0.7}})
Traceback (most recent call last):
...
ValueError: There's an empty parameter
>>> _validate_not_empty(["a"], ["b"], None, {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
Traceback (most recent call last):
...
ValueError: There's an empty parameter
"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]
):
raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
"""
>>> _validate_lists(["a"], ["b"])
>>> _validate_lists(1234, ["b"])
Traceback (most recent call last):
...
ValueError: observations_space must be a list
>>> _validate_lists(["a"], [3])
Traceback (most recent call last):
...
ValueError: states_space must be a list of strings
"""
_validate_list(observations_space, "observations_space")
_validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
"""
>>> _validate_list(["a"], "mock_name")
>>> _validate_list("a", "mock_name")
Traceback (most recent call last):
...
ValueError: mock_name must be a list
>>> _validate_list([0.5], "mock_name")
Traceback (most recent call last):
...
ValueError: mock_name must be a list of strings
"""
if not isinstance(_object, list):
msg = f"{var_name} must be a list"
raise ValueError(msg)
else:
for x in _object:
if not isinstance(x, str):
msg = f"{var_name} must be a list of strings"
raise ValueError(msg)
def _validate_dicts(
initial_probabilities: Any,
transition_probabilities: Any,
emission_probabilities: Any,
) -> None:
"""
>>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
>>> _validate_dicts("invalid", {"d": {"e": 0.6}}, {"f": {"g": 0.7}})
Traceback (most recent call last):
...
ValueError: initial_probabilities must be a dict
>>> _validate_dicts({"c":0.5}, {2: {"e": 0.6}}, {"f": {"g": 0.7}})
Traceback (most recent call last):
...
ValueError: transition_probabilities all keys must be strings
>>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {2: 0.7}})
Traceback (most recent call last):
...
ValueError: emission_probabilities all keys must be strings
>>> _validate_dicts({"c":0.5}, {"d": {"e": 0.6}}, {"f": {"g": "h"}})
Traceback (most recent call last):
...
ValueError: emission_probabilities nested dictionary all values must be float
"""
_validate_dict(initial_probabilities, "initial_probabilities", float)
_validate_nested_dict(transition_probabilities, "transition_probabilities")
_validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
"""
>>> _validate_nested_dict({"a":{"b": 0.5}}, "mock_name")
>>> _validate_nested_dict("invalid", "mock_name")
Traceback (most recent call last):
...
ValueError: mock_name must be a dict
>>> _validate_nested_dict({"a": 8}, "mock_name")
Traceback (most recent call last):
...
ValueError: mock_name all values must be dict
>>> _validate_nested_dict({"a":{2: 0.5}}, "mock_name")
Traceback (most recent call last):
...
ValueError: mock_name all keys must be strings
>>> _validate_nested_dict({"a":{"b": 4}}, "mock_name")
Traceback (most recent call last):
...
ValueError: mock_name nested dictionary all values must be float
"""
_validate_dict(_object, var_name, dict)
for x in _object.values():
_validate_dict(x, var_name, float, True)
def _validate_dict(
_object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
"""
>>> _validate_dict({"b": 0.5}, "mock_name", float)
>>> _validate_dict("invalid", "mock_name", float)
Traceback (most recent call last):
...
ValueError: mock_name must be a dict
>>> _validate_dict({"a": 8}, "mock_name", dict)
Traceback (most recent call last):
...
ValueError: mock_name all values must be dict
>>> _validate_dict({2: 0.5}, "mock_name",float, True)
Traceback (most recent call last):
...
ValueError: mock_name all keys must be strings
>>> _validate_dict({"b": 4}, "mock_name", float,True)
Traceback (most recent call last):
...
ValueError: mock_name nested dictionary all values must be float
"""
if not isinstance(_object, dict):
msg = f"{var_name} must be a dict"
raise ValueError(msg)
if not all(isinstance(x, str) for x in _object):
msg = f"{var_name} all keys must be strings"
raise ValueError(msg)
if not all(isinstance(x, value_type) for x in _object.values()):
nested_text = "nested dictionary " if nested else ""
msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/fibonacci.py | dynamic_programming/fibonacci.py | """
This is a pure Python implementation of Dynamic Programming solution to the fibonacci
sequence problem.
"""
class Fibonacci:
def __init__(self) -> None:
self.sequence = [0, 1]
def get(self, index: int) -> list:
"""
        Return the first `index` Fibonacci numbers, calculating and caching any
        numbers that have not been computed yet.
>>> Fibonacci().get(10)
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
>>> Fibonacci().get(5)
[0, 1, 1, 2, 3]
"""
if (difference := index - (len(self.sequence) - 2)) >= 1:
for _ in range(difference):
self.sequence.append(self.sequence[-1] + self.sequence[-2])
return self.sequence[:index]
def main() -> None:
print(
"Fibonacci Series Using Dynamic Programming\n",
"Enter the index of the Fibonacci number you want to calculate ",
"in the prompt below. (To exit enter exit or Ctrl-C)\n",
sep="",
)
fibonacci = Fibonacci()
while True:
prompt: str = input(">> ")
if prompt in {"exit", "quit"}:
break
try:
index: int = int(prompt)
except ValueError:
print("Enter a number or 'exit'")
continue
print(fibonacci.get(index))
if __name__ == "__main__":
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/knapsack.py | dynamic_programming/knapsack.py | """
Given weights and values of n items, put these items in a knapsack of
capacity W to get the maximum total value in the knapsack.
Note that only the 0-1 knapsack problem with integer weights is solvable
using dynamic programming.
"""
def mf_knapsack(i, wt, val, j):
"""
    This code uses the concept of memory functions (memoization). Only the
    subproblems that are actually needed get solved, unlike the bottom-up example below.
    `f` is a global 2D table initially filled with ``-1`` s
"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
val = mf_knapsack(i - 1, wt, val, j)
else:
val = max(
mf_knapsack(i - 1, wt, val, j),
mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
)
f[i][j] = val
return f[i][j]
def knapsack(w, wt, val, n):
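    """
    Bottom-up 0-1 knapsack: dp[i][w_] holds the best value achievable using the
    first i items within capacity w_. Returns the optimal value and the DP table.
    """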
dp = [[0] * (w + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
for w_ in range(1, w + 1):
if wt[i - 1] <= w_:
dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
else:
dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
"""
Solves the integer weights knapsack problem returns one of
the several possible optimal subsets.
Parameters
----------
* `w`: int, the total maximum weight for the given knapsack problem.
* `wt`: list, the vector of weights for all items where ``wt[i]`` is the weight
of the ``i``-th item.
* `val`: list, the vector of values for all items where ``val[i]`` is the value
of the ``i``-th item
Returns
-------
* `optimal_val`: float, the optimal value for the given knapsack problem
    * `example_optimal_set`: set, the indices of one of the optimal subsets
which gave rise to the optimal value.
Examples
--------
>>> knapsack_with_example_solution(10, [1, 3, 5, 2], [10, 20, 100, 22])
(142, {2, 3, 4})
>>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4])
(8, {3, 4})
>>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4])
Traceback (most recent call last):
...
ValueError: The number of weights must be the same as the number of values.
But got 4 weights and 3 values
"""
if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
raise ValueError(
"Both the weights and values vectors must be either lists or tuples"
)
num_items = len(wt)
if num_items != len(val):
msg = (
"The number of weights must be the same as the number of values.\n"
f"But got {num_items} weights and {len(val)} values"
)
raise ValueError(msg)
for i in range(num_items):
if not isinstance(wt[i], int):
msg = (
"All weights must be integers but got weight of "
f"type {type(wt[i])} at index {i}"
)
raise TypeError(msg)
optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optimal_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optimal_set)
    return optimal_val, example_optimal_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
"""
Recursively reconstructs one of the optimal subsets given
a filled DP table and the vector of weights
Parameters
----------
* `dp`: list of list, the table of a solved integer weight dynamic programming
problem
* `wt`: list or tuple, the vector of weights of the items
* `i`: int, the index of the item under consideration
* `j`: int, the current possible maximum weight
* `optimal_set`: set, the optimal subset so far. This gets modified by the function.
Returns
-------
``None``
"""
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(dp, wt, i - 1, j, optimal_set)
else:
optimal_set.add(i)
_construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
"""
Adding test case for knapsack
"""
val = [3, 2, 4, 4]
wt = [4, 3, 2, 3]
n = 4
w = 6
f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
optimal_solution, _ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_squares_to_represent_a_number.py | dynamic_programming/minimum_squares_to_represent_a_number.py | import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
"""
Count the number of minimum squares to represent a number
>>> minimum_squares_to_represent_a_number(25)
1
>>> minimum_squares_to_represent_a_number(37)
2
>>> minimum_squares_to_represent_a_number(21)
3
>>> minimum_squares_to_represent_a_number(58)
2
>>> minimum_squares_to_represent_a_number(-1)
Traceback (most recent call last):
...
ValueError: the value of input must not be a negative number
>>> minimum_squares_to_represent_a_number(0)
1
>>> minimum_squares_to_represent_a_number(12.34)
Traceback (most recent call last):
...
ValueError: the value of input must be a natural number
"""
if number != int(number):
raise ValueError("the value of input must be a natural number")
if number < 0:
raise ValueError("the value of input must not be a negative number")
if number == 0:
return 1
answers = [-1] * (number + 1)
answers[0] = 0
for i in range(1, number + 1):
answer = sys.maxsize
root = int(math.sqrt(i))
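        # try every perfect square j^2 <= i; using it leaves the subproblem i - j^2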
for j in range(1, root + 1):
current_answer = 1 + answers[i - (j**2)]
answer = min(answer, current_answer)
answers[i] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/longest_palindromic_subsequence.py | dynamic_programming/longest_palindromic_subsequence.py | """
author: Sanket Kittad
Given a string s, find the longest palindromic subsequence's length in s.
Input: s = "bbbab"
Output: 4
Explanation: One possible longest palindromic subsequence is "bbbb".
Leetcode link: https://leetcode.com/problems/longest-palindromic-subsequence/description/
"""
def longest_palindromic_subsequence(input_string: str) -> int:
"""
    This function returns the length of the longest palindromic subsequence in a string
>>> longest_palindromic_subsequence("bbbab")
4
>>> longest_palindromic_subsequence("bbabcbcab")
7
"""
n = len(input_string)
rev = input_string[::-1]
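    # the longest palindromic subsequence of s is the LCS of s and its reverse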
m = len(rev)
dp = [[-1] * (m + 1) for i in range(n + 1)]
for i in range(n + 1):
dp[i][0] = 0
for i in range(m + 1):
dp[0][i] = 0
    # fill the dp table: dp[i][j] is the LCS length of the first i characters
    # of the string and the first j characters of its reverse
for i in range(1, n + 1):
for j in range(1, m + 1):
# If characters at i and j are the same
# include them in the palindromic subsequence
if input_string[i - 1] == rev[j - 1]:
dp[i][j] = 1 + dp[i - 1][j - 1]
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/smith_waterman.py | dynamic_programming/smith_waterman.py | """
https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
The Smith-Waterman algorithm is a dynamic programming algorithm used for sequence
alignment. It is particularly useful for finding similarities between two sequences,
such as DNA or protein sequences. In this implementation, gaps are penalized
linearly, meaning that the score is reduced by a fixed amount for each gap introduced
in the alignment. However, it's important to note that the Smith-Waterman algorithm
supports other gap penalty methods as well.
"""
def score_function(
source_char: str,
target_char: str,
match: int = 1,
mismatch: int = -1,
gap: int = -2,
) -> int:
"""
Calculate the score for a character pair based on whether they match or mismatch.
Returns 1 if the characters match, -1 if they mismatch, and -2 if either of the
characters is a gap.
>>> score_function('A', 'A')
1
>>> score_function('A', 'C')
-1
>>> score_function('-', 'A')
-2
>>> score_function('A', '-')
-2
>>> score_function('-', '-')
-2
"""
if "-" in (source_char, target_char):
return gap
return match if source_char == target_char else mismatch
def smith_waterman(
query: str,
subject: str,
match: int = 1,
mismatch: int = -1,
gap: int = -2,
) -> list[list[int]]:
"""
Perform the Smith-Waterman local sequence alignment algorithm.
Returns a 2D list representing the score matrix. Each value in the matrix
corresponds to the score of the best local alignment ending at that point.
>>> smith_waterman('ACAC', 'CA')
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]]
>>> smith_waterman('acac', 'ca')
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]]
>>> smith_waterman('ACAC', 'ca')
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]]
>>> smith_waterman('acac', 'CA')
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]]
>>> smith_waterman('ACAC', '')
[[0], [0], [0], [0], [0]]
>>> smith_waterman('', 'CA')
[[0, 0, 0]]
>>> smith_waterman('AGT', 'AGT')
[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3]]
>>> smith_waterman('AGT', 'GTA')
[[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 0, 2, 0]]
>>> smith_waterman('AGT', 'GTC')
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0]]
>>> smith_waterman('AGT', 'G')
[[0, 0], [0, 0], [0, 1], [0, 0]]
>>> smith_waterman('G', 'AGT')
[[0, 0, 0, 0], [0, 0, 1, 0]]
>>> smith_waterman('AGT', 'AGTCT')
[[0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 2, 0, 0, 0], [0, 0, 0, 3, 1, 1]]
>>> smith_waterman('AGTCT', 'AGT')
[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 1], [0, 0, 0, 1]]
>>> smith_waterman('AGTCT', 'GTC')
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 1, 1]]
"""
# make both query and subject uppercase
query = query.upper()
subject = subject.upper()
# Initialize score matrix
m = len(query)
n = len(subject)
score = [[0] * (n + 1) for _ in range(m + 1)]
kwargs = {"match": match, "mismatch": mismatch, "gap": gap}
for i in range(1, m + 1):
for j in range(1, n + 1):
# Calculate scores for each cell
            diagonal = score[i - 1][j - 1] + score_function(
                query[i - 1], subject[j - 1], **kwargs
            )
delete = score[i - 1][j] + gap
insert = score[i][j - 1] + gap
# Take maximum score
            score[i][j] = max(0, diagonal, delete, insert)
return score
def traceback(score: list[list[int]], query: str, subject: str) -> str:
r"""
Perform traceback to find the optimal local alignment.
    Starts from the highest scoring cell in the matrix and traces back
    until a cell with score 0 is reached. Returns the alignment strings.
>>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'ACAC', 'CA')
'CA\nCA'
>>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'acac', 'ca')
'CA\nCA'
>>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'ACAC', 'ca')
'CA\nCA'
>>> traceback([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 2], [0, 1, 0]], 'acac', 'CA')
'CA\nCA'
>>> traceback([[0, 0, 0]], 'ACAC', '')
''
"""
# make both query and subject uppercase
query = query.upper()
subject = subject.upper()
# find the indices of the maximum value in the score matrix
max_value = float("-inf")
i_max = j_max = 0
for i, row in enumerate(score):
for j, value in enumerate(row):
if value > max_value:
max_value = value
i_max, j_max = i, j
# Traceback logic to find optimal alignment
i = i_max
j = j_max
align1 = ""
align2 = ""
gap = score_function("-", "-")
# guard against empty query or subject
if i == 0 or j == 0:
return ""
    # NOTE: the traceback assumes the default match/mismatch/gap scores that
    # were used to build the matrix
    while i > 0 and j > 0:
if score[i][j] == score[i - 1][j - 1] + score_function(
query[i - 1], subject[j - 1]
):
            # optimal path is diagonal: take both letters
align1 = query[i - 1] + align1
align2 = subject[j - 1] + align2
i -= 1
j -= 1
elif score[i][j] == score[i - 1][j] + gap:
            # optimal path is vertical: gap in the subject
align1 = query[i - 1] + align1
align2 = f"-{align2}"
i -= 1
else:
            # optimal path is horizontal: gap in the query
align1 = f"-{align1}"
align2 = subject[j - 1] + align2
j -= 1
return f"{align1}\n{align2}"
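# Editor's illustrative sketch (not part of the original module): the best
# local alignment score is simply the maximum entry of the Smith-Waterman
# matrix. The helper name is hypothetical.
def best_local_score(query: str, subject: str) -> int:
    """
    >>> best_local_score('ACAC', 'CA')
    2
    >>> best_local_score('AGT', 'AGT')
    3
    """
    return max(max(row) for row in smith_waterman(query, subject))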
if __name__ == "__main__":
query = "HEAGAWGHEE"
subject = "PAWHEAE"
score = smith_waterman(query, subject, match=1, mismatch=-1, gap=-2)
print(traceback(score, query, subject))
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/all_construct.py | dynamic_programming/all_construct.py | """
Program to list all the ways a target string can be
constructed from the given list of substrings
"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
"""
    returns a list of all the ways the string `target` can be
    constructed by concatenating strings from the given
    list `word_bank`
>>> all_construct("hello", ["he", "l", "o"])
[['he', 'l', 'l', 'o']]
>>> all_construct("purple",["purp","p","ur","le","purpl"])
[['purp', 'le'], ['p', 'ur', 'p', 'le']]
"""
word_bank = word_bank or []
# create a table
table_size: int = len(target) + 1
table: list[list[list[str]]] = []
for _ in range(table_size):
table.append([])
# seed value
table[0] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(table_size):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(word)] == word:
new_combinations: list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(word)] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(target)]:
combination.reverse()
return table[len(target)]
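# Editor's illustrative sketch (not in the original file): the number of ways
# to build `target` is just the length of the list all_construct returns.
# The helper name is hypothetical.
def count_construct(target: str, word_bank: list[str]) -> int:
    """
    >>> count_construct("purple", ["purp", "p", "ur", "le", "purpl"])
    2
    >>> count_construct("hello", ["he", "l", "o"])
    1
    """
    return len(all_construct(target, word_bank))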
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/edit_distance.py | dynamic_programming/edit_distance.py | """
Author : Turfa Auliarachman
Date : October 12, 2016
This is a pure Python implementation of Dynamic Programming solution to the edit
distance problem.
The problem is:
Given two strings A and B, find the minimum number of operations needed to
convert string A into string B. The permitted operations are removal,
insertion, and substitution.
"""
class EditDistance:
"""
    Use:
        solver = EditDistance()
        edit_distance = solver.min_dist_top_down(first_string, second_string)
        # or: edit_distance = solver.min_dist_bottom_up(first_string, second_string)
    """
def __init__(self):
self.word1 = ""
self.word2 = ""
self.dp = []
def __min_dist_top_down_dp(self, m: int, n: int) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.word1[m] == self.word2[n]:
self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
else:
insert = self.__min_dist_top_down_dp(m, n - 1)
delete = self.__min_dist_top_down_dp(m - 1, n)
replace = self.__min_dist_top_down_dp(m - 1, n - 1)
self.dp[m][n] = 1 + min(insert, delete, replace)
return self.dp[m][n]
def min_dist_top_down(self, word1: str, word2: str) -> int:
"""
>>> EditDistance().min_dist_top_down("intention", "execution")
5
>>> EditDistance().min_dist_top_down("intention", "")
9
>>> EditDistance().min_dist_top_down("", "")
0
"""
self.word1 = word1
self.word2 = word2
self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
def min_dist_bottom_up(self, word1: str, word2: str) -> int:
"""
>>> EditDistance().min_dist_bottom_up("intention", "execution")
5
>>> EditDistance().min_dist_bottom_up("intention", "")
9
>>> EditDistance().min_dist_bottom_up("", "")
0
"""
self.word1 = word1
self.word2 = word2
m = len(word1)
n = len(word2)
self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(m + 1):
for j in range(n + 1):
if i == 0: # first string is empty
self.dp[i][j] = j
elif j == 0: # second string is empty
self.dp[i][j] = i
elif word1[i - 1] == word2[j - 1]: # last characters are equal
self.dp[i][j] = self.dp[i - 1][j - 1]
else:
insert = self.dp[i][j - 1]
delete = self.dp[i - 1][j]
replace = self.dp[i - 1][j - 1]
self.dp[i][j] = 1 + min(insert, delete, replace)
return self.dp[m][n]
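# Editor's illustrative sketch (not part of the original class): the top-down
# and bottom-up strategies compute the same Levenshtein distance, so they can
# cross-check each other. The helper name is hypothetical.
def _distances_agree(word1: str, word2: str) -> bool:
    """
    >>> _distances_agree("intention", "execution")
    True
    >>> _distances_agree("", "abc")
    True
    """
    solver = EditDistance()
    return solver.min_dist_top_down(word1, word2) == solver.min_dist_bottom_up(
        word1, word2
    )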
if __name__ == "__main__":
solver = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
S1 = input("Enter the first string: ").strip()
S2 = input("Enter the second string: ").strip()
print()
print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/longest_common_subsequence.py | dynamic_programming/longest_common_subsequence.py | """
LCS Problem Statement: Given two sequences, find the length of the longest
subsequence present in both of them. A subsequence is a sequence that appears
in the same relative order but is not necessarily contiguous.
Example: "abc" and "abg" are both subsequences of "abcdefgh".
"""
def longest_common_subsequence(x: str, y: str) -> tuple[int, str]:
"""
    Finds the longest common subsequence between two strings. Also returns
    the subsequence found.
Parameters
----------
x: str, one of the strings
y: str, the other string
Returns
-------
    dp[m][n]: int, the length of the longest subsequence. Also equal to len(seq)
    seq: str, the subsequence found
>>> longest_common_subsequence("programming", "gaming")
(6, 'gaming')
>>> longest_common_subsequence("physics", "smartphone")
(2, 'ph')
>>> longest_common_subsequence("computer", "food")
(1, 'o')
>>> longest_common_subsequence("", "abc") # One string is empty
(0, '')
>>> longest_common_subsequence("abc", "") # Other string is empty
(0, '')
>>> longest_common_subsequence("", "") # Both strings are empty
(0, '')
>>> longest_common_subsequence("abc", "def") # No common subsequence
(0, '')
>>> longest_common_subsequence("abc", "abc") # Identical strings
(3, 'abc')
>>> longest_common_subsequence("a", "a") # Single character match
(1, 'a')
>>> longest_common_subsequence("a", "b") # Single character no match
(0, '')
>>> longest_common_subsequence("abcdef", "ace") # Interleaved subsequence
(3, 'ace')
>>> longest_common_subsequence("ABCD", "ACBD") # No repeated characters
(3, 'ABD')
"""
# find the length of strings
assert x is not None
assert y is not None
m = len(x)
n = len(y)
# declaring the array for storing the dp values
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
match = 1 if x[i - 1] == y[j - 1] else 0
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)
seq = ""
i, j = m, n
while i > 0 and j > 0:
match = 1 if x[i - 1] == y[j - 1] else 0
if dp[i][j] == dp[i - 1][j - 1] + match:
if match == 1:
seq = x[i - 1] + seq
i -= 1
j -= 1
elif dp[i][j] == dp[i - 1][j]:
i -= 1
else:
j -= 1
return dp[m][n], seq
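# Editor's illustrative sketch (not in the original file): when only insertions
# and deletions are allowed, the minimum number of operations to turn x into y
# is len(x) + len(y) - 2 * LCS_length. The helper name is hypothetical.
def insert_delete_distance(x: str, y: str) -> int:
    """
    >>> insert_delete_distance("AGGTAB", "GXTXAYB")
    5
    """
    length, _ = longest_common_subsequence(x, y)
    return len(x) + len(y) - 2 * length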
if __name__ == "__main__":
a = "AGGTAB"
b = "GXTXAYB"
expected_ln = 4
expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    assert (ln, subseq) == (expected_ln, expected_subseq)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/climbing_stairs.py | dynamic_programming/climbing_stairs.py | #!/usr/bin/env python3
def climb_stairs(number_of_steps: int) -> int:
"""
    LeetCode No. 70: Climbing Stairs
Distinct ways to climb a number_of_steps staircase where each time you can either
climb 1 or 2 steps.
Args:
number_of_steps: number of steps on the staircase
Returns:
Distinct ways to climb a number_of_steps staircase
Raises:
AssertionError: number_of_steps not positive integer
>>> climb_stairs(3)
3
>>> climb_stairs(1)
1
>>> climb_stairs(-7) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: number_of_steps needs to be positive integer, your input -7
"""
assert isinstance(number_of_steps, int) and number_of_steps > 0, (
f"number_of_steps needs to be positive integer, your input {number_of_steps}"
)
if number_of_steps == 1:
return 1
previous, current = 1, 1
for _ in range(number_of_steps - 1):
current, previous = current + previous, current
return current
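# Editor's illustrative sketch (not in the original file): climb_stairs(n) is
# the (n+1)-th Fibonacci number, which gives an independent check of the loop
# above. The helper name is hypothetical.
def _fib_check(number_of_steps: int) -> bool:
    """
    >>> all(_fib_check(n) for n in range(1, 10))
    True
    """
    a, b = 1, 1
    for _ in range(number_of_steps):
        a, b = b, a + b
    return climb_stairs(number_of_steps) == a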
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/palindrome_partitioning.py | dynamic_programming/palindrome_partitioning.py | """
Given a string s, partition s such that every substring of the
partition is a palindrome.
Find the minimum cuts needed for a palindrome partitioning of s.
Time Complexity: O(n^2)
Space Complexity: O(n^2)
For other explanations refer to: https://www.youtube.com/watch?v=_H8V5hJUGd0
"""
def find_minimum_partitions(string: str) -> int:
"""
Returns the minimum cuts needed for a palindrome partitioning of string
>>> find_minimum_partitions("aab")
1
>>> find_minimum_partitions("aaa")
0
>>> find_minimum_partitions("ababbbabbababa")
3
"""
length = len(string)
cut = [0] * length
is_palindromic = [[False for i in range(length)] for j in range(length)]
for i, c in enumerate(string):
mincut = i
for j in range(i + 1):
if c == string[j] and (i - j < 2 or is_palindromic[j + 1][i - 1]):
is_palindromic[j][i] = True
mincut = min(mincut, 0 if j == 0 else (cut[j - 1] + 1))
cut[i] = mincut
return cut[length - 1]
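# Editor's illustrative sketch (not in the original file): when a string has no
# palindromic substring longer than one character, the best partition cuts
# between every pair of characters. The helper name is hypothetical.
def _worst_case_cuts(string: str) -> bool:
    """
    >>> _worst_case_cuts("abcde")
    True
    """
    return find_minimum_partitions(string) == len(string) - 1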
if __name__ == "__main__":
s = input("Enter the string: ").strip()
ans = find_minimum_partitions(s)
print(f"Minimum number of partitions required for the '{s}' is {ans}")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/factorial.py | dynamic_programming/factorial.py | # Factorial of a number using memoization
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
"""
>>> factorial(7)
5040
>>> factorial(-1)
Traceback (most recent call last):
...
ValueError: Number should not be negative.
>>> [factorial(i) for i in range(10)]
[1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
"""
if num < 0:
raise ValueError("Number should not be negative.")
return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/catalan_numbers.py | dynamic_programming/catalan_numbers.py | """
Print all the Catalan numbers from 0 to n, n being the user input.
* The Catalan numbers are a sequence of positive integers that
* appear in many counting problems in combinatorics [1]. Such
* problems include counting [2]:
* - The number of Dyck words of length 2n
* - The number of well-formed expressions with n pairs of parentheses
* (e.g., `()()` is valid but `())(` is not)
* - The number of different ways n + 1 factors can be completely
* parenthesized (e.g., for n = 2, C(n) = 2 and (ab)c and a(bc)
* are the two valid ways to parenthesize).
* - The number of full binary trees with n + 1 leaves
* A Catalan number satisfies the following recurrence relation
* which we will use in this algorithm [1].
* C(0) = C(1) = 1
* C(n) = sum(C(i).C(n-i-1)), from i = 0 to n-1
* In addition, the n-th Catalan number can be calculated using
* the closed form formula below [1]:
* C(n) = (1 / (n + 1)) * (2n choose n)
* Sources:
* [1] https://brilliant.org/wiki/catalan-numbers/
* [2] https://en.wikipedia.org/wiki/Catalan_number
"""
def catalan_numbers(upper_limit: int) -> "list[int]":
"""
Return a list of the Catalan number sequence from 0 through `upper_limit`.
>>> catalan_numbers(5)
[1, 1, 2, 5, 14, 42]
>>> catalan_numbers(2)
[1, 1, 2]
>>> catalan_numbers(-1)
Traceback (most recent call last):
ValueError: Limit for the Catalan sequence must be ≥ 0
"""
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0")
catalan_list = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
catalan_list[0] = 1
if upper_limit > 0:
catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1
for i in range(2, upper_limit + 1):
for j in range(i):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
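# Editor's illustrative sketch (not in the original file): the closed form
# C(n) = (2n choose n) / (n + 1) quoted in the module docstring can cross-check
# the DP table. The helper name is hypothetical.
def _closed_form_catalan(n: int) -> int:
    """
    >>> [_closed_form_catalan(i) for i in range(6)]
    [1, 1, 2, 5, 14, 42]
    """
    from math import comb

    return comb(2 * n, n) // (n + 1)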
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/max_product_subarray.py | dynamic_programming/max_product_subarray.py | def max_product_subarray(numbers: list[int]) -> int:
"""
Returns the maximum product that can be obtained by multiplying a
contiguous subarray of the given integer list `numbers`.
Example:
>>> max_product_subarray([2, 3, -2, 4])
6
>>> max_product_subarray((-2, 0, -1))
0
>>> max_product_subarray([2, 3, -2, 4, -1])
48
>>> max_product_subarray([-1])
-1
>>> max_product_subarray([0])
0
>>> max_product_subarray([])
0
>>> max_product_subarray("")
0
>>> max_product_subarray(None)
0
>>> max_product_subarray([2, 3, -2, 4.5, -1])
Traceback (most recent call last):
...
ValueError: numbers must be an iterable of integers
>>> max_product_subarray("ABC")
Traceback (most recent call last):
...
ValueError: numbers must be an iterable of integers
"""
if not numbers:
return 0
if not isinstance(numbers, (list, tuple)) or not all(
isinstance(number, int) for number in numbers
):
raise ValueError("numbers must be an iterable of integers")
max_till_now = min_till_now = max_prod = numbers[0]
for i in range(1, len(numbers)):
# update the maximum and minimum subarray products
number = numbers[i]
if number < 0:
max_till_now, min_till_now = min_till_now, max_till_now
max_till_now = max(number, max_till_now * number)
min_till_now = min(number, min_till_now * number)
# update the maximum product found till now
max_prod = max(max_prod, max_till_now)
return max_prod
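# Editor's illustrative sketch (not in the original file): an O(n^2) brute
# force over all subarrays for spot-checking small, non-empty inputs. The
# helper name is hypothetical.
def _brute_force_max_product(numbers: list[int]) -> int:
    """
    >>> _brute_force_max_product([2, 3, -2, 4])
    6
    >>> _brute_force_max_product([2, 3, -2, 4, -1])
    48
    """
    best = numbers[0]
    for start in range(len(numbers)):
        product = 1
        for value in numbers[start:]:
            product *= value
            best = max(best, product)
    return best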
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/min_distance_up_bottom.py | dynamic_programming/min_distance_up_bottom.py | """
Author : Alexander Pantyukhin
Date : October 14, 2022
This is an implementation of the top-down (memoized) approach to finding the
edit distance (Levenshtein distance).
The implementation was tested on LeetCode:
https://leetcode.com/problems/edit-distance/
"""
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
"""
>>> min_distance_up_bottom("intention", "execution")
5
>>> min_distance_up_bottom("intention", "")
9
>>> min_distance_up_bottom("", "")
0
>>> min_distance_up_bottom("zooicoarchaeologist", "zoologist")
10
"""
len_word1 = len(word1)
len_word2 = len(word2)
@functools.cache
def min_distance(index1: int, index2: int) -> int:
# if first word index overflows - delete all from the second word
if index1 >= len_word1:
return len_word2 - index2
# if second word index overflows - delete all from the first word
if index2 >= len_word2:
return len_word1 - index1
diff = int(word1[index1] != word2[index2]) # current letters not identical
return min(
1 + min_distance(index1 + 1, index2),
1 + min_distance(index1, index2 + 1),
diff + min_distance(index1 + 1, index2 + 1),
)
return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_cost_path.py | dynamic_programming/minimum_cost_path.py | # Youtube Explanation: https://www.youtube.com/watch?v=lBRtnuxg-gU
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
"""
Find the minimum cost traced by all possible paths from top left to bottom right in
a given matrix
>>> minimum_cost_path([[2, 1], [3, 1], [4, 2]])
6
>>> minimum_cost_path([[2, 1, 4], [2, 1, 3], [3, 2, 1]])
7
"""
# preprocessing the first row
for i in range(1, len(matrix[0])):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1, len(matrix)):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/range_sum_query.py | dynamic_programming/range_sum_query.py | """
Author: Sanjay Muthu <https://github.com/XenoBytesX>
This is an implementation of the Dynamic Programming solution to the Range Sum Query.
The problem statement is:
Given an array and q queries,
each query asking for the sum of the elements from index l to r (inclusive)
Example:
arr = [1, 4, 6, 2, 61, 12]
queries = 3
l_1 = 2, r_1 = 5
l_2 = 1, r_2 = 5
l_3 = 3, r_3 = 4
as input will return
[81, 85, 63]
as output
0-indexing:
NOTE: 0-indexing means the indexing of the array starts from 0
Example: a = [1, 2, 3, 4, 5, 6]
Here, the 0th index of a is 1,
the 1st index of a is 2,
and so forth
Time Complexity: O(N + Q)
* O(N) pre-calculation time to calculate the prefix sum array
* and O(1) time per each query = O(1 * Q) = O(Q) time
Space Complexity: O(N)
* O(N) to store the prefix sum
Algorithm:
So, first we calculate the prefix sum (dp) of the array.
The prefix sum of the index i is the sum of all elements indexed
from 0 to i (inclusive).
The prefix sum of the index i is the prefix sum of index (i - 1) + the current element.
So, the state of the dp is dp[i] = dp[i - 1] + a[i].
After we calculate the prefix sum,
for each query [l, r]
the answer is dp[r] - dp[l - 1] (we need to be careful because l might be 0).
For example take this array:
[4, 2, 1, 6, 3]
The prefix sum calculated for this array would be:
[4, 4 + 2, 4 + 2 + 1, 4 + 2 + 1 + 6, 4 + 2 + 1 + 6 + 3]
==> [4, 6, 7, 13, 16]
If the query was l = 3, r = 4,
the answer would be 6 + 3 = 9 but this would require O(r - l + 1) time ≈ O(N) time
If we use prefix sums we can find it in O(1) by using the formula
prefix[r] - prefix[l - 1].
This formula works because prefix[r] is the sum of elements from [0, r]
and prefix[l - 1] is the sum of elements from [0, l - 1],
so if we do prefix[r] - prefix[l - 1] it will be
[0, r] - [0, l - 1] = [0, l - 1] + [l, r] - [0, l - 1] = [l, r]
"""
def prefix_sum(array: list[int], queries: list[tuple[int, int]]) -> list[int]:
"""
>>> prefix_sum([1, 4, 6, 2, 61, 12], [(2, 5), (1, 5), (3, 4)])
[81, 85, 63]
>>> prefix_sum([4, 2, 1, 6, 3], [(3, 4), (1, 3), (0, 2)])
[9, 9, 7]
"""
# The prefix sum array
dp = [0] * len(array)
dp[0] = array[0]
for i in range(1, len(array)):
dp[i] = dp[i - 1] + array[i]
    # See the Algorithm section of the module docstring above
result = []
for query in queries:
left, right = query
res = dp[right]
if left > 0:
res -= dp[left - 1]
result.append(res)
return result
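# Editor's illustrative sketch (not in the original file): the same answers
# computed naively in O(N) per query, to make the prefix-sum speedup concrete.
# The helper name is hypothetical.
def naive_range_sums(array: list[int], queries: list[tuple[int, int]]) -> list[int]:
    """
    >>> naive_range_sums([1, 4, 6, 2, 61, 12], [(2, 5), (1, 5), (3, 4)])
    [81, 85, 63]
    """
    return [sum(array[left : right + 1]) for left, right in queries]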
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/trapped_water.py | dynamic_programming/trapped_water.py | """
Given an array of non-negative integers representing an elevation map where the width
of each bar is 1, this program calculates how much rainwater can be trapped.
Example - height = (0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1)
Output: 6
This problem can be solved using the concept of "DYNAMIC PROGRAMMING".
We calculate the maximum height of bars to the left and right of every bar in
the array. Then we iterate over the width of the structure: at each index, the
amount of water stored equals the minimum of those two maximum heights minus
the height of the bar at that position.
"""
def trapped_rainwater(heights: tuple[int, ...]) -> int:
"""
The trapped_rainwater function calculates the total amount of rainwater that can be
trapped given an array of bar heights.
It uses a dynamic programming approach, determining the maximum height of bars on
both sides for each bar, and then computing the trapped water above each bar.
The function returns the total trapped water.
>>> trapped_rainwater((0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1))
6
>>> trapped_rainwater((7, 1, 5, 3, 6, 4))
9
>>> trapped_rainwater((7, 1, 5, 3, 6, -1))
Traceback (most recent call last):
...
ValueError: No height can be negative
"""
if not heights:
return 0
if any(h < 0 for h in heights):
raise ValueError("No height can be negative")
length = len(heights)
left_max = [0] * length
left_max[0] = heights[0]
for i, height in enumerate(heights[1:], start=1):
left_max[i] = max(height, left_max[i - 1])
right_max = [0] * length
right_max[-1] = heights[-1]
for i in range(length - 2, -1, -1):
right_max[i] = max(heights[i], right_max[i + 1])
return sum(
min(left, right) - height
for left, right, height in zip(left_max, right_max, heights)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{trapped_rainwater((0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1)) = }")
print(f"{trapped_rainwater((7, 1, 5, 3, 6, 4)) = }")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_coin_change.py | dynamic_programming/minimum_coin_change.py | """
You have m types of coins available in infinite quantities,
where the value of each coin is given in the array S = [S0, ..., Sm-1].
Can you determine the number of ways of making change for n units
using the given types of coins?
https://www.hackerrank.com/challenges/coin-change/problem
"""
def dp_count(s: list[int], n: int) -> int:
"""
>>> dp_count([1, 2, 3], 4)
4
>>> dp_count([1, 2, 3], 7)
8
>>> dp_count([2, 5, 3, 6], 10)
5
>>> dp_count([10], 99)
0
>>> dp_count([4, 5, 6], 0)
1
>>> dp_count([1, 2, 3], -5)
0
"""
if n < 0:
return 0
# table[i] represents the number of ways to get to amount i
table = [0] * (n + 1)
# There is exactly 1 way to get to zero(You pick no coins).
table[0] = 1
# Pick all coins one by one and update table[] values
# after the index greater than or equal to the value of the
# picked coin
for coin_val in s:
for j in range(coin_val, n + 1):
table[j] += table[j - coin_val]
return table[n]
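# Editor's illustrative sketch (not in the original file): the loop order above
# matters. Iterating coins in the outer loop counts unordered combinations;
# putting the amount in the outer loop would count ordered sequences instead.
# The helper name is hypothetical.
def _permutation_count(s: list[int], n: int) -> int:
    """
    >>> _permutation_count([1, 2, 3], 4)
    7
    >>> dp_count([1, 2, 3], 4)
    4
    """
    table = [0] * (n + 1)
    table[0] = 1
    for j in range(1, n + 1):
        for coin_val in s:
            if j >= coin_val:
                table[j] += table[j - coin_val]
    return table[n]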
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/integer_partition.py | dynamic_programming/integer_partition.py | """
The number of partitions of n into at most k parts equals the number of
partitions into at most k-1 parts plus the number of partitions of n-k into at
most k parts: subtracting 1 from each part of a partition of n into exactly k
parts gives a partition of n-k into at most k parts. These two facts together
are used for this algorithm.
* https://en.wikipedia.org/wiki/Partition_(number_theory)
* https://en.wikipedia.org/wiki/Partition_function_(number_theory)
"""
def partition(m: int) -> int:
"""
>>> partition(5)
7
>>> partition(7)
15
>>> partition(100)
190569292
>>> partition(1_000)
24061467864032622473692149727991
>>> partition(-7)
Traceback (most recent call last):
...
IndexError: list index out of range
>>> partition(0)
Traceback (most recent call last):
...
IndexError: list assignment index out of range
>>> partition(7.8)
Traceback (most recent call last):
...
TypeError: 'float' object cannot be interpreted as an integer
"""
memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
for i in range(m + 1):
memo[i][0] = 1
for n in range(m + 1):
for k in range(1, m):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
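# Editor's illustrative sketch (not in the original file): a tiny recursive
# reference implementation, far slower than the table above but easy to trust.
# The helper name is hypothetical.
def _naive_partition(n: int, max_part: int = 0) -> int:
    """
    >>> _naive_partition(5)
    7
    >>> _naive_partition(7)
    15
    """
    if n == 0:
        return 1
    if max_part == 0:
        max_part = n
    return sum(
        _naive_partition(n - part, part) for part in range(min(n, max_part), 0, -1)
    )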
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/k_means_clustering_tensorflow.py | dynamic_programming/k_means_clustering_tensorflow.py | from random import shuffle
# NOTE (editor): this script uses the TensorFlow 1.x graph/session API
# (tf.Session, tf.placeholder, ...); on TensorFlow 2.x it needs the v1
# compatibility layer enabled below.
import tensorflow.compat.v1 as tf
from numpy import array

tf.disable_v2_behavior()
def tf_k_means_cluster(vectors, noofclusters):
"""
K-Means Clustering using TensorFlow.
'vectors' should be a n*k 2-D NumPy array, where n is the number
of vectors of dimensionality k.
'noofclusters' should be an integer.
"""
noofclusters = int(noofclusters)
assert noofclusters < len(vectors)
# Find out the dimensionality
dim = len(vectors[0])
# Will help select random centroids from among the available vectors
vector_indices = list(range(len(vectors)))
shuffle(vector_indices)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
graph = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
centroids = [
tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
]
##These nodes will assign the centroid Variables the appropriate
##values
centroid_value = tf.placeholder("float64", [dim])
cent_assigns = []
for centroid in centroids:
cent_assigns.append(tf.assign(centroid, centroid_value))
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
assignments = [tf.Variable(0) for i in range(len(vectors))]
##These nodes will assign an assignment Variable the appropriate
##value
assignment_value = tf.placeholder("int32")
cluster_assigns = []
for assignment in assignments:
cluster_assigns.append(tf.assign(assignment, assignment_value))
##Now lets construct the node that will compute the mean
# The placeholder for the input
mean_input = tf.placeholder("float", [None, dim])
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
mean_op = tf.reduce_mean(mean_input, 0)
##Node for computing Euclidean distances
# Placeholders for input
v1 = tf.placeholder("float", [dim])
v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
centroid_distances = tf.placeholder("float", [noofclusters])
cluster_assignment = tf.argmin(centroid_distances, 0)
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        init_op = tf.global_variables_initializer()
# Initialize all variables
sess.run(init_op)
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
noofiterations = 100
for _ in range(noofiterations):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(vectors)):
vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
distances = [
sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
assignment = sess.run(
cluster_assignment, feed_dict={centroid_distances: distances}
)
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
)
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(noofclusters):
# Collect all the vectors assigned to this cluster
assigned_vects = [
vectors[i]
for i in range(len(vectors))
if sess.run(assignments[i]) == cluster_n
]
# Compute new centroid location
new_location = sess.run(
mean_op, feed_dict={mean_input: array(assigned_vects)}
)
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
)
# Return centroids and assignments
centroids = sess.run(centroids)
assignments = sess.run(assignments)
return centroids, assignments
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/iterating_through_submasks.py | dynamic_programming/iterating_through_submasks.py | """
Author : Syed Faizan (3rd Year Student IIIT Pune)
github : faizan2700
You are given a bitmask m and you want to efficiently iterate through all of
its submasks. A mask s is a submask of m if s only has bits set at positions
where m also has bits set.
"""
from __future__ import annotations
def list_of_submasks(mask: int) -> list[int]:
"""
Args:
mask : number which shows mask ( always integer > 0, zero does not have any
submasks )
    Returns:
        all_submasks : the list of submasks of mask (a mask s is called a submask
        of mask m if s only sets bits that are also set in m)
Raises:
AssertionError: mask not positive integer
>>> list_of_submasks(15)
[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
>>> list_of_submasks(13)
[13, 12, 9, 8, 5, 4, 1]
>>> list_of_submasks(-7) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: mask needs to be positive integer, your input -7
>>> list_of_submasks(0) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: mask needs to be positive integer, your input 0
"""
assert isinstance(mask, int) and mask > 0, (
f"mask needs to be positive integer, your input {mask}"
)
"""
first submask iterated will be mask itself then operation will be performed
to get other submasks till we reach empty submask that is zero ( zero is not
included in final submasks list )
"""
all_submasks = []
submask = mask
while submask:
all_submasks.append(submask)
submask = (submask - 1) & mask
return all_submasks
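# Editor's illustrative sketch (not in the original file): a mask with k set
# bits has 2**k submasks; the list above excludes zero, so its length must be
# 2**k - 1. The helper name is hypothetical.
def _submask_count_matches(mask: int) -> bool:
    """
    >>> all(_submask_count_matches(m) for m in (1, 5, 13, 15))
    True
    """
    return len(list_of_submasks(mask)) == 2 ** bin(mask).count("1") - 1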
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/matrix_chain_multiplication.py | dynamic_programming/matrix_chain_multiplication.py | """
| Find the minimum number of multiplications needed to multiply a chain of matrices.
| Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/
The algorithm has interesting real-world applications.
Example:
1. Image transformations in Computer Graphics as images are composed of matrix.
2. Solve complex polynomial equations in the field of algebra using least processing
power.
3. Calculate overall impact of macroeconomic decisions as economic equations involve a
number of variables.
4. Self-driving car navigation can be made more accurate as matrix multiplication can
accurately determine position and orientation of obstacles in short time.
Python doctests can be run with the following command::
    python -m doctest -v matrix_chain_multiplication.py
Given a sequence ``arr[]`` that represents chain of 2D matrices such that the dimension
of the ``i`` th matrix is ``arr[i-1]*arr[i]``.
So suppose ``arr = [40, 20, 30, 10, 30]`` means we have ``4`` matrices of dimensions
``40*20``, ``20*30``, ``30*10`` and ``10*30``.
``matrix_chain_multiply()`` returns an integer denoting minimum number of
multiplications to multiply the chain.
We do not need to perform actual multiplication here.
We only need to decide the order in which to perform the multiplication.
Hints:
1. Number of multiplications (ie cost) to multiply ``2`` matrices
of size ``m*p`` and ``p*n`` is ``m*p*n``.
2. Matrix multiplication itself is associative, but its cost is not: in general
   cost of ``(M1*M2)*M3`` != cost of ``M1*(M2*M3)``
3. Matrix multiplication is not commutative. So, ``M1*M2`` does not mean ``M2*M1``
can be done.
4. To determine the required order, we can try different combinations.
So, this problem has overlapping sub-problems and can be solved using recursion.
We use Dynamic Programming for optimal time complexity.
Example input:
``arr = [40, 20, 30, 10, 30]``
output:
``26000``
"""
from collections.abc import Iterator
from contextlib import contextmanager
from functools import cache
from sys import maxsize
def matrix_chain_multiply(arr: list[int]) -> int:
"""
    Find the minimum number of multiplications required to multiply the chain of matrices
Args:
`arr`: The input array of integers.
Returns:
Minimum number of multiplications needed to multiply the chain
Examples:
>>> matrix_chain_multiply([1, 2, 3, 4, 3])
30
>>> matrix_chain_multiply([10])
0
>>> matrix_chain_multiply([10, 20])
0
>>> matrix_chain_multiply([19, 2, 19])
722
>>> matrix_chain_multiply(list(range(1, 100)))
323398
>>> # matrix_chain_multiply(list(range(1, 251)))
# 2626798
"""
if len(arr) < 2:
return 0
# initialising 2D dp matrix
n = len(arr)
dp = [[maxsize for j in range(n)] for i in range(n)]
# we want minimum cost of multiplication of matrices
# of dimension (i*k) and (k*j). This cost is arr[i-1]*arr[k]*arr[j].
for i in range(n - 1, 0, -1):
for j in range(i, n):
if i == j:
dp[i][j] = 0
continue
for k in range(i, j):
dp[i][j] = min(
dp[i][j], dp[i][k] + dp[k + 1][j] + arr[i - 1] * arr[k] * arr[j]
)
return dp[1][n - 1]
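# Editor's illustrative sketch (not in the original file): Hint 1 made
# concrete for the docstring's 40*20, 20*30, 30*10 chain; the two
# parenthesizations have very different costs. The helper name is hypothetical.
def _two_matrix_cost(m: int, p: int, n: int) -> int:
    """
    Cost of multiplying an m*p matrix by a p*n matrix.
    >>> _two_matrix_cost(40, 20, 30) + _two_matrix_cost(40, 30, 10)  # (M1*M2)*M3
    36000
    >>> _two_matrix_cost(20, 30, 10) + _two_matrix_cost(40, 20, 10)  # M1*(M2*M3)
    14000
    """
    return m * p * n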
def matrix_chain_order(dims: list[int]) -> int:
"""
Source: https://en.wikipedia.org/wiki/Matrix_chain_multiplication
    The bottom-up dynamic programming solution (matrix_chain_multiply above) is
    faster than this cached recursive solution and can handle larger inputs.
>>> matrix_chain_order([1, 2, 3, 4, 3])
30
>>> matrix_chain_order([10])
0
>>> matrix_chain_order([10, 20])
0
>>> matrix_chain_order([19, 2, 19])
722
>>> matrix_chain_order(list(range(1, 100)))
323398
>>> # matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised
# 2626798
"""
@cache
def a(i: int, j: int) -> int:
return min(
(a(i, k) + dims[i] * dims[k] * dims[j] + a(k, j) for k in range(i + 1, j)),
default=0,
)
return a(0, len(dims) - 1)
@contextmanager
def elapsed_time(msg: str) -> Iterator:
# print(f"Starting: {msg}")
from time import perf_counter_ns
start = perf_counter_ns()
yield
print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10**9} seconds.")
if __name__ == "__main__":
import doctest
doctest.testmod()
with elapsed_time("matrix_chain_order"):
print(f"{matrix_chain_order(list(range(1, 251))) = }")
with elapsed_time("matrix_chain_multiply"):
print(f"{matrix_chain_multiply(list(range(1, 251))) = }")
with elapsed_time("matrix_chain_order"):
print(f"{matrix_chain_order(list(range(1, 251))) = }")
with elapsed_time("matrix_chain_multiply"):
print(f"{matrix_chain_multiply(list(range(1, 251))) = }")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/matrix_chain_order.py | dynamic_programming/matrix_chain_order.py | """
Dynamic Programming
Implementation of Matrix Chain Multiplication
Time Complexity: O(n^3)
Space Complexity: O(n^2)
Reference: https://en.wikipedia.org/wiki/Matrix_chain_multiplication
"""

import sys
def matrix_chain_order(array: list[int]) -> tuple[list[list[int]], list[list[int]]]:
"""
>>> matrix_chain_order([10, 30, 5])
([[0, 0, 0], [0, 0, 1500], [0, 0, 0]], [[0, 0, 0], [0, 0, 1], [0, 0, 0]])
"""
n = len(array)
matrix = [[0 for _ in range(n)] for _ in range(n)]
sol = [[0 for _ in range(n)] for _ in range(n)]
for chain_length in range(2, n):
for a in range(1, n - chain_length + 1):
b = a + chain_length - 1
matrix[a][b] = sys.maxsize
for c in range(a, b):
cost = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
matrix[a][b] = cost
sol[a][b] = c
return matrix, sol
def print_optimal_solution(optimal_solution: list[list[int]], i: int, j: int):
"""
Print order of matrix with Ai as Matrix.
"""
if i == j:
print("A" + str(i), end=" ")
else:
print("(", end=" ")
print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
print(")", end=" ")
def main():
"""
Size of matrix created from array [30, 35, 15, 5, 10, 20, 25] will be:
30*35 35*15 15*5 5*10 10*20 20*25
"""
array = [30, 35, 15, 5, 10, 20, 25]
n = len(array)
matrix, optimal_solution = matrix_chain_order(array)
print("No. of Operation required: " + str(matrix[1][n - 1]))
print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/fizz_buzz.py | dynamic_programming/fizz_buzz.py | # https://en.wikipedia.org/wiki/Fizz_buzz#Programming
def fizz_buzz(number: int, iterations: int) -> str:
"""
    | Plays FizzBuzz, building the result up as a string.
    | Appends Fizz if the number is a multiple of ``3``.
    | Appends Buzz if it's a multiple of ``5``.
    | Appends FizzBuzz if it's a multiple of both ``3`` and ``5`` (i.e. ``15``).
    | Else appends the number itself.
>>> fizz_buzz(1,7)
'1 2 Fizz 4 Buzz Fizz 7 '
>>> fizz_buzz(1,0)
Traceback (most recent call last):
...
ValueError: Iterations must be done more than 0 times to play FizzBuzz
>>> fizz_buzz(-5,5)
Traceback (most recent call last):
...
    ValueError: starting number must be an integer and be more than 0
>>> fizz_buzz(10,-5)
Traceback (most recent call last):
...
ValueError: Iterations must be done more than 0 times to play FizzBuzz
>>> fizz_buzz(1.5,5)
Traceback (most recent call last):
...
    ValueError: starting number must be an integer and be more than 0
>>> fizz_buzz(1,5.5)
Traceback (most recent call last):
...
ValueError: iterations must be defined as integers
"""
if not isinstance(iterations, int):
raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")
out = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(number)
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_tickets_cost.py | dynamic_programming/minimum_tickets_cost.py | """
Author : Alexander Pantyukhin
Date : November 1, 2022
Task:
Given a list of days when you need to travel. Each day is integer from 1 to 365.
You are able to use tickets for 1 day, 7 days and 30 days.
Each ticket has a cost.
Find the minimum cost you need to travel every day in the given list of days.
Implementation notes:
This is a top-down (memoized) dynamic programming implementation.
Runtime complexity: O(365), i.e. bounded by the number of days in a year.
The implementation was tested on
LeetCode: https://leetcode.com/problems/minimum-cost-for-tickets/
(Minimum Cost For Tickets).
"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
"""
>>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
11
>>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15])
17
>>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150])
24
>>> mincost_tickets([2], [2, 90, 150])
2
>>> mincost_tickets([], [2, 90, 150])
0
>>> mincost_tickets('hello', [2, 90, 150])
Traceback (most recent call last):
...
ValueError: The parameter days should be a list of integers
>>> mincost_tickets([], 'world')
Traceback (most recent call last):
...
ValueError: The parameter costs should be a list of three integers
>>> mincost_tickets([0.25, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150])
Traceback (most recent call last):
...
ValueError: The parameter days should be a list of integers
>>> mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 0.9, 150])
Traceback (most recent call last):
...
ValueError: The parameter costs should be a list of three integers
>>> mincost_tickets([-1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 90, 150])
Traceback (most recent call last):
...
ValueError: All days elements should be greater than 0
>>> mincost_tickets([2, 367], [2, 90, 150])
Traceback (most recent call last):
...
ValueError: All days elements should be less than 366
>>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [])
Traceback (most recent call last):
...
ValueError: The parameter costs should be a list of three integers
>>> mincost_tickets([], [])
Traceback (most recent call last):
...
ValueError: The parameter costs should be a list of three integers
>>> mincost_tickets([2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [1, 2, 3, 4])
Traceback (most recent call last):
...
ValueError: The parameter costs should be a list of three integers
"""
# Validation
if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
raise ValueError("The parameter days should be a list of integers")
if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
raise ValueError("The parameter costs should be a list of three integers")
if len(days) == 0:
return 0
if min(days) <= 0:
raise ValueError("All days elements should be greater than 0")
if max(days) >= 366:
raise ValueError("All days elements should be less than 366")
days_set = set(days)
@functools.cache
def dynamic_programming(index: int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1),
costs[1] + dynamic_programming(index + 7),
costs[2] + dynamic_programming(index + 30),
)
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/floyd_warshall.py | dynamic_programming/floyd_warshall.py | import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.dp = [
            [math.inf for j in range(n)] for i in range(n)
        ]  # dp[i][j] stores the minimum distance from i to j
def add_edge(self, u, v, w):
"""
Adds a directed edge from node u
to node v with weight w.
>>> g = Graph(3)
>>> g.add_edge(0, 1, 5)
>>> g.dp[0][1]
5
"""
self.dp[u][v] = w
def floyd_warshall(self):
"""
Computes the shortest paths between all pairs of
nodes using the Floyd-Warshall algorithm.
>>> g = Graph(3)
>>> g.add_edge(0, 1, 1)
>>> g.add_edge(1, 2, 2)
>>> g.floyd_warshall()
>>> g.show_min(0, 2)
3
>>> g.show_min(2, 0)
inf
"""
for k in range(self.n):
for i in range(self.n):
for j in range(self.n):
self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
def show_min(self, u, v):
"""
Returns the minimum distance from node u to node v.
>>> g = Graph(3)
>>> g.add_edge(0, 1, 3)
>>> g.add_edge(1, 2, 4)
>>> g.floyd_warshall()
>>> g.show_min(0, 2)
7
>>> g.show_min(1, 0)
inf
"""
return self.dp[u][v]
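# Editor's illustrative sketch (not part of the original class): shortest paths
# are undefined in the presence of a negative cycle, which shows up as a
# negative diagonal entry after running floyd_warshall. The function name is
# hypothetical.
def has_negative_cycle(graph: Graph) -> bool:
    """
    >>> g = Graph(2)
    >>> g.add_edge(0, 1, -2)
    >>> g.add_edge(1, 0, 1)
    >>> g.floyd_warshall()
    >>> has_negative_cycle(g)
    True
    """
    return any(graph.dp[i][i] < 0 for i in range(graph.n))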
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example usage
graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
print(
graph.show_min(1, 4)
) # Should output the minimum distance from node 1 to node 4
print(
graph.show_min(0, 3)
) # Should output the minimum distance from node 0 to node 3
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_partition.py | dynamic_programming/minimum_partition.py | """
Partition a set into two subsets such that the difference of subset sums is minimum
"""
def find_min(numbers: list[int]) -> int:
"""
>>> find_min([1, 2, 3, 4, 5])
1
>>> find_min([5, 5, 5, 5, 5])
5
>>> find_min([5, 5, 5, 5])
0
>>> find_min([3])
3
>>> find_min([])
0
>>> find_min([1, 2, 3, 4])
0
>>> find_min([0, 0, 0, 0])
0
>>> find_min([-1, -5, 5, 1])
0
>>> find_min([9, 9, 9, 9, 9])
9
>>> find_min([1, 5, 10, 3])
1
>>> find_min([-1, 0, 1])
0
>>> find_min(range(10, 0, -1))
1
>>> find_min([-1])
Traceback (most recent call last):
    ...
IndexError: list assignment index out of range
>>> find_min([0, 0, 0, 1, 2, -4])
Traceback (most recent call last):
...
IndexError: list assignment index out of range
>>> find_min([-1, -5, -10, -3])
Traceback (most recent call last):
...
IndexError: list assignment index out of range
"""
n = len(numbers)
s = sum(numbers)
dp = [[False for x in range(s + 1)] for y in range(n + 1)]
for i in range(n + 1):
dp[i][0] = True
for i in range(1, s + 1):
dp[0][i] = False
for i in range(1, n + 1):
for j in range(1, s + 1):
dp[i][j] = dp[i - 1][j]
if numbers[i - 1] <= j:
dp[i][j] = dp[i][j] or dp[i - 1][j - numbers[i - 1]]
for j in range(int(s / 2), -1, -1):
if dp[n][j] is True:
diff = s - 2 * j
break
return diff
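# Editor's illustrative sketch (not in the original file): a brute force over
# all subsets; like the DP above it is only meaningful for non-negative sums.
# The helper name is hypothetical.
def _brute_force_find_min(numbers: list[int]) -> int:
    """
    >>> _brute_force_find_min([1, 2, 3, 4, 5])
    1
    >>> _brute_force_find_min([5, 5, 5, 5])
    0
    """
    from itertools import combinations

    total = sum(numbers)
    best = total
    for r in range(len(numbers) + 1):
        for combo in combinations(numbers, r):
            best = min(best, abs(total - 2 * sum(combo)))
    return best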
if __name__ == "__main__":
from doctest import testmod
testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/__init__.py | dynamic_programming/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/longest_increasing_subsequence_iterative.py | dynamic_programming/longest_increasing_subsequence_iterative.py | """
Author : Sanjay Muthu <https://github.com/XenoBytesX>
This is a pure Python implementation of the Dynamic Programming solution to the
longest increasing subsequence problem.
The problem is:
Given an array, find the longest (non-strictly) increasing subsequence in it
and return it.
Example:
``[10, 22, 9, 33, 21, 50, 41, 60, 80]`` as input will return
``[10, 22, 33, 50, 60, 80]`` as output
"""
from __future__ import annotations
import copy
def longest_subsequence(array: list[int]) -> list[int]:
"""
Some examples
>>> longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
[10, 22, 33, 50, 60, 80]
>>> longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9])
[1, 2, 3, 9]
>>> longest_subsequence([9, 8, 7, 6, 5, 7])
[7, 7]
>>> longest_subsequence([28, 26, 12, 23, 35, 39])
[12, 23, 35, 39]
>>> longest_subsequence([1, 1, 1])
[1, 1, 1]
>>> longest_subsequence([])
[]
"""
n = len(array)
# The longest increasing subsequence ending at array[i]
longest_increasing_subsequence = []
for i in range(n):
longest_increasing_subsequence.append([array[i]])
for i in range(1, n):
for prev in range(i):
# If array[prev] is less than or equal to array[i], then
# longest_increasing_subsequence[prev] + array[i]
# is a valid increasing subsequence
# longest_increasing_subsequence[i] is only set to
# longest_increasing_subsequence[prev] + array[i] if the length is longer.
if array[prev] <= array[i] and len(
longest_increasing_subsequence[prev]
) + 1 > len(longest_increasing_subsequence[i]):
longest_increasing_subsequence[i] = copy.copy(
longest_increasing_subsequence[prev]
)
longest_increasing_subsequence[i].append(array[i])
result: list[int] = []
for i in range(n):
if len(longest_increasing_subsequence[i]) > len(result):
result = longest_increasing_subsequence[i]
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/bitmask.py | dynamic_programming/bitmask.py | """
This is a Python implementation for questions involving task assignments between people.
Here Bitmasking and DP are used for solving this.
Question :-
We have N tasks and M people. Each of the M people can do only certain of these
tasks. Each person can do at most one task, and each task is performed by at
most one person.
Find the total number of ways in which the tasks can be distributed.
"""
from collections import defaultdict
class AssignmentUsingBitmask:
def __init__(self, task_performed, total):
self.total_tasks = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
self.dp = [
[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
]
self.task = defaultdict(list) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
self.final_mask = (1 << len(task_performed)) - 1
def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task; return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
total_ways_until = self.count_ways_until(mask, task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_until += self.count_ways_until(mask | (1 << p), task_no + 1)
# save the value.
self.dp[mask][task_no] = total_ways_until
return self.dp[mask][task_no]
def count_no_of_ways(self, task_performed):
# Store the list of persons for each task
for i in range(len(task_performed)):
for j in task_performed[i]:
self.task[j].append(i)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0, 1)
if __name__ == "__main__":
total_tasks = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
"""
For the particular example the tasks can be distributed as
(1,2,3), (1,2,4), (1,5,3), (1,5,4), (3,1,4),
(3,2,4), (3,5,4), (4,1,3), (4,2,3), (4,5,3)
total 10
"""
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_steps_to_one.py | dynamic_programming/minimum_steps_to_one.py | """
YouTube Explanation: https://www.youtube.com/watch?v=f2xi3c1S95M
Given an integer n, return the minimum steps from n to 1
AVAILABLE STEPS:
* Decrement by 1
* if n is divisible by 2, divide by 2
* if n is divisible by 3, divide by 3
Example 1: n = 10
10 -> 9 -> 3 -> 1
Result: 3 steps
Example 2: n = 15
15 -> 5 -> 4 -> 2 -> 1
Result: 4 steps
Example 3: n = 6
6 -> 2 -> 1
Result: 2 steps
"""
from __future__ import annotations
__author__ = "Alexander Joslin"
def min_steps_to_one(number: int) -> int:
"""
Minimum steps to 1 implemented using tabulation.
>>> min_steps_to_one(10)
3
>>> min_steps_to_one(15)
4
>>> min_steps_to_one(6)
2
:param number:
:return int:
"""
if number <= 0:
msg = f"n must be greater than 0. Got n = {number}"
raise ValueError(msg)
table = [number + 1] * (number + 1)
# starting position
table[1] = 0
for i in range(1, number):
table[i + 1] = min(table[i + 1], table[i] + 1)
# check if out of bounds
if i * 2 <= number:
table[i * 2] = min(table[i * 2], table[i] + 1)
# check if out of bounds
if i * 3 <= number:
table[i * 3] = min(table[i * 3], table[i] + 1)
return table[number]
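# Editor's illustrative sketch (not in the original file): a memoized top-down
# version of the same recurrence, useful to cross-check the table above. The
# helper name is hypothetical.
def _min_steps_top_down(number: int) -> int:
    """
    >>> [_min_steps_top_down(n) for n in (10, 15, 6)]
    [3, 4, 2]
    """
    from functools import cache

    @cache
    def steps(n: int) -> int:
        if n == 1:
            return 0
        candidates = [steps(n - 1)]
        if n % 2 == 0:
            candidates.append(steps(n // 2))
        if n % 3 == 0:
            candidates.append(steps(n // 3))
        return 1 + min(candidates)

    return steps(number)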
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/sum_of_subset.py | dynamic_programming/sum_of_subset.py | def is_sum_subset(arr: list[int], required_sum: int) -> bool:
"""
>>> is_sum_subset([2, 4, 6, 8], 5)
False
>>> is_sum_subset([2, 4, 6, 8], 14)
True
"""
# a subset value says 1 if that subset sum can be formed else 0
# initially no subsets can be formed hence False/0
arr_len = len(arr)
subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1):
subset[i][0] = True
# sum is not zero and set is empty then false
for i in range(1, required_sum + 1):
subset[0][i] = False
for i in range(1, arr_len + 1):
for j in range(1, required_sum + 1):
if arr[i - 1] > j:
subset[i][j] = subset[i - 1][j]
else:
subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
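# Worked example (illustrative, not part of the original file): for
# is_sum_subset([2, 4], 6), the final row subset[2] ends up
# [True, False, True, False, True, False, True], so index 6 is True.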
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/longest_increasing_subsequence_o_nlogn.py | dynamic_programming/longest_increasing_subsequence_o_nlogn.py | #############################
# Author: Aravind Kashyap
# File: lis.py
# comments: This programme outputs the length of the Longest Strictly Increasing
# Subsequence in O(N log N), where N is the number of elements in the list
#############################
from __future__ import annotations
def ceil_index(v, left, right, key):
while right - left > 1:
middle = (left + right) // 2
if v[middle] >= key:
right = middle
else:
left = middle
return right
def longest_increasing_subsequence_length(v: list[int]) -> int:
"""
>>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
6
>>> longest_increasing_subsequence_length([])
0
>>> longest_increasing_subsequence_length([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13,
... 3, 11, 7, 15])
6
>>> longest_increasing_subsequence_length([5, 4, 3, 2, 1])
1
"""
if len(v) == 0:
return 0
tail = [0] * len(v)
length = 1
tail[0] = v[0]
for i in range(1, len(v)):
if v[i] < tail[0]:
tail[0] = v[i]
elif v[i] > tail[length - 1]:
tail[length] = v[i]
length += 1
else:
tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
return length
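# Trace (illustrative, not part of the original file): for
# [2, 5, 3, 7, 11, 8, 10, 13, 6] the tail array evolves
# [2] -> [2, 5] -> [2, 3] -> [2, 3, 7] -> ... -> [2, 3, 6, 8, 10, 13],
# giving a length of 6.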
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/largest_divisible_subset.py | dynamic_programming/largest_divisible_subset.py | from __future__ import annotations
def largest_divisible_subset(items: list[int]) -> list[int]:
"""
Algorithm to find the biggest subset in the given array such that for any 2 elements
x and y in the subset, either x divides y or y divides x.
>>> largest_divisible_subset([1, 16, 7, 8, 4])
[16, 8, 4, 1]
>>> largest_divisible_subset([1, 2, 3])
[2, 1]
>>> largest_divisible_subset([-1, -2, -3])
[-3]
>>> largest_divisible_subset([1, 2, 4, 8])
[8, 4, 2, 1]
>>> largest_divisible_subset((1, 2, 4, 8))
[8, 4, 2, 1]
>>> largest_divisible_subset([1, 1, 1])
[1, 1, 1]
>>> largest_divisible_subset([0, 0, 0])
[0, 0, 0]
>>> largest_divisible_subset([-1, -1, -1])
[-1, -1, -1]
>>> largest_divisible_subset([])
[]
"""
# Sort the array in ascending order as the sequence does not matter we only have to
# pick up a subset.
items = sorted(items)
number_of_items = len(items)
# Initialize memo with 1s and hash with increasing numbers
memo = [1] * number_of_items
hash_array = list(range(number_of_items))
# Iterate through the array
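# Note (added commentary, not from the original file): the parenthesization in
# the condition below is load-bearing: when items[prev_index] == 0 the `and`
# short-circuits to False, and False == 0 evaluates to True, which lets runs of
# zeros (e.g. [0, 0, 0]) chain together without ever evaluating item % 0.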
for i, item in enumerate(items):
for prev_index in range(i):
if ((items[prev_index] != 0 and item % items[prev_index]) == 0) and (
(1 + memo[prev_index]) > memo[i]
):
memo[i] = 1 + memo[prev_index]
hash_array[i] = prev_index
ans = -1
last_index = -1
# Find the maximum length and its corresponding index
for i, memo_item in enumerate(memo):
if memo_item > ans:
ans = memo_item
last_index = i
# Reconstruct the divisible subset
if last_index == -1:
return []
result = [items[last_index]]
while hash_array[last_index] != last_index:
last_index = hash_array[last_index]
result.append(items[last_index])
return result
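# Trace (illustrative, not part of the original file): for [1, 16, 7, 8, 4] the
# sorted items are [1, 4, 7, 8, 16], memo becomes [1, 2, 2, 3, 4] and
# hash_array becomes [0, 0, 0, 1, 3], so reconstruction from index 4 yields
# [16, 8, 4, 1].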
if __name__ == "__main__":
from doctest import testmod
testmod()
items = [1, 16, 7, 8, 4]
print(
f"The longest divisible subset of {items} is {largest_divisible_subset(items)}."
)
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/minimum_size_subarray_sum.py | dynamic_programming/minimum_size_subarray_sum.py | import sys
def minimum_subarray_sum(target: int, numbers: list[int]) -> int:
"""
Return the length of the shortest contiguous subarray in a list of numbers whose sum
is at least target. Reference: https://stackoverflow.com/questions/8269916
>>> minimum_subarray_sum(7, [2, 3, 1, 2, 4, 3])
2
>>> minimum_subarray_sum(7, [2, 3, -1, 2, 4, -3])
4
>>> minimum_subarray_sum(11, [1, 1, 1, 1, 1, 1, 1, 1])
0
>>> minimum_subarray_sum(10, [1, 2, 3, 4, 5, 6, 7])
2
>>> minimum_subarray_sum(5, [1, 1, 1, 1, 1, 5])
1
>>> minimum_subarray_sum(0, [])
0
>>> minimum_subarray_sum(0, [1, 2, 3])
1
>>> minimum_subarray_sum(10, [10, 20, 30])
1
>>> minimum_subarray_sum(7, [1, 1, 1, 1, 1, 1, 10])
1
>>> minimum_subarray_sum(6, [])
0
>>> minimum_subarray_sum(2, [1, 2, 3])
1
>>> minimum_subarray_sum(-6, [])
0
>>> minimum_subarray_sum(-6, [3, 4, 5])
1
>>> minimum_subarray_sum(8, None)
0
>>> minimum_subarray_sum(2, "ABC")
Traceback (most recent call last):
...
ValueError: numbers must be an iterable of integers
"""
if not numbers:
return 0
if target == 0 and target in numbers:
return 0
if not isinstance(numbers, (list, tuple)) or not all(
isinstance(number, int) for number in numbers
):
raise ValueError("numbers must be an iterable of integers")
left = right = curr_sum = 0
min_len = sys.maxsize
while right < len(numbers):
curr_sum += numbers[right]
while curr_sum >= target and left <= right:
min_len = min(min_len, right - left + 1)
curr_sum -= numbers[left]
left += 1
right += 1
return 0 if min_len == sys.maxsize else min_len
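# Window trace (illustrative, not part of the original file): for
# minimum_subarray_sum(7, [2, 3, 1, 2, 4, 3]) the window first reaches
# sum >= 7 as [2, 3, 1, 2], then repeatedly shrinks from the left; the
# shortest qualifying window is [4, 3], so the answer is 2.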
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/max_subarray_sum.py | dynamic_programming/max_subarray_sum.py | """
The maximum subarray sum problem is the task of finding the maximum sum that can be
obtained from a contiguous subarray within a given array of numbers. For example, given
the array [-2, 1, -3, 4, -1, 2, 1, -5, 4], the contiguous subarray with the maximum sum
is [4, -1, 2, 1], so the maximum subarray sum is 6.
Kadane's algorithm is a simple dynamic programming algorithm that solves the maximum
subarray sum problem in O(n) time and O(1) space.
Reference: https://en.wikipedia.org/wiki/Maximum_subarray_problem
"""
from collections.abc import Sequence
def max_subarray_sum(
arr: Sequence[float], allow_empty_subarrays: bool = False
) -> float:
"""
Solves the maximum subarray sum problem using Kadane's algorithm.
:param arr: the given array of numbers
:param allow_empty_subarrays: if True, then the algorithm considers empty subarrays
>>> max_subarray_sum([2, 8, 9])
19
>>> max_subarray_sum([0, 0])
0
>>> max_subarray_sum([-1.0, 0.0, 1.0])
1.0
>>> max_subarray_sum([1, 2, 3, 4, -2])
10
>>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
6
>>> max_subarray_sum([2, 3, -9, 8, -2])
8
>>> max_subarray_sum([-2, -3, -1, -4, -6])
-1
>>> max_subarray_sum([-2, -3, -1, -4, -6], allow_empty_subarrays=True)
0
>>> max_subarray_sum([])
0
"""
if not arr:
return 0
max_sum = 0 if allow_empty_subarrays else float("-inf")
curr_sum = 0.0
for num in arr:
curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
max_sum = max(max_sum, curr_sum)
return max_sum
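# Kadane trace (illustrative, not part of the original file): for
# [-2, 1, -3, 4, -1, 2, 1, -5, 4], curr_sum evolves
# -2, 1, -2, 4, 3, 5, 6, 1, 5, so max_sum ends at 6 (subarray [4, -1, 2, 1]).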
if __name__ == "__main__":
from doctest import testmod
testmod()
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/combination_sum_iv.py | dynamic_programming/combination_sum_iv.py | """
Question:
You are given an array of distinct integers and you have to tell how many
different ways of selecting the elements from the array are there such that
the sum of chosen elements is equal to the target number tar.
Example
Input:
* N = 3
* target = 5
* array = [1, 2, 5]
Output:
9
Approach:
The basic idea is to recurse over the array, counting the ways in which the
sum of chosen elements equals `target`. For every element, we have two choices
1. Include the element in our set of chosen elements.
2. Don't include the element in our set of chosen elements.
"""
def combination_sum_iv(array: list[int], target: int) -> int:
"""
Function checks all possible combinations and returns the count of
possible combinations, in exponential time complexity.
>>> combination_sum_iv([1,2,5], 5)
9
"""
def count_of_possible_combinations(target: int) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item) for item in array)
return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(array: list[int], target: int) -> int:
"""
Function checks all possible combinations and returns the count of
possible combinations in O(target * n) time, as we memoize results in a
dynamic programming array here.
>>> combination_sum_iv_dp_array([1,2,5], 5)
9
"""
def count_of_possible_combinations_with_dp_array(
target: int, dp_array: list[int]
) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
answer = sum(
count_of_possible_combinations_with_dp_array(target - item, dp_array)
for item in array
)
dp_array[target] = answer
return answer
dp_array = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
"""
Function checks all possible combinations using a bottom-up approach
and returns the count of possible combinations in O(target * n) time,
as we are using a dynamic programming array here.
>>> combination_sum_iv_bottom_up(3, [1,2,5], 5)
9
"""
dp_array = [0] * (target + 1)
dp_array[0] = 1
for i in range(1, target + 1):
for j in range(n):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
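# DP trace (illustrative, not part of the original file): for array [1, 2, 5]
# and target 5, dp_array fills as [1, 1, 2, 3, 5, 9], so the answer is 9.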
if __name__ == "__main__":
import doctest
doctest.testmod()
target = 5
array = [1, 2, 5]
print(combination_sum_iv(array, target))
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/subset_generation.py | dynamic_programming/subset_generation.py | def subset_combinations(elements: list[int], n: int) -> list:
"""
Compute n-element combinations from a given list using dynamic programming.
Args:
* `elements`: The list of elements from which combinations will be generated.
* `n`: The number of elements in each combination.
Returns:
A list of tuples, each representing a combination of `n` elements.
>>> subset_combinations(elements=[10, 20, 30, 40], n=2)
[(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)]
>>> subset_combinations(elements=[1, 2, 3], n=1)
[(1,), (2,), (3,)]
>>> subset_combinations(elements=[1, 2, 3], n=3)
[(1, 2, 3)]
>>> subset_combinations(elements=[42], n=1)
[(42,)]
>>> subset_combinations(elements=[6, 7, 8, 9], n=4)
[(6, 7, 8, 9)]
>>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0)
[()]
>>> subset_combinations(elements=[1, 2, 3, 4], n=2)
[(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
>>> subset_combinations(elements=[1, 'apple', 3.14], n=2)
[(1, 'apple'), (1, 3.14), ('apple', 3.14)]
>>> subset_combinations(elements=['single'], n=0)
[()]
>>> subset_combinations(elements=[], n=9)
[]
>>> from itertools import combinations
>>> all(subset_combinations(items, n) == list(combinations(items, n))
... for items, n in (
... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1),
... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2),
... ([1, 'apple', 3.14], 2), (['single'], 0), ([], 9)))
True
"""
r = len(elements)
if n > r:
return []
dp: list[list[tuple]] = [[] for _ in range(r + 1)]
dp[0].append(())
for i in range(1, r + 1):
for j in range(i, 0, -1):
for prev_combination in dp[j - 1]:
dp[j].append((*prev_combination, elements[i - 1]))
try:
return sorted(dp[n])
except TypeError:
return dp[n]
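# DP trace (illustrative, not part of the original file): for elements
# [10, 20, 30] and n = 2, dp[2] grows to
# [(10, 20), (10, 30), (20, 30)] once the final element has been processed.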
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{subset_combinations(elements=[10, 20, 30, 40], n=2) = }")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/dynamic_programming/max_non_adjacent_sum.py | dynamic_programming/max_non_adjacent_sum.py | # Video Explanation: https://www.youtube.com/watch?v=6w60Zi1NtL8&feature=emb_logo
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
"""
Find the maximum non-adjacent sum of the integers in the nums input list
>>> maximum_non_adjacent_sum([1, 2, 3])
4
>>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
18
>>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
0
>>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
500
"""
if not nums:
return 0
max_including = nums[0]
max_excluding = 0
for num in nums[1:]:
max_including, max_excluding = (
max_excluding + num,
max(max_including, max_excluding),
)
return max(max_excluding, max_including)
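# Trace (illustrative, not part of the original file): for [1, 2, 3] the pair
# (max_including, max_excluding) evolves (1, 0) -> (2, 1) -> (4, 2), so the
# answer is max(2, 4) == 4.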
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/logistic_regression.py | machine_learning/logistic_regression.py | #!/usr/bin/python
# Logistic Regression from scratch
# importing all the required libraries
"""
Implementing logistic regression for classification problem
Helpful resources:
Coursera ML course
https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
# sigmoid function or logistic function is used as a hypothesis function in
# classification problems
def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
"""
Also known as Logistic Function.
1
f(x) = -------
1 + e⁻ˣ
The sigmoid function approaches 1 as its input 'x' becomes increasingly
positive, and approaches 0 as 'x' becomes increasingly negative.
Reference: https://en.wikipedia.org/wiki/Sigmoid_function
@param z: input to the function
@returns: returns value in the range 0 to 1
Examples:
>>> float(sigmoid_function(4))
0.9820137900379085
>>> sigmoid_function(np.array([-3, 3]))
array([0.04742587, 0.95257413])
>>> sigmoid_function(np.array([-3, 3, 1]))
array([0.04742587, 0.95257413, 0.73105858])
>>> sigmoid_function(np.array([-0.01, -2, -1.9]))
array([0.49750002, 0.11920292, 0.13010847])
>>> sigmoid_function(np.array([-1.3, 5.3, 12]))
array([0.21416502, 0.9950332 , 0.99999386])
>>> sigmoid_function(np.array([0.01, 0.02, 4.1]))
array([0.50249998, 0.50499983, 0.9836975 ])
>>> sigmoid_function(np.array([0.8]))
array([0.68997448])
"""
return 1 / (1 + np.exp(-z))
def cost_function(h: np.ndarray, y: np.ndarray) -> float:
"""
Cost function quantifies the error between predicted and expected values.
The cost function used in Logistic Regression is called Log Loss
or Cross Entropy Function.
J(θ) = (1/m) * Σ [ -y * log(hθ(x)) - (1 - y) * log(1 - hθ(x)) ]
Where:
- J(θ) is the cost that we want to minimize during training
- m is the number of training examples
- Σ represents the summation over all training examples
- y is the actual binary label (0 or 1) for a given example
- hθ(x) is the predicted probability that x belongs to the positive class
@param h: the output of sigmoid function. It is the estimated probability
that the input example 'x' belongs to the positive class
@param y: the actual binary label associated with input example 'x'
Examples:
>>> estimations = sigmoid_function(np.array([0.3, -4.3, 8.1]))
>>> cost_function(h=estimations,y=np.array([1, 0, 1]))
0.18937868932131605
>>> estimations = sigmoid_function(np.array([4, 3, 1]))
>>> cost_function(h=estimations,y=np.array([1, 0, 0]))
1.459999655669926
>>> estimations = sigmoid_function(np.array([4, -3, -1]))
>>> cost_function(h=estimations,y=np.array([1,0,0]))
0.1266663223365915
>>> estimations = sigmoid_function(0)
>>> cost_function(h=estimations,y=np.array([1]))
0.6931471805599453
References:
- https://en.wikipedia.org/wiki/Logistic_regression
"""
return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())
def log_likelihood(x, y, weights):
scores = np.dot(x, weights)
return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
theta = np.zeros(x.shape[1])
for iterations in range(max_iterations):
z = np.dot(x, theta)
h = sigmoid_function(z)
gradient = np.dot(x.T, h - y) / y.size
theta = theta - alpha * gradient # updating the weights
z = np.dot(x, theta)
h = sigmoid_function(z)
j = cost_function(h, y)
if iterations % 100 == 0:
print(f"loss: {j} \t") # printing the loss after every 100 iterations
return theta
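# Shape sketch (added commentary, not from the original file): with m examples
# and n features, x is (m, n), theta is (n,), z = np.dot(x, theta) is (m,), and
# the gradient np.dot(x.T, h - y) / m is (n,), matching theta for the update.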
if __name__ == "__main__":
import doctest
doctest.testmod()
iris = datasets.load_iris()
x = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
theta = logistic_reg(alpha, x, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
def predict_prob(x):
return sigmoid_function(
np.dot(x, theta)
) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/automatic_differentiation.py | machine_learning/automatic_differentiation.py | """
Demonstration of the Automatic Differentiation (Reverse mode).
Reference: https://en.wikipedia.org/wiki/Automatic_differentiation
Author: Poojan Smart
Email: smrtpoojan@gmail.com
"""
from __future__ import annotations
from collections import defaultdict
from enum import Enum
from types import TracebackType
from typing import Any
import numpy as np
from typing_extensions import Self # noqa: UP035
class OpType(Enum):
"""
Class represents list of supported operations on Variable for gradient calculation.
"""
ADD = 0
SUB = 1
MUL = 2
DIV = 3
MATMUL = 4
POWER = 5
NOOP = 6
class Variable:
"""
Class represents n-dimensional object which is used to wrap numpy array on which
operations will be performed and the gradient will be calculated.
Examples:
>>> Variable(5.0)
Variable(5.0)
>>> Variable([5.0, 2.9])
Variable([5. 2.9])
>>> Variable([5.0, 2.9]) + Variable([1.0, 5.5])
Variable([6. 8.4])
>>> Variable([[8.0, 10.0]])
Variable([[ 8. 10.]])
"""
def __init__(self, value: Any) -> None:
self.value = np.array(value)
# pointers to the operations to which the Variable is input
self.param_to: list[Operation] = []
# pointer to the operation of which the Variable is output of
self.result_of: Operation = Operation(OpType.NOOP)
def __repr__(self) -> str:
return f"Variable({self.value})"
def to_ndarray(self) -> np.ndarray:
return self.value
def __add__(self, other: Variable) -> Variable:
result = Variable(self.value + other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.ADD, params=[self, other], output=result)
return result
def __sub__(self, other: Variable) -> Variable:
result = Variable(self.value - other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.SUB, params=[self, other], output=result)
return result
def __mul__(self, other: Variable) -> Variable:
result = Variable(self.value * other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.MUL, params=[self, other], output=result)
return result
def __truediv__(self, other: Variable) -> Variable:
result = Variable(self.value / other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.DIV, params=[self, other], output=result)
return result
def __matmul__(self, other: Variable) -> Variable:
result = Variable(self.value @ other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.MATMUL, params=[self, other], output=result)
return result
def __pow__(self, power: int) -> Variable:
result = Variable(self.value**power)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(
OpType.POWER,
params=[self],
output=result,
other_params={"power": power},
)
return result
def add_param_to(self, param_to: Operation) -> None:
self.param_to.append(param_to)
def add_result_of(self, result_of: Operation) -> None:
self.result_of = result_of
class Operation:
"""
Class represents operation between single or two Variable objects.
Operation objects contains type of operation, pointers to input Variable
objects and pointer to resulting Variable from the operation.
"""
def __init__(
self,
op_type: OpType,
other_params: dict | None = None,
) -> None:
self.op_type = op_type
self.other_params = {} if other_params is None else other_params
def add_params(self, params: list[Variable]) -> None:
self.params = params
def add_output(self, output: Variable) -> None:
self.output = output
def __eq__(self, value) -> bool:
return self.op_type == value if isinstance(value, OpType) else False
class GradientTracker:
"""
Class contains methods to compute partial derivatives of Variable
based on the computation graph.
Examples:
>>> with GradientTracker() as tracker:
... a = Variable([2.0, 5.0])
... b = Variable([1.0, 2.0])
... m = Variable([1.0, 2.0])
... c = a + b
... d = a * b
... e = c / d
>>> tracker.gradient(e, a)
array([-0.25, -0.04])
>>> tracker.gradient(e, b)
array([-1. , -0.25])
>>> tracker.gradient(e, m) is None
True
>>> with GradientTracker() as tracker:
... a = Variable([[2.0, 5.0]])
... b = Variable([[1.0], [2.0]])
... c = a @ b
>>> tracker.gradient(c, a)
array([[1., 2.]])
>>> tracker.gradient(c, b)
array([[2.],
[5.]])
>>> with GradientTracker() as tracker:
... a = Variable([[2.0, 5.0]])
... b = a ** 3
>>> tracker.gradient(b, a)
array([[12., 75.]])
"""
instance = None
def __new__(cls) -> Self:
"""
Executes at the creation of class object and returns if
object is already created. This class follows singleton
design pattern.
"""
if cls.instance is None:
cls.instance = super().__new__(cls)
return cls.instance
def __init__(self) -> None:
self.enabled = False
def __enter__(self) -> Self:
self.enabled = True
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.enabled = False
def append(
self,
op_type: OpType,
params: list[Variable],
output: Variable,
other_params: dict | None = None,
) -> None:
"""
Adds Operation object to the related Variable objects for
creating computational graph for calculating gradients.
Args:
op_type: Operation type
params: Input parameters to the operation
output: Output variable of the operation
"""
operation = Operation(op_type, other_params=other_params)
param_nodes = []
for param in params:
param.add_param_to(operation)
param_nodes.append(param)
output.add_result_of(operation)
operation.add_params(param_nodes)
operation.add_output(output)
def gradient(self, target: Variable, source: Variable) -> np.ndarray | None:
"""
Reverse accumulation of partial derivatives to calculate gradients
of target variable with respect to source variable.
Args:
target: target variable for which gradients are calculated.
source: source variable with respect to which the gradients are
calculated.
Returns:
Gradient of the source variable with respect to the target variable
"""
# partial derivatives with respect to target
partial_deriv = defaultdict(lambda: 0)
partial_deriv[target] = np.ones_like(target.to_ndarray())
# iterating through each operations in the computation graph
operation_queue = [target.result_of]
while len(operation_queue) > 0:
operation = operation_queue.pop()
for param in operation.params:
# as per the chain rule, multiplying partial derivatives
# of variables with respect to the target
dparam_doutput = self.derivative(param, operation)
dparam_dtarget = dparam_doutput * partial_deriv[operation.output]
partial_deriv[param] += dparam_dtarget
if param.result_of and param.result_of != OpType.NOOP:
operation_queue.append(param.result_of)
return partial_deriv.get(source)
def derivative(self, param: Variable, operation: Operation) -> np.ndarray:
"""
Compute the derivative of given operation/function
Args:
param: variable to be differentiated
operation: function performed on the input variable
Returns:
Derivative of input variable with respect to the output of
the operation
"""
params = operation.params
if operation == OpType.ADD:
return np.ones_like(params[0].to_ndarray(), dtype=np.float64)
if operation == OpType.SUB:
if params[0] == param:
return np.ones_like(params[0].to_ndarray(), dtype=np.float64)
return -np.ones_like(params[1].to_ndarray(), dtype=np.float64)
if operation == OpType.MUL:
return (
params[1].to_ndarray().T
if params[0] == param
else params[0].to_ndarray().T
)
if operation == OpType.DIV:
if params[0] == param:
return 1 / params[1].to_ndarray()
return -params[0].to_ndarray() / (params[1].to_ndarray() ** 2)
if operation == OpType.MATMUL:
return (
params[1].to_ndarray().T
if params[0] == param
else params[0].to_ndarray().T
)
if operation == OpType.POWER:
power = operation.other_params["power"]
return power * (params[0].to_ndarray() ** (power - 1))
err_msg = f"invalid operation type: {operation.op_type}"
raise ValueError(err_msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
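# Minimal usage sketch (mirrors the doctests above; not in the original file):
with GradientTracker() as tracker:
a = Variable([2.0, 5.0])
b = a**2
print(tracker.gradient(b, a))  # -> [ 4. 10.]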
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/xgboost_regressor.py | machine_learning/xgboost_regressor.py | # XGBoost Regressor Example
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
# Split the dataset into features and target. The 'data' field holds the features.
"""
>>> data_handling((
... {'data':'[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]'
... ,'target':([4.526])}))
('[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]', [4.526])
"""
return (data["data"], data["target"])
def xgboost(
features: np.ndarray, target: np.ndarray, test_features: np.ndarray
) -> np.ndarray:
"""
>>> xgboost(np.array([[ 2.3571 , 52. , 6.00813008, 1.06775068,
... 907. , 2.45799458, 40.58 , -124.26]]),np.array([1.114]),
... np.array([[1.97840000e+00, 3.70000000e+01, 4.98858447e+00, 1.03881279e+00,
... 1.14300000e+03, 2.60958904e+00, 3.67800000e+01, -1.19780000e+02]]))
array([[1.1139996]], dtype=float32)
"""
xgb = XGBRegressor(
verbosity=0, random_state=42, tree_method="exact", base_score=0.5
)
xgb.fit(features, target)
# Predict target for test data
predictions = xgb.predict(test_features)
predictions = predictions.reshape(len(predictions), 1)
return predictions
def main() -> None:
"""
The URL for this algorithm
https://xgboost.readthedocs.io/en/stable/
California house price dataset is used to demonstrate the algorithm.
Expected error values:
Mean Absolute Error: 0.30957163379906033
Mean Square Error: 0.22611560196662744
"""
# Load California house price dataset
california = fetch_california_housing()
data, target = data_handling(california)
x_train, x_test, y_train, y_test = train_test_split(
data, target, test_size=0.25, random_state=1
)
predictions = xgboost(x_train, y_train, x_test)
# Error printing
print(f"Mean Absolute Error: {mean_absolute_error(y_test, predictions)}")
print(f"Mean Square Error: {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/similarity_search.py | machine_learning/similarity_search.py | """
Similarity Search : https://en.wikipedia.org/wiki/Similarity_search
Similarity search is a search algorithm for finding the vector nearest to a
query vector within a set of vectors; it is used in natural language processing.
This implementation measures distance with the Euclidean distance and
returns a list containing two items for each query vector:
1. the nearest vector
2. the distance between the query vector and the nearest vector (float)
"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
"""
Calculates the Euclidean distance between two vectors.
:param input_a: ndarray of first vector.
:param input_b: ndarray of second vector.
:return: Euclidean distance of input_a and input_b. By using math.sqrt(),
result will be float.
>>> euclidean(np.array([0]), np.array([1]))
1.0
>>> euclidean(np.array([0, 1]), np.array([1, 1]))
1.0
>>> euclidean(np.array([0, 0, 0]), np.array([0, 0, 1]))
1.0
"""
return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(
dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
"""
:param dataset: Set containing the vectors. Should be ndarray.
:param value_array: vector/vectors we want to know the nearest vector from dataset.
:return: Result will be a list containing
1. the nearest vector
2. distance from the vector
>>> dataset = np.array([[0], [1], [2]])
>>> value_array = np.array([[0]])
>>> similarity_search(dataset, value_array)
[[[0], 0.0]]
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
>>> value_array = np.array([[0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0], 1.0]]
>>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
>>> value_array = np.array([[0, 0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0, 0], 1.0]]
>>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0, 0], 0.0], [[0, 0, 0], 1.0]]
These are the errors that might occur:
1. If dimensions are different.
For example, dataset has 2d array and value_array has 1d array:
>>> dataset = np.array([[1]])
>>> value_array = np.array([1])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
...
ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1
2. If data's shapes are different.
For example, dataset has shape of (3, 2) and value_array has (2, 3).
We are expecting same shapes of two arrays, so it is wrong.
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
...
ValueError: Wrong input data's shape... dataset : 2, value_array : 3
3. If data types are different.
When trying to compare, we are expecting same types so they should be same.
If not, it'll come up with errors.
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]], dtype=np.float32)
>>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32)
>>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: Input data have different datatype...
dataset : float32, value_array : int32
"""
if dataset.ndim != value_array.ndim:
msg = (
"Wrong input data's dimensions... "
f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
)
raise ValueError(msg)
try:
if dataset.shape[1] != value_array.shape[1]:
msg = (
"Wrong input data's shape... "
f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
)
raise ValueError(msg)
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape")
if dataset.dtype != value_array.dtype:
msg = (
"Input data have different datatype... "
f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
)
raise TypeError(msg)
answer = []
for value in value_array:
dist = euclidean(value, dataset[0])
vector = dataset[0].tolist()
for dataset_value in dataset[1:]:
temp_dist = euclidean(value, dataset_value)
if dist > temp_dist:
dist = temp_dist
vector = dataset_value.tolist()
answer.append([vector, dist])
return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
"""
Calculates the cosine similarity between two vectors.
:param input_a: ndarray of first vector.
:param input_b: ndarray of second vector.
:return: Cosine similarity of input_a and input_b as a float.
>>> cosine_similarity(np.array([1]), np.array([1]))
1.0
>>> cosine_similarity(np.array([1, 2]), np.array([6, 32]))
0.9615239476408232
"""
return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/multilayer_perceptron_classifier.py | machine_learning/multilayer_perceptron_classifier.py | from sklearn.neural_network import MLPClassifier
X = [[0.0, 0.0], [1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]
y = [0, 1, 0, 0]
clf = MLPClassifier(
solver="lbfgs", alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1
)
clf.fit(X, y)
test = [[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
Y = clf.predict(test)
def wrapper(y):
"""
>>> [int(x) for x in wrapper(Y)]
[0, 0, 1]
"""
return list(y)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/loss_functions.py | machine_learning/loss_functions.py | import numpy as np
def binary_cross_entropy(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate the mean binary cross-entropy (BCE) loss between true labels and predicted
probabilities.
BCE loss quantifies dissimilarity between true labels (0 or 1) and predicted
probabilities. It's widely used in binary classification tasks.
BCE = -(1/n) * Σ(y_true * ln(y_pred) + (1 - y_true) * ln(1 - y_pred))
Reference: https://en.wikipedia.org/wiki/Cross_entropy
Parameters:
- y_true: True binary labels (0 or 1)
- y_pred: Predicted probabilities for class 1
- epsilon: Small constant to avoid numerical instability
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
>>> float(binary_cross_entropy(true_labels, predicted_probs))
0.2529995012327421
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> binary_cross_entropy(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
y_pred = np.clip(y_pred, epsilon, 1 - epsilon) # Clip predictions to avoid log(0)
bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
return np.mean(bce_loss)
def binary_focal_cross_entropy(
y_true: np.ndarray,
y_pred: np.ndarray,
gamma: float = 2.0,
alpha: float = 0.25,
epsilon: float = 1e-15,
) -> float:
"""
Calculate the mean binary focal cross-entropy (BFCE) loss between true labels
and predicted probabilities.
BFCE loss quantifies dissimilarity between true labels (0 or 1) and predicted
probabilities. It's a variation of binary cross-entropy that addresses class
imbalance by focusing on hard examples.
BFCE = -(1/n) * Σ(alpha * (1 - y_pred)**gamma * y_true * log(y_pred)
+ (1 - alpha) * y_pred**gamma * (1 - y_true) * log(1 - y_pred))
Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf)
Parameters:
- y_true: True binary labels (0 or 1).
- y_pred: Predicted probabilities for class 1.
- gamma: Focusing parameter for modulating the loss (default: 2.0).
- alpha: Weighting factor for class 1 (default: 0.25).
- epsilon: Small constant to avoid numerical instability.
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
>>> float(binary_focal_cross_entropy(true_labels, predicted_probs))
0.008257977659239775
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> binary_focal_cross_entropy(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
# Clip predicted probabilities to avoid log(0)
y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
bcfe_loss = -(
alpha * (1 - y_pred) ** gamma * y_true * np.log(y_pred)
+ (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)
)
return np.mean(bcfe_loss)
def categorical_cross_entropy(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate categorical cross-entropy (CCE) loss between true class labels and
predicted class probabilities.
CCE = -Σ(y_true * ln(y_pred))
Reference: https://en.wikipedia.org/wiki/Cross_entropy
Parameters:
- y_true: True class labels (one-hot encoded)
- y_pred: Predicted class probabilities
- epsilon: Small constant to avoid numerical instability
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> float(categorical_cross_entropy(true_labels, pred_probs))
0.567395975254385
>>> true_labels = np.array([[1, 0], [0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same shape.
>>> true_labels = np.array([[2, 0, 1], [1, 0, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: y_true must be one-hot encoded.
>>> true_labels = np.array([[1, 0, 1], [1, 0, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: y_true must be one-hot encoded.
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]])
>>> categorical_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: Predicted probabilities must sum to approximately 1.
"""
if y_true.shape != y_pred.shape:
raise ValueError("Input arrays must have the same shape.")
if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1):
raise ValueError("y_true must be one-hot encoded.")
if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)):
raise ValueError("Predicted probabilities must sum to approximately 1.")
y_pred = np.clip(y_pred, epsilon, 1) # Clip predictions to avoid log(0)
return -np.sum(y_true * np.log(y_pred))
def categorical_focal_cross_entropy(
y_true: np.ndarray,
y_pred: np.ndarray,
alpha: np.ndarray = None,
gamma: float = 2.0,
epsilon: float = 1e-15,
) -> float:
"""
Calculate the mean categorical focal cross-entropy (CFCE) loss between true
labels and predicted probabilities for multi-class classification.
CFCE loss is a generalization of binary focal cross-entropy for multi-class
classification. It addresses class imbalance by focusing on hard examples.
CFCE = -Σ alpha * (1 - y_pred)**gamma * y_true * log(y_pred)
Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf)
Parameters:
- y_true: True labels in one-hot encoded form.
- y_pred: Predicted probabilities for each class.
- alpha: Array of weighting factors for each class.
- gamma: Focusing parameter for modulating the loss (default: 2.0).
- epsilon: Small constant to avoid numerical instability.
Returns:
- The mean categorical focal cross-entropy loss.
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> alpha = np.array([0.6, 0.2, 0.7])
>>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
0.0025966118981496423
>>> true_labels = np.array([[0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> alpha = np.array([0.25, 0.25, 0.25])
>>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
0.23315276982014324
>>> true_labels = np.array([[1, 0], [0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_focal_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: Shape of y_true and y_pred must be the same.
>>> true_labels = np.array([[2, 0, 1], [1, 0, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_focal_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: y_true must be one-hot encoded.
>>> true_labels = np.array([[1, 0, 1], [1, 0, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
>>> categorical_focal_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: y_true must be one-hot encoded.
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0]])
>>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]])
>>> categorical_focal_cross_entropy(true_labels, pred_probs)
Traceback (most recent call last):
...
ValueError: Predicted probabilities must sum to approximately 1.
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> alpha = np.array([0.6, 0.2])
>>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
Traceback (most recent call last):
...
ValueError: Length of alpha must match the number of classes.
"""
if y_true.shape != y_pred.shape:
raise ValueError("Shape of y_true and y_pred must be the same.")
if alpha is None:
alpha = np.ones(y_true.shape[1])
if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1):
raise ValueError("y_true must be one-hot encoded.")
if len(alpha) != y_true.shape[1]:
raise ValueError("Length of alpha must match the number of classes.")
if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)):
raise ValueError("Predicted probabilities must sum to approximately 1.")
# Clip predicted probabilities to avoid log(0)
y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
# Calculate loss for each class and sum across classes
cfce_loss = -np.sum(
alpha * np.power(1 - y_pred, gamma) * y_true * np.log(y_pred), axis=1
)
return np.mean(cfce_loss)
def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the mean hinge loss between true labels and predicted values,
used for training support vector machines (SVMs).
Hinge loss = max(0, 1 - true * pred)
Reference: https://en.wikipedia.org/wiki/Hinge_loss
Args:
- y_true: actual values (ground truth) encoded as -1 or 1
- y_pred: predicted values
>>> true_labels = np.array([-1, 1, 1, -1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
>>> float(hinge_loss(true_labels, pred))
1.52
>>> true_labels = np.array([-1, 1, 1, -1, 1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
>>> hinge_loss(true_labels, pred)
Traceback (most recent call last):
...
ValueError: Length of predicted and actual array must be same.
>>> true_labels = np.array([-1, 1, 10, -1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
>>> hinge_loss(true_labels, pred)
Traceback (most recent call last):
...
ValueError: y_true can have values -1 or 1 only.
"""
if len(y_true) != len(y_pred):
raise ValueError("Length of predicted and actual array must be same.")
if np.any((y_true != -1) & (y_true != 1)):
raise ValueError("y_true can have values -1 or 1 only.")
hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred))
return np.mean(hinge_losses)
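# Worked numbers (illustrative, not part of the original file): for
# y_true = [-1, 1, 1, -1, 1] and y_pred = [-4, -0.3, 0.7, 5, 10], the losses
# max(0, 1 - y_true * y_pred) are [0, 1.3, 0.3, 6, 0], whose mean is 1.52.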
def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
"""
Calculate the mean Huber loss between the given ground truth and predicted values.
The Huber loss describes the penalty incurred by an estimation procedure, and it
serves as a measure of accuracy for regression models.
Huber loss =
0.5 * (y_true - y_pred)^2 if |y_true - y_pred| <= delta
delta * |y_true - y_pred| - 0.5 * delta^2 otherwise
Reference: https://en.wikipedia.org/wiki/Huber_loss
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102))
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
>>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164))
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
>>> huber_loss(true_labels, predicted_probs, 1.0)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
huber_mse = 0.5 * (y_true - y_pred) ** 2
huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta)
return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean()
def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the mean squared error (MSE) between ground truth and predicted values.
MSE measures the squared difference between true values and predicted values, and it
serves as a measure of accuracy for regression models.
MSE = (1/n) * Σ(y_true - y_pred)^2
Reference: https://en.wikipedia.org/wiki/Mean_squared_error
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028))
True
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> mean_squared_error(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
squared_errors = (y_true - y_pred) ** 2
return np.mean(squared_errors)
def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculates the Mean Absolute Error (MAE) between ground truth (observed)
and predicted values.
MAE measures the absolute difference between true values and predicted values.
Equation:
MAE = (1/n) * Σ(abs(y_true - y_pred))
Reference: https://en.wikipedia.org/wiki/Mean_absolute_error
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16))
True
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16))
False
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
>>> mean_absolute_error(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
return np.mean(abs(y_true - y_pred))
def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the mean squared logarithmic error (MSLE) between ground truth and
predicted values.
MSLE measures the squared logarithmic difference between true values and predicted
values for regression models. It's particularly useful for dealing with skewed or
large-value data, and it's often used when the relative differences between
predicted and true values are more important than absolute differences.
MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2
Reference: https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035
Parameters:
- y_true: The true values (ground truth)
- y_pred: The predicted values
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
>>> float(mean_squared_logarithmic_error(true_values, predicted_values))
0.0030860877925181344
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
>>> mean_squared_logarithmic_error(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2
return np.mean(squared_logarithmic_errors)
def mean_absolute_percentage_error(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15
) -> float:
"""
Calculate the Mean Absolute Percentage Error between y_true and y_pred.
Mean Absolute Percentage Error calculates the average of the absolute
percentage differences between the predicted and true values.
Formula = (1/n) * Σ|(y_true[i] - y_pred[i]) / y_true[i]|
Source: https://stephenallwright.com/good-mape-score/
Parameters:
y_true (np.ndarray): Numpy array containing true/target values.
y_pred (np.ndarray): Numpy array containing predicted values.
Returns:
float: The Mean Absolute Percentage error between y_true and y_pred.
Examples:
>>> y_true = np.array([10, 20, 30, 40])
>>> y_pred = np.array([12, 18, 33, 45])
>>> float(mean_absolute_percentage_error(y_true, y_pred))
0.13125
>>> y_true = np.array([1, 2, 3, 4])
>>> y_pred = np.array([2, 3, 4, 5])
>>> float(mean_absolute_percentage_error(y_true, y_pred))
0.5208333333333333
>>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24])
>>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23])
>>> float(mean_absolute_percentage_error(y_true, y_pred))
0.064671076436071
"""
if len(y_true) != len(y_pred):
raise ValueError("The length of the two arrays should be the same.")
y_true = np.where(y_true == 0, epsilon, y_true)
absolute_percentage_diff = np.abs((y_true - y_pred) / y_true)
return np.mean(absolute_percentage_diff)
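# Worked numbers (illustrative, not part of the original file): for
# y_true = [10, 20, 30, 40] and y_pred = [12, 18, 33, 45], the absolute
# percentage errors are [0.2, 0.1, 0.1, 0.125], whose mean is 0.13125.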
def perplexity_loss(
y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-7
) -> float:
"""
Calculate the perplexity for the given y_true and y_pred.
Perplexity is useful for assessing language model accuracy in
Natural Language Processing (NLP); it is a measure of how certain
the model is in its predictions.
Perplexity Loss = exp(-(1/N) * Σ ln(p(x)))
Reference:
https://en.wikipedia.org/wiki/Perplexity
Args:
y_true: Actual label encoded sentences of shape (batch_size, sentence_length)
y_pred: Predicted sentences of shape (batch_size, sentence_length, vocab_size)
epsilon: Small floating point number to avoid getting inf for log(0)
Returns:
Perplexity loss between y_true and y_pred.
>>> y_true = np.array([[1, 4], [2, 3]])
>>> y_pred = np.array(
... [[[0.28, 0.19, 0.21 , 0.15, 0.15],
... [0.24, 0.19, 0.09, 0.18, 0.27]],
... [[0.03, 0.26, 0.21, 0.18, 0.30],
... [0.28, 0.10, 0.33, 0.15, 0.12]]]
... )
>>> float(perplexity_loss(y_true, y_pred))
5.0247347775367945
>>> y_true = np.array([[1, 4], [2, 3]])
>>> y_pred = np.array(
... [[[0.28, 0.19, 0.21 , 0.15, 0.15],
... [0.24, 0.19, 0.09, 0.18, 0.27],
... [0.30, 0.10, 0.20, 0.15, 0.25]],
... [[0.03, 0.26, 0.21, 0.18, 0.30],
... [0.28, 0.10, 0.33, 0.15, 0.12],
... [0.30, 0.10, 0.20, 0.15, 0.25]],]
... )
>>> perplexity_loss(y_true, y_pred)
Traceback (most recent call last):
...
ValueError: Sentence length of y_true and y_pred must be equal.
>>> y_true = np.array([[1, 4], [2, 11]])
>>> y_pred = np.array(
... [[[0.28, 0.19, 0.21 , 0.15, 0.15],
... [0.24, 0.19, 0.09, 0.18, 0.27]],
... [[0.03, 0.26, 0.21, 0.18, 0.30],
... [0.28, 0.10, 0.33, 0.15, 0.12]]]
... )
>>> perplexity_loss(y_true, y_pred)
Traceback (most recent call last):
...
ValueError: Label value must not be greater than vocabulary size.
>>> y_true = np.array([[1, 4]])
>>> y_pred = np.array(
... [[[0.28, 0.19, 0.21 , 0.15, 0.15],
... [0.24, 0.19, 0.09, 0.18, 0.27]],
... [[0.03, 0.26, 0.21, 0.18, 0.30],
... [0.28, 0.10, 0.33, 0.15, 0.12]]]
... )
>>> perplexity_loss(y_true, y_pred)
Traceback (most recent call last):
...
ValueError: Batch size of y_true and y_pred must be equal.
"""
vocab_size = y_pred.shape[2]
if y_true.shape[0] != y_pred.shape[0]:
raise ValueError("Batch size of y_true and y_pred must be equal.")
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("Sentence length of y_true and y_pred must be equal.")
# valid labels index np.eye(vocab_size), so they must be strictly below vocab_size
if np.max(y_true) >= vocab_size:
raise ValueError("Label value must not be greater than vocabulary size.")
# Matrix to select prediction value only for true class
filter_matrix = np.array(
[[list(np.eye(vocab_size)[word]) for word in sentence] for sentence in y_true]
)
# Getting the matrix containing prediction for only true class
true_class_pred = np.sum(y_pred * filter_matrix, axis=2).clip(epsilon, 1)
# Calculating perplexity for each sentence
perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1)))
return np.mean(perp_losses)
def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) -> float:
"""
Calculate the Smooth L1 Loss between y_true and y_pred.
The Smooth L1 Loss is less sensitive to outliers than the L2 Loss and is often used
in regression problems, such as object detection.
Smooth L1 Loss =
0.5 * (x - y)^2 / beta, if |x - y| < beta
|x - y| - 0.5 * beta, otherwise
Reference:
https://pytorch.org/docs/stable/generated/torch.nn.SmoothL1Loss.html
Args:
y_true: Array of true values.
y_pred: Array of predicted values.
beta: Specifies the threshold at which to change between L1 and L2 loss.
Returns:
The calculated Smooth L1 Loss between y_true and y_pred.
Raises:
ValueError: If the length of the two arrays is not the same.
>>> y_true = np.array([3, 5, 2, 7])
>>> y_pred = np.array([2.9, 4.8, 2.1, 7.2])
>>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.012500000000000022
>>> y_true = np.array([2, 4, 6])
>>> y_pred = np.array([1, 5, 7])
>>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.5
>>> y_true = np.array([1, 3, 5, 7])
>>> y_pred = np.array([1, 3, 5, 7])
>>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.0
>>> y_true = np.array([1, 3, 5])
>>> y_pred = np.array([1, 3, 5, 7])
>>> smooth_l1_loss(y_true, y_pred, 1.0)
Traceback (most recent call last):
...
ValueError: The length of the two arrays should be the same.
"""
if len(y_true) != len(y_pred):
raise ValueError("The length of the two arrays should be the same.")
diff = np.abs(y_true - y_pred)
loss = np.where(diff < beta, 0.5 * diff**2 / beta, diff - 0.5 * beta)
return np.mean(loss)
def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Calculate the Kullback-Leibler divergence (KL divergence) loss between true labels
and predicted probabilities.
KL divergence loss quantifies dissimilarity between true labels and predicted
probabilities. It's often used in training generative models.
KL = Σ(y_true * ln(y_true / y_pred))
Reference: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Parameters:
- y_true: True class probabilities
- y_pred: Predicted class probabilities
>>> true_labels = np.array([0.2, 0.3, 0.5])
>>> predicted_probs = np.array([0.3, 0.3, 0.4])
>>> float(kullback_leibler_divergence(true_labels, predicted_probs))
0.030478754035472025
>>> true_labels = np.array([0.2, 0.3, 0.5])
>>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])
>>> kullback_leibler_divergence(true_labels, predicted_probs)
Traceback (most recent call last):
...
ValueError: Input arrays must have the same length.
"""
if len(y_true) != len(y_pred):
raise ValueError("Input arrays must have the same length.")
kl_loss = y_true * np.log(y_true / y_pred)
return np.sum(kl_loss)
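# Worked example (sketch added for clarity): for y_true = [0.5, 0.5] and
# y_pred = [0.25, 0.75], the divergence is
# 0.5 * ln(0.5 / 0.25) + 0.5 * ln(0.5 / 0.75) ~= 0.1438:
# assert np.isclose(
#     kullback_leibler_divergence(np.array([0.5, 0.5]), np.array([0.25, 0.75])),
#     0.5 * np.log(2) + 0.5 * np.log(2 / 3),
# )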
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/t_stochastic_neighbour_embedding.py | machine_learning/t_stochastic_neighbour_embedding.py | """
t-distributed stochastic neighbor embedding (t-SNE)
For more details, see:
https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding
"""
import doctest
import numpy as np
from numpy import ndarray
from sklearn.datasets import load_iris
def collect_dataset() -> tuple[ndarray, ndarray]:
"""
Load the Iris dataset and return features and labels.
Returns:
tuple[ndarray, ndarray]: Feature matrix and target labels.
>>> features, targets = collect_dataset()
>>> features.shape
(150, 4)
>>> targets.shape
(150,)
"""
iris_dataset = load_iris()
return np.array(iris_dataset.data), np.array(iris_dataset.target)
def compute_pairwise_affinities(data_matrix: ndarray, sigma: float = 1.0) -> ndarray:
"""
Compute high-dimensional affinities (P matrix) using a Gaussian kernel.
Args:
data_matrix: Input data of shape (n_samples, n_features).
sigma: Gaussian kernel bandwidth.
Returns:
ndarray: Symmetrized probability matrix.
>>> x = np.array([[0.0, 0.0], [1.0, 0.0]])
>>> probabilities = compute_pairwise_affinities(x)
>>> float(round(probabilities[0, 1], 3))
0.25
"""
n_samples = data_matrix.shape[0]
squared_sum = np.sum(np.square(data_matrix), axis=1)
squared_distance = np.add(
np.add(-2 * np.dot(data_matrix, data_matrix.T), squared_sum).T, squared_sum
)
affinity_matrix = np.exp(-squared_distance / (2 * sigma**2))
np.fill_diagonal(affinity_matrix, 0)
affinity_matrix /= np.sum(affinity_matrix)
return (affinity_matrix + affinity_matrix.T) / (2 * n_samples)
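# Note (added for clarity): the vectorized expression above builds the
# pairwise squared-distance matrix via the identity
# ||a - b||^2 = ||a||^2 - 2 * a.b + ||b||^2, and is equivalent to the
# explicit (but much slower) double loop:
# dist = np.array([[np.sum((p - q) ** 2) for q in data_matrix]
#                  for p in data_matrix])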
def compute_low_dim_affinities(embedding_matrix: ndarray) -> tuple[ndarray, ndarray]:
"""
Compute low-dimensional affinities (Q matrix) using a Student-t distribution.
Args:
embedding_matrix: Low-dimensional embedding of shape (n_samples, n_components).
Returns:
tuple[ndarray, ndarray]: (Q probability matrix, numerator matrix).
>>> y = np.array([[0.0, 0.0], [1.0, 0.0]])
>>> q_matrix, numerators = compute_low_dim_affinities(y)
>>> q_matrix.shape
(2, 2)
"""
squared_sum = np.sum(np.square(embedding_matrix), axis=1)
numerator_matrix = 1 / (
1
+ np.add(
np.add(-2 * np.dot(embedding_matrix, embedding_matrix.T), squared_sum).T,
squared_sum,
)
)
np.fill_diagonal(numerator_matrix, 0)
q_matrix = numerator_matrix / np.sum(numerator_matrix)
return q_matrix, numerator_matrix
def apply_tsne(
data_matrix: ndarray,
n_components: int = 2,
learning_rate: float = 200.0,
n_iter: int = 500,
) -> ndarray:
"""
Apply t-SNE for dimensionality reduction.
Args:
data_matrix: Original dataset (features).
n_components: Target dimension (2D or 3D).
learning_rate: Step size for gradient descent.
n_iter: Number of iterations.
Returns:
ndarray: Low-dimensional embedding of the data.
>>> features, _ = collect_dataset()
>>> embedding = apply_tsne(features, n_components=2, n_iter=50)
>>> embedding.shape
(150, 2)
"""
if n_components < 1 or n_iter < 1:
raise ValueError("n_components and n_iter must be >= 1")
n_samples = data_matrix.shape[0]
rng = np.random.default_rng()
embedding = rng.standard_normal((n_samples, n_components)) * 1e-4
high_dim_affinities = compute_pairwise_affinities(data_matrix)
high_dim_affinities = np.maximum(high_dim_affinities, 1e-12)
embedding_increment = np.zeros_like(embedding)
momentum = 0.5
for iteration in range(n_iter):
low_dim_affinities, numerator_matrix = compute_low_dim_affinities(embedding)
low_dim_affinities = np.maximum(low_dim_affinities, 1e-12)
affinity_diff = high_dim_affinities - low_dim_affinities
gradient = 4 * (
np.dot((affinity_diff * numerator_matrix), embedding)
- np.multiply(
np.sum(affinity_diff * numerator_matrix, axis=1)[:, np.newaxis],
embedding,
)
)
embedding_increment = momentum * embedding_increment - learning_rate * gradient
embedding += embedding_increment
if iteration == int(n_iter / 4):
momentum = 0.8
return embedding
def main() -> None:
"""
Run t-SNE on the Iris dataset and display the first 5 embeddings.
>>> main() # doctest: +ELLIPSIS
t-SNE embedding (first 5 points):
[[...
"""
features, _labels = collect_dataset()
embedding = apply_tsne(features, n_components=2, n_iter=300)
if not isinstance(embedding, np.ndarray):
raise TypeError("t-SNE embedding must be an ndarray")
print("t-SNE embedding (first 5 points):")
print(embedding[:5])
# Optional visualization (Ruff/mypy compliant)
# import matplotlib.pyplot as plt
# plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, cmap="viridis")
# plt.title("t-SNE Visualization of the Iris Dataset")
# plt.xlabel("Dimension 1")
# plt.ylabel("Dimension 2")
# plt.show()
if __name__ == "__main__":
doctest.testmod()
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/word_frequency_functions.py | machine_learning/word_frequency_functions.py | import string
from math import log10
"""
tf-idf Wikipedia: https://en.wikipedia.org/wiki/Tf%E2%80%93idf
tf-idf and other word frequency algorithms are often used
as a weighting factor in information retrieval and text
mining. 83% of text-based recommender systems use
tf-idf for term weighting. In layman's terms, tf-idf
is a statistic intended to reflect how important a word
is to a document in a corpus (a collection of documents)
Here I've implemented several word frequency algorithms
that are commonly used in information retrieval: Term Frequency,
Document Frequency, and TF-IDF (Term Frequency * Inverse Document
Frequency).
Term Frequency is a statistical function that
returns a number representing how frequently
an expression occurs in a document. This
indicates how significant a particular term is in
a given document.
Document Frequency is a statistical function that returns
an integer representing the number of documents in a
corpus that a term occurs in (where the max number returned
would be the number of documents in the corpus).
Inverse Document Frequency is mathematically written as
log10(N/df), where N is the number of documents in your
corpus and df is the Document Frequency. If df is 0, a
ZeroDivisionError will be thrown.
Term-Frequency*Inverse-Document-Frequency is a measure
of the originality of a term. It is mathematically written
as tf*log10(N/df). It compares the number of times
a term appears in a document with the number of documents
the term appears in. If df is 0, a ZeroDivisionError will be thrown.
"""
def term_frequency(term: str, document: str) -> int:
"""
Return the number of times a term occurs within
a given document.
@params: term, the term to search a document for, and document,
the document to search within
@returns: an integer representing the number of times a term is
found within the document
@examples:
>>> term_frequency("to", "To be, or not to be")
2
"""
# strip all punctuation and newlines and replace it with ''
document_without_punctuation = document.translate(
str.maketrans("", "", string.punctuation)
).replace("\n", "")
tokenize_document = document_without_punctuation.split(" ") # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
"""
Calculate the number of documents in a corpus that contain a
given term
@params : term, the term to search each document for, and corpus, a collection of
documents. Each document should be separated by a newline.
@returns : the number of documents in the corpus that contain the term you are
searching for and the number of documents in the corpus
@examples :
>>> document_frequency("first", "This is the first document in the corpus.\\nThIs\
is the second document in the corpus.\\nTHIS is \
the third document in the corpus.")
(1, 3)
"""
corpus_without_punctuation = corpus.lower().translate(
str.maketrans("", "", string.punctuation)
) # strip all punctuation and replace it with ''
docs = corpus_without_punctuation.split("\n")
term = term.lower()
return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """
    Return a float denoting the importance
of a word. This measure of importance is
calculated by log10(N/df), where N is the
number of documents and df is
the Document Frequency.
@params : df, the Document Frequency, N,
the number of documents in the corpus and
smoothing, if True return the idf-smooth
@returns : log10(N/df) or 1+log10(N/1+df)
@examples :
>>> inverse_document_frequency(3, 0)
Traceback (most recent call last):
...
ValueError: log10(0) is undefined.
>>> inverse_document_frequency(1, 3)
0.477
>>> inverse_document_frequency(0, 3)
Traceback (most recent call last):
...
ZeroDivisionError: df must be > 0
>>> inverse_document_frequency(0, 3,True)
1.477
"""
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined.")
return round(1 + log10(n / (1 + df)), 3)
if df == 0:
raise ZeroDivisionError("df must be > 0")
elif n == 0:
raise ValueError("log10(0) is undefined.")
return round(log10(n / df), 3)
def tf_idf(tf: int, idf: float) -> float:
"""
Combine the term frequency
and inverse document frequency functions to
calculate the originality of a term. This
'originality' is calculated by multiplying
the term frequency and the inverse document
frequency : tf-idf = TF * IDF
@params : tf, the term frequency, and idf, the inverse document
frequency
@examples :
>>> tf_idf(2, 0.477)
0.954
"""
return round(tf * idf, 3)
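# End-to-end sketch (illustrative, using a made-up two-document corpus):
# corpus = "the cat sat\nthe dog barked"
# tf = term_frequency("cat", "the cat sat")  # -> 1
# df, n = document_frequency("cat", corpus)  # -> (1, 2)
# idf = inverse_document_frequency(df, n)  # -> round(log10(2 / 1), 3) == 0.301
# tf_idf(tf, idf)  # -> 0.301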
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/support_vector_machines.py | machine_learning/support_vector_machines.py | import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
"""
Return the squared second norm of vector
norm_squared(v) = sum(x * x for x in v)
Args:
vector (ndarray): input vector
Returns:
float: squared second norm of vector
>>> int(norm_squared([1, 2]))
5
>>> int(norm_squared(np.asarray([1, 2])))
5
>>> int(norm_squared([0, 0]))
0
"""
return np.dot(vector, vector)
class SVC:
"""
Support Vector Classifier
Args:
kernel (str): kernel to use. Default: linear
Possible choices:
- linear
regularization: constraint for soft margin (data not linearly separable)
Default: unbound
>>> SVC(kernel="asdf")
Traceback (most recent call last):
...
ValueError: Unknown kernel: asdf
>>> SVC(kernel="rbf")
Traceback (most recent call last):
...
ValueError: rbf kernel requires gamma
>>> SVC(kernel="rbf", gamma=-1)
Traceback (most recent call last):
...
ValueError: gamma must be > 0
"""
def __init__(
self,
*,
regularization: float = np.inf,
kernel: str = "linear",
gamma: float = 0.0,
) -> None:
self.regularization = regularization
self.gamma = gamma
if kernel == "linear":
self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma")
if not isinstance(self.gamma, (float, int)):
raise ValueError("gamma must be float or int")
if not self.gamma > 0:
raise ValueError("gamma must be > 0")
self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1 / (n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
msg = f"Unknown kernel: {kernel}"
raise ValueError(msg)
# kernels
def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
"""Linear kernel (as if no kernel used at all)"""
return np.dot(vector1, vector2)
def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
"""
RBF: Radial Basis Function Kernel
Note: for more information see:
https://en.wikipedia.org/wiki/Radial_basis_function_kernel
Args:
vector1 (ndarray): first vector
            vector2 (ndarray): second vector
Returns:
float: exp(-(gamma * norm_squared(vector1 - vector2)))
"""
return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
def fit(self, observations: list[ndarray], classes: ndarray) -> None:
"""
Fits the SVC with a set of observations.
Args:
observations (list[ndarray]): list of observations
classes (ndarray): classification of each observation (in {1, -1})
"""
self.observations = observations
self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(n,) = np.shape(classes)
def to_minimize(candidate: ndarray) -> float:
"""
Opposite of the function to maximize
Args:
candidate (ndarray): candidate array to test
Return:
float: Wolfe's Dual result to minimize
"""
s = 0
(n,) = np.shape(candidate)
for i in range(n):
for j in range(n):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i], observations[j])
)
return 1 / 2 * s - sum(candidate)
        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
self.optimum = l_star
# calculating mean offset of separation plane to points
s = 0
for i in range(n):
for j in range(n):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i], observations[j]
)
self.offset = s / n
def predict(self, observation: ndarray) -> int:
"""
Get the expected class of an observation
Args:
observation (Vector): observation
Returns:
int {1, -1}: expected class
>>> xs = [
... np.asarray([0, 1]), np.asarray([0, 2]),
... np.asarray([1, 1]), np.asarray([1, 2])
... ]
>>> y = np.asarray([1, 1, -1, -1])
>>> s = SVC()
>>> s.fit(xs, y)
>>> s.predict(np.asarray([0, 1]))
1
>>> s.predict(np.asarray([1, 1]))
-1
>>> s.predict(np.asarray([2, 2]))
-1
"""
s = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n], observation)
for n in range(len(self.classes))
)
return 1 if s + self.offset >= 0 else -1
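# Usage sketch (added note, gamma chosen arbitrarily): the same toy data
# from the doctest above can also be fit with the rbf kernel:
# xs = [np.asarray([0, 1]), np.asarray([0, 2]),
#       np.asarray([1, 1]), np.asarray([1, 2])]
# y = np.asarray([1, 1, -1, -1])
# rbf_svc = SVC(kernel="rbf", gamma=1.0)
# rbf_svc.fit(xs, y)
# rbf_svc.predict(np.asarray([0, 1]))  # expected: 1 (its training label)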
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/dimensionality_reduction.py | machine_learning/dimensionality_reduction.py | # Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub
"""
Requirements:
- numpy version 1.21
- scipy version 1.3.3
Notes:
- Each column of the features matrix corresponds to a class item
"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
"""Function to reshape a row Numpy array into a column Numpy array
>>> input_array = np.array([1, 2, 3])
>>> column_reshape(input_array)
array([[1],
[2],
[3]])
"""
return input_array.reshape((input_array.size, 1))
def covariance_within_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
"""Function to compute the covariance matrix inside each class.
>>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> labels = np.array([0, 1, 0])
>>> covariance_within_classes(features, labels, 2)
array([[0.66666667, 0.66666667, 0.66666667],
[0.66666667, 0.66666667, 0.66666667],
[0.66666667, 0.66666667, 0.66666667]])
"""
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
data_mean = data.mean(1)
# Centralize the data of class i
centered_data = data - column_reshape(data_mean)
if i > 0:
            # covariance_sum has already been initialized (not the first loop)
covariance_sum += np.dot(centered_data, centered_data.T)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = np.dot(centered_data, centered_data.T)
return covariance_sum / features.shape[1]
def covariance_between_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
"""Function to compute the covariance matrix between multiple classes
>>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]])
>>> labels = np.array([0, 1, 0])
>>> covariance_between_classes(features, labels, 2)
array([[ 3.55555556, 1.77777778, -2.66666667],
[ 1.77777778, 0.88888889, -1.33333333],
[-2.66666667, -1.33333333, 2. ]])
"""
general_data_mean = features.mean(1)
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
device_data = data.shape[1]
data_mean = data.mean(1)
if i > 0:
            # covariance_sum has already been initialized (not the first loop)
covariance_sum += device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean),
(column_reshape(data_mean) - column_reshape(general_data_mean)).T,
)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean),
(column_reshape(data_mean) - column_reshape(general_data_mean)).T,
)
return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
"""
Principal Component Analysis.
For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis.
Parameters:
* features: the features extracted from the dataset
* dimensions: to filter the projected data for the desired dimension
>>> test_principal_component_analysis()
"""
# Check if the features have been loaded
if features.any():
data_mean = features.mean(1)
# Center the dataset
centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
_, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in reverse order (-1), then keep only the
        # first `dimensions` columns
filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
projected_data = np.dot(filtered_eigenvectors.T, features)
logging.info("Principal Component Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
logging.error("Dataset empty")
raise AssertionError
def linear_discriminant_analysis(
features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
"""
Linear Discriminant Analysis.
For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis.
Parameters:
* features: the features extracted from the dataset
* labels: the class labels of the features
* classes: the number of classes present in the dataset
* dimensions: to filter the projected data for the desired dimension
>>> test_linear_discriminant_analysis()
"""
# Check if the dimension desired is less than the number of classes
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_, eigenvectors = eigh(
covariance_between_classes(features, labels, classes),
covariance_within_classes(features, labels, classes),
)
filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
filtered_svd_matrix = svd_matrix[:, 0:dimensions]
projected_data = np.dot(filtered_svd_matrix.T, features)
logging.info("Linear Discriminant Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
logging.error("Dataset empty")
raise AssertionError
def test_linear_discriminant_analysis() -> None:
# Create dummy dataset with 2 classes and 3 features
features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
labels = np.array([0, 0, 0, 1, 1])
classes = 2
dimensions = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(AssertionError) as error_info: # noqa: PT012
projected_data = linear_discriminant_analysis(
features, labels, classes, dimensions
)
if isinstance(projected_data, np.ndarray):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes"
)
assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dimensions = 2
expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
with pytest.raises(AssertionError) as error_info: # noqa: PT012
output = principal_component_analysis(features, dimensions)
if not np.allclose(expected_output, output):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/data_transformations.py | machine_learning/data_transformations.py | """
Normalization.
Wikipedia: https://en.wikipedia.org/wiki/Normalization
Normalization is the process of converting numerical data to a standard range of values.
This range is typically between [0, 1] or [-1, 1]. The equation for normalization is
x_norm = (x - x_min)/(x_max - x_min) where x_norm is the normalized value, x is the
value, x_min is the minimum value within the column or list of data, and x_max is the
maximum value within the column or list of data. Normalization is used to speed up the
training of data and put all of the data on a similar scale. This is useful because
variance in the range of values of a dataset can heavily impact optimization
(particularly Gradient Descent).
Standardization Wikipedia: https://en.wikipedia.org/wiki/Standardization
Standardization is the process of converting numerical data to a normally distributed
range of values. This range will have a mean of 0 and standard deviation of 1. This is
also known as z-score normalization. The equation for standardization is
x_std = (x - mu)/(sigma) where mu is the mean of the column or list of values and sigma
is the standard deviation of the column or list of values.
Choosing between Normalization & Standardization is more of an art than a science,
but it
is often recommended to run experiments with both to see which performs better.
Additionally, a few rules of thumb are:
1. gaussian (normal) distributions work better with standardization
2. non-gaussian (non-normal) distributions work better with normalization
3. If a column or list of values has extreme values / outliers, use standardization
"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
"""
Return a normalized list of values.
@params: data, a list of values to normalize
@returns: a list of normalized values (rounded to ndigits decimal places)
@examples:
>>> normalization([2, 7, 10, 20, 30, 50])
[0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
>>> normalization([5, 10, 15, 20, 25])
[0.0, 0.25, 0.5, 0.75, 1.0]
"""
# variables for calculation
x_min = min(data)
x_max = max(data)
# normalize data
return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
"""
Return a standardized list of values.
@params: data, a list of values to standardize
@returns: a list of standardized values (rounded to ndigits decimal places)
@examples:
>>> standardization([2, 7, 10, 20, 30, 50])
[-0.999, -0.719, -0.551, 0.009, 0.57, 1.69]
>>> standardization([5, 10, 15, 20, 25])
[-1.265, -0.632, 0.0, 0.632, 1.265]
"""
# variables for calculation
mu = mean(data)
sigma = stdev(data)
# standardize data
return [round((x - mu) / (sigma), ndigits) for x in data]
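# Side-by-side sketch (illustrative): the same data on both scales.
# data = [2, 7, 10, 20, 30, 50]
# normalization(data)    # squeezed into [0, 1]
# standardization(data)  # centred on 0 with unit standard deviation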
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/linear_discriminant_analysis.py | machine_learning/linear_discriminant_analysis.py | """
Linear Discriminant Analysis
Assumptions About Data :
    1. The input variables have a gaussian distribution.
    2. The variance calculated for each input variable by class grouping is
        the same.
    3. The mix of classes in your training set is representative of the problem.
Learning The Model :
    The LDA model requires the estimation of statistics from the training data :
        1. Mean of each input value for each class.
        2. Probability of an instance belonging to each class.
        3. Covariance of the input data for each class
Calculate the class means :
mean(x) = 1/n ( for i = 1 to i = n --> sum(xi))
Calculate the class probabilities :
P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1))
P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1))
Calculate the variance :
We can calculate the variance for dataset in two steps :
1. Calculate the squared difference for each input variable from the
group mean.
2. Calculate the mean of the squared difference.
------------------------------------------------
Squared_Difference = (x - mean(k)) ** 2
Variance = (1 / (count(x) - count(classes))) *
(for i = 1 to i = n --> sum(Squared_Difference(xi)))
Making Predictions :
discriminant(x) = x * (mean / variance) -
((mean ** 2) / (2 * variance)) + Ln(probability)
---------------------------------------------------------------------------
After calculating the discriminant value for each class, the class with the
largest discriminant value is taken as the prediction.
Author: @EverLookNeverSee
"""
from collections.abc import Callable
from math import log
from os import name, system
from random import gauss, seed
from typing import TypeVar
# Make a training dataset drawn from a gaussian distribution
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
"""
Generate gaussian distribution instances based-on given mean and standard deviation
:param mean: mean value of class
:param std_dev: value of standard deviation entered by usr or default value of it
:param instance_count: instance number of class
:return: a list containing generated values based-on given mean, std_dev and
instance_count
>>> gaussian_distribution(5.0, 1.0, 20) # doctest: +NORMALIZE_WHITESPACE
[6.288184753155463, 6.4494456086997705, 5.066335808938262, 4.235456349028368,
3.9078267848958586, 5.031334516831717, 3.977896829989127, 3.56317055489747,
5.199311976483754, 5.133374604658605, 5.546468300338232, 4.086029056264687,
5.005005283626573, 4.935258239627312, 3.494170998739258, 5.537997178661033,
5.320711100998849, 7.3891120432406865, 5.202969177309964, 4.855297691835079]
"""
seed(1)
return [gauss(mean, std_dev) for _ in range(instance_count)]
# Make corresponding Y flags to detecting classes
def y_generator(class_count: int, instance_count: list) -> list:
"""
Generate y values for corresponding classes
:param class_count: Number of classes(data groupings) in dataset
:param instance_count: number of instances in class
:return: corresponding values for data groupings in dataset
>>> y_generator(1, [10])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> y_generator(2, [5, 10])
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> y_generator(4, [10, 5, 15, 20]) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
"""
return [k for k in range(class_count) for _ in range(instance_count[k])]
# Calculate the class means
def calculate_mean(instance_count: int, items: list) -> float:
"""
Calculate given class mean
:param instance_count: Number of instances in class
:param items: items that related to specific class(data grouping)
:return: calculated actual mean of considered class
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> calculate_mean(len(items), items)
5.011267842911003
"""
# the sum of all items divided by number of instances
return sum(items) / instance_count
# Calculate the class probabilities
def calculate_probabilities(instance_count: int, total_count: int) -> float:
"""
Calculate the probability that a given instance will belong to which class
:param instance_count: number of instances in class
:param total_count: the number of all instances
:return: value of probability for considered class
>>> calculate_probabilities(20, 60)
0.3333333333333333
>>> calculate_probabilities(30, 100)
0.3
"""
# number of instances in specific class divided by number of all instances
return instance_count / total_count
# Calculate the variance
def calculate_variance(items: list, means: list, total_count: int) -> float:
"""
Calculate the variance
:param items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param total_count: the number of all instances
:return: calculated variance for considered dataset
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> means = [5.011267842911003]
>>> total_count = 20
>>> calculate_variance([items], means, total_count)
0.9618530973487491
"""
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
# for loop iterates over number of elements in inner layer of items
for j in range(len(items[i])):
# appending squared differences to 'squared_diff' list
squared_diff.append((items[i][j] - means[i]) ** 2)
# one divided by (the number of all instances - number of classes) multiplied by
# sum of all squared differences
n_classes = len(means) # Number of classes in dataset
return 1 / (total_count - n_classes) * sum(squared_diff)
# Making predictions
def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
"""This function predicts new indexes(groups for our data)
:param x_items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param variance: calculated value of variance by calculate_variance function
:param probabilities: a list containing all probabilities of classes
:return: a list containing predicted Y values
>>> x_items = [[6.288184753155463, 6.4494456086997705, 5.066335808938262,
... 4.235456349028368, 3.9078267848958586, 5.031334516831717,
... 3.977896829989127, 3.56317055489747, 5.199311976483754,
... 5.133374604658605, 5.546468300338232, 4.086029056264687,
... 5.005005283626573, 4.935258239627312, 3.494170998739258,
... 5.537997178661033, 5.320711100998849, 7.3891120432406865,
... 5.202969177309964, 4.855297691835079], [11.288184753155463,
... 11.44944560869977, 10.066335808938263, 9.235456349028368,
... 8.907826784895859, 10.031334516831716, 8.977896829989128,
... 8.56317055489747, 10.199311976483754, 10.133374604658606,
... 10.546468300338232, 9.086029056264687, 10.005005283626572,
... 9.935258239627313, 8.494170998739259, 10.537997178661033,
... 10.320711100998848, 12.389112043240686, 10.202969177309964,
... 9.85529769183508], [16.288184753155463, 16.449445608699772,
... 15.066335808938263, 14.235456349028368, 13.907826784895859,
... 15.031334516831716, 13.977896829989128, 13.56317055489747,
... 15.199311976483754, 15.133374604658606, 15.546468300338232,
... 14.086029056264687, 15.005005283626572, 14.935258239627313,
... 13.494170998739259, 15.537997178661033, 15.320711100998848,
... 17.389112043240686, 15.202969177309964, 14.85529769183508]]
>>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
>>> variance = 0.9618530973487494
>>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
>>> predict_y_values(x_items, means, variance,
... probabilities) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2]
"""
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
# for loop iterates over number of elements in list
for i in range(len(x_items)):
# for loop iterates over number of inner items of each element
for j in range(len(x_items[i])):
temp = [] # to store all discriminant values of each item as a list
# for loop iterates over number of classes we have in our dataset
for k in range(len(x_items)):
# appending values of discriminants for each class to 'temp' list
temp.append(
x_items[i][j] * (means[k] / variance)
- (means[k] ** 2 / (2 * variance))
+ log(probabilities[k])
)
# appending discriminant values of each item to 'results' list
results.append(temp)
return [result.index(max(result)) for result in results]
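# Worked discriminant (added sketch, values assumed): for a single value
# x = 5.0 in a class with mean 5.0, shared variance 1.0 and prior 1/3,
# discriminant(x) = 5.0 * 5.0 / 1.0 - 5.0**2 / (2 * 1.0) + log(1 / 3)
#                 ~= 11.40, and the class with the largest such value wins.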
# Calculating Accuracy
def accuracy(actual_y: list, predicted_y: list) -> float:
"""
Calculate the value of accuracy based-on predictions
:param actual_y:a list containing initial Y values generated by 'y_generator'
function
:param predicted_y: a list containing predicted Y values generated by
'predict_y_values' function
:return: percentage of accuracy
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
... 1, 1 ,1 ,1 ,1 ,1 ,1]
>>> predicted_y = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,
... 0, 0, 1, 1, 1, 0, 1, 1, 1]
>>> accuracy(actual_y, predicted_y)
50.0
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> predicted_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> accuracy(actual_y, predicted_y)
100.0
"""
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of correct predictions divided by number
# of all data and multiplied by 100
return (correct / len(actual_y)) * 100
num = TypeVar("num")
def valid_input(
input_type: Callable[[object], num], # Usually float or int
input_msg: str,
err_msg: str,
condition: Callable[[num], bool] = lambda _: True,
default: str | None = None,
) -> num:
"""
Ask for user value and validate that it fulfill a condition.
:input_type: user input expected type of value
:input_msg: message to show user in the screen
:err_msg: message to show in the screen in case of error
:condition: function that represents the condition that user input is valid.
:default: Default value in case the user does not type anything
:return: user's input
"""
    while True:
        # Read the raw value first so the error message below can echo it
        # even when the type conversion fails
        raw_value = input(input_msg).strip() or default
        try:
            user_input = input_type(raw_value)
            if condition(user_input):
                return user_input
            else:
                print(f"{user_input}: {err_msg}")
                continue
        except ValueError:
            print(
                f"{raw_value}: Incorrect input type, "
                f"expected {input_type.__name__!r}"
            )
# Main Function
def main():
"""This function starts execution phase"""
while True:
print(" Linear Discriminant Analysis ".center(50, "*"))
print("*" * 50, "\n")
print("First of all we should specify the number of classes that")
print("we want to generate as training dataset")
# Trying to get number of classes
n_classes = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg="Enter the number of classes (Data Groupings): ",
err_msg="Number of classes should be positive!",
)
print("-" * 100)
# Trying to get the value of standard deviation
std_dev = valid_input(
input_type=float,
condition=lambda x: x >= 0,
input_msg=(
"Enter the value of standard deviation"
"(Default value is 1.0 for all classes): "
),
err_msg="Standard deviation should not be negative!",
default="1.0",
)
print("-" * 100)
# Trying to get number of instances in classes and theirs means to generate
# dataset
counts = [] # An empty list to store instance counts of classes in dataset
for i in range(n_classes):
user_count = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg=(f"Enter The number of instances for class_{i + 1}: "),
err_msg="Number of instances should be positive!",
)
counts.append(user_count)
print("-" * 100)
# An empty list to store values of user-entered means of classes
user_means = []
for a in range(n_classes):
user_mean = valid_input(
input_type=float,
input_msg=(f"Enter the value of mean for class_{a + 1}: "),
err_msg="This is an invalid value.",
)
user_means.append(user_mean)
print("-" * 100)
print("Standard deviation: ", std_dev)
# print out the number of instances in classes in separated line
for i, count in enumerate(counts, 1):
print(f"Number of instances in class_{i} is: {count}")
print("-" * 100)
# print out mean values of classes separated line
for i, user_mean in enumerate(user_means, 1):
print(f"Mean of class_{i} is: {user_mean}")
print("-" * 100)
# Generating training dataset drawn from gaussian distribution
x = [
gaussian_distribution(user_means[j], std_dev, counts[j])
for j in range(n_classes)
]
print("Generated Normal Distribution: \n", x)
print("-" * 100)
# Generating Ys to detecting corresponding classes
y = y_generator(n_classes, counts)
print("Generated Corresponding Ys: \n", y)
print("-" * 100)
# Calculating the value of actual mean for each class
actual_means = [calculate_mean(counts[k], x[k]) for k in range(n_classes)]
# for loop iterates over number of elements in 'actual_means' list and print
# out them in separated line
for i, actual_mean in enumerate(actual_means, 1):
print(f"Actual(Real) mean of class_{i} is: {actual_mean}")
print("-" * 100)
# Calculating the value of probabilities for each class
probabilities = [
calculate_probabilities(counts[i], sum(counts)) for i in range(n_classes)
]
# for loop iterates over number of elements in 'probabilities' list and print
# out them in separated line
for i, probability in enumerate(probabilities, 1):
print(f"Probability of class_{i} is: {probability}")
print("-" * 100)
# Calculating the values of variance for each class
variance = calculate_variance(x, actual_means, sum(counts))
print("Variance: ", variance)
print("-" * 100)
# Predicting Y values
# storing predicted Y values in 'pre_indexes' variable
pre_indexes = predict_y_values(x, actual_means, variance, probabilities)
print("-" * 100)
# Calculating Accuracy of the model
print(f"Accuracy: {accuracy(y, pre_indexes)}")
print("-" * 100)
print(" DONE ".center(100, "+"))
if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
print("\n" + "GoodBye!".center(100, "-") + "\n")
break
system("cls" if name == "nt" else "clear") # noqa: S605
if __name__ == "__main__":
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/self_organizing_map.py | machine_learning/self_organizing_map.py | """
https://en.wikipedia.org/wiki/Self-organizing_map
"""
import math
class SelfOrganizingMap:
def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
"""
Compute the winning vector by Euclidean distance
        >>> SelfOrganizingMap().get_winner([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
        0
        """
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # The winner is the weight vector closest to the sample
        return 0 if d0 < d1 else 1
def update(
self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
) -> list[list[int | float]]:
"""
Update the winning vector.
        >>> SelfOrganizingMap().update([[1, 2, 3], [4, 5, 6]], [1, 2, 3], 1, 0.1)
        [[1, 2, 3], [3.7, 4.7, 5.7]]
        """
        # Move every component of the winning vector towards the sample
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
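# Worked step (added note): with alpha = 0.5, each component of the winning
# vector moves halfway towards the sample, e.g. a weight of 0.2 and a sample
# component of 1 give 0.2 + 0.5 * (1 - 0.2) == 0.6.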
# Driver code
def main() -> None:
# Training Examples ( m, n )
training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
self_organizing_map = SelfOrganizingMap()
epochs = 3
alpha = 0.5
for _ in range(epochs):
for j in range(len(training_samples)):
# training sample
sample = training_samples[j]
# Compute the winning vector
winner = self_organizing_map.get_winner(weights, sample)
# Update the winning vector
weights = self_organizing_map.update(weights, sample, winner, alpha)
# classify test sample
sample = [0, 0, 0, 1]
winner = self_organizing_map.get_winner(weights, sample)
# results
print(f"Clusters that the test sample belongs to : {winner}")
print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/k_means_clust.py | machine_learning/k_means_clust.py | """README, Author - Anurag Kumar(mailto:anuragkumarak95@gmail.com)
Requirements:
- sklearn
- numpy
- matplotlib
Python:
- 3.5
Inputs:
- X , a 2D numpy array of features.
- k , number of clusters to create.
- initial_centroids , initial centroid values generated by utility function(mentioned
in usage).
- maxiter , maximum number of iterations to process.
- heterogeneity , empty list that will be filled with heterogeneity values if passed
to kmeans func.
Usage:
1. define 'k' value, 'X' features array and 'heterogeneity' empty list
2. create initial_centroids,
initial_centroids = get_initial_centroids(
X,
k,
seed=0 # seed value for initial centroid generation,
# None for randomness(default=None)
)
3. find centroids and clusters using kmeans function.
centroids, cluster_assignment = kmeans(
X,
k,
initial_centroids,
maxiter=400,
record_heterogeneity=heterogeneity,
verbose=True # whether to print logs in console or not.(default=False)
)
4. Plot the loss function and heterogeneity values for every iteration saved in
heterogeneity list.
plot_heterogeneity(
heterogeneity,
k
)
5. Plot the labeled 3D data points with centroids.
plot_kmeans(
X,
centroids,
cluster_assignment
)
6. Generate a summary report from the clustered dataframe using
    report_generator; the dataframe must have a column called 'Cluster'
    containing the k-means cluster numbers.
"""
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances
warnings.filterwarnings("ignore")
TAG = "K-MEANS-CLUST/ "
def get_initial_centroids(data, k, seed=None):
"""Randomly choose k data points as initial centroids"""
# useful for obtaining consistent results
rng = np.random.default_rng(seed)
n = data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = rng.integers(0, n, k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = data[rand_indices, :]
return centroids
def centroid_pairwise_dist(x, centroids):
return pairwise_distances(x, centroids, metric="euclidean")
def assign_clusters(data, centroids):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = centroid_pairwise_dist(data, centroids)
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.argmin(distances_from_centroids, axis=1)
return cluster_assignment
def revise_centroids(data, k, cluster_assignment):
new_centroids = []
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
return new_centroids
def compute_heterogeneity(data, k, centroids, cluster_assignment):
heterogeneity = 0.0
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(
member_data_points, [centroids[i]], metric="euclidean"
)
squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
return heterogeneity
def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7, 4))
plt.plot(heterogeneity, linewidth=4)
plt.xlabel("# Iterations")
plt.ylabel("Heterogeneity")
plt.title(f"Heterogeneity of clustering over time, K={k:d}")
plt.rcParams.update({"font.size": 16})
plt.show()
def plot_kmeans(data, centroids, cluster_assignment):
ax = plt.axes(projection="3d")
ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=cluster_assignment, cmap="viridis")
ax.scatter(
centroids[:, 0], centroids[:, 1], centroids[:, 2], c="red", s=100, marker="x"
)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_title("3D K-Means Clustering Visualization")
plt.show()
def kmeans(
data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False
):
"""Runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.(default=500)
record_heterogeneity: (optional) a list, to store the history of heterogeneity
as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in
each iteration"""
centroids = initial_centroids[:]
prev_cluster_assignment = None
for itr in range(maxiter):
if verbose:
print(itr, end="")
# 1. Make cluster assignments using nearest centroids
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data
# points assigned to that cluster.
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop
if (
prev_cluster_assignment is not None
and (prev_cluster_assignment == cluster_assignment).all()
):
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
if verbose:
print(
f" {num_changed:5d} elements changed their cluster assignment."
)
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
return centroids, cluster_assignment
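# Minimal usage sketch (mirrors the mock test below, assuming sklearn's iris):
# from sklearn import datasets as ds
# data = ds.load_iris()["data"]
# centers = get_initial_centroids(data, k=3, seed=0)
# centroids, assignment = kmeans(data, 3, centers, maxiter=400)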
# Mock test below
if False:  # change to True to run this test case.
from sklearn import datasets as ds
dataset = ds.load_iris()
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(dataset["data"], k, seed=0)
centroids, cluster_assignment = kmeans(
dataset["data"],
k,
initial_centroids,
maxiter=400,
record_heterogeneity=heterogeneity,
verbose=True,
)
plot_heterogeneity(heterogeneity, k)
plot_kmeans(dataset["data"], centroids, cluster_assignment)
def report_generator(
predicted: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None
) -> pd.DataFrame:
"""
Generate a clustering report given these two arguments:
predicted - dataframe with predicted cluster column
fill_missing_report - dictionary of rules on how we are going to fill in missing
values for final generated report (not included in modelling);
>>> predicted = pd.DataFrame()
>>> predicted['numbers'] = [1, 2, 3]
>>> predicted['col1'] = [0.5, 2.5, 4.5]
>>> predicted['col2'] = [100, 200, 300]
>>> predicted['col3'] = [10, 20, 30]
>>> predicted['Cluster'] = [1, 1, 2]
>>> report_generator(predicted, ['col1', 'col2'], 0)
Features Type Mark 1 2
0 # of Customers ClusterSize False 2.000000 1.000000
1 % of Customers ClusterProportion False 0.666667 0.333333
2 col1 mean_with_zeros True 1.500000 4.500000
3 col2 mean_with_zeros True 150.000000 300.000000
4 numbers mean_with_zeros False 1.500000 3.000000
.. ... ... ... ... ...
99 dummy 5% False 1.000000 1.000000
100 dummy 95% False 1.000000 1.000000
101 dummy stdev False 0.000000 NaN
102 dummy mode False 1.000000 1.000000
103 dummy median False 1.000000 1.000000
<BLANKLINE>
[104 rows x 5 columns]
"""
# Fill missing values with given rules
if fill_missing_report:
predicted = predicted.fillna(value=fill_missing_report)
predicted["dummy"] = 1
numeric_cols = predicted.select_dtypes(np.number).columns
report = (
predicted.groupby(["Cluster"])[ # construct report dataframe
numeric_cols
] # group by cluster number
.agg(
[
("sum", "sum"),
("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))),
("mean_without_zeros", lambda x: x.replace(0, np.nan).mean()),
(
"mean_25-75",
lambda x: np.mean(
np.nan_to_num(
sorted(x)[
round(len(x) * 25 / 100) : round(len(x) * 75 / 100)
]
)
),
),
("mean_with_na", "mean"),
("min", lambda x: x.min()),
("5%", lambda x: x.quantile(0.05)),
("25%", lambda x: x.quantile(0.25)),
("50%", lambda x: x.quantile(0.50)),
("75%", lambda x: x.quantile(0.75)),
("95%", lambda x: x.quantile(0.95)),
("max", lambda x: x.max()),
("count", lambda x: x.count()),
("stdev", lambda x: x.std()),
("mode", lambda x: x.mode()[0]),
("median", lambda x: x.median()),
("# > 0", lambda x: (x > 0).sum()),
]
)
.T.reset_index()
.rename(index=str, columns={"level_0": "Features", "level_1": "Type"})
) # rename columns
# calculate the size of cluster(count of clientID's)
# avoid SettingWithCopyWarning
clustersize = report[
(report["Features"] == "dummy") & (report["Type"] == "count")
].copy()
# rename created predicted cluster to match report column names
clustersize.Type = "ClusterSize"
clustersize.Features = "# of Customers"
# calculating the proportion of cluster
clusterproportion = pd.DataFrame(
clustersize.iloc[:, 2:].to_numpy() / clustersize.iloc[:, 2:].to_numpy().sum()
)
# rename created predicted cluster to match report column names
clusterproportion["Type"] = "% of Customers"
clusterproportion["Features"] = "ClusterProportion"
cols = clusterproportion.columns.tolist()
cols = cols[-2:] + cols[:-2]
clusterproportion = clusterproportion[cols] # rearrange columns to match report
clusterproportion.columns = report.columns
# generating dataframe with count of nan values
a = pd.DataFrame(
abs(
report[report["Type"] == "count"].iloc[:, 2:].to_numpy()
- clustersize.iloc[:, 2:].to_numpy()
)
)
a["Features"] = 0
a["Type"] = "# of nan"
# filling values in order to match report
a.Features = report[report["Type"] == "count"].Features.tolist()
cols = a.columns.tolist()
cols = cols[-2:] + cols[:-2]
a = a[cols] # rearrange columns to match report
a.columns = report.columns # rename columns to match report
# drop count values except for cluster size
report = report.drop(report[report.Type == "count"].index)
# concat report with cluster size and nan values
report = pd.concat([report, a, clustersize, clusterproportion], axis=0)
report["Mark"] = report["Features"].isin(clustering_variables)
cols = report.columns.tolist()
cols = cols[0:2] + cols[-1:] + cols[2:-1]
report = report[cols]
sorter1 = {
"ClusterSize": 9,
"ClusterProportion": 8,
"mean_with_zeros": 7,
"mean_with_na": 6,
"max": 5,
"50%": 4,
"min": 3,
"25%": 2,
"75%": 1,
"# of nan": 0,
"# > 0": -1,
"sum_with_na": -2,
}
report = (
report.assign(
Sorter1=lambda x: x.Type.map(sorter1),
Sorter2=lambda x: list(reversed(range(len(x)))),
)
.sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False)
.drop(["Sorter1", "Sorter2"], axis=1)
)
report.columns.name = ""
report = report.reset_index()
report = report.drop(columns=["index"])
return report
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/gradient_boosting_classifier.py | machine_learning/gradient_boosting_classifier.py | import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
class GradientBoostingClassifier:
def __init__(self, n_estimators: int = 100, learning_rate: float = 0.1) -> None:
"""
Initialize a GradientBoostingClassifier.
Parameters:
- n_estimators (int): The number of weak learners to train.
- learning_rate (float): The learning rate for updating the model.
Attributes:
- n_estimators (int): The number of weak learners.
- learning_rate (float): The learning rate.
- models (list): A list to store the trained weak learners.
"""
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.models: list[tuple[DecisionTreeRegressor, float]] = []
def fit(self, features: np.ndarray, target: np.ndarray) -> None:
"""
Fit the GradientBoostingClassifier to the training data.
Parameters:
- features (np.ndarray): The training features.
- target (np.ndarray): The target values.
Returns:
None
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
>>> iris = load_iris()
>>> X, y = iris.data, iris.target
>>> clf.fit(X, y)
>>> # Check if the model is trained
>>> len(clf.models) == 100
True
"""
for _ in range(self.n_estimators):
# Calculate the pseudo-residuals
residuals = -self.gradient(target, self.predict(features))
# Fit a weak learner (e.g., decision tree) to the residuals
model = DecisionTreeRegressor(max_depth=1)
model.fit(features, residuals)
# Update the model by adding the weak learner with a learning rate
self.models.append((model, self.learning_rate))
def predict(self, features: np.ndarray) -> np.ndarray:
"""
Make predictions on input data.
Parameters:
- features (np.ndarray): The input data for making predictions.
Returns:
- np.ndarray: An array of binary predictions (-1 or 1).
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
>>> iris = load_iris()
>>> X, y = iris.data, iris.target
>>> clf.fit(X, y)
>>> y_pred = clf.predict(X)
>>> # Check if the predictions have the correct shape
>>> y_pred.shape == y.shape
True
"""
# Initialize predictions with zeros
predictions = np.zeros(features.shape[0])
for model, learning_rate in self.models:
predictions += learning_rate * model.predict(features)
return np.sign(predictions) # Convert to binary predictions (-1 or 1)
def gradient(self, target: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
"""
Calculate the negative gradient (pseudo-residuals) for logistic loss.
Parameters:
- target (np.ndarray): The target values.
- y_pred (np.ndarray): The predicted values.
Returns:
- np.ndarray: An array of pseudo-residuals.
>>> import numpy as np
>>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
>>> target = np.array([0, 1, 0, 1])
>>> y_pred = np.array([0.2, 0.8, 0.3, 0.7])
>>> residuals = clf.gradient(target, y_pred)
>>> # Check if residuals have the correct shape
>>> residuals.shape == target.shape
True
"""
return -target / (1 + np.exp(target * y_pred))
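# Note (added): the logistic-loss gradient above assumes binary targets
# encoded as -1/+1; the iris demo below feeds in raw 0/1/2 labels only to
# exercise the API, not to produce a calibrated multi-class model.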
if __name__ == "__main__":
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/scoring_functions.py | machine_learning/scoring_functions.py | import numpy as np
""" Here I implemented the scoring functions.
MAE, MSE, RMSE, RMSLE are included.
Those are used for calculating differences between
predicted values and actual values.
Metrics are slightly differentiated. Sometimes squared, rooted,
even log is used.
Using log and roots can be perceived as tools for penalizing big
errors. However, using appropriate metrics depends on the situations,
and types of data
"""
# Mean Absolute Error
def mae(predict, actual):
"""
Examples (rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
>>> float(np.around(mae(predict,actual),decimals = 2))
0.67
>>> actual = [1,1,1];predict = [1,1,1]
>>> float(mae(predict,actual))
0.0
"""
predict = np.array(predict)
actual = np.array(actual)
difference = abs(predict - actual)
score = difference.mean()
return score
# Mean Squared Error
def mse(predict, actual):
"""
Examples (rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
>>> float(np.around(mse(predict,actual),decimals = 2))
1.33
>>> actual = [1,1,1];predict = [1,1,1]
>>> float(mse(predict,actual))
0.0
"""
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
square_diff = np.square(difference)
score = square_diff.mean()
return score
# Root Mean Squared Error
def rmse(predict, actual):
"""
Examples (rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
>>> float(np.around(rmse(predict,actual),decimals = 2))
1.15
>>> actual = [1,1,1];predict = [1,1,1]
>>> float(rmse(predict,actual))
0.0
"""
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
square_diff = np.square(difference)
mean_square_diff = square_diff.mean()
score = np.sqrt(mean_square_diff)
return score
# Root Mean Square Logarithmic Error
def rmsle(predict, actual):
"""
Examples (rounded for precision):
>>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2))
0.75
>>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1]))
0.0
"""
predict = np.array(predict)
actual = np.array(actual)
log_predict = np.log(predict + 1)
log_actual = np.log(actual + 1)
difference = log_predict - log_actual
square_diff = np.square(difference)
mean_square_diff = square_diff.mean()
score = np.sqrt(mean_square_diff)
return score
# Mean Bias Deviation
def mbd(predict, actual):
"""
This value is negative if the model underpredicts
and positive if it overpredicts.
Example (rounded for precision):
Here the model overpredicts
>>> actual = [1,2,3];predict = [2,3,4]
>>> float(np.around(mbd(predict,actual),decimals = 2))
50.0
Here the model underpredicts
>>> actual = [1,2,3];predict = [0,1,1]
>>> float(np.around(mbd(predict,actual),decimals = 2))
-66.67
"""
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
numerator = np.sum(difference) / len(predict)
denominator = np.sum(actual) / len(predict)
# print(numerator, denominator)
score = float(numerator) / denominator * 100
return score
def manual_accuracy(predict, actual):
return np.mean(np.array(actual) == np.array(predict))
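if __name__ == "__main__":
    # Hedged demo, not part of the original module: evaluate every metric on
    # one small prediction/actual pair to show how the penalties compare.
    actual = [1, 2, 3, 4]
    predict = [1, 4, 3, 5]
    for name, scorer in (
        ("MAE", mae),
        ("MSE", mse),
        ("RMSE", rmse),
        ("RMSLE", rmsle),
        ("MBD", mbd),
    ):
        print(f"{name}: {scorer(predict, actual):.4f}")
    print(f"Accuracy: {manual_accuracy(predict, actual):.4f}")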
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/gradient_descent.py | machine_learning/gradient_descent.py | """
Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis
function.
"""
import numpy as np
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
"""
:param data_set: train data or test data
:param example_no: example number whose error has to be checked
:return: error in the example pointed to by example_no.
"""
return calculate_hypothesis_value(example_no, data_set) - output(
example_no, data_set
)
def _hypothesis_value(data_input_tuple):
"""
Calculates hypothesis function value for a given input
:param data_input_tuple: Input tuple of a particular example
:return: Value of hypothesis function at that point.
Note that there is a 'bias input' whose value is fixed as 1.
It is not explicitly mentioned in the input data, but ML hypothesis functions
use it, so we have to take care of it separately; the line below that adds
parameter_vector[0] handles it.
"""
hyp_val = 0
for i in range(len(parameter_vector) - 1):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def output(example_no, data_set):
"""
:param data_set: test data or train data
:param example_no: example whose output is to be fetched
:return: output for that example
"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def calculate_hypothesis_value(example_no, data_set):
"""
Calculates hypothesis value for a given example
:param data_set: test data or train_data
:param example_no: example whose hypothesis value is to be calculated
:return: hypothesis value for that example
"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0])
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0])
return None
def summation_of_cost_derivative(index, end=m):
"""
Calculates the sum of cost function derivative
:param index: index of the parameter with respect to which the derivative is
calculated
:param end: value where summation ends, default is m, the number of examples
:return: the summation of the cost derivative
Note: If index is -1, we are calculating the summation with respect to the
bias parameter.
"""
summation_value = 0
for i in range(end):
if index == -1:
summation_value += _error(i)
else:
summation_value += _error(i) * train_data[i][0][index]
return summation_value
def get_cost_derivative(index):
"""
:param index: index of the parameter vector with respect to which the
derivative is to be calculated
:return: derivative with respect to that index
Note: If index is -1, we are calculating the summation with respect to the
bias parameter.
"""
cost_derivative_value = summation_of_cost_derivative(index, m) / m
return cost_derivative_value
def run_gradient_descent():
global parameter_vector
# Tune these values to set a tolerance value for predicted output
absolute_error_limit = 0.000002
relative_error_limit = 0
j = 0
while True:
j += 1
temp_parameter_vector = [0, 0, 0, 0]
for i in range(len(parameter_vector)):
cost_derivative = get_cost_derivative(i - 1)
temp_parameter_vector[i] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if np.allclose(
parameter_vector,
temp_parameter_vector,
atol=absolute_error_limit,
rtol=relative_error_limit,
):
break
parameter_vector = temp_parameter_vector
print(("Number of iterations:", j))
def test_gradient_descent():
for i in range(len(test_data)):
print(("Actual output value:", output(i, "test")))
print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
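    # Hedged vectorized sketch, not in the original script: the same batch
    # gradient written with NumPy arrays, for comparison with the loop-based
    # helpers above; it reuses the converged global parameter_vector.
    x_mat = np.array([[1.0, *features] for features, _ in train_data])
    y_vec = np.array([label for _, label in train_data], dtype=float)
    theta = np.array(parameter_vector, dtype=float)
    batch_gradient = x_mat.T @ (x_mat @ theta - y_vec) / len(y_vec)
    print("Vectorized batch gradient at convergence:", batch_gradient)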
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/decision_tree.py | machine_learning/decision_tree.py | """
Implementation of a basic regression decision tree.
Input data set: The input data set must be 1-dimensional with continuous labels.
Output: The decision tree maps a real number input to a real number output.
"""
import numpy as np
class DecisionTree:
def __init__(self, depth=5, min_leaf_size=5):
self.depth = depth
self.decision_boundary = 0
self.left = None
self.right = None
self.min_leaf_size = min_leaf_size
self.prediction = None
def mean_squared_error(self, labels, prediction):
"""
mean_squared_error:
@param labels: a one-dimensional numpy array
@param prediction: a floating point value
return value: mean_squared_error calculates the error if prediction is used to
estimate the labels
>>> tester = DecisionTree()
>>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
>>> test_prediction = float(6)
>>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
... TestDecisionTree.helper_mean_squared_error_test(test_labels,
... test_prediction)))
True
>>> test_labels = np.array([1,2,3])
>>> test_prediction = float(2)
>>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
... TestDecisionTree.helper_mean_squared_error_test(test_labels,
... test_prediction)))
True
"""
if labels.ndim != 1:
print("Error: Input labels must be one dimensional")
return np.mean((labels - prediction) ** 2)
def train(self, x, y):
"""
train:
@param x: a one-dimensional numpy array
@param y: a one-dimensional numpy array.
The contents of y are the labels for the corresponding X values
train() does not have a return value
Examples:
1. Try to train when x & y are of same length & 1 dimensions (No errors)
>>> dt = DecisionTree()
>>> dt.train(np.array([10,20,30,40,50]),np.array([0,0,0,1,1]))
2. Try to train when x is 2 dimensions
>>> dt = DecisionTree()
>>> dt.train(np.array([[1,2,3,4,5],[1,2,3,4,5]]),np.array([0,0,0,1,1]))
Traceback (most recent call last):
...
ValueError: Input data set must be one-dimensional
3. Try to train when x and y are not of the same length
>>> dt = DecisionTree()
>>> dt.train(np.array([1,2,3,4,5]),np.array([[0,0,0,1,1],[0,0,0,1,1]]))
Traceback (most recent call last):
...
ValueError: x and y have different lengths
4. Try to train when x & y are of the same length but different dimensions
>>> dt = DecisionTree()
>>> dt.train(np.array([1,2,3,4,5]),np.array([[1],[2],[3],[4],[5]]))
Traceback (most recent call last):
...
ValueError: Data set labels must be one-dimensional
This section is to check that the inputs conform to our dimensionality
constraints
"""
if x.ndim != 1:
raise ValueError("Input data set must be one-dimensional")
if len(x) != len(y):
raise ValueError("x and y have different lengths")
if y.ndim != 1:
raise ValueError("Data set labels must be one-dimensional")
if len(x) < 2 * self.min_leaf_size:
self.prediction = np.mean(y)
return
if self.depth == 1:
self.prediction = np.mean(y)
return
best_split = 0
min_error = self.mean_squared_error(y, np.mean(y)) * 2
"""
loop over all possible splits for the decision tree. find the best split.
if no split exists that is less than 2 * error for the entire array
then the data set is not split and the average for the entire array is used as
the predictor
"""
for i in range(len(x)):
if len(x[:i]) < self.min_leaf_size: # noqa: SIM114
continue
elif len(x[i:]) < self.min_leaf_size:
continue
else:
error_left = self.mean_squared_error(y[:i], np.mean(y[:i]))
error_right = self.mean_squared_error(y[i:], np.mean(y[i:]))
error = error_left + error_right
if error < min_error:
best_split = i
min_error = error
if best_split != 0:
left_x = x[:best_split]
left_y = y[:best_split]
right_x = x[best_split:]
right_y = y[best_split:]
self.decision_boundary = x[best_split]
self.left = DecisionTree(
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
)
self.right = DecisionTree(
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
)
self.left.train(left_x, left_y)
self.right.train(right_x, right_y)
else:
self.prediction = np.mean(y)
return
def predict(self, x):
"""
predict:
@param x: a floating point value to predict the label of
the prediction function works by recursively calling the predict function
of the appropriate subtrees based on the tree's decision boundary
"""
if self.prediction is not None:
return self.prediction
elif self.left is not None and self.right is not None:
if x >= self.decision_boundary:
return self.right.predict(x)
else:
return self.left.predict(x)
else:
raise ValueError("Decision tree not yet trained")
class TestDecisionTree:
"""Decision Tres test class"""
@staticmethod
def helper_mean_squared_error_test(labels, prediction):
"""
helper_mean_squared_error_test:
@param labels: a one dimensional numpy array
@param prediction: a floating point value
return value: helper_mean_squared_error_test calculates the mean squared error
"""
squared_error_sum = float(0)
for label in labels:
squared_error_sum += (label - prediction) ** 2
return float(squared_error_sum / labels.size)
def main():
"""
In this demonstration we're generating a sample data set from the sin function in
numpy. We then train a decision tree on the data set and use the decision tree to
predict the label of 10 different test values. Then the mean squared error over
this test is displayed.
"""
x = np.arange(-1.0, 1.0, 0.005)
y = np.sin(x)
tree = DecisionTree(depth=10, min_leaf_size=10)
tree.train(x, y)
rng = np.random.default_rng()
test_cases = (rng.random(10) * 2) - 1
predictions = np.array([tree.predict(x) for x in test_cases])
avg_error = np.mean((predictions - np.sin(test_cases)) ** 2)
print("Test values: " + str(test_cases))
print("Predictions: " + str(predictions))
print("Average error: " + str(avg_error))
if __name__ == "__main__":
main()
import doctest
doctest.testmod(name="mean_squared_error", verbose=True)
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/sequential_minimum_optimization.py | machine_learning/sequential_minimum_optimization.py | """
Sequential minimal optimization (SMO) for support vector machines (SVM)
Sequential minimal optimization (SMO) is an algorithm for solving the quadratic
programming (QP) problem that arises during the training of SVMs. It was invented by
John Platt in 1998.
Input:
0: type: numpy.ndarray.
1: first column of the ndarray must contain the tags of the samples, each 1 or -1.
2: rows of ndarray represent samples.
Usage:
Command:
python3 sequential_minimum_optimization.py
Code:
from sequential_minimum_optimization import SmoSVM, Kernel
kernel = Kernel(kernel='poly', degree=3., coef0=1., gamma=0.5)
init_alphas = np.zeros(train.shape[0])
SVM = SmoSVM(train=train, alpha_list=init_alphas, kernel_func=kernel, cost=0.4,
b=0.0, tolerance=0.001)
SVM.fit()
predict = SVM.predict(test_samples)
Reference:
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/smo-book.pdf
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-98-14.pdf
"""
import os
import sys
import urllib.request
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.datasets import make_blobs, make_circles
from sklearn.preprocessing import StandardScaler
CANCER_DATASET_URL = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/"
"breast-cancer-wisconsin/wdbc.data"
)
class SmoSVM:
def __init__(
self,
train,
kernel_func,
alpha_list=None,
cost=0.4,
b=0.0,
tolerance=0.001,
auto_norm=True,
):
self._init = True
self._auto_norm = auto_norm
self._c = np.float64(cost)
self._b = np.float64(b)
self._tol = np.float64(tolerance) if tolerance > 0.0001 else np.float64(0.001)
self.tags = train[:, 0]
self.samples = self._norm(train[:, 1:]) if self._auto_norm else train[:, 1:]
self.alphas = alpha_list if alpha_list is not None else np.zeros(train.shape[0])
self.Kernel = kernel_func
self._eps = 0.001
self._all_samples = list(range(self.length))
self._K_matrix = self._calculate_k_matrix()
self._error = np.zeros(self.length)
self._unbound = []
self.choose_alpha = self._choose_alphas()
# Calculate alphas using SMO algorithm
def fit(self):
k = self._k
state = None
while True:
# 1: Find alpha1, alpha2
try:
i1, i2 = self.choose_alpha.send(state)
state = None
except StopIteration:
print("Optimization done!\nEvery sample satisfy the KKT condition!")
break
# 2: calculate new alpha2 and new alpha1
y1, y2 = self.tags[i1], self.tags[i2]
a1, a2 = self.alphas[i1].copy(), self.alphas[i2].copy()
e1, e2 = self._e(i1), self._e(i2)
args = (i1, i2, a1, a2, e1, e2, y1, y2)
a1_new, a2_new = self._get_new_alpha(*args)
if not a1_new and not a2_new:
state = False
continue
self.alphas[i1], self.alphas[i2] = a1_new, a2_new
# 3: update threshold(b)
b1_new = np.float64(
-e1
- y1 * k(i1, i1) * (a1_new - a1)
- y2 * k(i2, i1) * (a2_new - a2)
+ self._b
)
b2_new = np.float64(
-e2
- y2 * k(i2, i2) * (a2_new - a2)
- y1 * k(i1, i2) * (a1_new - a1)
+ self._b
)
if 0.0 < a1_new < self._c:
b = b1_new
if 0.0 < a2_new < self._c:
b = b2_new
if not (np.float64(0) < a2_new < self._c) and not (
np.float64(0) < a1_new < self._c
):
b = (b1_new + b2_new) / 2.0
b_old = self._b
self._b = b
# 4: update error, here we only calculate the error for non-bound samples
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
for s in self.unbound:
if s in (i1, i2):
continue
self._error[s] += (
y1 * (a1_new - a1) * k(i1, s)
+ y2 * (a2_new - a2) * k(i2, s)
+ (self._b - b_old)
)
# if i1 or i2 is non-bound, update their error value to zero
if self._is_unbound(i1):
self._error[i1] = 0
if self._is_unbound(i2):
self._error[i2] = 0
# Predict test samples
def predict(self, test_samples, classify=True):
if test_samples.shape[1] != self.samples.shape[1]:
raise ValueError(
"Test samples' feature length does not equal that of train samples"
)
if self._auto_norm:
test_samples = self._norm(test_samples)
results = []
for test_sample in test_samples:
result = self._predict(test_sample)
if classify:
results.append(1 if result > 0 else -1)
else:
results.append(result)
return np.array(results)
# Check if alpha violates the KKT condition
def _check_obey_kkt(self, index):
alphas = self.alphas
tol = self._tol
r = self._e(index) * self.tags[index]
c = self._c
return (r < -tol and alphas[index] < c) or (r > tol and alphas[index] > 0.0)
# Get value calculated from kernel function
def _k(self, i1, i2):
# for test samples, use kernel function
if isinstance(i2, np.ndarray):
return self.Kernel(self.samples[i1], i2)
# for training samples, kernel values have been saved in matrix
else:
return self._K_matrix[i1, i2]
# Get error for sample
def _e(self, index):
"""
Two cases:
1: Sample[index] is non-bound, fetch error from list: _error
2: sample[index] is bound, use predicted value minus true value: g(xi) - yi
"""
# get from error data
if self._is_unbound(index):
return self._error[index]
# get by g(xi) - yi
else:
gx = np.dot(self.alphas * self.tags, self._K_matrix[:, index]) + self._b
yi = self.tags[index]
return gx - yi
# Calculate kernel matrix of all possible i1, i2, saving time
def _calculate_k_matrix(self):
k_matrix = np.zeros([self.length, self.length])
for i in self._all_samples:
for j in self._all_samples:
k_matrix[i, j] = np.float64(
self.Kernel(self.samples[i, :], self.samples[j, :])
)
return k_matrix
# Predict tag for test sample
def _predict(self, sample):
k = self._k
predicted_value = (
np.sum(
[
self.alphas[i1] * self.tags[i1] * k(i1, sample)
for i1 in self._all_samples
]
)
+ self._b
)
return predicted_value
# Choose alpha1 and alpha2
def _choose_alphas(self):
loci = yield from self._choose_a1()
if not loci:
return None
return loci
def _choose_a1(self):
"""
Choose first alpha
Steps:
1: First loop over all samples
2: Second loop over all non-bound samples until no non-bound samples violate
the KKT condition.
3: Repeat these two processes until no samples violate the KKT condition
after the first loop.
"""
while True:
all_not_obey = True
# all sample
print("Scanning all samples!")
for i1 in [i for i in self._all_samples if self._check_obey_kkt(i)]:
all_not_obey = False
yield from self._choose_a2(i1)
# non-bound sample
print("Scanning non-bound samples!")
while True:
not_obey = True
for i1 in [
i
for i in self._all_samples
if self._check_obey_kkt(i) and self._is_unbound(i)
]:
not_obey = False
yield from self._choose_a2(i1)
if not_obey:
print("All non-bound samples satisfy the KKT condition!")
break
if all_not_obey:
print("All samples satisfy the KKT condition!")
break
return False
def _choose_a2(self, i1):
"""
Choose the second alpha using a heuristic algorithm
Steps:
1: Choose alpha2 that maximizes the step size (|E1 - E2|).
2: Start at a random point, loop over all non-bound samples until alpha1 and
alpha2 are optimized.
3: Start at a random point, loop over all samples until alpha1 and alpha2 are
optimized.
"""
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
if len(self.unbound) > 0:
tmp_error = self._error.copy().tolist()
tmp_error_dict = {
index: value
for index, value in enumerate(tmp_error)
if self._is_unbound(index)
}
if self._e(i1) >= 0:
i2 = min(tmp_error_dict, key=lambda index: tmp_error_dict[index])
else:
i2 = max(tmp_error_dict, key=lambda index: tmp_error_dict[index])
cmd = yield i1, i2
if cmd is None:
return
rng = np.random.default_rng()
for i2 in np.roll(self.unbound, rng.choice(self.length)):
cmd = yield i1, i2
if cmd is None:
return
for i2 in np.roll(self._all_samples, rng.choice(self.length)):
cmd = yield i1, i2
if cmd is None:
return
# Get the new alpha2 and new alpha1
def _get_new_alpha(self, i1, i2, a1, a2, e1, e2, y1, y2):
k = self._k
if i1 == i2:
return None, None
# calculate L and H which bound the new alpha2
s = y1 * y2
if s == -1:
l, h = max(0.0, a2 - a1), min(self._c, self._c + a2 - a1) # noqa: E741
else:
l, h = max(0.0, a2 + a1 - self._c), min(self._c, a2 + a1) # noqa: E741
if l == h:
return None, None
# calculate eta
k11 = k(i1, i1)
k22 = k(i2, i2)
k12 = k(i1, i2)
# select the new alpha2 which could achieve the minimal objectives
if (eta := k11 + k22 - 2.0 * k12) > 0.0:
a2_new_unc = a2 + (y2 * (e1 - e2)) / eta
# a2_new has a boundary
if a2_new_unc >= h:
a2_new = h
elif a2_new_unc <= l:
a2_new = l
else:
a2_new = a2_new_unc
else:
b = self._b
l1 = a1 + s * (a2 - l)
h1 = a1 + s * (a2 - h)
# Method 1
f1 = y1 * (e1 + b) - a1 * k(i1, i1) - s * a2 * k(i1, i2)
f2 = y2 * (e2 + b) - a2 * k(i2, i2) - s * a1 * k(i1, i2)
ol = (
l1 * f1
+ l * f2
+ 1 / 2 * l1**2 * k(i1, i1)
+ 1 / 2 * l**2 * k(i2, i2)
+ s * l * l1 * k(i1, i2)
)
oh = (
h1 * f1
+ h * f2
+ 1 / 2 * h1**2 * k(i1, i1)
+ 1 / 2 * h**2 * k(i2, i2)
+ s * h * h1 * k(i1, i2)
)
"""
Method 2: Use objective function to check which alpha2_new could achieve the
minimal objectives
"""
if ol < (oh - self._eps):
a2_new = l
elif ol > oh + self._eps:
a2_new = h
else:
a2_new = a2
# a1_new has a boundary too
a1_new = a1 + s * (a2 - a2_new)
if a1_new < 0:
a2_new += s * a1_new
a1_new = 0
if a1_new > self._c:
a2_new += s * (a1_new - self._c)
a1_new = self._c
return a1_new, a2_new
# Normalize data using min-max method
def _norm(self, data):
if self._init:
self._min = np.min(data, axis=0)
self._max = np.max(data, axis=0)
self._init = False
return (data - self._min) / (self._max - self._min)
def _is_unbound(self, index):
return bool(0.0 < self.alphas[index] < self._c)
def _is_support(self, index):
return bool(self.alphas[index] > 0)
@property
def unbound(self):
return self._unbound
@property
def support(self):
return [i for i in range(self.length) if self._is_support(i)]
@property
def length(self):
return self.samples.shape[0]
class Kernel:
def __init__(self, kernel, degree=1.0, coef0=0.0, gamma=1.0):
self.degree = np.float64(degree)
self.coef0 = np.float64(coef0)
self.gamma = np.float64(gamma)
self._kernel_name = kernel
self._kernel = self._get_kernel(kernel_name=kernel)
self._check()
def _polynomial(self, v1, v2):
return (self.gamma * np.inner(v1, v2) + self.coef0) ** self.degree
def _linear(self, v1, v2):
return np.inner(v1, v2) + self.coef0
def _rbf(self, v1, v2):
return np.exp(-1 * (self.gamma * np.linalg.norm(v1 - v2) ** 2))
def _check(self):
if self._kernel == self._rbf and self.gamma < 0:
raise ValueError("gamma value must be non-negative")
def _get_kernel(self, kernel_name):
maps = {"linear": self._linear, "poly": self._polynomial, "rbf": self._rbf}
return maps[kernel_name]
def __call__(self, v1, v2):
return self._kernel(v1, v2)
def __repr__(self):
return self._kernel_name
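def kernel_demo() -> None:
    # Hedged mini-demo, not part of the original module: evaluate each of the
    # three kernels on a fixed pair of 2-D vectors to show the call interface;
    # invoke kernel_demo() manually to see the outputs.
    v1, v2 = np.array([1.0, 2.0]), np.array([2.0, 0.5])
    for name in ("linear", "poly", "rbf"):
        kernel = Kernel(kernel=name, degree=2.0, coef0=1.0, gamma=0.5)
        print(f"{name}: {kernel(v1, v2)}")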
def count_time(func):
def call_func(*args, **kwargs):
import time
start_time = time.time()
func(*args, **kwargs)
end_time = time.time()
print(f"SMO algorithm cost {end_time - start_time} seconds")
return call_func
@count_time
def test_cancer_data():
print("Hello!\nStart test SVM using the SMO algorithm!")
# 0: download dataset and load into pandas' dataframe
if not os.path.exists(r"cancer_data.csv"):
request = urllib.request.Request( # noqa: S310
CANCER_DATASET_URL,
headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"},
)
response = urllib.request.urlopen(request) # noqa: S310
content = response.read().decode("utf-8")
with open(r"cancer_data.csv", "w") as f:
f.write(content)
data = pd.read_csv(
"cancer_data.csv",
header=None,
dtype={0: str}, # Assuming the first column contains string data
)
# 1: pre-processing data
del data[data.columns.tolist()[0]]
data = data.dropna(axis=0)
data = data.replace({"M": np.float64(1), "B": np.float64(-1)})
samples = np.array(data)[:, :]
# 2: dividing data into train and test sets
train_data, test_data = samples[:328, :], samples[328:, :]
test_tags, test_samples = test_data[:, 0], test_data[:, 1:]
# 3: choose kernel function, and set initial alphas to zero (optional)
my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
al = np.zeros(train_data.shape[0])
# 4: calculating best alphas using SMO algorithm and predict test_data samples
mysvm = SmoSVM(
train=train_data,
kernel_func=my_kernel,
alpha_list=al,
cost=0.4,
b=0.0,
tolerance=0.001,
)
mysvm.fit()
predict = mysvm.predict(test_samples)
# 5: check accuracy
score = 0
test_num = test_tags.shape[0]
for i in range(test_tags.shape[0]):
if test_tags[i] == predict[i]:
score += 1
print(f"\nAll: {test_num}\nCorrect: {score}\nIncorrect: {test_num - score}")
print(f"Rough Accuracy: {score / test_tags.shape[0]}")
def test_demonstration():
# change stdout
print("\nStarting plot, please wait!")
sys.stdout = open(os.devnull, "w")
ax1 = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (0, 1))
ax3 = plt.subplot2grid((2, 2), (1, 0))
ax4 = plt.subplot2grid((2, 2), (1, 1))
ax1.set_title("Linear SVM, cost = 0.1")
test_linear_kernel(ax1, cost=0.1)
ax2.set_title("Linear SVM, cost = 500")
test_linear_kernel(ax2, cost=500)
ax3.set_title("RBF kernel SVM, cost = 0.1")
test_rbf_kernel(ax3, cost=0.1)
ax4.set_title("RBF kernel SVM, cost = 500")
test_rbf_kernel(ax4, cost=500)
sys.stdout = sys.__stdout__
print("Plot done!")
def test_linear_kernel(ax, cost):
train_x, train_y = make_blobs(
n_samples=500, centers=2, n_features=2, random_state=1
)
train_y[train_y == 0] = -1
scaler = StandardScaler()
train_x_scaled = scaler.fit_transform(train_x, train_y)
train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
my_kernel = Kernel(kernel="linear", degree=5, coef0=1, gamma=0.5)
mysvm = SmoSVM(
train=train_data,
kernel_func=my_kernel,
cost=cost,
tolerance=0.001,
auto_norm=False,
)
mysvm.fit()
plot_partition_boundary(mysvm, train_data, ax=ax)
def test_rbf_kernel(ax, cost):
train_x, train_y = make_circles(
n_samples=500, noise=0.1, factor=0.1, random_state=1
)
train_y[train_y == 0] = -1
scaler = StandardScaler()
train_x_scaled = scaler.fit_transform(train_x, train_y)
train_data = np.hstack((train_y.reshape(500, 1), train_x_scaled))
my_kernel = Kernel(kernel="rbf", degree=5, coef0=1, gamma=0.5)
mysvm = SmoSVM(
train=train_data,
kernel_func=my_kernel,
cost=cost,
tolerance=0.001,
auto_norm=False,
)
mysvm.fit()
plot_partition_boundary(mysvm, train_data, ax=ax)
def plot_partition_boundary(
model, train_data, ax, resolution=100, colors=("b", "k", "r")
):
"""
We cannot get the optimal w of our kernel SVM model, which is different from a
linear SVM. For this reason, we generate a dense grid of points and calculate
the predicted value of each point using our trained model. We can then use
these predicted values to draw a contour map that represents the SVM's
partition boundary.
"""
train_data_x = train_data[:, 1]
train_data_y = train_data[:, 2]
train_data_tags = train_data[:, 0]
xrange = np.linspace(train_data_x.min(), train_data_x.max(), resolution)
yrange = np.linspace(train_data_y.min(), train_data_y.max(), resolution)
test_samples = np.array([(x, y) for x in xrange for y in yrange]).reshape(
resolution * resolution, 2
)
test_tags = model.predict(test_samples, classify=False)
grid = test_tags.reshape((len(xrange), len(yrange)))
# Plot contour map which represents the partition boundary
ax.contour(
xrange,
yrange,
np.asmatrix(grid).T,
levels=(-1, 0, 1),
linestyles=("--", "-", "--"),
linewidths=(1, 1, 1),
colors=colors,
)
# Plot all train samples
ax.scatter(
train_data_x,
train_data_y,
c=train_data_tags,
cmap=plt.cm.Dark2,
lw=0,
alpha=0.5,
)
# Plot support vectors
support = model.support
ax.scatter(
train_data_x[support],
train_data_y[support],
c=train_data_tags[support],
cmap=plt.cm.Dark2,
)
if __name__ == "__main__":
test_cancer_data()
test_demonstration()
plt.show()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/xgboost_classifier.py | machine_learning/xgboost_classifier.py | # XGBoost Classifier Example
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
# Split dataset into features and target
# data is features
"""
>>> data_handling(({'data':'[5.1, 3.5, 1.4, 0.2]','target':([0])}))
('[5.1, 3.5, 1.4, 0.2]', [0])
>>> data_handling(
... {'data': '[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', 'target': ([0, 0])}
... )
('[4.9, 3.0, 1.4, 0.2], [4.7, 3.2, 1.3, 0.2]', [0, 0])
"""
return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
"""
# THIS TEST IS BROKEN!! >>> xgboost(np.array([[5.1, 3.6, 1.4, 0.2]]), np.array([0]))
XGBClassifier(base_score=0.5, booster='gbtree', callbacks=None,
colsample_bylevel=1, colsample_bynode=1, colsample_bytree=1,
early_stopping_rounds=None, enable_categorical=False,
eval_metric=None, gamma=0, gpu_id=-1, grow_policy='depthwise',
importance_type=None, interaction_constraints='',
learning_rate=0.300000012, max_bin=256, max_cat_to_onehot=4,
max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1,
missing=nan, monotone_constraints='()', n_estimators=100,
n_jobs=0, num_parallel_tree=1, predictor='auto', random_state=0,
reg_alpha=0, reg_lambda=1, ...)
"""
classifier = XGBClassifier()
classifier.fit(features, target)
return classifier
def main() -> None:
"""
URL for the algorithm:
https://xgboost.readthedocs.io/en/stable/
The Iris dataset is used to demonstrate the algorithm.
"""
# Load Iris dataset
iris = load_iris()
features, targets = data_handling(iris)
x_train, x_test, y_train, y_test = train_test_split(
features, targets, test_size=0.25
)
names = iris["target_names"]
# Create an XGBoost Classifier from the training data
xgboost_classifier = xgboost(x_train, y_train)
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
xgboost_classifier,
x_test,
y_test,
display_labels=names,
cmap="Blues",
normalize="true",
)
plt.title("Normalized Confusion Matrix - IRIS Dataset")
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
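    # Hedged extra check, not part of the original demo: report plain hold-out
    # accuracy in addition to the confusion matrix that main() displays.
    from sklearn.metrics import accuracy_score

    features, targets = data_handling(load_iris())
    x_tr, x_te, y_tr, y_te = train_test_split(
        features, targets, test_size=0.25, random_state=0
    )
    model = xgboost(x_tr, y_tr)
    print(f"Hold-out accuracy: {accuracy_score(y_te, model.predict(x_te)):.3f}")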
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/apriori_algorithm.py | machine_learning/apriori_algorithm.py | """
The Apriori algorithm is an association rule mining technique, also known as
market basket analysis, that aims to discover interesting relationships or
associations among a set of items in a transactional or relational database.
For example, an Apriori-derived rule might state: "If a customer buys item A and
item B, then they are likely to buy item C." This rule suggests a relationship
between items A, B, and C, indicating that customers who purchased A and B are
more likely to also purchase item C.
WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm
Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining
"""
from collections import Counter
from itertools import combinations
def load_data() -> list[list[str]]:
"""
Returns a sample transaction dataset.
>>> load_data()
[['milk'], ['milk', 'butter'], ['milk', 'bread'], ['milk', 'bread', 'chips']]
"""
return [["milk"], ["milk", "butter"], ["milk", "bread"], ["milk", "bread", "chips"]]
def prune(itemset: list, candidates: list, length: int) -> list:
"""
Prune candidate itemsets that are not frequent.
The goal of pruning is to filter out candidate itemsets that are not frequent.
This is done by checking if all the (k-1) subsets of a candidate itemset are
present in the frequent itemsets of the previous iteration (i.e. they are
valid subsequences of the frequent itemsets from the previous iteration).
>>> itemset = ['X', 'Y', 'Z']
>>> candidates = [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]
>>> prune(itemset, candidates, 2)
[['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]
>>> itemset = ['1', '2', '3', '4']
>>> candidates = ['1', '2', '4']
>>> prune(itemset, candidates, 3)
[]
"""
itemset_counter = Counter(tuple(item) for item in itemset)
pruned = []
for candidate in candidates:
is_subsequence = True
for item in candidate:
item_tuple = tuple(item)
if (
item_tuple not in itemset_counter
or itemset_counter[item_tuple] < length - 1
):
is_subsequence = False
break
if is_subsequence:
pruned.append(candidate)
return pruned
def apriori(data: list[list[str]], min_support: int) -> list[tuple[list[str], int]]:
"""
Returns a list of frequent itemsets and their support counts.
>>> data = [['A', 'B', 'C'], ['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C']]
>>> apriori(data, 2)
[(['A', 'B'], 2), (['A', 'C'], 2), (['B', 'C'], 2)]
>>> data = [['1', '2', '3'], ['1', '2'], ['1', '3'], ['1', '4'], ['2', '3']]
>>> apriori(data, 3)
[]
"""
itemset = [list(transaction) for transaction in data]
frequent_itemsets = []
length = 1
while itemset:
# Count itemset support
counts = [0] * len(itemset)
for transaction in data:
for j, candidate in enumerate(itemset):
if all(item in transaction for item in candidate):
counts[j] += 1
# Prune infrequent itemsets, keeping each surviving itemset paired with its
# count (indexing the old counts list with post-pruning indices was a bug)
frequent = [
(item, count)
for item, count in zip(itemset, counts)
if count >= min_support
]
itemset = [item for item, _ in frequent]
# Append frequent itemsets (as a list to maintain order)
for item, count in frequent:
frequent_itemsets.append((sorted(item), count))
length += 1
itemset = prune(itemset, list(combinations(itemset, length)), length)
return frequent_itemsets
if __name__ == "__main__":
"""
Apriori algorithm for finding frequent itemsets.
Args:
data: A list of transactions, where each transaction is a list of items.
min_support: The minimum support threshold for frequent itemsets.
Returns:
A list of frequent itemsets along with their support counts.
"""
import doctest
doctest.testmod()
# user-defined threshold or minimum support level
frequent_itemsets = apriori(data=load_data(), min_support=2)
print("\n".join(f"{itemset}: {support}" for itemset, support in frequent_itemsets))
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/__init__.py | machine_learning/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/principle_component_analysis.py | machine_learning/principle_component_analysis.py | """
Principal Component Analysis (PCA) is a dimensionality reduction technique
used in machine learning. It transforms high-dimensional data into a lower-dimensional
representation while retaining as much variance as possible.
This implementation follows best practices, including:
- Standardizing the dataset.
- Computing principal components using Singular Value Decomposition (SVD).
- Returning transformed data and explained variance ratio.
"""
import doctest
import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def collect_dataset() -> tuple[np.ndarray, np.ndarray]:
"""
Collects the dataset (Iris dataset) and returns feature matrix and target values.
:return: Tuple containing feature matrix (X) and target labels (y)
Example:
>>> X, y = collect_dataset()
>>> X.shape
(150, 4)
>>> y.shape
(150,)
"""
data = load_iris()
return np.array(data.data), np.array(data.target)
def apply_pca(data_x: np.ndarray, n_components: int) -> tuple[np.ndarray, np.ndarray]:
"""
Applies Principal Component Analysis (PCA) to reduce dimensionality.
:param data_x: Original dataset (features)
:param n_components: Number of principal components to retain
:return: Tuple containing transformed dataset and explained variance ratio
Example:
>>> X, _ = collect_dataset()
>>> transformed_X, variance = apply_pca(X, 2)
>>> transformed_X.shape
(150, 2)
>>> len(variance) == 2
True
"""
# Standardizing the dataset
scaler = StandardScaler()
data_x_scaled = scaler.fit_transform(data_x)
# Applying PCA
pca = PCA(n_components=n_components)
principal_components = pca.fit_transform(data_x_scaled)
return principal_components, pca.explained_variance_ratio_
def main() -> None:
"""
Driver function to execute PCA and display results.
"""
data_x, _data_y = collect_dataset()
# Number of principal components to retain
n_components = 2
# Apply PCA
transformed_data, variance_ratio = apply_pca(data_x, n_components)
print("Transformed Dataset (First 5 rows):")
print(transformed_data[:5])
print("\nExplained Variance Ratio:")
print(variance_ratio)
if __name__ == "__main__":
doctest.testmod()
main()
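    # Hedged cross-check, not part of the original script: PCA via a plain SVD
    # on the standardized data should reproduce sklearn's explained variance
    # ratio (the components themselves match only up to sign).
    data_x, _ = collect_dataset()
    scaled = StandardScaler().fit_transform(data_x)
    singular_values = np.linalg.svd(scaled, full_matrices=False)[1]
    ratio = singular_values**2 / np.sum(singular_values**2)
    print("Explained variance ratio via raw SVD:", ratio[:2])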
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/astar.py | machine_learning/astar.py | """
The A* algorithm combines features of uniform-cost search and pure heuristic search to
efficiently compute optimal solutions.
The A* algorithm is a best-first search algorithm in which the cost associated with a
node is f(n) = g(n) + h(n), where g(n) is the cost of the path from the initial state to
node n and h(n) is the heuristic estimate of the cost of a path from node n to a goal.
The A* algorithm introduces a heuristic into a regular graph-searching algorithm,
essentially planning ahead at each step so a more optimal decision is made. For this
reason, A* is known as an algorithm with brains.
https://en.wikipedia.org/wiki/A*_search_algorithm
"""
import numpy as np
class Cell:
"""
Class cell represents a cell in the world which have the properties:
position: represented by tuple of x and y coordinates initially set to (0,0).
parent: Contains the parent cell object visited before we arrived at this cell.
g, h, f: Parameters used when calling our heuristic function.
"""
def __init__(self):
self.position = (0, 0)
self.parent = None
self.g = 0
self.h = 0
self.f = 0
"""
Overrides equals method because otherwise cell assign will give
wrong results.
"""
def __eq__(self, cell):
return self.position == cell.position
def showcell(self):
print(self.position)
class Gridworld:
"""
Gridworld class represents the external world, here an M*M grid
matrix.
world_size: creates a numpy array with the given world_size; default is (5, 5).
"""
def __init__(self, world_size=(5, 5)):
self.w = np.zeros(world_size)
self.world_x_limit = world_size[0]
self.world_y_limit = world_size[1]
def show(self):
print(self.w)
def get_neighbours(self, cell):
"""
Return the neighbours of cell
"""
neighbour_cord = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
current_x = cell.position[0]
current_y = cell.position[1]
neighbours = []
for n in neighbour_cord:
x = current_x + n[0]
y = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
c = Cell()
c.position = (x, y)
c.parent = cell
neighbours.append(c)
return neighbours
def astar(world, start, goal):
"""
Implementation of the A* algorithm.
world : Object of the world object.
start : Object of the cell as start position.
goal : Object of the cell as goal position.
>>> p = Gridworld()
>>> start = Cell()
>>> start.position = (0,0)
>>> goal = Cell()
>>> goal.position = (4,4)
>>> astar(p, start, goal)
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
"""
_open = []
_closed = []
_open.append(start)
while _open:
min_f = np.argmin([n.f for n in _open])
current = _open[min_f]
_closed.append(_open.pop(min_f))
if current == goal:
break
for n in world.get_neighbours(current):
# Skip neighbours already expanded; the original bare inner loop's
# `continue` never actually skipped the neighbour.
if any(c == n for c in _closed):
continue
n.g = current.g + 1
x1, y1 = n.position
x2, y2 = goal.position
n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
n.f = n.h + n.g
# Only queue the neighbour if no cheaper equal cell is already in _open.
if not any(c == n and c.f < n.f for c in _open):
_open.append(n)
path = []
while current.parent is not None:
path.append(current.position)
current = current.parent
path.append(current.position)
return path[::-1]
if __name__ == "__main__":
world = Gridworld()
# Start position and goal
start = Cell()
start.position = (0, 0)
goal = Cell()
goal.position = (4, 4)
print(f"path from {start.position} to {goal.position}")
s = astar(world, start, goal)
# Just for visual reasons.
for i in s:
world.w[i] = 1
print(world.w)
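    # Hedged aside, not part of the original demo: the squared-Euclidean
    # heuristic used above can overestimate on an 8-connected grid with unit
    # step costs, so it is not admissible; Chebyshev distance is a safe
    # alternative and still favours the diagonal path here.
    def chebyshev(a: tuple, b: tuple) -> int:
        # Admissible heuristic for an 8-connected grid with unit step costs.
        return max(abs(a[0] - b[0]), abs(a[1] - b[1]))

    print("Chebyshev h(start, goal) =", chebyshev(start.position, goal.position))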
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/frequent_pattern_growth.py | machine_learning/frequent_pattern_growth.py | """
The Frequent Pattern Growth algorithm (FP-Growth) is a widely used data mining
technique for discovering frequent itemsets in large transaction databases.
It overcomes some of the limitations of traditional methods such as Apriori by
efficiently constructing the FP-Tree.
WIKI: https://athena.ecs.csus.edu/~mei/associationcw/FpGrowth.html
Examples: https://www.javatpoint.com/fp-growth-algorithm-in-data-mining
"""
from __future__ import annotations
from dataclasses import dataclass, field
@dataclass
class TreeNode:
"""
A node in a Frequent Pattern tree.
Args:
name: The name of this node.
count: The number of occurrences of the node.
parent: The parent node.
Example:
>>> parent = TreeNode("Parent", 1, None)
>>> child = TreeNode("Child", 2, parent)
>>> child.name
'Child'
>>> child.count
2
"""
name: str
count: int
parent: TreeNode | None = None
children: dict[str, TreeNode] = field(default_factory=dict)
node_link: TreeNode | None = None
def __repr__(self) -> str:
return f"TreeNode({self.name!r}, {self.count!r}, {self.parent!r})"
def inc(self, num_occur: int) -> None:
self.count += num_occur
def disp(self, ind: int = 1) -> None:
print(f"{' ' * ind} {self.name} {self.count}")
for child in self.children.values():
child.disp(ind + 1)
def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]:
"""
Create Frequent Pattern tree
Args:
data_set: A list of transactions, where each transaction is a list of items.
min_sup: The minimum support threshold.
Items with support less than this will be pruned. Default is 1.
Returns:
The root of the FP-Tree.
header_table: The header table dictionary with item information.
Example:
>>> data_set = [
... ['A', 'B', 'C'],
... ['A', 'C'],
... ['A', 'B', 'E'],
... ['A', 'B', 'C', 'E'],
... ['B', 'E']
... ]
>>> min_sup = 2
>>> fp_tree, header_table = create_tree(data_set, min_sup)
>>> fp_tree
TreeNode('Null Set', 1, None)
>>> len(header_table)
4
>>> header_table["A"]
[[4, None], TreeNode('A', 4, TreeNode('Null Set', 1, None))]
>>> header_table["E"][1] # doctest: +NORMALIZE_WHITESPACE
TreeNode('E', 1, TreeNode('B', 3, TreeNode('A', 4, TreeNode('Null Set', 1, None))))
>>> sorted(header_table)
['A', 'B', 'C', 'E']
>>> fp_tree.name
'Null Set'
>>> sorted(fp_tree.children)
['A', 'B']
>>> fp_tree.children['A'].name
'A'
>>> sorted(fp_tree.children['A'].children)
['B', 'C']
"""
header_table: dict = {}
for trans in data_set:
for item in trans:
header_table[item] = header_table.get(item, [0, None])
header_table[item][0] += 1
for k in list(header_table):
if header_table[k][0] < min_sup:
del header_table[k]
if not (freq_item_set := set(header_table)):
return TreeNode("Null Set", 1, None), {}
for key, value in header_table.items():
header_table[key] = [value, None]
fp_tree = TreeNode("Null Set", 1, None) # Parent is None for the root node
for tran_set in data_set:
local_d = {
item: header_table[item][0] for item in tran_set if item in freq_item_set
}
if local_d:
sorted_items = sorted(
local_d.items(), key=lambda item_info: item_info[1], reverse=True
)
ordered_items = [item[0] for item in sorted_items]
update_tree(ordered_items, fp_tree, header_table, 1)
return fp_tree, header_table
def update_tree(items: list, in_tree: TreeNode, header_table: dict, count: int) -> None:
"""
Update the FP-Tree with a transaction.
Args:
items: List of items in the transaction.
in_tree: The current node in the FP-Tree.
header_table: The header table dictionary with item information.
count: The count of the transaction.
Example:
>>> data_set = [
... ['A', 'B', 'C'],
... ['A', 'C'],
... ['A', 'B', 'E'],
... ['A', 'B', 'C', 'E'],
... ['B', 'E']
... ]
>>> min_sup = 2
>>> fp_tree, header_table = create_tree(data_set, min_sup)
>>> fp_tree
TreeNode('Null Set', 1, None)
>>> transaction = ['A', 'B', 'E']
>>> update_tree(transaction, fp_tree, header_table, 1)
>>> fp_tree
TreeNode('Null Set', 1, None)
>>> fp_tree.children['A'].children['B'].children['E'].children
{}
>>> fp_tree.children['A'].children['B'].children['E'].count
2
>>> header_table['E'][1].name
'E'
"""
if items[0] in in_tree.children:
in_tree.children[items[0]].inc(count)
else:
in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)
if header_table[items[0]][1] is None:
header_table[items[0]][1] = in_tree.children[items[0]]
else:
update_header(header_table[items[0]][1], in_tree.children[items[0]])
if len(items) > 1:
update_tree(items[1:], in_tree.children[items[0]], header_table, count)
def update_header(node_to_test: TreeNode, target_node: TreeNode) -> TreeNode:
"""
Update the header table with a node link.
Args:
node_to_test: The node to be updated in the header table.
target_node: The node to link to.
Example:
>>> data_set = [
... ['A', 'B', 'C'],
... ['A', 'C'],
... ['A', 'B', 'E'],
... ['A', 'B', 'C', 'E'],
... ['B', 'E']
... ]
>>> min_sup = 2
>>> fp_tree, header_table = create_tree(data_set, min_sup)
>>> fp_tree
TreeNode('Null Set', 1, None)
>>> node1 = TreeNode("A", 3, None)
>>> node2 = TreeNode("B", 4, None)
>>> node1
TreeNode('A', 3, None)
>>> node1 = update_header(node1, node2)
>>> node1
TreeNode('A', 3, None)
>>> node1.node_link
TreeNode('B', 4, None)
>>> node2.node_link is None
True
"""
while node_to_test.node_link is not None:
node_to_test = node_to_test.node_link
if node_to_test.node_link is None:
node_to_test.node_link = target_node
# Return the updated node
return node_to_test
def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None:
"""
Ascend the FP-Tree from a leaf node to its root, adding item names to the prefix
path.
Args:
leaf_node: The leaf node to start ascending from.
prefix_path: A list to store the items as the tree is ascended.
Example:
>>> data_set = [
... ['A', 'B', 'C'],
... ['A', 'C'],
... ['A', 'B', 'E'],
... ['A', 'B', 'C', 'E'],
... ['B', 'E']
... ]
>>> min_sup = 2
>>> fp_tree, header_table = create_tree(data_set, min_sup)
>>> path = []
>>> ascend_tree(fp_tree.children['A'], path)
>>> path # ascending from a leaf node 'A'
['A']
"""
if leaf_node.parent is not None:
prefix_path.append(leaf_node.name)
ascend_tree(leaf_node.parent, prefix_path)
def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: # noqa: ARG001
"""
Find the conditional pattern base for a given base pattern.
Args:
base_pat: The base pattern for which to find the conditional pattern base.
tree_node: The node in the FP-Tree.
Example:
>>> data_set = [
... ['A', 'B', 'C'],
... ['A', 'C'],
... ['A', 'B', 'E'],
... ['A', 'B', 'C', 'E'],
... ['B', 'E']
... ]
>>> min_sup = 2
>>> fp_tree, header_table = create_tree(data_set, min_sup)
>>> fp_tree
TreeNode('Null Set', 1, None)
>>> len(header_table)
4
>>> base_pattern = frozenset(['A'])
>>> sorted(find_prefix_path(base_pattern, fp_tree.children['A']))
[]
"""
cond_pats: dict = {}
while tree_node is not None:
prefix_path: list = []
ascend_tree(tree_node, prefix_path)
if len(prefix_path) > 1:
cond_pats[frozenset(prefix_path[1:])] = tree_node.count
tree_node = tree_node.node_link
return cond_pats
def mine_tree(
in_tree: TreeNode, # noqa: ARG001
header_table: dict,
min_sup: int,
pre_fix: set,
freq_item_list: list,
) -> None:
"""
Mine the FP-Tree recursively to discover frequent itemsets.
Args:
in_tree: The FP-Tree to mine.
header_table: The header table dictionary with item information.
min_sup: The minimum support threshold.
pre_fix: A set of items as a prefix for the itemsets being mined.
freq_item_list: A list to store the frequent itemsets.
Example:
>>> data_set = [
... ['A', 'B', 'C'],
... ['A', 'C'],
... ['A', 'B', 'E'],
... ['A', 'B', 'C', 'E'],
... ['B', 'E']
... ]
>>> min_sup = 2
>>> fp_tree, header_table = create_tree(data_set, min_sup)
>>> fp_tree
TreeNode('Null Set', 1, None)
>>> frequent_itemsets = []
>>> mine_tree(fp_tree, header_table, min_sup, set([]), frequent_itemsets)
>>> expe_itm = [{'C'}, {'C', 'A'}, {'E'}, {'A', 'E'}, {'E', 'B'}, {'A'}, {'B'}]
>>> all(expected in frequent_itemsets for expected in expe_itm)
True
"""
sorted_items = sorted(header_table.items(), key=lambda item_info: item_info[1][0])
big_l = [item[0] for item in sorted_items]
for base_pat in big_l:
new_freq_set = pre_fix.copy()
new_freq_set.add(base_pat)
freq_item_list.append(new_freq_set)
cond_patt_bases = find_prefix_path(base_pat, header_table[base_pat][1])
my_cond_tree, my_head = create_tree(list(cond_patt_bases), min_sup)
if my_head:  # an empty header table means there is nothing left to mine
# Pass header_table[base_pat][1] as node_to_test to update_header
header_table[base_pat][1] = update_header(
header_table[base_pat][1], my_cond_tree
)
mine_tree(my_cond_tree, my_head, min_sup, new_freq_set, freq_item_list)
if __name__ == "__main__":
from doctest import testmod
testmod()
data_set: list[frozenset] = [
frozenset(["bread", "milk", "cheese"]),
frozenset(["bread", "milk"]),
frozenset(["bread", "diapers"]),
frozenset(["bread", "milk", "diapers"]),
frozenset(["milk", "diapers"]),
frozenset(["milk", "cheese"]),
frozenset(["diapers", "cheese"]),
frozenset(["bread", "milk", "cheese", "diapers"]),
]
print(f"{len(data_set) = }")
fp_tree, header_table = create_tree(data_set, min_sup=3)
print(f"{fp_tree = }")
print(f"{len(header_table) = }")
freq_items: list = []
mine_tree(fp_tree, header_table, 3, set(), freq_items)
print(f"{freq_items = }")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/mfcc.py | machine_learning/mfcc.py | """
Mel Frequency Cepstral Coefficients (MFCC) Calculation
MFCC is an algorithm widely used in audio and speech processing to represent the
short-term power spectrum of a sound signal in a more compact and
discriminative way. It is particularly popular in speech and audio processing
tasks such as speech recognition and speaker identification.
How Mel Frequency Cepstral Coefficients are Calculated:
1. Preprocessing:
- Load an audio signal and normalize it to ensure that the values fall
within a specific range (e.g., between -1 and 1).
- Frame the audio signal into overlapping, fixed-length segments, typically
using a technique like windowing to reduce spectral leakage.
2. Fourier Transform:
- Apply a Fast Fourier Transform (FFT) to each audio frame to convert it
from the time domain to the frequency domain. This results in a
representation of the audio frame as a sequence of frequency components.
3. Power Spectrum:
- Calculate the power spectrum by taking the squared magnitude of each
frequency component obtained from the FFT. This step measures the energy
distribution across different frequency bands.
4. Mel Filterbank:
- Apply a set of triangular filterbanks spaced in the Mel frequency scale
to the power spectrum. These filters mimic the human auditory system's
frequency response. Each filterbank sums the power spectrum values within
its band.
5. Logarithmic Compression:
- Take the logarithm (typically base 10) of the filterbank values to
compress the dynamic range. This step mimics the logarithmic response of
the human ear to sound intensity.
6. Discrete Cosine Transform (DCT):
- Apply the Discrete Cosine Transform to the log filterbank energies to
obtain the MFCC coefficients. This transformation helps decorrelate the
filterbank energies and captures the most important features of the audio
signal.
7. Feature Extraction:
- Select a subset of the DCT coefficients to form the feature vector.
Often, the first few coefficients (e.g., 12-13) are used for most
applications.
References:
- Mel-Frequency Cepstral Coefficients (MFCCs):
https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
- Speech and Language Processing by Daniel Jurafsky & James H. Martin:
https://web.stanford.edu/~jurafsky/slp3/
- Mel Frequency Cepstral Coefficient (MFCC) tutorial
http://practicalcryptography.com/miscellaneous/machine-learning
/guide-mel-frequency-cepstral-coefficients-mfccs/
Author: Amir Lavasani
"""
import logging
import numpy as np
import scipy.fftpack as fft
from scipy.signal import get_window
logging.basicConfig(filename=f"{__file__}.log", level=logging.INFO)
def mfcc(
audio: np.ndarray,
sample_rate: int,
ftt_size: int = 1024,
hop_length: int = 20,
mel_filter_num: int = 10,
dct_filter_num: int = 40,
) -> np.ndarray:
"""
Calculate Mel Frequency Cepstral Coefficients (MFCCs) from an audio signal.
Args:
audio: The input audio signal.
sample_rate: The sample rate of the audio signal (in Hz).
ftt_size: The size of the FFT window (default is 1024).
hop_length: The hop length for frame creation (default is 20ms).
mel_filter_num: The number of Mel filters (default is 10).
dct_filter_num: The number of DCT filters (default is 40).
Returns:
A matrix of MFCCs for the input audio.
Raises:
ValueError: If the input audio is empty.
Example:
>>> sample_rate = 44100 # Sample rate of 44.1 kHz
>>> duration = 2.0 # Duration of 2 seconds
>>> t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)
>>> audio = 0.5 * np.sin(2 * np.pi * 440.0 * t) # Generate a 440 Hz sine wave
>>> mfccs = mfcc(audio, sample_rate)
>>> mfccs.shape
(40, 101)
"""
logging.info(f"Sample rate: {sample_rate}Hz")
logging.info(f"Audio duration: {len(audio) / sample_rate}s")
logging.info(f"Audio min: {np.min(audio)}")
logging.info(f"Audio max: {np.max(audio)}")
# normalize audio
audio_normalized = normalize(audio)
logging.info(f"Normalized audio min: {np.min(audio_normalized)}")
logging.info(f"Normalized audio max: {np.max(audio_normalized)}")
# frame audio into overlapping frames
audio_framed = audio_frames(
audio_normalized, sample_rate, ftt_size=ftt_size, hop_length=hop_length
)
logging.info(f"Framed audio shape: {audio_framed.shape}")
logging.info(f"First frame: {audio_framed[0]}")
# convert to frequency domain
# For simplicity we will choose the Hanning window.
window = get_window("hann", ftt_size, fftbins=True)
audio_windowed = audio_framed * window
logging.info(f"Windowed audio shape: {audio_windowed.shape}")
logging.info(f"First frame: {audio_windowed[0]}")
audio_fft = calculate_fft(audio_windowed, ftt_size)
logging.info(f"fft audio shape: {audio_fft.shape}")
logging.info(f"First frame: {audio_fft[0]}")
audio_power = calculate_signal_power(audio_fft)
logging.info(f"power audio shape: {audio_power.shape}")
logging.info(f"First frame: {audio_power[0]}")
filters = mel_spaced_filterbank(sample_rate, mel_filter_num, ftt_size)
logging.info(f"filters shape: {filters.shape}")
audio_filtered = np.dot(filters, np.transpose(audio_power))
audio_log = 10.0 * np.log10(audio_filtered)
logging.info(f"audio_log shape: {audio_log.shape}")
dct_filters = discrete_cosine_transform(dct_filter_num, mel_filter_num)
cepstral_coefficents = np.dot(dct_filters, audio_log)
logging.info(f"cepstral_coefficents shape: {cepstral_coefficents.shape}")
return cepstral_coefficents
def normalize(audio: np.ndarray) -> np.ndarray:
"""
Normalize an audio signal by scaling it to have values between -1 and 1.
Args:
audio: The input audio signal.
Returns:
The normalized audio signal.
Examples:
>>> audio = np.array([1, 2, 3, 4, 5])
>>> normalized_audio = normalize(audio)
>>> float(np.max(normalized_audio))
1.0
>>> float(np.min(normalized_audio))
0.2
"""
# Divide the entire audio signal by the maximum absolute value
return audio / np.max(np.abs(audio))
def audio_frames(
audio: np.ndarray,
sample_rate: int,
hop_length: int = 20,
ftt_size: int = 1024,
) -> np.ndarray:
"""
Split an audio signal into overlapping frames.
Args:
audio: The input audio signal.
sample_rate: The sample rate of the audio signal.
hop_length: The length of the hopping (default is 20ms).
ftt_size: The size of the FFT window (default is 1024).
Returns:
An array of overlapping frames.
Examples:
>>> audio = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]*1000)
>>> sample_rate = 8000
>>> frames = audio_frames(audio, sample_rate, hop_length=10, ftt_size=512)
>>> frames.shape
(126, 512)
"""
hop_size = np.round(sample_rate * hop_length / 1000).astype(int)
# Pad the audio signal to handle edge cases
audio = np.pad(audio, int(ftt_size / 2), mode="reflect")
# Calculate the number of frames
frame_count = int((len(audio) - ftt_size) / hop_size) + 1
# Initialize an array to store the frames
frames = np.zeros((frame_count, ftt_size))
# Split the audio signal into frames
for n in range(frame_count):
frames[n] = audio[n * hop_size : n * hop_size + ftt_size]
return frames
def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarray:
"""
Calculate the Fast Fourier Transform (FFT) of windowed audio data.
Args:
audio_windowed: The windowed audio signal.
ftt_size: The size of the FFT (default is 1024).
Returns:
The FFT of the audio data.
Examples:
>>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> audio_fft = calculate_fft(audio_windowed, ftt_size=4)
>>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j,
... -1.5-0.8660254j])))
True
"""
# Transpose the audio data to have time in rows and channels in columns
audio_transposed = np.transpose(audio_windowed)
# Initialize an array to store the FFT results
audio_fft = np.empty(
(int(1 + ftt_size // 2), audio_transposed.shape[1]),
dtype=np.complex64,
order="F",
)
# Compute FFT for each channel
for n in range(audio_fft.shape[1]):
audio_fft[:, n] = fft.fft(audio_transposed[:, n], axis=0)[: audio_fft.shape[0]]
# Transpose the FFT results back to the original shape
return np.transpose(audio_fft)
def calculate_signal_power(audio_fft: np.ndarray) -> np.ndarray:
"""
Calculate the power of the audio signal from its FFT.
Args:
audio_fft: The FFT of the audio signal.
Returns:
The power of the audio signal.
Examples:
>>> audio_fft = np.array([1+2j, 2+3j, 3+4j, 4+5j])
>>> power = calculate_signal_power(audio_fft)
>>> np.allclose(power, np.array([5, 13, 25, 41]))
True
"""
# Calculate the power by squaring the absolute values of the FFT coefficients
return np.square(np.abs(audio_fft))
def freq_to_mel(freq: float) -> float:
"""
Convert a frequency in Hertz to the mel scale.
Args:
freq: The frequency in Hertz.
Returns:
The frequency in mel scale.
Examples:
>>> float(round(freq_to_mel(1000), 2))
999.99
"""
# Use the formula to convert frequency to the mel scale
return 2595.0 * np.log10(1.0 + freq / 700.0)
def mel_to_freq(mels: float) -> float:
"""
Convert a frequency in the mel scale to Hertz.
Args:
mels: The frequency in mel scale.
Returns:
The frequency in Hertz.
Examples:
>>> round(mel_to_freq(999.99), 2)
1000.01
"""
# Use the formula to convert mel scale to frequency
return 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
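# Illustrative cross-check (not part of the MFCC pipeline itself): freq_to_mel
# and mel_to_freq are exact inverses, so a round trip should recover the
# original frequencies up to floating-point error. The sample values are made up.
def mel_round_trip_check() -> bool:
    """
    >>> mel_round_trip_check()
    True
    """
    freqs = np.array([200.0, 1000.0, 4000.0])
    return bool(np.allclose(mel_to_freq(freq_to_mel(freqs)), freqs))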
def mel_spaced_filterbank(
sample_rate: int, mel_filter_num: int = 10, ftt_size: int = 1024
) -> np.ndarray:
"""
Create a Mel-spaced filter bank for audio processing.
Args:
sample_rate: The sample rate of the audio.
mel_filter_num: The number of mel filters (default is 10).
ftt_size: The size of the FFT (default is 1024).
Returns:
Mel-spaced filter bank.
Examples:
>>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10))
0.0004603981
"""
freq_min = 0
freq_high = sample_rate // 2
logging.info(f"Minimum frequency: {freq_min}")
logging.info(f"Maximum frequency: {freq_high}")
# Calculate filter points and mel frequencies
filter_points, mel_freqs = get_filter_points(
sample_rate,
freq_min,
freq_high,
mel_filter_num,
ftt_size,
)
filters = get_filters(filter_points, ftt_size)
# normalize filters
# taken from the librosa library
enorm = 2.0 / (mel_freqs[2 : mel_filter_num + 2] - mel_freqs[:mel_filter_num])
return filters * enorm[:, np.newaxis]
def get_filters(filter_points: np.ndarray, ftt_size: int) -> np.ndarray:
"""
Generate filters for audio processing.
Args:
filter_points: A list of filter points.
ftt_size: The size of the FFT.
Returns:
A matrix of filters.
Examples:
>>> get_filters(np.array([0, 20, 51, 95, 161, 256], dtype=int), 512).shape
(4, 257)
"""
num_filters = len(filter_points) - 2
filters = np.zeros((num_filters, int(ftt_size / 2) + 1))
for n in range(num_filters):
start = filter_points[n]
mid = filter_points[n + 1]
end = filter_points[n + 2]
# Linearly increase values from 0 to 1
filters[n, start:mid] = np.linspace(0, 1, mid - start)
# Linearly decrease values from 1 to 0
filters[n, mid:end] = np.linspace(1, 0, end - mid)
return filters
def get_filter_points(
sample_rate: int,
freq_min: int,
freq_high: int,
mel_filter_num: int = 10,
ftt_size: int = 1024,
) -> tuple[np.ndarray, np.ndarray]:
"""
Calculate the filter points and frequencies for mel frequency filters.
Args:
sample_rate: The sample rate of the audio.
freq_min: The minimum frequency in Hertz.
freq_high: The maximum frequency in Hertz.
mel_filter_num: The number of mel filters (default is 10).
ftt_size: The size of the FFT (default is 1024).
Returns:
Filter points and corresponding frequencies.
Examples:
>>> filter_points = get_filter_points(8000, 0, 4000, mel_filter_num=4, ftt_size=512)
>>> filter_points[0]
array([ 0, 20, 51, 95, 161, 256])
>>> filter_points[1]
array([ 0. , 324.46707094, 799.33254207, 1494.30973963,
2511.42581671, 4000. ])
"""
# Convert minimum and maximum frequencies to mel scale
fmin_mel = freq_to_mel(freq_min)
fmax_mel = freq_to_mel(freq_high)
logging.info(f"MEL min: {fmin_mel}")
logging.info(f"MEL max: {fmax_mel}")
# Generate equally spaced mel frequencies
mels = np.linspace(fmin_mel, fmax_mel, num=mel_filter_num + 2)
# Convert mel frequencies back to Hertz
freqs = mel_to_freq(mels)
# Calculate filter points as integer values
filter_points = np.floor((ftt_size + 1) / sample_rate * freqs).astype(int)
return filter_points, freqs
def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarray:
"""
Compute the Discrete Cosine Transform (DCT) basis matrix.
Args:
dct_filter_num: The number of DCT filters to generate.
filter_num: The number of the fbank filters.
Returns:
The DCT basis matrix.
Examples:
>>> float(round(discrete_cosine_transform(3, 5)[0][0], 5))
0.44721
"""
basis = np.empty((dct_filter_num, filter_num))
basis[0, :] = 1.0 / np.sqrt(filter_num)
samples = np.arange(1, 2 * filter_num, 2) * np.pi / (2.0 * filter_num)
for i in range(1, dct_filter_num):
basis[i, :] = np.cos(i * samples) * np.sqrt(2.0 / filter_num)
return basis
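# Sketch of a cross-check (illustrative only, unused by the pipeline): the
# basis built by discrete_cosine_transform() should match the orthonormal
# DCT-II kernel, obtained here by transforming the columns of an identity
# matrix with the scipy.fftpack module already imported above as fft.
def dct_basis_cross_check(dct_filter_num: int = 3, filter_num: int = 5) -> bool:
    """
    >>> dct_basis_cross_check()
    True
    """
    kernel = fft.dct(np.eye(filter_num), norm="ortho", axis=0)
    basis = discrete_cosine_transform(dct_filter_num, filter_num)
    return bool(np.allclose(basis, kernel[:dct_filter_num]))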
def example(wav_file_path: str = "./path-to-file/sample.wav") -> np.ndarray:
"""
Example function to calculate Mel Frequency Cepstral Coefficients
(MFCCs) from an audio file.
Args:
wav_file_path: The path to the WAV audio file.
Returns:
np.ndarray: The computed MFCCs for the audio.
"""
from scipy.io import wavfile
# Load the audio from the WAV file
sample_rate, audio = wavfile.read(wav_file_path)
# Calculate MFCCs
return mfcc(audio, sample_rate)
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/polynomial_regression.py | machine_learning/polynomial_regression.py | """
Polynomial regression is a type of regression analysis that models the relationship
between a predictor x and the response y as an mth-degree polynomial:
y = β₀ + β₁x + β₂x² + ... + βₘxᵐ + ε
By treating x, x², ..., xᵐ as distinct variables, we see that polynomial regression is a
special case of multiple linear regression. Therefore, we can use ordinary least squares
(OLS) estimation to estimate the vector of model parameters β = (β₀, β₁, β₂, ..., βₘ)
for polynomial regression:
β = (XᵀX)⁻¹Xᵀy = X⁺y
where X is the design matrix, y is the response vector, and X⁺ denotes the Moore-Penrose
pseudoinverse of X. In the case of polynomial regression, the design matrix is
|1 x₁ x₁² ⋯ x₁ᵐ|
X = |1 x₂ x₂² ⋯ x₂ᵐ|
|⋮ ⋮ ⋮ ⋱ ⋮ |
|1 xₙ xₙ² ⋯ xₙᵐ|
In OLS estimation, inverting XᵀX to compute X⁺ can be very numerically unstable. This
implementation sidesteps this need to invert XᵀX by computing X⁺ using singular value
decomposition (SVD):
β = VΣ⁺Uᵀy
where UΣVᵀ is an SVD of X.
References:
- https://en.wikipedia.org/wiki/Polynomial_regression
- https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
- https://en.wikipedia.org/wiki/Numerical_methods_for_linear_least_squares
- https://en.wikipedia.org/wiki/Singular_value_decomposition
"""
import matplotlib.pyplot as plt
import numpy as np
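# Minimal numeric sketch (illustrative only, unused by the class below) of the
# identity this module relies on: for a full-rank design matrix, the SVD-based
# pseudoinverse np.linalg.pinv(X) equals the normal-equation form (XᵀX)⁻¹Xᵀ.
def pinv_matches_normal_equations() -> bool:
    """
    >>> pinv_matches_normal_equations()
    True
    """
    design = np.vander(np.arange(5.0), N=3, increasing=True)
    via_svd = np.linalg.pinv(design)
    via_normal_equations = np.linalg.inv(design.T @ design) @ design.T
    return bool(np.allclose(via_svd, via_normal_equations))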
class PolynomialRegression:
__slots__ = "degree", "params"
def __init__(self, degree: int) -> None:
"""
@raises ValueError: if the polynomial degree is negative
"""
if degree < 0:
raise ValueError("Polynomial degree must be non-negative")
self.degree = degree
self.params = None
@staticmethod
def _design_matrix(data: np.ndarray, degree: int) -> np.ndarray:
"""
Constructs a polynomial regression design matrix for the given input data. For
input data x = (x₁, x₂, ..., xₙ) and polynomial degree m, the design matrix is
the Vandermonde matrix
|1 x₁ x₁² ⋯ x₁ᵐ|
X = |1 x₂ x₂² ⋯ x₂ᵐ|
|⋮ ⋮ ⋮ ⋱ ⋮ |
|1 xₙ xₙ² ⋯ xₙᵐ|
Reference: https://en.wikipedia.org/wiki/Vandermonde_matrix
@param data: the input predictor values x, either for model fitting or for
prediction
@param degree: the polynomial degree m
@returns: the Vandermonde matrix X (see above)
@raises ValueError: if input data is not N x 1
>>> x = np.array([0, 1, 2])
>>> PolynomialRegression._design_matrix(x, degree=0)
array([[1],
[1],
[1]])
>>> PolynomialRegression._design_matrix(x, degree=1)
array([[1, 0],
[1, 1],
[1, 2]])
>>> PolynomialRegression._design_matrix(x, degree=2)
array([[1, 0, 0],
[1, 1, 1],
[1, 2, 4]])
>>> PolynomialRegression._design_matrix(x, degree=3)
array([[1, 0, 0, 0],
[1, 1, 1, 1],
[1, 2, 4, 8]])
>>> PolynomialRegression._design_matrix(np.array([[0, 0], [0 , 0]]), degree=3)
Traceback (most recent call last):
...
ValueError: Data must have dimensions N x 1
"""
_rows, *remaining = data.shape
if remaining:
raise ValueError("Data must have dimensions N x 1")
return np.vander(data, N=degree + 1, increasing=True)
def fit(self, x_train: np.ndarray, y_train: np.ndarray) -> None:
"""
Computes the polynomial regression model parameters using ordinary least squares
(OLS) estimation:
β = (XᵀX)⁻¹Xᵀy = X⁺y
where X⁺ denotes the Moore-Penrose pseudoinverse of the design matrix X. This
function computes X⁺ using singular value decomposition (SVD).
References:
- https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
- https://en.wikipedia.org/wiki/Singular_value_decomposition
- https://en.wikipedia.org/wiki/Multicollinearity
@param x_train: the predictor values x for model fitting
@param y_train: the response values y for model fitting
@raises ArithmeticError: if X isn't full rank, then XᵀX is singular and β
doesn't exist
>>> x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
>>> y = x**3 - 2 * x**2 + 3 * x - 5
>>> poly_reg = PolynomialRegression(degree=3)
>>> poly_reg.fit(x, y)
>>> poly_reg.params
array([-5., 3., -2., 1.])
>>> poly_reg = PolynomialRegression(degree=20)
>>> poly_reg.fit(x, y)
Traceback (most recent call last):
...
ArithmeticError: Design matrix is not full rank, can't compute coefficients
Make sure errors don't grow too large:
>>> coefs = np.array([-250, 50, -2, 36, 20, -12, 10, 2, -1, -15, 1])
>>> y = PolynomialRegression._design_matrix(x, len(coefs) - 1) @ coefs
>>> poly_reg = PolynomialRegression(degree=len(coefs) - 1)
>>> poly_reg.fit(x, y)
>>> np.allclose(poly_reg.params, coefs, atol=10e-3)
True
"""
X = PolynomialRegression._design_matrix(x_train, self.degree) # noqa: N806
_, cols = X.shape
if np.linalg.matrix_rank(X) < cols:
raise ArithmeticError(
"Design matrix is not full rank, can't compute coefficients"
)
# np.linalg.pinv() computes the Moore-Penrose pseudoinverse using SVD
self.params = np.linalg.pinv(X) @ y_train
def predict(self, data: np.ndarray) -> np.ndarray:
"""
Computes the predicted response values y for the given input data by
constructing the design matrix X and evaluating y = Xβ.
@param data: the predictor values x for prediction
@returns: the predicted response values y = Xβ
@raises ArithmeticError: if this function is called before the model
parameters are fit
>>> x = np.array([0, 1, 2, 3, 4])
>>> y = x**3 - 2 * x**2 + 3 * x - 5
>>> poly_reg = PolynomialRegression(degree=3)
>>> poly_reg.fit(x, y)
>>> poly_reg.predict(np.array([-1]))
array([-11.])
>>> poly_reg.predict(np.array([-2]))
array([-27.])
>>> poly_reg.predict(np.array([6]))
array([157.])
>>> PolynomialRegression(degree=3).predict(x)
Traceback (most recent call last):
...
ArithmeticError: Predictor hasn't been fit yet
"""
if self.params is None:
raise ArithmeticError("Predictor hasn't been fit yet")
return PolynomialRegression._design_matrix(data, self.degree) @ self.params
def main() -> None:
"""
Fit a polynomial regression model to predict fuel efficiency using seaborn's mpg
dataset
>>> pass # Placeholder, function is only for demo purposes
"""
import seaborn as sns
mpg_data = sns.load_dataset("mpg")
poly_reg = PolynomialRegression(degree=2)
poly_reg.fit(mpg_data.weight, mpg_data.mpg)
weight_sorted = np.sort(mpg_data.weight)
predictions = poly_reg.predict(weight_sorted)
plt.scatter(mpg_data.weight, mpg_data.mpg, color="gray", alpha=0.5)
plt.plot(weight_sorted, predictions, color="red", linewidth=3)
plt.title("Predicting Fuel Efficiency Using Polynomial Regression")
plt.xlabel("Weight (lbs)")
plt.ylabel("Fuel Efficiency (mpg)")
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/k_nearest_neighbours.py | machine_learning/k_nearest_neighbours.py | """
k-Nearest Neighbours (kNN) is a simple non-parametric supervised learning
algorithm used for classification. Given some labelled training data, a given
point is classified using its k nearest neighbours according to some distance
metric. The most commonly occurring label among the neighbours becomes the label
of the given point. In effect, the label of the given point is decided by a
majority vote.
This implementation uses the commonly used Euclidean distance metric, but other
distance metrics can also be used.
Reference: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
"""
from collections import Counter
from heapq import nsmallest
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
class KNN:
def __init__(
self,
train_data: np.ndarray[float],
train_target: np.ndarray[int],
class_labels: list[str],
) -> None:
"""
Create a kNN classifier using the given training data and class labels
"""
        # Materialize the pairs so the classifier can be reused;
        # a bare zip iterator would be exhausted after the first classify() call
        self.data = list(zip(train_data, train_target))
self.labels = class_labels
@staticmethod
def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float:
"""
Calculate the Euclidean distance between two points
>>> KNN._euclidean_distance(np.array([0, 0]), np.array([3, 4]))
5.0
>>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
10.0
"""
return float(np.linalg.norm(a - b))
def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
"""
Classify a given point using the kNN algorithm
>>> train_X = np.array(
... [[0, 0], [1, 0], [0, 1], [0.5, 0.5], [3, 3], [2, 3], [3, 2]]
... )
>>> train_y = np.array([0, 0, 0, 0, 1, 1, 1])
>>> classes = ['A', 'B']
>>> knn = KNN(train_X, train_y, classes)
>>> point = np.array([1.2, 1.2])
>>> knn.classify(point)
'A'
"""
# Distances of all points from the point to be classified
distances = (
(self._euclidean_distance(data_point[0], pred_point), data_point[1])
for data_point in self.data
)
# Choosing k points with the shortest distances
votes = (i[1] for i in nsmallest(k, distances))
# Most commonly occurring class is the one into which the point is classified
result = Counter(votes).most_common(1)[0][0]
return self.labels[result]
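# Pocket-sized illustration (hypothetical labelled distances) of the voting
# step used in KNN.classify(): keep the k nearest labels, then let the most
# common one win.
def majority_vote_demo(k: int = 3) -> str:
    """
    >>> majority_vote_demo()
    'A'
    """
    labelled_distances = [(0.5, 0), (0.7, 0), (0.9, 1), (2.0, 1), (3.0, 1)]
    votes = (label for _, label in nsmallest(k, labelled_distances))
    return ["A", "B"][Counter(votes).most_common(1)[0][0]]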
if __name__ == "__main__":
import doctest
doctest.testmod()
iris = datasets.load_iris()
X = np.array(iris["data"])
y = np.array(iris["target"])
iris_classes = iris["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
iris_point = np.array([4.4, 3.1, 1.3, 1.4])
classifier = KNN(X_train, y_train, iris_classes)
print(classifier.classify(iris_point, k=3))
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/linear_regression.py | machine_learning/linear_regression.py | """
Linear regression is the most basic type of regression commonly used for
predictive analysis. The idea is pretty simple: we have a dataset and we have
features associated with it. Features should be chosen very cautiously
as they determine how much our model will be able to make future predictions.
We try to set the weight of these features, over many iterations, so that they best
fit our dataset. In this particular code, I had used a CSGO dataset (ADR vs
Rating). We try to best fit a line through dataset and estimate the parameters.
"""
# /// script
# requires-python = ">=3.13"
# dependencies = [
# "httpx",
# "numpy",
# ]
# ///
import httpx
import numpy as np
def collect_dataset():
"""Collect dataset of CSGO
The dataset contains ADR vs Rating of a Player
:return : dataset obtained from the link, as matrix
"""
response = httpx.get(
"https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
"master/Week1/ADRvsRating.csv",
timeout=10,
)
lines = response.text.splitlines()
data = []
for item in lines:
item = item.split(",")
data.append(item)
data.pop(0) # This is for removing the labels from the list
dataset = np.matrix(data)
return dataset
def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):
"""Run steep gradient descent and updates the Feature vector accordingly_
:param data_x : contains the dataset
:param data_y : contains the output associated with each data-entry
:param len_data : length of the data_
:param alpha : Learning rate of the model
:param theta : Feature vector (weight's for our model)
;param return : Updated Feature's, using
curr_features - alpha_ * gradient(w.r.t. feature)
>>> import numpy as np
>>> data_x = np.array([[1, 2], [3, 4]])
>>> data_y = np.array([5, 6])
>>> len_data = len(data_x)
>>> alpha = 0.01
>>> theta = np.array([0.1, 0.2])
>>> run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
array([0.196, 0.343])
"""
n = len_data
prod = np.dot(theta, data_x.transpose())
prod -= data_y.transpose()
sum_grad = np.dot(prod, data_x)
theta = theta - (alpha / n) * sum_grad
return theta
def sum_of_square_error(data_x, data_y, len_data, theta):
"""Return sum of square error for error calculation
:param data_x : contains our dataset
:param data_y : contains the output (result vector)
:param len_data : len of the dataset
:param theta : contains the feature vector
:return : sum of square error computed from given feature's
Example:
>>> vc_x = np.array([[1.1], [2.1], [3.1]])
>>> vc_y = np.array([1.2, 2.2, 3.2])
>>> round(sum_of_square_error(vc_x, vc_y, 3, np.array([1])),3)
np.float64(0.005)
"""
prod = np.dot(theta, data_x.transpose())
prod -= data_y.transpose()
sum_elem = np.sum(np.square(prod))
error = sum_elem / (2 * len_data)
return error
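# Hedged sanity check (synthetic data, illustrative only): on points that lie
# exactly on a line, repeated calls to run_steep_gradient_descent() should
# drive sum_of_square_error() towards zero.
def gradient_descent_converges() -> bool:
    """
    >>> gradient_descent_converges()
    True
    """
    data_x = np.array([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]])  # bias + x columns
    data_y = np.array([2.0, 4.0, 6.0])  # exactly y = 2 * x
    theta = np.zeros(2)
    for _ in range(500):
        theta = run_steep_gradient_descent(data_x, data_y, 3, 0.3, theta)
    return bool(sum_of_square_error(data_x, data_y, 3, theta) < 1e-6)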
def run_linear_regression(data_x, data_y):
"""Implement Linear regression over the dataset
:param data_x : contains our dataset
:param data_y : contains the output (result vector)
:return : feature for line of best fit (Feature vector)
"""
iterations = 100000
alpha = 0.0001550
no_features = data_x.shape[1]
len_data = data_x.shape[0] - 1
theta = np.zeros((1, no_features))
for i in range(iterations):
theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
error = sum_of_square_error(data_x, data_y, len_data, theta)
print(f"At Iteration {i + 1} - Error is {error:.5f}")
return theta
def mean_absolute_error(predicted_y, original_y):
"""Return sum of square error for error calculation
:param predicted_y : contains the output of prediction (result vector)
:param original_y : contains values of expected outcome
:return : mean absolute error computed from given feature's
>>> predicted_y = [3, -0.5, 2, 7]
>>> original_y = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(predicted_y, original_y)
0.5
"""
total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
return total / len(original_y)
def main():
"""Driver function"""
data = collect_dataset()
len_data = data.shape[0]
data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)
data_y = data[:, -1].astype(float)
theta = run_linear_regression(data_x, data_y)
len_result = theta.shape[1]
print("Resultant Feature vector : ")
for i in range(len_result):
print(f"{theta[0, i]:.5f}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/lstm/lstm_prediction.py | machine_learning/lstm/lstm_prediction.py | """
Create a Long Short Term Memory (LSTM) network model
An LSTM is a type of Recurrent Neural Network (RNN) as discussed at:
* https://colah.github.io/posts/2015-08-Understanding-LSTMs
* https://en.wikipedia.org/wiki/Long_short-term_memory
"""
import numpy as np
import pandas as pd
from keras.layers import LSTM, Dense
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
if __name__ == "__main__":
"""
    First part of building a model is to get the data and prepare
    it for our model. You can use any dataset for stock prediction;
    just set the price column in the `actual_data` assignment below.
    Here we use a dataset that keeps the price in its second column.
"""
sample_data = pd.read_csv("sample_data.csv", header=None)
len_data = sample_data.shape[:1][0]
    # If you're using some other dataset, select its target column here
actual_data = sample_data.iloc[:, 1:2]
actual_data = actual_data.to_numpy().reshape(len_data, 1)
actual_data = MinMaxScaler().fit_transform(actual_data)
look_back = 10
forward_days = 5
periods = 20
division = len_data - periods * look_back
train_data = actual_data[:division]
test_data = actual_data[division - look_back :]
train_x, train_y = [], []
test_x, test_y = [], []
for i in range(len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
x_train = np.array(train_x)
x_test = np.array(test_x)
y_train = np.array([list(i.ravel()) for i in train_y])
y_test = np.array([list(i.ravel()) for i in test_y])
model = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
history = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
pred = model.predict(x_test)
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/lstm/__init__.py | machine_learning/lstm/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/local_weighted_learning/local_weighted_learning.py | machine_learning/local_weighted_learning/local_weighted_learning.py | """
Locally weighted linear regression, also called local regression, is a type of
non-parametric linear regression that prioritizes data closest to a given
prediction point. The algorithm estimates the vector of model coefficients β
using weighted least squares regression:
β = (XᵀWX)⁻¹(XᵀWy),
where X is the design matrix, y is the response vector, and W is the diagonal
weight matrix.
This implementation calculates wᵢ, the weight of the ith training sample, using
the Gaussian weight:
wᵢ = exp(-‖xᵢ - x‖²/(2τ²)),
where xᵢ is the ith training sample, x is the prediction point, τ is the
"bandwidth", and ‖x‖ is the Euclidean norm (also called the 2-norm or the L²
norm). The bandwidth τ controls how quickly the weight of a training sample
decreases as its distance from the prediction point increases. One can think of
the Gaussian weight as a bell curve centered around the prediction point: a
training sample is weighted lower if it's farther from the center, and τ
controls the spread of the bell curve.
Other types of locally weighted regression such as locally estimated scatterplot
smoothing (LOESS) typically use different weight functions.
References:
- https://en.wikipedia.org/wiki/Local_regression
- https://en.wikipedia.org/wiki/Weighted_least_squares
- https://cs229.stanford.edu/notes2022fall/main_notes.pdf
"""
import matplotlib.pyplot as plt
import numpy as np
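# Small self-contained sketch (made-up numbers) of the Gaussian weight defined
# above: w = exp(-‖xᵢ - x‖² / (2τ²)) shrinks as a training sample moves away
# from the prediction point.
def gaussian_weight_demo() -> bool:
    """
    >>> gaussian_weight_demo()
    True
    """
    def gaussian(sample: np.ndarray, point: np.ndarray, tau: float) -> float:
        return float(np.exp(-np.sum((sample - point) ** 2) / (2 * tau**2)))
    point = np.array([1.0, 1.0])
    near = gaussian(np.array([1.5, 1.0]), point, tau=0.8)
    far = gaussian(np.array([4.0, 4.0]), point, tau=0.8)
    return near > far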
def weight_matrix(point: np.ndarray, x_train: np.ndarray, tau: float) -> np.ndarray:
"""
Calculate the weight of every point in the training data around a given
prediction point
Args:
point: x-value at which the prediction is being made
x_train: ndarray of x-values for training
tau: bandwidth value, controls how quickly the weight of training values
decreases as the distance from the prediction point increases
Returns:
m x m weight matrix around the prediction point, where m is the size of
the training set
>>> weight_matrix(
... np.array([1., 1.]),
... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]),
... 0.6
... )
array([[1.43807972e-207, 0.00000000e+000, 0.00000000e+000],
[0.00000000e+000, 0.00000000e+000, 0.00000000e+000],
[0.00000000e+000, 0.00000000e+000, 0.00000000e+000]])
"""
m = len(x_train) # Number of training samples
weights = np.eye(m) # Initialize weights as identity matrix
for j in range(m):
diff = point - x_train[j]
weights[j, j] = np.exp(diff @ diff.T / (-2.0 * tau**2))
return weights
def local_weight(
point: np.ndarray, x_train: np.ndarray, y_train: np.ndarray, tau: float
) -> np.ndarray:
"""
Calculate the local weights at a given prediction point using the weight
matrix for that point
Args:
point: x-value at which the prediction is being made
x_train: ndarray of x-values for training
y_train: ndarray of y-values for training
tau: bandwidth value, controls how quickly the weight of training values
decreases as the distance from the prediction point increases
Returns:
ndarray of local weights
>>> local_weight(
... np.array([1., 1.]),
... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]),
... np.array([[1.01, 1.66, 3.5]]),
... 0.6
... )
array([[0.00873174],
[0.08272556]])
"""
weight_mat = weight_matrix(point, x_train, tau)
weight = np.linalg.inv(x_train.T @ weight_mat @ x_train) @ (
x_train.T @ weight_mat @ y_train.T
)
return weight
def local_weight_regression(
x_train: np.ndarray, y_train: np.ndarray, tau: float
) -> np.ndarray:
"""
Calculate predictions for each point in the training data
Args:
x_train: ndarray of x-values for training
y_train: ndarray of y-values for training
tau: bandwidth value, controls how quickly the weight of training values
decreases as the distance from the prediction point increases
Returns:
ndarray of predictions
>>> local_weight_regression(
... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]),
... np.array([[1.01, 1.66, 3.5]]),
... 0.6
... )
array([1.07173261, 1.65970737, 3.50160179])
"""
y_pred = np.zeros(len(x_train)) # Initialize array of predictions
for i, item in enumerate(x_train):
y_pred[i] = np.dot(item, local_weight(item, x_train, y_train, tau)).item()
return y_pred
def load_data(
dataset_name: str, x_name: str, y_name: str
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Load data from seaborn and split it into x and y points
>>> pass # No doctests, function is for demo purposes only
"""
import seaborn as sns
data = sns.load_dataset(dataset_name)
x_data = np.array(data[x_name])
y_data = np.array(data[y_name])
one = np.ones(len(y_data))
# pairing elements of one and x_data
x_train = np.column_stack((one, x_data))
return x_train, x_data, y_data
def plot_preds(
x_train: np.ndarray,
preds: np.ndarray,
x_data: np.ndarray,
y_data: np.ndarray,
x_name: str,
y_name: str,
) -> None:
"""
Plot predictions and display the graph
>>> pass # No doctests, function is for demo purposes only
"""
x_train_sorted = np.sort(x_train, axis=0)
plt.scatter(x_data, y_data, color="blue")
plt.plot(
x_train_sorted[:, 1],
preds[x_train[:, 1].argsort(0)],
color="yellow",
linewidth=5,
)
plt.title("Local Weighted Regression")
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
# Demo with a dataset from the seaborn module
training_data_x, total_bill, tip = load_data("tips", "total_bill", "tip")
predictions = local_weight_regression(training_data_x, tip, 5)
plot_preds(training_data_x, predictions, total_bill, tip, "total_bill", "tip")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/local_weighted_learning/__init__.py | machine_learning/local_weighted_learning/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/forecasting/run.py | machine_learning/forecasting/run.py | """
This is code for forecasting, but I modified it and use it as a safety
checker for data. For example: you have an online shop and for some reason
some data are missing (the amount of data is not what you expected),
then we can use it.
Notes: 1. Of course we can use a normal statistical method, but in this
case the data is quite absurd and there is only a little of it.
2. Of course you can modify this for forecasting purposes, e.g. the next
3 months' sales or something; just adjust it for your own purpose.
"""
from warnings import simplefilter
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
"""
First method: linear regression
input : training data (date, total_user, total_event) in list of float
    output : predicted total users, as a float
>>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
    >>> bool(abs(n - 4.0) < 1e-6) # Checking precision because of floating point errors
True
"""
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
y = np.array(train_usr)
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
"""
    second method: SARIMAX
    SARIMAX is a statistical method that uses previous inputs
    and learns their pattern to predict future data
    input : training data (total_user, with exog data = total_event) in list of float
    output : predicted total users, as a float
>>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2])
6.6666671111109626
"""
# Suppress the User Warning raised by SARIMAX due to insufficient observations
simplefilter("ignore", UserWarning)
order = (1, 2, 1)
seasonal_order = (1, 1, 1, 7)
model = SARIMAX(
train_user, exog=train_match, order=order, seasonal_order=seasonal_order
)
model_fit = model.fit(disp=False, maxiter=600, method="nm")
result = model_fit.predict(1, len(test_match), exog=[test_match])
return float(result[0])
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
"""
Third method: Support vector regressor
    SVR is quite similar to SVM (support vector machine):
    it uses the same principles as the SVM for classification,
    with only a few minor differences, the main one being that
    it is better suited to regression purposes
    input : training data (date, total_user, total_event) in list of float
    where x = list of set (date and total event)
    output : predicted total users, as a float
>>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4])
1.634932078116079
"""
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
return float(y_pred[0])
def interquartile_range_checker(train_user: list) -> float:
"""
    Optional method: interquartile range
    input : list of total user in float
    output : low limit of input in float
    this method can be used to check whether some data is an outlier or not
>>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
2.8
"""
train_user.sort()
q1 = np.percentile(train_user, 25)
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return float(low_lim)
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
"""
Used to review all the votes (list result prediction)
and compare it to the actual result.
input : list of predictions
    output : True if the votes are mostly within tolerance of the actual result
>>> data_safety_checker([2, 3, 4], 5.0)
False
"""
safe = 0
not_safe = 0
if not isinstance(actual_result, float):
raise TypeError("Actual result should be float. Value passed is a list")
for i in list_vote:
if i > actual_result:
            # a prediction above the actual value counts as not safe
            not_safe += 1
elif abs(abs(i) - abs(actual_result)) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
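# Hedged example (fabricated vote values): two of the three predictions fall
# within the 0.1 tolerance of the actual result, so the majority says the
# data point is safe.
def data_safety_checker_demo() -> bool:
    """
    >>> data_safety_checker_demo()
    True
    """
    return data_safety_checker([4.95, 4.92, 2.0], 5.0)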
if __name__ == "__main__":
"""
data column = total user in a day, how much online event held in one day,
what day is that(sunday-saturday)
"""
data_input_df = pd.read_csv("ex_data.csv")
# start normalization
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear regression & sarimax
train_date = total_date[: len(total_date) - 1]
train_user = total_user[: len(total_user) - 1]
train_match = total_match[: len(total_match) - 1]
test_date = total_date[len(total_date) - 1 :]
test_user = total_user[len(total_user) - 1 :]
test_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = [
linear_regression_prediction(
train_date, train_user, train_match, test_date, test_match
),
sarimax_predictor(train_user, train_match, test_match),
support_vector_regressor(x_train, x_test, train_user),
]
# check the safety of today's data
not_str = "" if data_safety_checker(res_vote, test_user[0]) else "not "
print(f"Today's data is {not_str}safe.")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/machine_learning/forecasting/__init__.py | machine_learning/forecasting/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/__init__.py | data_structures/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/heap_generic.py | data_structures/heap/heap_generic.py | from collections.abc import Callable
class Heap:
"""
A generic Heap class, can be used as min or max by passing the key function
accordingly.
"""
def __init__(self, key: Callable | None = None) -> None:
# Stores actual heap items.
self.arr: list = []
# Stores indexes of each item for supporting updates and deletion.
self.pos_map: dict = {}
# Stores current size of heap.
self.size = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
self.key = key or (lambda x: x)
def _parent(self, i: int) -> int | None:
"""Returns parent index of given index if exists else None"""
return int((i - 1) / 2) if i > 0 else None
def _left(self, i: int) -> int | None:
"""Returns left-child-index of given index if exists else None"""
left = int(2 * i + 1)
return left if 0 < left < self.size else None
def _right(self, i: int) -> int | None:
"""Returns right-child-index of given index if exists else None"""
right = int(2 * i + 2)
return right if 0 < right < self.size else None
def _swap(self, i: int, j: int) -> None:
"""Performs changes required for swapping two elements in the heap"""
# First update the indexes of the items in index map.
self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
def _cmp(self, i: int, j: int) -> bool:
"""Compares the two items using default comparison"""
return self.arr[i][1] < self.arr[j][1]
def _get_valid_parent(self, i: int) -> int:
"""
Returns index of valid parent as per desired ordering among given index and
        both its children
"""
left = self._left(i)
right = self._right(i)
valid_parent = i
if left is not None and not self._cmp(left, valid_parent):
valid_parent = left
if right is not None and not self._cmp(right, valid_parent):
valid_parent = right
return valid_parent
def _heapify_up(self, index: int) -> None:
"""Fixes the heap in upward direction of given index"""
parent = self._parent(index)
while parent is not None and not self._cmp(index, parent):
self._swap(index, parent)
index, parent = parent, self._parent(parent)
def _heapify_down(self, index: int) -> None:
"""Fixes the heap in downward direction of given index"""
valid_parent = self._get_valid_parent(index)
while valid_parent != index:
self._swap(index, valid_parent)
index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
def update_item(self, item: int, item_value: int) -> None:
"""Updates given item value in heap if present"""
if item not in self.pos_map:
return
index = self.pos_map[item]
self.arr[index] = [item, self.key(item_value)]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(index)
self._heapify_down(index)
def delete_item(self, item: int) -> None:
"""Deletes given item from heap if present"""
if item not in self.pos_map:
return
index = self.pos_map[item]
del self.pos_map[item]
self.arr[index] = self.arr[self.size - 1]
self.pos_map[self.arr[self.size - 1][0]] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so no performance loss in calling both.
if self.size > index:
self._heapify_up(index)
self._heapify_down(index)
def insert_item(self, item: int, item_value: int) -> None:
"""Inserts given item with given value in heap"""
arr_len = len(self.arr)
if arr_len == self.size:
self.arr.append([item, self.key(item_value)])
else:
self.arr[self.size] = [item, self.key(item_value)]
self.pos_map[item] = self.size
self.size += 1
self._heapify_up(self.size - 1)
    def get_top(self) -> list | None:
        """Returns the top entry [item, calculated value] from the heap if present"""
return self.arr[0] if self.size else None
    def extract_top(self) -> list | None:
        """
        Return the top entry [item, calculated value] from the heap and remove
        it as well, if present
"""
top_item_tuple = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0])
return top_item_tuple
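# Brief illustration (toy items): the pos_map bookkeeping keeps each item's
# index current, which is what lets update_item() and delete_item() locate an
# item without a linear search.
def pos_map_demo() -> bool:
    """
    >>> pos_map_demo()
    True
    """
    heap = Heap()
    heap.insert_item(1, 10)
    heap.insert_item(2, 20)
    heap.insert_item(3, 30)
    top_item = heap.get_top()[0]
    return heap.pos_map[top_item] == 0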
def test_heap() -> None:
"""
>>> h = Heap() # Max-heap
>>> h.insert_item(5, 34)
>>> h.insert_item(6, 31)
>>> h.insert_item(7, 37)
>>> h.get_top()
[7, 37]
>>> h.extract_top()
[7, 37]
>>> h.extract_top()
[5, 34]
>>> h.extract_top()
[6, 31]
>>> h = Heap(key=lambda x: -x) # Min heap
>>> h.insert_item(5, 34)
>>> h.insert_item(6, 31)
>>> h.insert_item(7, 37)
>>> h.get_top()
[6, -31]
>>> h.extract_top()
[6, -31]
>>> h.extract_top()
[5, -34]
>>> h.extract_top()
[7, -37]
>>> h.insert_item(8, 45)
>>> h.insert_item(9, 40)
>>> h.insert_item(10, 50)
>>> h.get_top()
[9, -40]
>>> h.update_item(10, 30)
>>> h.get_top()
[10, -30]
>>> h.delete_item(10)
>>> h.get_top()
[9, -40]
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/binomial_heap.py | data_structures/heap/binomial_heap.py | """
Binomial Heap
Reference: Advanced Data Structures, Peter Brass
"""
class Node:
"""
Node in a doubly-linked binomial tree, containing:
- value
- size of left subtree
- link to left, right and parent nodes
"""
def __init__(self, val):
self.val = val
# Number of nodes in left subtree
self.left_tree_size = 0
self.left = None
self.right = None
self.parent = None
def merge_trees(self, other):
"""
In-place merge of two binomial trees of equal size.
Returns the root of the resulting tree
"""
assert self.left_tree_size == other.left_tree_size, "Unequal Sizes of Blocks"
if self.val < other.val:
other.left = self.right
other.parent = None
if self.right:
self.right.parent = other
self.right = other
self.left_tree_size = self.left_tree_size * 2 + 1
return self
else:
self.left = other.right
self.parent = None
if other.right:
other.right.parent = self
other.right = self
other.left_tree_size = other.left_tree_size * 2 + 1
return other
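# Toy illustration (standalone values): merging two rank-0 trees keeps the
# smaller value at the root, records left_tree_size = 1, and hangs the larger
# value as the right child.
def merge_trees_demo() -> tuple[int, int, int]:
    """
    >>> merge_trees_demo()
    (3, 1, 8)
    """
    root = Node(3).merge_trees(Node(8))
    return (root.val, root.left_tree_size, root.right.val)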
class BinomialHeap:
r"""
Min-oriented priority queue implemented with the Binomial Heap data
structure implemented with the BinomialHeap class. It supports:
    - Insert element in a heap with n elements: Guaranteed O(log n), amortized O(1)
    - Merge (meld) heaps of size m and n: O(log n + log m)
    - Delete Min: O(log n)
- Peek (return min without deleting it): O(1)
Example:
    Create a random permutation of 30 integers to be inserted and 20 of them deleted
>>> import numpy as np
>>> permutation = np.random.permutation(list(range(30)))
Create a Heap and insert the 30 integers
__init__() test
>>> first_heap = BinomialHeap()
30 inserts - insert() test
>>> for number in permutation:
... first_heap.insert(number)
Size test
>>> first_heap.size
30
Deleting - delete() test
>>> [int(first_heap.delete_min()) for _ in range(20)]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
Create a new Heap
>>> second_heap = BinomialHeap()
>>> vals = [17, 20, 31, 34]
>>> for value in vals:
... second_heap.insert(value)
The heap should have the following structure:
17
/ \
# 31
/ \
20 34
/ \ / \
# # # #
    pre_order() test
>>> " ".join(str(x) for x in second_heap.pre_order())
"(17, 0) ('#', 1) (31, 1) (20, 2) ('#', 3) ('#', 3) (34, 2) ('#', 3) ('#', 3)"
printing Heap - __str__() test
>>> print(second_heap)
17
-#
-31
--20
---#
---#
--34
---#
---#
    merge_heaps() test
>>>
>>> merged = second_heap.merge_heaps(first_heap)
>>> merged.peek()
17
    Values in the merged heap (merge is in-place)
>>> results = []
>>> while not first_heap.is_empty():
... results.append(int(first_heap.delete_min()))
>>> results
[17, 20, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31, 34]
"""
def __init__(self, bottom_root=None, min_node=None, heap_size=0):
self.size = heap_size
self.bottom_root = bottom_root
self.min_node = min_node
def merge_heaps(self, other):
"""
In-place merge of two binomial heaps.
Both of them become the resulting merged heap
"""
# Empty heaps corner cases
if other.size == 0:
return None
if self.size == 0:
self.size = other.size
self.bottom_root = other.bottom_root
self.min_node = other.min_node
return None
# Update size
self.size = self.size + other.size
        # Update min_node
if self.min_node.val > other.min_node.val:
self.min_node = other.min_node
# Merge
# Order roots by left_subtree_size
combined_roots_list = []
i, j = self.bottom_root, other.bottom_root
while i or j:
if i and ((not j) or i.left_tree_size < j.left_tree_size):
combined_roots_list.append((i, True))
i = i.parent
else:
combined_roots_list.append((j, False))
j = j.parent
# Insert links between them
for i in range(len(combined_roots_list) - 1):
if combined_roots_list[i][1] != combined_roots_list[i + 1][1]:
combined_roots_list[i][0].parent = combined_roots_list[i + 1][0]
combined_roots_list[i + 1][0].left = combined_roots_list[i][0]
# Consecutively merge roots with same left_tree_size
i = combined_roots_list[0][0]
while i.parent:
if (
(i.left_tree_size == i.parent.left_tree_size) and (not i.parent.parent)
) or (
i.left_tree_size == i.parent.left_tree_size
and i.left_tree_size != i.parent.parent.left_tree_size
):
# Neighbouring Nodes
previous_node = i.left
next_node = i.parent.parent
# Merging trees
i = i.merge_trees(i.parent)
# Updating links
i.left = previous_node
i.parent = next_node
if previous_node:
previous_node.parent = i
if next_node:
next_node.left = i
else:
i = i.parent
# Updating self.bottom_root
while i.left:
i = i.left
self.bottom_root = i
# Update other
other.size = self.size
other.bottom_root = self.bottom_root
other.min_node = self.min_node
# Return the merged heap
return self
def insert(self, val):
"""
insert a value in the heap
"""
if self.size == 0:
self.bottom_root = Node(val)
self.size = 1
self.min_node = self.bottom_root
else:
# Create new node
new_node = Node(val)
# Update size
self.size += 1
# update min_node
if val < self.min_node.val:
self.min_node = new_node
# Put new_node as a bottom_root in heap
self.bottom_root.left = new_node
new_node.parent = self.bottom_root
self.bottom_root = new_node
# Consecutively merge roots with same left_tree_size
while (
self.bottom_root.parent
and self.bottom_root.left_tree_size
== self.bottom_root.parent.left_tree_size
):
# Next node
next_node = self.bottom_root.parent.parent
# Merge
self.bottom_root = self.bottom_root.merge_trees(self.bottom_root.parent)
# Update Links
self.bottom_root.parent = next_node
self.bottom_root.left = None
if next_node:
next_node.left = self.bottom_root
def peek(self):
"""
return min element without deleting it
"""
return self.min_node.val
def is_empty(self):
return self.size == 0
def delete_min(self):
"""
delete min element and return it
"""
# assert not self.isEmpty(), "Empty Heap"
# Save minimal value
min_value = self.min_node.val
# Last element in heap corner case
if self.size == 1:
# Update size
self.size = 0
# Update bottom root
self.bottom_root = None
# Update min_node
self.min_node = None
return min_value
# No right subtree corner case
# The structure of the tree implies that this should be the bottom root
# and there is at least one other root
if self.min_node.right is None:
# Update size
self.size -= 1
# Update bottom root
self.bottom_root = self.bottom_root.parent
self.bottom_root.left = None
# Update min_node
self.min_node = self.bottom_root
i = self.bottom_root.parent
while i:
if i.val < self.min_node.val:
self.min_node = i
i = i.parent
return min_value
# General case
# Find the BinomialHeap of the right subtree of min_node
bottom_of_new = self.min_node.right
bottom_of_new.parent = None
min_of_new = bottom_of_new
size_of_new = 1
# Size, min_node and bottom_root
while bottom_of_new.left:
size_of_new = size_of_new * 2 + 1
bottom_of_new = bottom_of_new.left
if bottom_of_new.val < min_of_new.val:
min_of_new = bottom_of_new
# Corner case of single root on top left path
if (not self.min_node.left) and (not self.min_node.parent):
self.size = size_of_new
self.bottom_root = bottom_of_new
self.min_node = min_of_new
# print("Single root, multiple nodes case")
return min_value
# Remaining cases
# Construct heap of right subtree
new_heap = BinomialHeap(
bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new
)
# Update size
self.size = self.size - 1 - size_of_new
# Neighbour nodes
previous_node = self.min_node.left
next_node = self.min_node.parent
# Initialize new bottom_root and min_node
self.min_node = previous_node or next_node
self.bottom_root = next_node
# Update links of previous_node and search below for new min_node and
# bottom_root
if previous_node:
previous_node.parent = next_node
# Update bottom_root and search for min_node below
self.bottom_root = previous_node
self.min_node = previous_node
while self.bottom_root.left:
self.bottom_root = self.bottom_root.left
if self.bottom_root.val < self.min_node.val:
self.min_node = self.bottom_root
if next_node:
next_node.left = previous_node
# Search for new min_node above min_node
i = next_node
while i:
if i.val < self.min_node.val:
self.min_node = i
i = i.parent
# Merge heaps
self.merge_heaps(new_heap)
return int(min_value)
def pre_order(self):
"""
Returns the Pre-order representation of the heap including
values of nodes plus their level distance from the root;
Empty nodes appear as #
"""
# Find top root
top_root = self.bottom_root
while top_root.parent:
top_root = top_root.parent
# preorder
heap_pre_order = []
self.__traversal(top_root, heap_pre_order)
return heap_pre_order
def __traversal(self, curr_node, preorder, level=0):
"""
Pre-order traversal of nodes
"""
if curr_node:
preorder.append((curr_node.val, level))
self.__traversal(curr_node.left, preorder, level + 1)
self.__traversal(curr_node.right, preorder, level + 1)
else:
preorder.append(("#", level))
def __str__(self):
"""
Overwriting str for a pre-order print of nodes in heap;
Performance is poor, so use only for small examples
"""
if self.is_empty():
return ""
preorder_heap = self.pre_order()
return "\n".join(("-" * level + str(value)) for value, level in preorder_heap)
# Unit Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/heap.py | data_structures/heap/heap.py | from __future__ import annotations
from abc import abstractmethod
from collections.abc import Iterable
from typing import Protocol, TypeVar
class Comparable(Protocol):
@abstractmethod
def __lt__(self: T, other: T) -> bool:
pass
@abstractmethod
def __gt__(self: T, other: T) -> bool:
pass
@abstractmethod
def __eq__(self: T, other: object) -> bool:
pass
T = TypeVar("T", bound=Comparable)
class Heap[T: Comparable]:
"""A Max Heap Implementation
>>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]
>>> h = Heap()
>>> h.build_max_heap(unsorted)
>>> h
[209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
>>>
>>> h.extract_max()
209
>>> h
[201, 107, 25, 103, 11, 15, 1, 9, 7, 5]
>>>
>>> h.insert(100)
>>> h
[201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11]
>>>
>>> h.heap_sort()
>>> h
[1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
"""
def __init__(self) -> None:
self.h: list[T] = []
self.heap_size: int = 0
def __repr__(self) -> str:
return str(self.h)
def parent_index(self, child_idx: int) -> int | None:
"""
returns the parent index based on the given child index
>>> h = Heap()
>>> h.build_max_heap([103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5])
>>> h
[209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
        >>> h.parent_index(-1)  # returns None if child index is <= 0
        >>> h.parent_index(0)  # returns None if child index is <= 0
>>> h.parent_index(1)
0
>>> h.parent_index(2)
0
>>> h.parent_index(3)
1
>>> h.parent_index(4)
1
>>> h.parent_index(5)
2
>>> h.parent_index(10.5)
4.0
>>> h.parent_index(209.0)
104.0
>>> h.parent_index("Test")
Traceback (most recent call last):
...
TypeError: '>' not supported between instances of 'str' and 'int'
"""
if child_idx > 0:
return (child_idx - 1) // 2
return None
def left_child_idx(self, parent_idx: int) -> int | None:
"""
return the left child index if the left child exists.
if not, return None.
"""
left_child_index = 2 * parent_idx + 1
if left_child_index < self.heap_size:
return left_child_index
return None
def right_child_idx(self, parent_idx: int) -> int | None:
"""
return the right child index if the right child exists.
if not, return None.
"""
right_child_index = 2 * parent_idx + 2
if right_child_index < self.heap_size:
return right_child_index
return None
def max_heapify(self, index: int) -> None:
"""
        correct a single violation of the heap property at a subtree's root.
        This is the function responsible for restoring the max-heap property,
        i.e. the maximum element is always at the top.
"""
if index < self.heap_size:
violation: int = index
left_child = self.left_child_idx(index)
right_child = self.right_child_idx(index)
# check which child is larger than its parent
if left_child is not None and self.h[left_child] > self.h[violation]:
violation = left_child
if right_child is not None and self.h[right_child] > self.h[violation]:
violation = right_child
# if violation indeed exists
if violation != index:
# swap to fix the violation
self.h[violation], self.h[index] = self.h[index], self.h[violation]
# fix the subsequent violation recursively if any
self.max_heapify(violation)
def build_max_heap(self, collection: Iterable[T]) -> None:
"""
build max heap from an unsorted array
>>> h = Heap()
>>> h.build_max_heap([20,40,50,20,10])
>>> h
[50, 40, 20, 20, 10]
>>> h = Heap()
>>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0])
>>> h
[9, 8, 7, 4, 5, 6, 3, 2, 1, 0]
>>> h = Heap()
>>> h.build_max_heap([514,5,61,57,8,99,105])
>>> h
[514, 57, 105, 5, 8, 99, 61]
>>> h = Heap()
>>> h.build_max_heap([514,5,61.6,57,8,9.9,105])
>>> h
[514, 57, 105, 5, 8, 9.9, 61.6]
"""
self.h = list(collection)
self.heap_size = len(self.h)
if self.heap_size > 1:
# max_heapify from right to left but exclude leaves (last level)
for i in range(self.heap_size // 2 - 1, -1, -1):
self.max_heapify(i)
def extract_max(self) -> T:
"""
get and remove max from heap
>>> h = Heap()
>>> h.build_max_heap([20,40,50,20,10])
>>> h.extract_max()
50
>>> h = Heap()
>>> h.build_max_heap([514,5,61,57,8,99,105])
>>> h.extract_max()
514
>>> h = Heap()
>>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0])
>>> h.extract_max()
9
"""
if self.heap_size >= 2:
me = self.h[0]
self.h[0] = self.h.pop(-1)
self.heap_size -= 1
self.max_heapify(0)
return me
elif self.heap_size == 1:
self.heap_size -= 1
return self.h.pop(-1)
else:
raise Exception("Empty heap")
def insert(self, value: T) -> None:
"""
insert a new value into the max heap
>>> h = Heap()
>>> h.insert(10)
>>> h
[10]
>>> h = Heap()
>>> h.insert(10)
>>> h.insert(10)
>>> h
[10, 10]
>>> h = Heap()
>>> h.insert(10)
>>> h.insert(10.1)
>>> h
[10.1, 10]
>>> h = Heap()
>>> h.insert(0.1)
>>> h.insert(0)
>>> h.insert(9)
>>> h.insert(5)
>>> h
[9, 5, 0.1, 0]
"""
self.h.append(value)
idx = (self.heap_size - 1) // 2
self.heap_size += 1
while idx >= 0:
self.max_heapify(idx)
idx = (idx - 1) // 2
def heap_sort(self) -> None:
size = self.heap_size
for j in range(size - 1, 0, -1):
self.h[0], self.h[j] = self.h[j], self.h[0]
self.heap_size -= 1
self.max_heapify(0)
self.heap_size = size
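# Illustrative check (ad-hoc numbers): when both subtrees already satisfy the
# max-heap property, a single max_heapify() call at the root repairs the one
# remaining violation.
def max_heapify_demo() -> list[int]:
    """
    >>> max_heapify_demo()
    [9, 7, 8, 1, 3]
    """
    demo_heap: Heap[int] = Heap()
    demo_heap.h = [1, 9, 8, 7, 3]  # only the root (1) violates the property
    demo_heap.heap_size = 5
    demo_heap.max_heapify(0)
    return demo_heap.h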
if __name__ == "__main__":
import doctest
# run doc test
doctest.testmod()
# demo
for unsorted in [
[0],
[2],
[3, 5],
[5, 3],
[5, 5],
[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 3, 5],
[0, 2, 2, 3, 5],
[2, 5, 3, 0, 2, 3, 0, 3],
[6, 1, 2, 7, 9, 3, 4, 5, 10, 8],
[103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5],
[-45, -2, -5],
]:
print(f"unsorted array: {unsorted}")
heap: Heap[int] = Heap()
heap.build_max_heap(unsorted)
print(f"after build heap: {heap}")
print(f"max value: {heap.extract_max()}")
print(f"after max value removed: {heap}")
heap.insert(100)
print(f"after new value 100 inserted: {heap}")
heap.heap_sort()
print(f"heap-sorted array: {heap}\n")
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/randomized_heap.py | data_structures/heap/randomized_heap.py | #!/usr/bin/env python3
from __future__ import annotations
import random
from collections.abc import Iterable
from typing import Any, TypeVar
T = TypeVar("T", bound=bool)
class RandomizedHeapNode[T: bool]:
"""
One node of the randomized heap. Contains the value and references to
two children.
"""
def __init__(self, value: T) -> None:
self._value: T = value
self.left: RandomizedHeapNode[T] | None = None
self.right: RandomizedHeapNode[T] | None = None
@property
def value(self) -> T:
"""
Return the value of the node.
>>> rhn = RandomizedHeapNode(10)
>>> rhn.value
10
>>> rhn = RandomizedHeapNode(-10)
>>> rhn.value
-10
"""
return self._value
@staticmethod
def merge(
root1: RandomizedHeapNode[T] | None, root2: RandomizedHeapNode[T] | None
) -> RandomizedHeapNode[T] | None:
"""
Merge 2 nodes together.
>>> rhn1 = RandomizedHeapNode(10)
>>> rhn2 = RandomizedHeapNode(20)
>>> RandomizedHeapNode.merge(rhn1, rhn2).value
10
>>> rhn1 = RandomizedHeapNode(20)
>>> rhn2 = RandomizedHeapNode(10)
>>> RandomizedHeapNode.merge(rhn1, rhn2).value
10
>>> rhn1 = RandomizedHeapNode(5)
>>> rhn2 = RandomizedHeapNode(0)
>>> RandomizedHeapNode.merge(rhn1, rhn2).value
0
"""
        if not root1:
            return root2
        if not root2:
            return root1
        # keep the node with the smaller value as the root (min-heap order)
        if root1.value > root2.value:
            root1, root2 = root2, root1
        # randomly swap the children so the expected merge path is O(log N)
        if random.choice([True, False]):
            root1.left, root1.right = root1.right, root1.left
        root1.left = RandomizedHeapNode.merge(root1.left, root2)
        return root1
class RandomizedHeap[T: bool]:
"""
    A data structure that supports inserting a new value and popping the
    smallest value. Both operations take O(log N) time, where N is the size
    of the structure.
Wiki: https://en.wikipedia.org/wiki/Randomized_meldable_heap
>>> RandomizedHeap([2, 3, 1, 5, 1, 7]).to_sorted_list()
[1, 1, 2, 3, 5, 7]
>>> rh = RandomizedHeap()
>>> rh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
>>> rh.insert(1)
>>> rh.insert(-1)
>>> rh.insert(0)
>>> rh.to_sorted_list()
[-1, 0, 1]
"""
def __init__(self, data: Iterable[T] | None = ()) -> None:
"""
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
self._root: RandomizedHeapNode[T] | None = None
if data:
for item in data:
self.insert(item)
def insert(self, value: T) -> None:
"""
Insert the value into the heap.
>>> rh = RandomizedHeap()
>>> rh.insert(3)
>>> rh.insert(1)
>>> rh.insert(3)
>>> rh.insert(7)
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
self._root = RandomizedHeapNode.merge(self._root, RandomizedHeapNode(value))
def pop(self) -> T | None:
"""
Pop the smallest value from the heap and return it.
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.pop()
1
>>> rh.pop()
3
>>> rh.pop()
3
>>> rh.pop()
7
>>> rh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
"""
        result = self.top()
        # self.top() has already raised IndexError if the heap is empty
        self._root = (
            RandomizedHeapNode.merge(self._root.left, self._root.right)
            if self._root
            else None
        )
        return result
def top(self) -> T:
"""
Return the smallest value from the heap.
>>> rh = RandomizedHeap()
>>> rh.insert(3)
>>> rh.top()
3
>>> rh.insert(1)
>>> rh.top()
1
>>> rh.insert(3)
>>> rh.top()
1
>>> rh.insert(7)
>>> rh.top()
1
"""
if not self._root:
raise IndexError("Can't get top element for the empty heap.")
return self._root.value
def clear(self) -> None:
"""
Clear the heap.
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.clear()
>>> rh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
"""
self._root = None
def to_sorted_list(self) -> list[Any]:
"""
Returns sorted list containing all the values in the heap.
>>> rh = RandomizedHeap([3, 1, 3, 7])
>>> rh.to_sorted_list()
[1, 3, 3, 7]
"""
result = []
while self:
result.append(self.pop())
return result
def __bool__(self) -> bool:
"""
Check if the heap is not empty.
>>> rh = RandomizedHeap()
>>> bool(rh)
False
>>> rh.insert(1)
>>> bool(rh)
True
>>> rh.clear()
>>> bool(rh)
False
"""
return self._root is not None
if __name__ == "__main__":
import doctest
doctest.testmod()
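    # Illustrative demo (a minimal sketch using only the public API above):
    # draining the heap with to_sorted_list() heap-sorts the inserted values.
    demo_heap = RandomizedHeap([5, 2, 8, 1])
    print(demo_heap.to_sorted_list())  # [1, 2, 5, 8]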
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/min_heap.py | data_structures/heap/min_heap.py | # Min heap data structure
# with decrease key functionality - in O(log(n)) time
class Node:
def __init__(self, name, val):
self.name = name
self.val = val
def __str__(self):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__(self, other):
return self.val < other.val
class MinHeap:
"""
>>> r = Node("R", -1)
>>> b = Node("B", 6)
>>> a = Node("A", 3)
>>> x = Node("X", 1)
>>> e = Node("E", 4)
>>> print(b)
Node(B, 6)
>>> myMinHeap = MinHeap([r, b, a, x, e])
>>> myMinHeap.decrease_key(b, -17)
>>> print(b)
Node(B, -17)
>>> myMinHeap["B"]
-17
"""
def __init__(self, array):
self.idx_of_element = {}
self.heap_dict = {}
self.heap = self.build_heap(array)
def __getitem__(self, key):
return self.get_value(key)
def get_parent_idx(self, idx):
return (idx - 1) // 2
def get_left_child_idx(self, idx):
return idx * 2 + 1
def get_right_child_idx(self, idx):
return idx * 2 + 2
def get_value(self, key):
return self.heap_dict[key]
def build_heap(self, array):
last_idx = len(array) - 1
start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
for i in range(start_from, -1, -1):
self.sift_down(i, array)
return array
# this is min-heapify method
def sift_down(self, idx, array):
while True:
left = self.get_left_child_idx(idx)
right = self.get_right_child_idx(idx)
smallest = idx
if left < len(array) and array[left] < array[idx]:
smallest = left
if right < len(array) and array[right] < array[smallest]:
smallest = right
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
(
self.idx_of_element[array[idx]],
self.idx_of_element[array[smallest]],
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
idx = smallest
else:
break
def sift_up(self, idx):
p = self.get_parent_idx(idx)
while p >= 0 and self.heap[p] > self.heap[idx]:
self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
idx = p
p = self.get_parent_idx(idx)
def peek(self):
return self.heap[0]
def remove(self):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
        x = self.heap.pop()
        del self.idx_of_element[x]
        # also drop the name -> value mapping for the removed node
        del self.heap_dict[x.name]
        self.sift_down(0, self.heap)
        return x
def insert(self, node):
self.heap.append(node)
self.idx_of_element[node] = len(self.heap) - 1
self.heap_dict[node.name] = node.val
self.sift_up(len(self.heap) - 1)
def is_empty(self):
return len(self.heap) == 0
def decrease_key(self, node, new_value):
        assert self.heap[self.idx_of_element[node]].val > new_value, (
            "new_value must be less than the current value"
        )
node.val = new_value
self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node])
# USAGE
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
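# Extra demo (a minimal sketch using only the MinHeap API above): repeatedly
# calling remove() drains the heap in ascending order of val.
print("Min Heap - nodes removed in ascending order")
while not my_min_heap.is_empty():
    print(my_min_heap.remove())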
if __name__ == "__main__":
import doctest
doctest.testmod()
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/max_heap.py | data_structures/heap/max_heap.py | class BinaryHeap:
"""
A max-heap implementation in Python
>>> binary_heap = BinaryHeap()
>>> binary_heap.insert(6)
>>> binary_heap.insert(10)
>>> binary_heap.insert(15)
>>> binary_heap.insert(12)
>>> binary_heap.pop()
15
>>> binary_heap.pop()
12
>>> binary_heap.get_list
[10, 6]
>>> len(binary_heap)
2
"""
    def __init__(self):
        # index 0 is a sentinel so that the children of i sit at 2*i and 2*i + 1
        self.__heap = [0]
        self.__size = 0
    def __swap_up(self, i: int) -> None:
        """Sift the element at index i up to its correct position"""
        temporary = self.__heap[i]
        while i // 2 > 0:
            if self.__heap[i] > self.__heap[i // 2]:
                self.__heap[i] = self.__heap[i // 2]
                self.__heap[i // 2] = temporary
                i //= 2
            else:
                # the parent is at least as large; the heap property holds
                break
def insert(self, value: int) -> None:
"""Insert new element"""
self.__heap.append(value)
self.__size += 1
self.__swap_up(self.__size)
def __swap_down(self, i: int) -> None:
"""Swap the element down"""
while self.__size >= 2 * i:
if 2 * i + 1 > self.__size: # noqa: SIM114
bigger_child = 2 * i
elif self.__heap[2 * i] > self.__heap[2 * i + 1]:
bigger_child = 2 * i
else:
bigger_child = 2 * i + 1
temporary = self.__heap[i]
if self.__heap[i] < self.__heap[bigger_child]:
self.__heap[i] = self.__heap[bigger_child]
self.__heap[bigger_child] = temporary
i = bigger_child
    def pop(self) -> int:
        """Pop and return the root element (the maximum value)"""
        if self.__size == 0:
            raise IndexError("Can't pop from an empty heap.")
        max_value = self.__heap[1]
        self.__heap[1] = self.__heap[self.__size]
        self.__size -= 1
        self.__heap.pop()
        self.__swap_down(1)
        return max_value
@property
    def get_list(self):
        """Return the heap contents, excluding the index-0 sentinel"""
        return self.__heap[1:]
def __len__(self):
"""Length of the array"""
return self.__size
if __name__ == "__main__":
import doctest
doctest.testmod()
# create an instance of BinaryHeap
binary_heap = BinaryHeap()
binary_heap.insert(6)
binary_heap.insert(10)
binary_heap.insert(15)
binary_heap.insert(12)
    # pop the root (the max value, since this is a max heap)
print(binary_heap.pop()) # 15
print(binary_heap.pop()) # 12
# get the list and size after operations
print(binary_heap.get_list)
print(len(binary_heap))
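    # Illustrative drain (a minimal sketch using only the public API above):
    # popping until empty yields the remaining values in descending order.
    while len(binary_heap):
        print(binary_heap.pop())  # prints 10, then 6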
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/__init__.py | data_structures/heap/__init__.py | python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false | |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/heap/skew_heap.py | data_structures/heap/skew_heap.py | #!/usr/bin/env python3
from __future__ import annotations
from collections.abc import Iterable, Iterator
from typing import Any
class SkewNode[T: bool]:
"""
One node of the skew heap. Contains the value and references to
two children.
"""
def __init__(self, value: T) -> None:
self._value: T = value
self.left: SkewNode[T] | None = None
self.right: SkewNode[T] | None = None
@property
def value(self) -> T:
"""
Return the value of the node.
>>> SkewNode(0).value
0
>>> SkewNode(3.14159).value
3.14159
>>> SkewNode("hello").value
'hello'
>>> SkewNode(None).value
>>> SkewNode(True).value
True
>>> SkewNode([]).value
[]
>>> SkewNode({}).value
{}
>>> SkewNode(set()).value
set()
>>> SkewNode(0.0).value
0.0
>>> SkewNode(-1e-10).value
-1e-10
>>> SkewNode(10).value
10
>>> SkewNode(-10.5).value
-10.5
>>> SkewNode().value
Traceback (most recent call last):
...
TypeError: SkewNode.__init__() missing 1 required positional argument: 'value'
"""
return self._value
@staticmethod
def merge(
root1: SkewNode[T] | None, root2: SkewNode[T] | None
) -> SkewNode[T] | None:
"""
Merge 2 nodes together.
>>> SkewNode.merge(SkewNode(10),SkewNode(-10.5)).value
-10.5
>>> SkewNode.merge(SkewNode(10),SkewNode(10.5)).value
10
>>> SkewNode.merge(SkewNode(10),SkewNode(10)).value
10
>>> SkewNode.merge(SkewNode(-100),SkewNode(-10.5)).value
-100
"""
        if not root1:
            return root2
        if not root2:
            return root1
        # keep the node with the smaller value as the root (min-heap order)
        if root1.value > root2.value:
            root1, root2 = root2, root1
        # unconditionally swap the children: the old right subtree, merged
        # with root2, becomes the new left subtree
        result = root1
        temp = root1.right
        result.right = root1.left
        result.left = SkewNode.merge(temp, root2)
        return result
class SkewHeap[T: bool]:
"""
    A data structure that supports inserting a new value and popping the
    smallest value. Both operations take O(log N) time, where N is the size
    of the structure.
Wiki: https://en.wikipedia.org/wiki/Skew_heap
Visualization: https://www.cs.usfca.edu/~galles/visualization/SkewHeap.html
>>> list(SkewHeap([2, 3, 1, 5, 1, 7]))
[1, 1, 2, 3, 5, 7]
>>> sh = SkewHeap()
>>> sh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
>>> sh.insert(1)
>>> sh.insert(-1)
>>> sh.insert(0)
>>> list(sh)
[-1, 0, 1]
"""
def __init__(self, data: Iterable[T] | None = ()) -> None:
"""
>>> sh = SkewHeap([3, 1, 3, 7])
>>> list(sh)
[1, 3, 3, 7]
"""
self._root: SkewNode[T] | None = None
if data:
for item in data:
self.insert(item)
def __bool__(self) -> bool:
"""
Check if the heap is not empty.
>>> sh = SkewHeap()
>>> bool(sh)
False
>>> sh.insert(1)
>>> bool(sh)
True
>>> sh.clear()
>>> bool(sh)
False
"""
return self._root is not None
def __iter__(self) -> Iterator[T]:
"""
Returns sorted list containing all the values in the heap.
>>> sh = SkewHeap([3, 1, 3, 7])
>>> list(sh)
[1, 3, 3, 7]
"""
result: list[Any] = []
while self:
result.append(self.pop())
        # Push the items back so that iterating does not empty the heap.
for item in result:
self.insert(item)
return iter(result)
def insert(self, value: T) -> None:
"""
Insert the value into the heap.
>>> sh = SkewHeap()
>>> sh.insert(3)
>>> sh.insert(1)
>>> sh.insert(3)
>>> sh.insert(7)
>>> list(sh)
[1, 3, 3, 7]
"""
self._root = SkewNode.merge(self._root, SkewNode(value))
def pop(self) -> T | None:
"""
Pop the smallest value from the heap and return it.
>>> sh = SkewHeap([3, 1, 3, 7])
>>> sh.pop()
1
>>> sh.pop()
3
>>> sh.pop()
3
>>> sh.pop()
7
>>> sh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
"""
result = self.top()
self._root = (
SkewNode.merge(self._root.left, self._root.right) if self._root else None
)
return result
def top(self) -> T:
"""
Return the smallest value from the heap.
>>> sh = SkewHeap()
>>> sh.insert(3)
>>> sh.top()
3
>>> sh.insert(1)
>>> sh.top()
1
>>> sh.insert(3)
>>> sh.top()
1
>>> sh.insert(7)
>>> sh.top()
1
"""
if not self._root:
raise IndexError("Can't get top element for the empty heap.")
return self._root.value
def clear(self) -> None:
"""
Clear the heap.
>>> sh = SkewHeap([3, 1, 3, 7])
>>> sh.clear()
>>> sh.pop()
Traceback (most recent call last):
...
IndexError: Can't get top element for the empty heap.
"""
self._root = None
if __name__ == "__main__":
import doctest
doctest.testmod()
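    # Illustrative demo (a minimal sketch using only the public API above):
    # iterating a SkewHeap yields its values in sorted order without
    # emptying it.
    demo_heap = SkewHeap([4, 2, 9])
    print(list(demo_heap))  # [2, 4, 9]
    print(demo_heap.top())  # 2 -- the heap is rebuilt after iteration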
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |
TheAlgorithms/Python | https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/data_structures/kd_tree/build_kdtree.py | data_structures/kd_tree/build_kdtree.py | # Created by: Ramy-Badr-Ahmed (https://github.com/Ramy-Badr-Ahmed)
# in Pull Request: #11532
# https://github.com/TheAlgorithms/Python/pull/11532
#
# Please mention me (@Ramy-Badr-Ahmed) in any issue or pull request
# addressing bugs/corrections to this file.
# Thank you!
from data_structures.kd_tree.kd_node import KDNode
def build_kdtree(points: list[list[float]], depth: int = 0) -> KDNode | None:
"""
Builds a KD-Tree from a list of points.
Args:
points: The list of points to build the KD-Tree from.
depth: The current depth in the tree
(used to determine axis for splitting).
Returns:
The root node of the KD-Tree,
or None if no points are provided.
"""
if not points:
return None
k = len(points[0]) # Dimensionality of the points
axis = depth % k
    # Sort the point list in place and choose the median as the pivot element
    points.sort(key=lambda point: point[axis])
median_idx = len(points) // 2
# Create node and construct subtrees
left_points = points[:median_idx]
right_points = points[median_idx + 1 :]
return KDNode(
point=points[median_idx],
left=build_kdtree(left_points, depth + 1),
right=build_kdtree(right_points, depth + 1),
)
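# Minimal usage sketch (not part of the module's public surface). It assumes
# KDNode stores the `point` it is constructed with and that the script runs
# where the data_structures package is importable (e.g. the repository root).
if __name__ == "__main__":
    demo_points = [[2.0, 3.0], [5.0, 4.0], [9.0, 6.0], [4.0, 7.0]]
    root = build_kdtree(demo_points)
    # Depth 0 splits on axis 0, so the root holds the median x-coordinate.
    print(root.point if root else None)  # [5.0, 4.0]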
| python | MIT | 2c15b8c54eb8130e83640fe1d911c10eb6cd70d4 | 2026-01-04T14:38:15.231112Z | false |