repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/recursive_quick_sort.py
sorts/recursive_quick_sort.py
def quick_sort(data: list) -> list:
    """
    Sort *data* (any sequence of mutually comparable items) into a new
    ascending list using a recursive quicksort with the first element as pivot.

    >>> for data in ([2, 1, 0], [2.2, 1.1, 0], "quick_sort"):
    ...     quick_sort(data) == sorted(data)
    True
    True
    True
    """
    # A sequence of zero or one items is already sorted.
    if len(data) <= 1:
        return data
    pivot, *rest = data
    # Partition the remaining items around the pivot value.
    smaller = [item for item in rest if item <= pivot]
    larger = [item for item in rest if item > pivot]
    return [*quick_sort(smaller), pivot, *quick_sort(larger)]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/bubble_sort.py
sorts/bubble_sort.py
from typing import Any


def bubble_sort_iterative(collection: list[Any]) -> list[Any]:
    """Pure implementation of bubble sort algorithm in Python

    :param collection: some mutable ordered collection with heterogeneous
        comparable items inside
    :return: the same collection ordered by ascending

    Examples:
    >>> bubble_sort_iterative([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort_iterative([])
    []
    >>> bubble_sort_iterative([-2, -45, -5])
    [-45, -5, -2]
    >>> bubble_sort_iterative([-23, 0, 6, -4, 34])
    [-23, -4, 0, 6, 34]
    >>> bubble_sort_iterative([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
    True
    >>> bubble_sort_iterative([]) == sorted([])
    True
    >>> bubble_sort_iterative([-2, -45, -5]) == sorted([-2, -45, -5])
    True
    >>> bubble_sort_iterative([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34])
    True
    >>> bubble_sort_iterative(['d', 'a', 'b', 'e']) == sorted(['d', 'a', 'b', 'e'])
    True
    >>> bubble_sort_iterative(['z', 'a', 'y', 'b', 'x', 'c'])
    ['a', 'b', 'c', 'x', 'y', 'z']
    >>> bubble_sort_iterative([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6])
    [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7]
    >>> bubble_sort_iterative([1, 3.3, 5, 7.7, 2, 4.4, 6])
    [1, 2, 3.3, 4.4, 5, 6, 7.7]
    >>> import random
    >>> collection_arg = random.sample(range(-50, 50), 100)
    >>> bubble_sort_iterative(collection_arg) == sorted(collection_arg)
    True
    >>> import string
    >>> collection_arg = random.choices(string.ascii_letters + string.digits, k=100)
    >>> bubble_sort_iterative(collection_arg) == sorted(collection_arg)
    True
    """
    length = len(collection)
    # Each outer pass bubbles the largest remaining item to position i.
    for i in reversed(range(length)):
        swapped = False
        for j in range(i):
            if collection[j] > collection[j + 1]:
                swapped = True
                collection[j], collection[j + 1] = collection[j + 1], collection[j]
        if not swapped:
            break  # Stop iteration if the collection is sorted.
    return collection


def bubble_sort_recursive(collection: list[Any]) -> list[Any]:
    """Recursive implementation of the bubble sort algorithm.

    :param collection: mutable ordered sequence of elements
    :return: the same list in ascending order

    Examples:
    >>> bubble_sort_recursive([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort_recursive([])
    []
    >>> bubble_sort_recursive([-2, -45, -5])
    [-45, -5, -2]
    >>> bubble_sort_recursive([-23, 0, 6, -4, 34])
    [-23, -4, 0, 6, 34]
    >>> bubble_sort_recursive([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
    True
    >>> bubble_sort_recursive([]) == sorted([])
    True
    >>> bubble_sort_recursive([-2, -45, -5]) == sorted([-2, -45, -5])
    True
    >>> bubble_sort_recursive([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34])
    True
    >>> bubble_sort_recursive(['d', 'a', 'b', 'e']) == sorted(['d', 'a', 'b', 'e'])
    True
    >>> bubble_sort_recursive(['z', 'a', 'y', 'b', 'x', 'c'])
    ['a', 'b', 'c', 'x', 'y', 'z']
    >>> bubble_sort_recursive([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6])
    [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7]
    >>> bubble_sort_recursive([1, 3.3, 5, 7.7, 2, 4.4, 6])
    [1, 2, 3.3, 4.4, 5, 6, 7.7]
    >>> bubble_sort_recursive(['a', 'Z', 'B', 'C', 'A', 'c'])
    ['A', 'B', 'C', 'Z', 'a', 'c']
    >>> import random
    >>> collection_arg = random.sample(range(-50, 50), 100)
    >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg)
    True
    >>> import string
    >>> collection_arg = random.choices(string.ascii_letters + string.digits, k=100)
    >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg)
    True
    """
    length = len(collection)
    swapped = False
    # One full pass; recurse until a pass makes no swaps (list is sorted).
    for i in range(length - 1):
        if collection[i] > collection[i + 1]:
            collection[i], collection[i + 1] = collection[i + 1], collection[i]
            swapped = True

    return collection if not swapped else bubble_sort_recursive(collection)


if __name__ == "__main__":
    import doctest
    from random import sample
    from timeit import timeit

    doctest.testmod()

    # Benchmark: Iterative seems slightly faster than recursive.
    num_runs = 10_000
    unsorted = sample(range(-50, 50), 100)
    timer_iterative = timeit(
        "bubble_sort_iterative(unsorted[:])", globals=globals(), number=num_runs
    )
    print("\nIterative bubble sort:")
    print(*bubble_sort_iterative(unsorted), sep=",")
    print(f"Processing time (iterative): {timer_iterative:.5f}s for {num_runs:,} runs")

    unsorted = sample(range(-50, 50), 100)
    timer_recursive = timeit(
        "bubble_sort_recursive(unsorted[:])", globals=globals(), number=num_runs
    )
    print("\nRecursive bubble sort:")
    print(*bubble_sort_recursive(unsorted), sep=",")
    print(f"Processing time (recursive): {timer_recursive:.5f}s for {num_runs:,} runs")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/shrink_shell_sort.py
sorts/shrink_shell_sort.py
""" This function implements the shell sort algorithm which is slightly faster than its pure implementation. This shell sort is implemented using a gap, which shrinks by a certain factor each iteration. In this implementation, the gap is initially set to the length of the collection. The gap is then reduced by a certain factor (1.3) each iteration. For each iteration, the algorithm compares elements that are a certain number of positions apart (determined by the gap). If the element at the higher position is greater than the element at the lower position, the two elements are swapped. The process is repeated until the gap is equal to 1. The reason this is more efficient is that it reduces the number of comparisons that need to be made. By using a smaller gap, the list is sorted more quickly. """ def shell_sort(collection: list) -> list: """Implementation of shell sort algorithm in Python :param collection: Some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending >>> shell_sort([3, 2, 1]) [1, 2, 3] >>> shell_sort([]) [] >>> shell_sort([1]) [1] """ # Choose an initial gap value gap = len(collection) # Set the gap value to be decreased by a factor of 1.3 # after each iteration shrink = 1.3 # Continue sorting until the gap is 1 while gap > 1: # Decrease the gap value gap = int(gap / shrink) # Sort the elements using insertion sort for i in range(gap, len(collection)): temp = collection[i] j = i while j >= gap and collection[j - gap] > temp: collection[j] = collection[j - gap] j -= gap collection[j] = temp return collection if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/merge_insertion_sort.py
sorts/merge_insertion_sort.py
""" This is a pure Python implementation of the merge-insertion sort algorithm Source: https://en.wikipedia.org/wiki/Merge-insertion_sort For doctests run following command: python3 -m doctest -v merge_insertion_sort.py or python -m doctest -v merge_insertion_sort.py For manual testing run: python3 merge_insertion_sort.py """ from __future__ import annotations def binary_search_insertion(sorted_list, item): """ >>> binary_search_insertion([1, 2, 7, 9, 10], 4) [1, 2, 4, 7, 9, 10] """ left = 0 right = len(sorted_list) - 1 while left <= right: middle = (left + right) // 2 if left == right: if sorted_list[middle] < item: left = middle + 1 break elif sorted_list[middle] < item: left = middle + 1 else: right = middle - 1 sorted_list.insert(left, item) return sorted_list def merge(left, right): """ >>> merge([[1, 6], [9, 10]], [[2, 3], [4, 5], [7, 8]]) [[1, 6], [2, 3], [4, 5], [7, 8], [9, 10]] """ result = [] while left and right: if left[0][0] < right[0][0]: result.append(left.pop(0)) else: result.append(right.pop(0)) return result + left + right def sortlist_2d(list_2d): """ >>> sortlist_2d([[9, 10], [1, 6], [7, 8], [2, 3], [4, 5]]) [[1, 6], [2, 3], [4, 5], [7, 8], [9, 10]] """ length = len(list_2d) if length <= 1: return list_2d middle = length // 2 return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:])) def merge_insertion_sort(collection: list[int]) -> list[int]: """Pure implementation of merge-insertion sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> merge_insertion_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> merge_insertion_sort([99]) [99] >>> merge_insertion_sort([-2, -5, -45]) [-45, -5, -2] Testing with all permutations on range(0,5): >>> import itertools >>> permutations = list(itertools.permutations([0, 1, 2, 3, 4])) >>> all(merge_insertion_sort(p) == [0, 1, 2, 3, 4] for p in permutations) True """ if 
len(collection) <= 1: return collection """ Group the items into two pairs, and leave one element if there is a last odd item. Example: [999, 100, 75, 40, 10000] -> [999, 100], [75, 40]. Leave 10000. """ two_paired_list = [] has_last_odd_item = False for i in range(0, len(collection), 2): if i == len(collection) - 1: has_last_odd_item = True else: """ Sort two-pairs in each groups. Example: [999, 100], [75, 40] -> [100, 999], [40, 75] """ if collection[i] < collection[i + 1]: two_paired_list.append([collection[i], collection[i + 1]]) else: two_paired_list.append([collection[i + 1], collection[i]]) """ Sort two_paired_list. Example: [100, 999], [40, 75] -> [40, 75], [100, 999] """ sorted_list_2d = sortlist_2d(two_paired_list) """ 40 < 100 is sure because it has already been sorted. Generate the sorted_list of them so that you can avoid unnecessary comparison. Example: group0 group1 40 100 75 999 -> group0 group1 [40, 100] 75 999 """ result = [i[0] for i in sorted_list_2d] """ 100 < 999 is sure because it has already been sorted. Put 999 in last of the sorted_list so that you can avoid unnecessary comparison. Example: group0 group1 [40, 100] 75 999 -> group0 group1 [40, 100, 999] 75 """ result.append(sorted_list_2d[-1][1]) """ Insert the last odd item left if there is. Example: group0 group1 [40, 100, 999] 75 -> group0 group1 [40, 100, 999, 10000] 75 """ if has_last_odd_item: pivot = collection[-1] result = binary_search_insertion(result, pivot) """ Insert the remaining items. In this case, 40 < 75 is sure because it has already been sorted. Therefore, you only need to insert 75 into [100, 999, 10000], so that you can avoid unnecessary comparison. Example: group0 group1 [40, 100, 999, 10000] ^ You don't need to compare with this as 40 < 75 is already sure. 
75 -> [40, 75, 100, 999, 10000] """ is_last_odd_item_inserted_before_this_index = False for i in range(len(sorted_list_2d) - 1): if result[i] == collection[-1] and has_last_odd_item: is_last_odd_item_inserted_before_this_index = True pivot = sorted_list_2d[i][1] # If last_odd_item is inserted before the item's index, # you should forward index one more. if is_last_odd_item_inserted_before_this_index: result = result[: i + 2] + binary_search_insertion(result[i + 2 :], pivot) else: result = result[: i + 1] + binary_search_insertion(result[i + 1 :], pivot) return result if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(merge_insertion_sort(unsorted))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/msd_radix_sort.py
sorts/msd_radix_sort.py
""" Python implementation of the MSD radix sort algorithm. It used the binary representation of the integers to sort them. https://en.wikipedia.org/wiki/Radix_sort """ from __future__ import annotations def msd_radix_sort(list_of_ints: list[int]) -> list[int]: """ Implementation of the MSD radix sort algorithm. Only works with positive integers :param list_of_ints: A list of integers :return: Returns the sorted list >>> msd_radix_sort([40, 12, 1, 100, 4]) [1, 4, 12, 40, 100] >>> msd_radix_sort([]) [] >>> msd_radix_sort([123, 345, 123, 80]) [80, 123, 123, 345] >>> msd_radix_sort([1209, 834598, 1, 540402, 45]) [1, 45, 1209, 540402, 834598] >>> msd_radix_sort([-1, 34, 45]) Traceback (most recent call last): ... ValueError: All numbers must be positive """ if not list_of_ints: return [] if min(list_of_ints) < 0: raise ValueError("All numbers must be positive") most_bits = max(len(bin(x)[2:]) for x in list_of_ints) return _msd_radix_sort(list_of_ints, most_bits) def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]: """ Sort the given list based on the bit at bit_position. Numbers with a 0 at that position will be at the start of the list, numbers with a 1 at the end. 
:param list_of_ints: A list of integers :param bit_position: the position of the bit that gets compared :return: Returns a partially sorted list >>> _msd_radix_sort([45, 2, 32], 1) [2, 32, 45] >>> _msd_radix_sort([10, 4, 12], 2) [4, 12, 10] """ if bit_position == 0 or len(list_of_ints) in [0, 1]: return list_of_ints zeros = [] ones = [] # Split numbers based on bit at bit_position from the right for number in list_of_ints: if (number >> (bit_position - 1)) & 1: # number has a one at bit bit_position ones.append(number) else: # number has a zero at bit bit_position zeros.append(number) # recursively split both lists further zeros = _msd_radix_sort(zeros, bit_position - 1) ones = _msd_radix_sort(ones, bit_position - 1) # recombine lists res = zeros res.extend(ones) return res def msd_radix_sort_inplace(list_of_ints: list[int]): """ Inplace implementation of the MSD radix sort algorithm. Sorts based on the binary representation of the integers. >>> lst = [1, 345, 23, 89, 0, 3] >>> msd_radix_sort_inplace(lst) >>> lst == sorted(lst) True >>> lst = [1, 43, 0, 0, 0, 24, 3, 3] >>> msd_radix_sort_inplace(lst) >>> lst == sorted(lst) True >>> lst = [] >>> msd_radix_sort_inplace(lst) >>> lst == [] True >>> lst = [-1, 34, 23, 4, -42] >>> msd_radix_sort_inplace(lst) Traceback (most recent call last): ... ValueError: All numbers must be positive """ length = len(list_of_ints) if not list_of_ints or length == 1: return if min(list_of_ints) < 0: raise ValueError("All numbers must be positive") most_bits = max(len(bin(x)[2:]) for x in list_of_ints) _msd_radix_sort_inplace(list_of_ints, most_bits, 0, length) def _msd_radix_sort_inplace( list_of_ints: list[int], bit_position: int, begin_index: int, end_index: int ): """ Sort the given list based on the bit at bit_position. Numbers with a 0 at that position will be at the start of the list, numbers with a 1 at the end. 
>>> lst = [45, 2, 32, 24, 534, 2932] >>> _msd_radix_sort_inplace(lst, 1, 0, 3) >>> lst == [32, 2, 45, 24, 534, 2932] True >>> lst = [0, 2, 1, 3, 12, 10, 4, 90, 54, 2323, 756] >>> _msd_radix_sort_inplace(lst, 2, 4, 7) >>> lst == [0, 2, 1, 3, 12, 4, 10, 90, 54, 2323, 756] True """ if bit_position == 0 or end_index - begin_index <= 1: return bit_position -= 1 i = begin_index j = end_index - 1 while i <= j: changed = False if not (list_of_ints[i] >> bit_position) & 1: # found zero at the beginning i += 1 changed = True if (list_of_ints[j] >> bit_position) & 1: # found one at the end j -= 1 changed = True if changed: continue list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i] j -= 1 if j != i: i += 1 _msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i) _msd_radix_sort_inplace(list_of_ints, bit_position, i, end_index) if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/gnome_sort.py
sorts/gnome_sort.py
""" Gnome Sort Algorithm (A.K.A. Stupid Sort) This algorithm iterates over a list comparing an element with the previous one. If order is not respected, it swaps element backward until order is respected with previous element. It resumes the initial iteration from element new position. For doctests run following command: python3 -m doctest -v gnome_sort.py For manual testing run: python3 gnome_sort.py """ def gnome_sort(lst: list) -> list: """ Pure implementation of the gnome sort algorithm in Python Take some mutable ordered collection with heterogeneous comparable items inside as arguments, return the same collection ordered by ascending. Examples: >>> gnome_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> gnome_sort([]) [] >>> gnome_sort([-2, -5, -45]) [-45, -5, -2] >>> "".join(gnome_sort(list(set("Gnomes are stupid!")))) ' !Gadeimnoprstu' """ if len(lst) <= 1: return lst i = 1 while i < len(lst): if lst[i - 1] <= lst[i]: i += 1 else: lst[i - 1], lst[i] = lst[i], lst[i - 1] i -= 1 if i == 0: i = 1 return lst if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/sorts/cocktail_shaker_sort.py
sorts/cocktail_shaker_sort.py
""" An implementation of the cocktail shaker sort algorithm in pure Python. https://en.wikipedia.org/wiki/Cocktail_shaker_sort """ def cocktail_shaker_sort(arr: list[int]) -> list[int]: """ Sorts a list using the Cocktail Shaker Sort algorithm. :param arr: List of elements to be sorted. :return: Sorted list. >>> cocktail_shaker_sort([4, 5, 2, 1, 2]) [1, 2, 2, 4, 5] >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) [-4, 0, 1, 2, 5, 11] >>> cocktail_shaker_sort([0.1, -2.4, 4.4, 2.2]) [-2.4, 0.1, 2.2, 4.4] >>> cocktail_shaker_sort([1, 2, 3, 4, 5]) [1, 2, 3, 4, 5] >>> cocktail_shaker_sort([-4, -5, -24, -7, -11]) [-24, -11, -7, -5, -4] >>> cocktail_shaker_sort(["elderberry", "banana", "date", "apple", "cherry"]) ['apple', 'banana', 'cherry', 'date', 'elderberry'] >>> cocktail_shaker_sort((-4, -5, -24, -7, -11)) Traceback (most recent call last): ... TypeError: 'tuple' object does not support item assignment """ start, end = 0, len(arr) - 1 while start < end: swapped = False # Pass from left to right for i in range(start, end): if arr[i] > arr[i + 1]: arr[i], arr[i + 1] = arr[i + 1], arr[i] swapped = True if not swapped: break end -= 1 # Decrease the end pointer after each pass # Pass from right to left for i in range(end, start, -1): if arr[i] < arr[i - 1]: arr[i], arr[i - 1] = arr[i - 1], arr[i] swapped = True if not swapped: break start += 1 # Increase the start pointer after each pass return arr if __name__ == "__main__": import doctest doctest.testmod() user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(f"{cocktail_shaker_sort(unsorted) = }")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/cnn_classification.py
computer_vision/cnn_classification.py
""" Convolutional Neural Network Objective : To train a CNN model detect if TB is present in Lung X-ray or not. Resources CNN Theory : https://en.wikipedia.org/wiki/Convolutional_neural_network Resources Tensorflow : https://www.tensorflow.org/tutorials/images/cnn Download dataset from : https://lhncbc.nlm.nih.gov/LHC-publications/pubs/TuberculosisChestXrayImageDataSets.html 1. Download the dataset folder and create two folder training set and test set in the parent dataset folder 2. Move 30-40 image from both TB positive and TB Negative folder in the test set folder 3. The labels of the images will be extracted from the folder name the image is present in. """ # Part 1 - Building the CNN import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) classifier = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPooling2D(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.Conv2D(32, (3, 3), activation="relu")) classifier.add(layers.MaxPooling2D(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') train_datagen = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, 
horizontal_flip=True ) test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) training_set = train_datagen.flow_from_directory( "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) test_set = test_datagen.flow_from_directory( "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions test_image = tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(64, 64) ) test_image = tf.keras.preprocessing.image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis=0) result = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: prediction = "Normal" if result[0][0] == 1: prediction = "Abnormality detected"
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/intensity_based_segmentation.py
computer_vision/intensity_based_segmentation.py
# Source: "https://www.ijcse.com/docs/IJCSE11-02-03-117.pdf"

# Importing necessary libraries
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image


def segment_image(image: np.ndarray, thresholds: list[int]) -> np.ndarray:
    """
    Performs image segmentation based on intensity thresholds.

    Args:
        image: Input grayscale image as a 2D array.
        thresholds: Intensity thresholds to define segments.

    Returns:
        A labeled 2D array where each region corresponds to a threshold range.

    Example:
        >>> img = np.array([[80, 120, 180], [40, 90, 150], [20, 60, 100]])
        >>> segment_image(img, [50, 100, 150])
        array([[1, 2, 3],
               [0, 1, 2],
               [0, 1, 1]], dtype=int32)
    """
    # Label 0 means "below every threshold"; later (higher) thresholds
    # overwrite earlier ones, so each pixel ends with its band index.
    labels = np.zeros_like(image, dtype=np.int32)
    for label, cutoff in enumerate(thresholds, start=1):
        labels[image > cutoff] = label
    return labels


if __name__ == "__main__":
    # Load the image
    image_path = "path_to_image"  # Replace with your image path
    original_image = Image.open(image_path).convert("L")
    image_array = np.array(original_image)

    # Define thresholds
    thresholds = [50, 100, 150, 200]

    # Perform segmentation
    segmented_image = segment_image(image_array, thresholds)

    # Display the results
    plt.figure(figsize=(10, 5))

    plt.subplot(1, 2, 1)
    plt.title("Original Image")
    plt.imshow(image_array, cmap="gray")
    plt.axis("off")

    plt.subplot(1, 2, 2)
    plt.title("Segmented Image")
    plt.imshow(segmented_image, cmap="tab20")
    plt.axis("off")

    plt.show()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/flip_augmentation.py
computer_vision/flip_augmentation.py
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

"""
Flip image and bounding box for computer vision task
https://paperswithcode.com/method/randomhorizontalflip
"""

# Params
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            # Write the YOLO-format lines directly (no need to wrap the
            # list in a generator expression).
            outfile.write("\n".join(annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    - label_dir <type: str>: Path to label include annotation of images
    - img_dir <type: str>: Path to folder contain images
    Return <type: list>: List of images path and labels
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        # Skip label files that contained no boxes.
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """
    - img_list <type: list>: list of all images
    - anno_list <type: list>: list of all annotations of specific image
    - flip_type <type: int>: 0 is vertical, 1 is horizontal
    Return:
        - new_imgs_list <type: narray>: image after resize
        - new_annos_lists <type: list>: list of new annotation after scale
        - path_list <type: list>: list the name of image file
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    # Iterate the parallel lists together instead of indexing by
    # range(len(...)).
    for path, img_annos in zip(img_list, anno_list):
        new_annos = []
        path_list.append(path)
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                # Horizontal flip mirrors the normalized x-center.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                # Vertical flip mirrors the normalized y-center.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """
    Automatic generate random 32 characters.
    Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
    >>> len(random_chars(32))
    32
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/harris_corner.py
computer_vision/harris_corner.py
import cv2
import numpy as np

"""
Harris Corner Detector
https://en.wikipedia.org/wiki/Harris_Corner_Detector
"""


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : is an empirically determined constant in [0.04,0.06]
        window_size : neighbourhoods considered
        """
        # Accept any k inside the documented interval; the original
        # `k in (0.04, 0.06)` tested tuple membership and therefore
        # rejected every value except exactly 0.04 or 0.06.
        if 0.04 <= k <= 0.06:
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Detect corners in the grayscale image at img_path.

        img_path : path of the image
        output : the image with detected corners marked in red,
                 and the list of corner positions [x, y, response]
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2

        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # Sum the structure-tensor entries over the window.
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy

                # Harris response; use the configured constant instead of
                # shadowing it with a hard-coded local `k = 0.04`.
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    # NOTE(review): ndarray.itemset is removed in NumPy 2.x;
                    # confirm the pinned NumPy version or switch to plain
                    # indexed assignment.
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/mean_threshold.py
computer_vision/mean_threshold.py
from PIL import Image """ Mean thresholding algorithm for image processing https://en.wikipedia.org/wiki/Thresholding_(image_processing) """ def mean_threshold(image: Image) -> Image: """ image: is a grayscale PIL image object """ height, width = image.size mean = 0 pixels = image.load() for i in range(width): for j in range(height): pixel = pixels[j, i] mean += pixel mean //= width * height for j in range(width): for i in range(height): pixels[i, j] = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": image = mean_threshold(Image.open("path_to_image").convert("L")) image.save("output_image_path")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/__init__.py
computer_vision/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/haralick_descriptors.py
computer_vision/haralick_descriptors.py
""" https://en.wikipedia.org/wiki/Image_texture https://en.wikipedia.org/wiki/Co-occurrence_matrix#Application_to_image_analysis """ import imageio.v2 as imageio import numpy as np def root_mean_square_error(original: np.ndarray, reference: np.ndarray) -> float: """Simple implementation of Root Mean Squared Error for two N dimensional numpy arrays. Examples: >>> root_mean_square_error(np.array([1, 2, 3]), np.array([1, 2, 3])) 0.0 >>> root_mean_square_error(np.array([1, 2, 3]), np.array([2, 2, 2])) 0.816496580927726 >>> root_mean_square_error(np.array([1, 2, 3]), np.array([6, 4, 2])) 3.1622776601683795 """ return float(np.sqrt(((original - reference) ** 2).mean())) def normalize_image( image: np.ndarray, cap: float = 255.0, data_type: np.dtype = np.uint8 ) -> np.ndarray: """ Normalizes image in Numpy 2D array format, between ranges 0-cap, as to fit uint8 type. Args: image: 2D numpy array representing image as matrix, with values in any range cap: Maximum cap amount for normalization data_type: numpy data type to set output variable to Returns: return 2D numpy array of type uint8, corresponding to limited range matrix Examples: >>> normalize_image(np.array([[1, 2, 3], [4, 5, 10]]), ... cap=1.0, data_type=np.float64) array([[0. , 0.11111111, 0.22222222], [0.33333333, 0.44444444, 1. ]]) >>> normalize_image(np.array([[4, 4, 3], [1, 7, 2]])) array([[127, 127, 85], [ 0, 255, 42]], dtype=uint8) """ normalized = (image - np.min(image)) / (np.max(image) - np.min(image)) * cap return normalized.astype(data_type) def normalize_array(array: np.ndarray, cap: float = 1) -> np.ndarray: """Normalizes a 1D array, between ranges 0-cap. Args: array: List containing values to be normalized between cap range. cap: Maximum cap amount for normalization. Returns: return 1D numpy array, corresponding to limited range array Examples: >>> normalize_array(np.array([2, 3, 5, 7])) array([0. , 0.2, 0.6, 1. ]) >>> normalize_array(np.array([[5], [7], [11], [13]])) array([[0. ], [0.25], [0.75], [1. 
]]) """ diff = np.max(array) - np.min(array) return (array - np.min(array)) / (1 if diff == 0 else diff) * cap def grayscale(image: np.ndarray) -> np.ndarray: """ Uses luminance weights to transform RGB channel to greyscale, by taking the dot product between the channel and the weights. Example: >>> grayscale(np.array([[[108, 201, 72], [255, 11, 127]], ... [[56, 56, 56], [128, 255, 107]]])) array([[158, 97], [ 56, 200]], dtype=uint8) """ return np.dot(image[:, :, 0:3], [0.299, 0.587, 0.114]).astype(np.uint8) def binarize(image: np.ndarray, threshold: float = 127.0) -> np.ndarray: """ Binarizes a grayscale image based on a given threshold value, setting values to 1 or 0 accordingly. Examples: >>> binarize(np.array([[128, 255], [101, 156]])) array([[1, 1], [0, 1]]) >>> binarize(np.array([[0.07, 1], [0.51, 0.3]]), threshold=0.5) array([[0, 1], [1, 0]]) """ return np.where(image > threshold, 1, 0) def transform( image: np.ndarray, kind: str, kernel: np.ndarray | None = None ) -> np.ndarray: """ Simple image transformation using one of two available filter functions: Erosion and Dilation. Args: image: binarized input image, onto which to apply transformation kind: Can be either 'erosion', in which case the :func:np.max function is called, or 'dilation', when :func:np.min is used instead. kernel: n x n kernel with shape < :attr:image.shape, to be used when applying convolution to original image Returns: returns a numpy array with same shape as input image, corresponding to applied binary transformation. 
Examples: >>> img = np.array([[1, 0.5], [0.2, 0.7]]) >>> img = binarize(img, threshold=0.5) >>> transform(img, 'erosion') array([[1, 1], [1, 1]], dtype=uint8) >>> transform(img, 'dilation') array([[0, 0], [0, 0]], dtype=uint8) """ if kernel is None: kernel = np.ones((3, 3)) if kind == "erosion": constant = 1 apply = np.max else: constant = 0 apply = np.min center_x, center_y = (x // 2 for x in kernel.shape) # Use padded image when applying convolution # to not go out of bounds of the original the image transformed = np.zeros(image.shape, dtype=np.uint8) padded = np.pad(image, 1, "constant", constant_values=constant) for x in range(center_x, padded.shape[0] - center_x): for y in range(center_y, padded.shape[1] - center_y): center = padded[ x - center_x : x + center_x + 1, y - center_y : y + center_y + 1 ] # Apply transformation method to the centered section of the image transformed[x - center_x, y - center_y] = apply(center[kernel == 1]) return transformed def opening_filter(image: np.ndarray, kernel: np.ndarray | None = None) -> np.ndarray: """ Opening filter, defined as the sequence of erosion and then a dilation filter on the same image. Examples: >>> img = np.array([[1, 0.5], [0.2, 0.7]]) >>> img = binarize(img, threshold=0.5) >>> opening_filter(img) array([[1, 1], [1, 1]], dtype=uint8) """ if kernel is None: np.ones((3, 3)) return transform(transform(image, "dilation", kernel), "erosion", kernel) def closing_filter(image: np.ndarray, kernel: np.ndarray | None = None) -> np.ndarray: """ Opening filter, defined as the sequence of dilation and then erosion filter on the same image. 
Examples: >>> img = np.array([[1, 0.5], [0.2, 0.7]]) >>> img = binarize(img, threshold=0.5) >>> closing_filter(img) array([[0, 0], [0, 0]], dtype=uint8) """ if kernel is None: kernel = np.ones((3, 3)) return transform(transform(image, "erosion", kernel), "dilation", kernel) def binary_mask( image_gray: np.ndarray, image_map: np.ndarray ) -> tuple[np.ndarray, np.ndarray]: """ Apply binary mask, or thresholding based on bit mask value (mapping mask is binary). Returns the mapped true value mask and its complementary false value mask. Example: >>> img = np.array([[[108, 201, 72], [255, 11, 127]], ... [[56, 56, 56], [128, 255, 107]]]) >>> gray = grayscale(img) >>> binary = binarize(gray) >>> morphological = opening_filter(binary) >>> binary_mask(gray, morphological) (array([[1, 1], [1, 1]], dtype=uint8), array([[158, 97], [ 56, 200]], dtype=uint8)) """ true_mask, false_mask = image_gray.copy(), image_gray.copy() true_mask[image_map == 1] = 1 false_mask[image_map == 0] = 0 return true_mask, false_mask def matrix_concurrency(image: np.ndarray, coordinate: tuple[int, int]) -> np.ndarray: """ Calculate sample co-occurrence matrix based on input image as well as selected coordinates on image. Implementation is made using basic iteration, as function to be performed (np.max) is non-linear and therefore not callable on the frequency domain. Example: >>> img = np.array([[[108, 201, 72], [255, 11, 127]], ... 
[[56, 56, 56], [128, 255, 107]]]) >>> gray = grayscale(img) >>> binary = binarize(gray) >>> morphological = opening_filter(binary) >>> mask_1 = binary_mask(gray, morphological)[0] >>> matrix_concurrency(mask_1, (0, 1)) array([[0., 0.], [0., 0.]]) """ matrix = np.zeros([np.max(image) + 1, np.max(image) + 1]) offset_x, offset_y = coordinate for x in range(1, image.shape[0] - 1): for y in range(1, image.shape[1] - 1): base_pixel = image[x, y] offset_pixel = image[x + offset_x, y + offset_y] matrix[base_pixel, offset_pixel] += 1 matrix_sum = np.sum(matrix) return matrix / (1 if matrix_sum == 0 else matrix_sum) def haralick_descriptors(matrix: np.ndarray) -> list[float]: """Calculates all 8 Haralick descriptors based on co-occurrence input matrix. All descriptors are as follows: Maximum probability, Inverse Difference, Homogeneity, Entropy, Energy, Dissimilarity, Contrast and Correlation Args: matrix: Co-occurrence matrix to use as base for calculating descriptors. Returns: Reverse ordered list of resulting descriptors Example: >>> img = np.array([[[108, 201, 72], [255, 11, 127]], ... 
[[56, 56, 56], [128, 255, 107]]]) >>> gray = grayscale(img) >>> binary = binarize(gray) >>> morphological = opening_filter(binary) >>> mask_1 = binary_mask(gray, morphological)[0] >>> concurrency = matrix_concurrency(mask_1, (0, 1)) >>> [float(f) for f in haralick_descriptors(concurrency)] [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] """ # Function np.indices could be used for bigger input types, # but np.ogrid works just fine i, j = np.ogrid[0 : matrix.shape[0], 0 : matrix.shape[1]] # np.indices() # Pre-calculate frequent multiplication and subtraction prod = np.multiply(i, j) sub = np.subtract(i, j) # Calculate numerical value of Maximum Probability maximum_prob = np.max(matrix) # Using the definition for each descriptor individually to calculate its matrix correlation = prod * matrix energy = np.power(matrix, 2) contrast = matrix * np.power(sub, 2) dissimilarity = matrix * np.abs(sub) inverse_difference = matrix / (1 + np.abs(sub)) homogeneity = matrix / (1 + np.power(sub, 2)) entropy = -(matrix[matrix > 0] * np.log(matrix[matrix > 0])) # Sum values for descriptors ranging from the first one to the last, # as all are their respective origin matrix and not the resulting value yet. return [ maximum_prob, correlation.sum(), energy.sum(), contrast.sum(), dissimilarity.sum(), inverse_difference.sum(), homogeneity.sum(), entropy.sum(), ] def get_descriptors( masks: tuple[np.ndarray, np.ndarray], coordinate: tuple[int, int] ) -> np.ndarray: """ Calculate all Haralick descriptors for a sequence of different co-occurrence matrices, given input masks and coordinates. Example: >>> img = np.array([[[108, 201, 72], [255, 11, 127]], ... 
[[56, 56, 56], [128, 255, 107]]]) >>> gray = grayscale(img) >>> binary = binarize(gray) >>> morphological = opening_filter(binary) >>> get_descriptors(binary_mask(gray, morphological), (0, 1)) array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) """ descriptors = np.array( [haralick_descriptors(matrix_concurrency(mask, coordinate)) for mask in masks] ) # Concatenate each individual descriptor into # one single list containing sequence of descriptors return np.concatenate(descriptors, axis=None) def euclidean(point_1: np.ndarray, point_2: np.ndarray) -> float: """ Simple method for calculating the euclidean distance between two points, with type np.ndarray. Example: >>> a = np.array([1, 0, -2]) >>> b = np.array([2, -1, 1]) >>> euclidean(a, b) 3.3166247903554 """ return float(np.sqrt(np.sum(np.square(point_1 - point_2)))) def get_distances(descriptors: np.ndarray, base: int) -> list[tuple[int, float]]: """ Calculate all Euclidean distances between a selected base descriptor and all other Haralick descriptors The resulting comparison is return in decreasing order, showing which descriptor is the most similar to the selected base. Args: descriptors: Haralick descriptors to compare with base index base: Haralick descriptor index to use as base when calculating respective euclidean distance to other descriptors. Returns: Ordered distances between descriptors Example: >>> index = 1 >>> img = np.array([[[108, 201, 72], [255, 11, 127]], ... [[56, 56, 56], [128, 255, 107]]]) >>> gray = grayscale(img) >>> binary = binarize(gray) >>> morphological = opening_filter(binary) >>> get_distances(get_descriptors( ... binary_mask(gray, morphological), (0, 1)), ... 
index) [(0, 0.0), (1, 0.0), (2, 0.0), (3, 0.0), (4, 0.0), (5, 0.0), \ (6, 0.0), (7, 0.0), (8, 0.0), (9, 0.0), (10, 0.0), (11, 0.0), (12, 0.0), \ (13, 0.0), (14, 0.0), (15, 0.0)] """ distances = np.array( [euclidean(descriptor, descriptors[base]) for descriptor in descriptors] ) # Normalize distances between range [0, 1] normalized_distances: list[float] = normalize_array(distances, 1).tolist() enum_distances = list(enumerate(normalized_distances)) enum_distances.sort(key=lambda tup: tup[1], reverse=True) return enum_distances if __name__ == "__main__": # Index to compare haralick descriptors to index = int(input()) q_value_list = [int(value) for value in input().split()] q_value = (q_value_list[0], q_value_list[1]) # Format is the respective filter to apply, # can be either 1 for the opening filter or else for the closing parameters = {"format": int(input()), "threshold": int(input())} # Number of images to perform methods on b_number = int(input()) files, descriptors = [], [] for _ in range(b_number): file = input().rstrip() files.append(file) # Open given image and calculate morphological filter, # respective masks and correspondent Harralick Descriptors. image = imageio.imread(file).astype(np.float32) gray = grayscale(image) threshold = binarize(gray, parameters["threshold"]) morphological = ( opening_filter(threshold) if parameters["format"] == 1 else closing_filter(threshold) ) masks = binary_mask(gray, morphological) descriptors.append(get_descriptors(masks, q_value)) # Transform ordered distances array into a sequence of indexes # corresponding to original file position distances = get_distances(np.array(descriptors), index) indexed_distances = np.array(distances).astype(np.uint8)[:, 0] # Finally, print distances considering the Haralick descriptions from the base # file to all other images using the morphology method of choice. 
print(f"Query: {files[index]}") print("Ranking:") for idx, file_idx in enumerate(indexed_distances): print(f"({idx}) {files[file_idx]}", end="\n")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/horn_schunck.py
computer_vision/horn_schunck.py
""" The Horn-Schunck method estimates the optical flow for every single pixel of a sequence of images. It works by assuming brightness constancy between two consecutive frames and smoothness in the optical flow. Useful resources: Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf """ from typing import SupportsIndex import numpy as np from scipy.ndimage import convolve def warp( image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray ) -> np.ndarray: """ Warps the pixels of an image into a new image using the horizontal and vertical flows. Pixels that are warped from an invalid location are set to 0. Parameters: image: Grayscale image horizontal_flow: Horizontal flow vertical_flow: Vertical flow Returns: Warped image >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \ np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \ np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]])) array([[0, 0, 0], [3, 1, 0], [0, 2, 3]]) """ flow = np.stack((horizontal_flow, vertical_flow), 2) # Create a grid of all pixel coordinates and subtract the flow to get the # target pixels coordinates grid = np.stack( np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2 ) grid = np.round(grid - flow).astype(np.int32) # Find the locations outside of the original image invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]])) grid[invalid] = 0 warped = image[grid[:, :, 1], grid[:, :, 0]] # Set pixels at invalid locations to 0 warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0 return warped def horn_schunck( image0: np.ndarray, image1: np.ndarray, num_iter: SupportsIndex, alpha: float | None = None, ) -> tuple[np.ndarray, np.ndarray]: """ This function performs the Horn-Schunck algorithm and returns the estimated optical flow. It is assumed that the input images are grayscale and normalized to be in [0, 1]. 
Parameters: image0: First image of the sequence image1: Second image of the sequence alpha: Regularization constant num_iter: Number of iterations performed Returns: estimated horizontal & vertical flow >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \ np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\ astype(np.int32) array([[[ 0, -1, -1], [ 0, -1, -1]], <BLANKLINE> [[ 0, 0, 0], [ 0, 0, 0]]], dtype=int32) """ if alpha is None: alpha = 0.1 # Initialize flow horizontal_flow = np.zeros_like(image0) vertical_flow = np.zeros_like(image0) # Prepare kernels for the calculation of the derivatives and the average velocity kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25 kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25 kernel_t = np.array([[1, 1], [1, 1]]) * 0.25 kernel_laplacian = np.array( [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]] ) # Iteratively refine the flow for _ in range(num_iter): warped_image = warp(image0, horizontal_flow, vertical_flow) derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x) derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y) derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t) avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian) avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian) # This updates the flow as proposed in the paper (Step 12) update = ( derivative_x * avg_horizontal_velocity + derivative_y * avg_vertical_velocity + derivative_t ) update = update / (alpha**2 + derivative_x**2 + derivative_y**2) horizontal_flow = avg_horizontal_velocity - derivative_x * update vertical_flow = avg_vertical_velocity - derivative_y * update return horizontal_flow, vertical_flow if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/mosaic_augmentation.py
computer_vision/mosaic_augmentation.py
"""Source: https://github.com/jason9075/opencv-mosaic-data-aug""" import glob import os import random from string import ascii_lowercase, digits import cv2 import numpy as np # Parameters OUTPUT_SIZE = (720, 1280) # Height, Width SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it. FILTER_TINY_SCALE = 1 / 100 LABEL_DIR = "" IMG_DIR = "" OUTPUT_DIR = "" NUMBER_IMAGES = 250 def main() -> None: """ Get images list and annotations list from input dir. Update new images and annotations. Save images and annotations in output dir. """ img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR) for index in range(NUMBER_IMAGES): idxs = random.sample(range(len(annos)), 4) new_image, new_annos, path = update_image_and_anno( img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' letter_code = random_chars(32) file_name = path.split(os.sep)[-1].rsplit(".", 1)[0] file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}" cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85]) print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}") annos_list = [] for anno in new_annos: width = anno[3] - anno[1] height = anno[4] - anno[2] x_center = anno[1] + width / 2 y_center = anno[2] + height / 2 obj = f"{anno[0]} {x_center} {y_center} {width} {height}" annos_list.append(obj) with open(f"{file_root}.txt", "w") as outfile: outfile.write("\n".join(line for line in annos_list)) def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]: """ - label_dir <type: str>: Path to label include annotation of images - img_dir <type: str>: Path to folder contain images Return <type: list>: List of images path and labels """ img_paths = [] labels = [] for label_file in glob.glob(os.path.join(label_dir, "*.txt")): label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0] with open(label_file) as in_file: obj_lists = in_file.readlines() img_path = 
os.path.join(img_dir, f"{label_name}.jpg") boxes = [] for obj_list in obj_lists: obj = obj_list.rstrip("\n").split(" ") xmin = float(obj[1]) - float(obj[3]) / 2 ymin = float(obj[2]) - float(obj[4]) / 2 xmax = float(obj[1]) + float(obj[3]) / 2 ymax = float(obj[2]) + float(obj[4]) / 2 boxes.append([int(obj[0]), xmin, ymin, xmax, ymax]) if not boxes: continue img_paths.append(img_path) labels.append(boxes) return img_paths, labels def update_image_and_anno( all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ) -> tuple[list, list, str]: """ - all_img_list <type: list>: list of all images - all_annos <type: list>: list of all annotations of specific image - idxs <type: list>: index of image in list - output_size <type: tuple>: size of output image (Height, Width) - scale_range <type: tuple>: range of scale image - filter_scale <type: float>: the condition of downscale image and bounding box Return: - output_img <type: narray>: image after resize - new_anno <type: list>: list of new annotation after scale - path[0] <type: string>: get the name of image file """ output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8) scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) divid_point_x = int(scale_x * output_size[1]) divid_point_y = int(scale_y * output_size[0]) new_anno = [] path_list = [] for i, index in enumerate(idxs): path = all_img_list[index] path_list.append(path) img_annos = all_annos[index] img = cv2.imread(path) if i == 0: # top-left img = cv2.resize(img, (divid_point_x, divid_point_y)) output_img[:divid_point_y, :divid_point_x, :] = img for bbox in img_annos: xmin = bbox[1] * scale_x ymin = bbox[2] * scale_y xmax = bbox[3] * scale_x ymax = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) elif i == 1: # top-right img = 
cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y)) output_img[:divid_point_y, divid_point_x : output_size[1], :] = img for bbox in img_annos: xmin = scale_x + bbox[1] * (1 - scale_x) ymin = bbox[2] * scale_y xmax = scale_x + bbox[3] * (1 - scale_x) ymax = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) elif i == 2: # bottom-left img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y)) output_img[divid_point_y : output_size[0], :divid_point_x, :] = img for bbox in img_annos: xmin = bbox[1] * scale_x ymin = scale_y + bbox[2] * (1 - scale_y) xmax = bbox[3] * scale_x ymax = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) else: # bottom-right img = cv2.resize( img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) output_img[ divid_point_y : output_size[0], divid_point_x : output_size[1], : ] = img for bbox in img_annos: xmin = scale_x + bbox[1] * (1 - scale_x) ymin = scale_y + bbox[2] * (1 - scale_y) xmax = scale_x + bbox[3] * (1 - scale_x) ymax = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax]) # Remove bounding box small than scale of filter if filter_scale > 0: new_anno = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def random_chars(number_char: int) -> str: """ Automatic generate random 32 characters. Get random string code: '7b7ad245cdff75241935e4dd860f3bad' >>> len(random_chars(32)) 32 """ assert number_char > 1, "The number of character should greater than 1" letter_code = ascii_lowercase + digits return "".join(random.choice(letter_code) for _ in range(number_char)) if __name__ == "__main__": main() print("DONE ✅")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/computer_vision/pooling_functions.py
computer_vision/pooling_functions.py
# Source : https://computersciencewiki.org/index.php/Max-pooling_/_Pooling # Importing the libraries import numpy as np from PIL import Image # Maxpooling Function def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray: """ This function is used to perform maxpooling on the input array of 2D matrix(image) Args: arr: numpy array size: size of pooling matrix stride: the number of pixels shifts over the input matrix Returns: numpy array of maxpooled matrix Sample Input Output: >>> maxpooling([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], 2, 2) array([[ 6., 8.], [14., 16.]]) >>> maxpooling([[147, 180, 122],[241, 76, 32],[126, 13, 157]], 2, 1) array([[241., 180.], [241., 157.]]) """ arr = np.array(arr) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix") i = 0 j = 0 mat_i = 0 mat_j = 0 # compute the shape of the output matrix maxpool_shape = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape updated_arr = np.zeros((maxpool_shape, maxpool_shape)) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size]) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 j = 0 mat_j = 0 return updated_arr # Averagepooling Function def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray: """ This function is used to perform avgpooling on the input array of 2D matrix(image) Args: arr: numpy array size: size of pooling matrix stride: the number of pixels shifts over the input matrix Returns: numpy array of avgpooled matrix Sample Input Output: >>> 
avgpooling([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], 2, 2) array([[ 3., 5.], [11., 13.]]) >>> avgpooling([[147, 180, 122],[241, 76, 32],[126, 13, 157]], 2, 1) array([[161., 102.], [114., 69.]]) """ arr = np.array(arr) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix") i = 0 j = 0 mat_i = 0 mat_j = 0 # compute the shape of the output matrix avgpool_shape = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape updated_arr = np.zeros((avgpool_shape, avgpool_shape)) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size])) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 j = 0 mat_j = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="avgpooling", verbose=True) # Loading the image image = Image.open("path_to_image") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/networking_flow/ford_fulkerson.py
networking_flow/ford_fulkerson.py
""" Ford-Fulkerson Algorithm for Maximum Flow Problem * https://en.wikipedia.org/wiki/Ford%E2%80%93Fulkerson_algorithm Description: (1) Start with initial flow as 0 (2) Choose the augmenting path from source to sink and add the path to flow """ graph = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def breadth_first_search(graph: list, source: int, sink: int, parents: list) -> bool: """ This function returns True if there is a node that has not iterated. Args: graph: Adjacency matrix of graph source: Source sink: Sink parents: Parent list Returns: True if there is a node that has not iterated. >>> breadth_first_search(graph, 0, 5, [-1, -1, -1, -1, -1, -1]) True >>> breadth_first_search(graph, 0, 6, [-1, -1, -1, -1, -1, -1]) Traceback (most recent call last): ... IndexError: list index out of range """ visited = [False] * len(graph) # Mark all nodes as not visited queue = [] # breadth-first search queue # Source node queue.append(source) visited[source] = True while queue: u = queue.pop(0) # Pop the front node # Traverse all adjacent nodes of u for ind, node in enumerate(graph[u]): if visited[ind] is False and node > 0: queue.append(ind) visited[ind] = True parents[ind] = u return visited[sink] def ford_fulkerson(graph: list, source: int, sink: int) -> int: """ This function returns the maximum flow from source to sink in the given graph. CAUTION: This function changes the given graph. Args: graph: Adjacency matrix of graph source: Source sink: Sink Returns: Maximum flow >>> test_graph = [ ... [0, 16, 13, 0, 0, 0], ... [0, 0, 10, 12, 0, 0], ... [0, 4, 0, 0, 14, 0], ... [0, 0, 9, 0, 0, 20], ... [0, 0, 0, 7, 0, 4], ... [0, 0, 0, 0, 0, 0], ... 
] >>> ford_fulkerson(test_graph, 0, 5) 23 """ # This array is filled by breadth-first search and to store path parent = [-1] * (len(graph)) max_flow = 0 # While there is a path from source to sink while breadth_first_search(graph, source, sink, parent): path_flow = int(1e9) # Infinite value s = sink while s != source: # Find the minimum value in the selected path path_flow = min(path_flow, graph[parent[s]][s]) s = parent[s] max_flow += path_flow v = sink while v != source: u = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow v = parent[v] return max_flow if __name__ == "__main__": from doctest import testmod testmod() print(f"{ford_fulkerson(graph, source=0, sink=5) = }")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/networking_flow/minimum_cut.py
networking_flow/minimum_cut.py
# Minimum cut on Ford_Fulkerson algorithm. test_graph = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] def bfs(graph, s, t, parent): # Return True if there is node that has not iterated. visited = [False] * len(graph) queue = [s] visited[s] = True while queue: u = queue.pop(0) for ind in range(len(graph[u])): if visited[ind] is False and graph[u][ind] > 0: queue.append(ind) visited[ind] = True parent[ind] = u return visited[t] def mincut(graph, source, sink): """This array is filled by BFS and to store path >>> mincut(test_graph, source=0, sink=5) [(1, 3), (4, 3), (4, 5)] """ parent = [-1] * (len(graph)) max_flow = 0 res = [] temp = [i[:] for i in graph] # Record original cut, copy. while bfs(graph, source, sink, parent): path_flow = float("Inf") s = sink while s != source: # Find the minimum value in select path path_flow = min(path_flow, graph[parent[s]][s]) s = parent[s] max_flow += path_flow v = sink while v != source: u = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow v = parent[v] for i in range(len(graph)): for j in range(len(graph[0])): if graph[i][j] == 0 and temp[i][j] > 0: res.append((i, j)) return res if __name__ == "__main__": print(mincut(test_graph, source=0, sink=5))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/networking_flow/__init__.py
networking_flow/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/or_gate.py
boolean_algebra/or_gate.py
""" An OR Gate is a logic gate in boolean algebra which results to 0 (False) if both the inputs are 0, and 1 (True) otherwise. Following is the truth table of an AND Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 0 | | 0 | 1 | 1 | | 1 | 0 | 1 | | 1 | 1 | 1 | ------------------------------ Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ """ def or_gate(input_1: int, input_2: int) -> int: """ Calculate OR of the input values >>> or_gate(0, 0) 0 >>> or_gate(0, 1) 1 >>> or_gate(1, 0) 1 >>> or_gate(1, 1) 1 """ return int((input_1, input_2).count(1) != 0) if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/xor_gate.py
boolean_algebra/xor_gate.py
""" A XOR Gate is a logic gate in boolean algebra which results to 1 (True) if only one of the two inputs is 1, and 0 (False) if an even number of inputs are 1. Following is the truth table of a XOR Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 0 | | 0 | 1 | 1 | | 1 | 0 | 1 | | 1 | 1 | 0 | ------------------------------ Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ """ def xor_gate(input_1: int, input_2: int) -> int: """ calculate xor of the input values >>> xor_gate(0, 0) 0 >>> xor_gate(0, 1) 1 >>> xor_gate(1, 0) 1 >>> xor_gate(1, 1) 0 """ return (input_1, input_2).count(0) % 2 if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/xnor_gate.py
boolean_algebra/xnor_gate.py
""" A XNOR Gate is a logic gate in boolean algebra which results to 0 (False) if both the inputs are different, and 1 (True), if the inputs are same. It's similar to adding a NOT gate to an XOR gate Following is the truth table of a XNOR Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 1 | | 0 | 1 | 0 | | 1 | 0 | 0 | | 1 | 1 | 1 | ------------------------------ Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ """ def xnor_gate(input_1: int, input_2: int) -> int: """ Calculate XOR of the input values >>> xnor_gate(0, 0) 1 >>> xnor_gate(0, 1) 0 >>> xnor_gate(1, 0) 0 >>> xnor_gate(1, 1) 1 """ return 1 if input_1 == input_2 else 0 if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/karnaugh_map_simplification.py
boolean_algebra/karnaugh_map_simplification.py
""" https://en.wikipedia.org/wiki/Karnaugh_map https://www.allaboutcircuits.com/technical-articles/karnaugh-map-boolean-algebraic-simplification-technique """ def simplify_kmap(kmap: list[list[int]]) -> str: """ Simplify the Karnaugh map. >>> simplify_kmap(kmap=[[0, 1], [1, 1]]) "A'B + AB' + AB" >>> simplify_kmap(kmap=[[0, 0], [0, 0]]) '' >>> simplify_kmap(kmap=[[0, 1], [1, -1]]) "A'B + AB' + AB" >>> simplify_kmap(kmap=[[0, 1], [1, 2]]) "A'B + AB' + AB" >>> simplify_kmap(kmap=[[0, 1], [1, 1.1]]) "A'B + AB' + AB" >>> simplify_kmap(kmap=[[0, 1], [1, 'a']]) "A'B + AB' + AB" """ simplified_f = [] for a, row in enumerate(kmap): for b, item in enumerate(row): if item: term = ("A" if a else "A'") + ("B" if b else "B'") simplified_f.append(term) return " + ".join(simplified_f) def main() -> None: """ Main function to create and simplify a K-Map. >>> main() [0, 1] [1, 1] Simplified Expression: A'B + AB' + AB """ kmap = [[0, 1], [1, 1]] # Manually generate the product of [0, 1] and [0, 1] for row in kmap: print(row) print("Simplified Expression:") print(simplify_kmap(kmap)) if __name__ == "__main__": main() print(f"{simplify_kmap(kmap=[[0, 1], [1, 1]]) = }")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/imply_gate.py
boolean_algebra/imply_gate.py
""" An IMPLY Gate is a logic gate in boolean algebra which results to 1 if either input 1 is 0, or if input 1 is 1, then the output is 1 only if input 2 is 1. It is true if input 1 implies input 2. Following is the truth table of an IMPLY Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 1 | | 0 | 1 | 1 | | 1 | 0 | 0 | | 1 | 1 | 1 | ------------------------------ Refer - https://en.wikipedia.org/wiki/IMPLY_gate """ def imply_gate(input_1: int, input_2: int) -> int: """ Calculate IMPLY of the input values >>> imply_gate(0, 0) 1 >>> imply_gate(0, 1) 1 >>> imply_gate(1, 0) 0 >>> imply_gate(1, 1) 1 """ return int(input_1 == 0 or input_2 == 1) def recursive_imply_list(input_list: list[int]) -> int: """ Recursively calculates the implication of a list. Strictly the implication is applied consecutively left to right: ( (a -> b) -> c ) -> d ... >>> recursive_imply_list([]) Traceback (most recent call last): ... ValueError: Input list must contain at least two elements >>> recursive_imply_list([0]) Traceback (most recent call last): ... ValueError: Input list must contain at least two elements >>> recursive_imply_list([1]) Traceback (most recent call last): ... 
ValueError: Input list must contain at least two elements >>> recursive_imply_list([0, 0]) 1 >>> recursive_imply_list([0, 1]) 1 >>> recursive_imply_list([1, 0]) 0 >>> recursive_imply_list([1, 1]) 1 >>> recursive_imply_list([0, 0, 0]) 0 >>> recursive_imply_list([0, 0, 1]) 1 >>> recursive_imply_list([0, 1, 0]) 0 >>> recursive_imply_list([0, 1, 1]) 1 >>> recursive_imply_list([1, 0, 0]) 1 >>> recursive_imply_list([1, 0, 1]) 1 >>> recursive_imply_list([1, 1, 0]) 0 >>> recursive_imply_list([1, 1, 1]) 1 """ if len(input_list) < 2: raise ValueError("Input list must contain at least two elements") first_implication = imply_gate(input_list[0], input_list[1]) if len(input_list) == 2: return first_implication new_list = [first_implication, *input_list[2:]] return recursive_imply_list(new_list) if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/quine_mc_cluskey.py
boolean_algebra/quine_mc_cluskey.py
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Merge two implicant strings that differ in at most one position.

    Returns the merged string with the differing bit replaced by "_",
    or False when the strings differ in more than one position.

    >>> compare_string('0010','0110')
    '0_10'
    >>> compare_string('0110','1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"  # blank out the single differing position
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """
    Repeatedly combine implicants until no more merges happen; collect the
    ones that never combined (marked "$") as prime implicants.

    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)  # "$" = not combined this round
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # NOTE(review): this branch fires when compare_string FAILS
                # (returns False) and appends a placeholder "X" rather than
                # the merged implicant k — the classic Quine-McCluskey step
                # marks/merges on success instead.  Looks inverted; confirm
                # against the upstream algorithm before relying on it.
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))  # next round works on the merged strings


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """
    Convert each minterm to a fixed-width bit string (most significant first).

    Accepts floats, hence the unusual doctest output: 1.5 % 2 stays 1.5.

    >>> decimal_to_binary(3,[1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    Check whether implicant string1 covers minterm string2: the number of
    mismatching positions must equal ``count`` (the implicant's "_" count).

    >>> is_for_table('__1','011',2)
    True
    >>> is_for_table('01_','001',1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = sum(item1 != item2 for item1, item2 in zip(list1, list2))
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """
    Pick essential prime implicants from the coverage chart, then greedily
    cover remaining minterms.  Mutates ``chart`` (covered columns zeroed).

    >>> selection([[1]],['0.00.01.5'])
    ['0.00.01.5']
    >>> selection([[1]],['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = sum(row[i] == 1 for row in chart)
        if count == 1:
            rem = max(j for j, row in enumerate(chart) if row[i] == 1)
            select[rem] = 1
    # Take each essential implicant and zero out every column it covers.
    for i, item in enumerate(select):
        if item != 1:
            continue
        for j in range(len(chart[0])):
            if chart[i][j] != 1:
                continue
            for row in chart:
                row[j] = 0
        temp.append(prime_implicants[i])
    # Greedy phase: repeatedly take the implicant covering the most
    # still-uncovered columns until nothing is left to cover.
    while True:
        counts = [chart[i].count(1) for i in range(len(chart))]
        max_n = max(counts)
        rem = counts.index(max_n)
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for j in range(len(chart[0])):
            if chart[rem][j] != 1:
                continue
            for i in range(len(chart)):
                chart[i][j] = 0


def prime_implicant_chart(
    prime_implicants: list[str], binary: list[str]
) -> list[list[int]]:
    """
    Build the coverage chart: chart[i][j] == 1 when prime implicant i
    covers minterm j.

    >>> prime_implicant_chart(['0.00.01.5'],['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    # Interactive driver: read variable count and minterms, then run the
    # full pipeline and print prime / essential prime implicants.
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/and_gate.py
boolean_algebra/and_gate.py
""" An AND Gate is a logic gate in boolean algebra which results to 1 (True) if all the inputs are 1 (True), and 0 (False) otherwise. Following is the truth table of a Two Input AND Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 0 | | 0 | 1 | 0 | | 1 | 0 | 0 | | 1 | 1 | 1 | ------------------------------ Refer - https://www.geeksforgeeks.org/logic-gates/ """ def and_gate(input_1: int, input_2: int) -> int: """ Calculate AND of the input values >>> and_gate(0, 0) 0 >>> and_gate(0, 1) 0 >>> and_gate(1, 0) 0 >>> and_gate(1, 1) 1 """ return int(input_1 and input_2) def n_input_and_gate(inputs: list[int]) -> int: """ Calculate AND of a list of input values >>> n_input_and_gate([1, 0, 1, 1, 0]) 0 >>> n_input_and_gate([1, 1, 1, 1, 1]) 1 """ return int(all(inputs)) if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/nimply_gate.py
boolean_algebra/nimply_gate.py
""" An NIMPLY Gate is a logic gate in boolean algebra which results to 0 if either input 1 is 0, or if input 1 is 1, then it is 0 only if input 2 is 1. It is false if input 1 implies input 2. It is the negated form of imply Following is the truth table of an NIMPLY Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 0 | | 0 | 1 | 0 | | 1 | 0 | 1 | | 1 | 1 | 0 | ------------------------------ Refer - https://en.wikipedia.org/wiki/NIMPLY_gate """ def nimply_gate(input_1: int, input_2: int) -> int: """ Calculate NIMPLY of the input values >>> nimply_gate(0, 0) 0 >>> nimply_gate(0, 1) 0 >>> nimply_gate(1, 0) 1 >>> nimply_gate(1, 1) 0 """ return int(input_1 == 1 and input_2 == 0) if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/__init__.py
boolean_algebra/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/not_gate.py
boolean_algebra/not_gate.py
""" A NOT Gate is a logic gate in boolean algebra which results to 0 (False) if the input is high, and 1 (True) if the input is low. Following is the truth table of a XOR Gate: ------------------------------ | Input | Output | ------------------------------ | 0 | 1 | | 1 | 0 | ------------------------------ Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ """ def not_gate(input_1: int) -> int: """ Calculate NOT of the input values >>> not_gate(0) 1 >>> not_gate(1) 0 """ return 1 if input_1 == 0 else 0 if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/nand_gate.py
boolean_algebra/nand_gate.py
""" A NAND Gate is a logic gate in boolean algebra which results to 0 (False) if both the inputs are 1, and 1 (True) otherwise. It's similar to adding a NOT gate along with an AND gate. Following is the truth table of a NAND Gate: ------------------------------ | Input 1 | Input 2 | Output | ------------------------------ | 0 | 0 | 1 | | 0 | 1 | 1 | | 1 | 0 | 1 | | 1 | 1 | 0 | ------------------------------ Refer - https://www.geeksforgeeks.org/logic-gates-in-python/ """ def nand_gate(input_1: int, input_2: int) -> int: """ Calculate NAND of the input values >>> nand_gate(0, 0) 1 >>> nand_gate(0, 1) 1 >>> nand_gate(1, 0) 1 >>> nand_gate(1, 1) 0 """ return int(not (input_1 and input_2)) if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/multiplexer.py
boolean_algebra/multiplexer.py
def mux(input0: int, input1: int, select: int) -> int:
    """
    Implement a 2-to-1 Multiplexer.

    :param input0: The first input value (0 or 1).
    :param input1: The second input value (0 or 1).
    :param select: The select signal (0 or 1) to choose between input0 and input1.
    :return: The output based on the select signal. input1 if select else input0.

    https://www.electrically4u.com/solved-problems-on-multiplexer
    https://en.wikipedia.org/wiki/Multiplexer

    >>> mux(0, 1, 0)
    0
    >>> mux(0, 1, 1)
    1
    >>> mux(1, 0, 0)
    1
    >>> mux(1, 0, 1)
    0
    >>> mux(2, 1, 0)
    Traceback (most recent call last):
        ...
    ValueError: Inputs and select signal must be 0 or 1
    >>> mux(0, -1, 0)
    Traceback (most recent call last):
        ...
    ValueError: Inputs and select signal must be 0 or 1
    >>> mux(0, 1, 1.1)
    Traceback (most recent call last):
        ...
    ValueError: Inputs and select signal must be 0 or 1
    """
    # Guard clause (De Morgan of the original all(...) check): reject any
    # signal outside the binary domain before routing.
    if any(signal not in (0, 1) for signal in (input0, input1, select)):
        raise ValueError("Inputs and select signal must be 0 or 1")
    return input1 if select else input0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/boolean_algebra/nor_gate.py
boolean_algebra/nor_gate.py
""" A NOR Gate is a logic gate in boolean algebra which results in false(0) if any of the inputs is 1, and True(1) if all inputs are 0. Following is the truth table of a NOR Gate: Truth Table of NOR Gate: | Input 1 | Input 2 | Output | | 0 | 0 | 1 | | 0 | 1 | 0 | | 1 | 0 | 0 | | 1 | 1 | 0 | Code provided by Akshaj Vishwanathan https://www.geeksforgeeks.org/logic-gates-in-python """ from collections.abc import Callable def nor_gate(input_1: int, input_2: int) -> int: """ >>> nor_gate(0, 0) 1 >>> nor_gate(0, 1) 0 >>> nor_gate(1, 0) 0 >>> nor_gate(1, 1) 0 >>> nor_gate(0.0, 0.0) 1 >>> nor_gate(0, -7) 0 """ return int(input_1 == input_2 == 0) def truth_table(func: Callable) -> str: """ >>> print(truth_table(nor_gate)) Truth Table of NOR Gate: | Input 1 | Input 2 | Output | | 0 | 0 | 1 | | 0 | 1 | 0 | | 1 | 0 | 0 | | 1 | 1 | 0 | """ def make_table_row(items: list | tuple) -> str: """ >>> make_table_row(("One", "Two", "Three")) '| One | Two | Three |' """ return f"| {' | '.join(f'{item:^8}' for item in items)} |" return "\n".join( ( "Truth Table of NOR Gate:", make_table_row(("Input 1", "Input 2", "Output")), *[make_table_row((i, j, func(i, j))) for i in (0, 1) for j in (0, 1)], ) ) if __name__ == "__main__": import doctest doctest.testmod() print(truth_table(nor_gate))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/docs/__init__.py
docs/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/docs/conf.py
docs/conf.py
# Sphinx configuration: source project metadata from pyproject.toml via
# sphinx_pyproject instead of duplicating it here.
from sphinx_pyproject import SphinxConfig

# SphinxConfig loads the settings into this module's globals (globalns=);
# ``project`` is the documented package's name as declared in pyproject.toml.
project = SphinxConfig("../pyproject.toml", globalns=globals()).name
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/docs/source/__init__.py
docs/source/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/breadth_first_search_zero_one_shortest_path.py
graphs/breadth_first_search_zero_one_shortest_path.py
""" Finding the shortest path in 0-1-graph in O(E + V) which is faster than dijkstra. 0-1-graph is the weighted graph with the weights equal to 0 or 1. Link: https://codeforces.com/blog/entry/22276 """ from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class Edge: """Weighted directed graph edge.""" destination_vertex: int weight: int class AdjacencyList: """Graph adjacency list.""" def __init__(self, size: int): self._graph: list[list[Edge]] = [[] for _ in range(size)] self._size = size def __getitem__(self, vertex: int) -> Iterator[Edge]: """Get all the vertices adjacent to the given one.""" return iter(self._graph[vertex]) @property def size(self): return self._size def add_edge(self, from_vertex: int, to_vertex: int, weight: int): """ >>> g = AdjacencyList(2) >>> g.add_edge(0, 1, 0) >>> g.add_edge(1, 0, 1) >>> list(g[0]) [Edge(destination_vertex=1, weight=0)] >>> list(g[1]) [Edge(destination_vertex=0, weight=1)] >>> g.add_edge(0, 1, 2) Traceback (most recent call last): ... ValueError: Edge weight must be either 0 or 1. >>> g.add_edge(0, 2, 1) Traceback (most recent call last): ... ValueError: Vertex indexes must be in [0; size). """ if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1.") if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size).") self._graph[from_vertex].append(Edge(to_vertex, weight)) def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None: """ Return the shortest distance from start_vertex to finish_vertex in 0-1-graph. 
1 1 1 0--------->3 6--------7>------->8 | ^ ^ ^ |1 | | | |0 v 0| |0 1| 9-------->10 | | | ^ 1 v | | |0 1--------->2<-------4------->5 0 1 1 >>> g = AdjacencyList(11) >>> g.add_edge(0, 1, 0) >>> g.add_edge(0, 3, 1) >>> g.add_edge(1, 2, 0) >>> g.add_edge(2, 3, 0) >>> g.add_edge(4, 2, 1) >>> g.add_edge(4, 5, 1) >>> g.add_edge(4, 6, 1) >>> g.add_edge(5, 9, 0) >>> g.add_edge(6, 7, 1) >>> g.add_edge(7, 8, 1) >>> g.add_edge(8, 10, 1) >>> g.add_edge(9, 7, 0) >>> g.add_edge(9, 10, 1) >>> g.add_edge(1, 2, 2) Traceback (most recent call last): ... ValueError: Edge weight must be either 0 or 1. >>> g.get_shortest_path(0, 3) 0 >>> g.get_shortest_path(0, 4) Traceback (most recent call last): ... ValueError: No path from start_vertex to finish_vertex. >>> g.get_shortest_path(4, 10) 2 >>> g.get_shortest_path(4, 8) 2 >>> g.get_shortest_path(0, 1) 0 >>> g.get_shortest_path(1, 0) Traceback (most recent call last): ... ValueError: No path from start_vertex to finish_vertex. """ queue = deque([start_vertex]) distances: list[int | None] = [None] * self.size distances[start_vertex] = 0 while queue: current_vertex = queue.popleft() current_distance = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: new_distance = current_distance + edge.weight dest_vertex_distance = distances[edge.destination_vertex] if ( isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance ): continue distances[edge.destination_vertex] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex) else: queue.append(edge.destination_vertex) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex.") return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/even_tree.py
graphs/even_tree.py
""" You are given a tree(a simple connected graph with no cycles). The tree has N nodes numbered from 1 to N and is rooted at node 1. Find the maximum number of edges you can remove from the tree to get a forest such that each connected component of the forest contains an even number of nodes. Constraints 2 <= 2 <= 100 Note: The tree input will be such that it can always be decomposed into components containing an even number of nodes. """ # pylint: disable=invalid-name from collections import defaultdict def dfs(start: int) -> int: """DFS traversal""" # pylint: disable=redefined-outer-name ret = 1 visited[start] = True for v in tree[start]: if v not in visited: ret += dfs(v) if ret % 2 == 0: cuts.append(start) return ret def even_tree(): """ 2 1 3 1 4 3 5 2 6 1 7 2 8 6 9 8 10 8 On removing edges (1,3) and (1,6), we can get the desired result 2. """ dfs(1) if __name__ == "__main__": n, m = 10, 9 tree = defaultdict(list) visited: dict[int, bool] = {} cuts: list[int] = [] count = 0 edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/graph_list.py
graphs/graph_list.py
#!/usr/bin/env python3 # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import TypeVar T = TypeVar("T") class GraphAdjacencyList[T]: """ Adjacency List type Graph Data Structure that accounts for directed and undirected Graphs. Initialize graph object indicating whether it's directed or undirected. Directed graph example: >>> d_graph = GraphAdjacencyList() >>> print(d_graph) {} >>> d_graph.add_edge(0, 1) {0: [1], 1: []} >>> d_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5) {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []} >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7) {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} >>> d_graph {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} >>> print(repr(d_graph)) {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []} Undirected graph example: >>> u_graph = GraphAdjacencyList(directed=False) >>> u_graph.add_edge(0, 1) {0: [1], 1: [0]} >>> u_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5) {0: [1], 1: [0, 2, 4, 5], 2: [1], 4: [1], 5: [1]} >>> u_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7) {0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1], 5: [1], 6: [2], 7: [2]} >>> u_graph.add_edge(4, 5) {0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1, 5], 5: [1, 4], 6: [2], 7: [2]} >>> print(u_graph) {0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1, 5], 5: [1, 4], 6: [2], 7: [2]} >>> print(repr(u_graph)) {0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1, 5], 5: [1, 4], 6: [2], 7: [2]} >>> char_graph = GraphAdjacencyList(directed=False) >>> char_graph.add_edge('a', 'b') {'a': ['b'], 'b': ['a']} >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f') {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} >>> char_graph {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']} """ def __init__(self, 
directed: bool = True) -> None: """ Parameters: directed: (bool) Indicates if graph is directed or undirected. Default is True. """ self.adj_list: dict[T, list[T]] = {} # dictionary of lists self.directed = directed def add_edge( self, source_vertex: T, destination_vertex: T ) -> GraphAdjacencyList[T]: """ Connects vertices together. Creates and Edge from source vertex to destination vertex. Vertices will be created if not found in graph """ if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(destination_vertex) self.adj_list[destination_vertex].append(source_vertex) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(destination_vertex) self.adj_list[destination_vertex] = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. 
elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(source_vertex) self.adj_list[source_vertex] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: self.adj_list[source_vertex] = [destination_vertex] self.adj_list[destination_vertex] = [source_vertex] # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. elif source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(destination_vertex) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(destination_vertex) self.adj_list[destination_vertex] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: self.adj_list[source_vertex] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: self.adj_list[source_vertex] = [destination_vertex] self.adj_list[destination_vertex] = [] return self def __repr__(self) -> str: return pformat(self.adj_list)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/edmonds_karp_multiple_source_and_sink.py
graphs/edmonds_karp_multiple_source_and_sink.py
class FlowNetwork:
    """Flow network that may be created with several sources and sinks.

    Multiple sources/sinks are normalized into a single super-source and
    super-sink so that any single-source maximum-flow algorithm applies.
    ``graph`` is a square capacity matrix (list of lists) and is mutated
    in place during normalization.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        # BUG FIX: the original used ``sources is int`` / ``sinks is int``,
        # which compares the *object* to the ``int`` type and is always
        # False; isinstance() is the correct check.
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            # capacity out of the super-source: everything the real
            # sources could ever emit
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            # prepend a super-source (new vertex 0) feeding every source
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            # append a super-sink fed by every sink
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow value."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        # BUG FIX: the executor defines ``get_maximum_flow``; the original
        # called a non-existent camelCase ``getMaximumFlow()``.
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        # ``algorithm`` is an executor class; instantiate it on this network.
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    """Base class for algorithms that run over a FlowNetwork."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        # BUG FIX: FlowNetwork exposes snake_case attributes; the original
        # read camelCase names (``verticesCount``, ``sourceIndex``,
        # ``sinkIndex``) that do not exist, raising AttributeError.
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        # run the algorithm at most once
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Executor whose result is a single maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push-relabel maximum-flow algorithm with relabel-to-front selection."""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        # preflow[u][v] is the (possibly negative) flow pushed u -> v
        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph: saturate every edge out of the source
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i not in {self.source_index, self.sink_index}
        ]

        # move through list; restart from the front whenever a vertex is relabeled
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        # net flow out of the source equals the maximum flow
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        # discharge: push while possible, then relabel, until excess is gone
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour (residual capacity left) and current
                # vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        # push as much as excess and residual capacity allow
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        # lift the vertex just above its lowest residual neighbour
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/dinic.py
graphs/dinic.py
INF = float("inf")


class Dinic:
    """Dinic's maximum-flow algorithm with capacity scaling.

    Each stored edge is a 4-item list: [destination, index of the reverse
    edge in the destination's adjacency list, capacity, current flow].
    """

    def __init__(self, n):
        # lvl: BFS level of each vertex for the current level graph
        # ptr: per-vertex pointer to the next edge to try during DFS
        # q:   preallocated BFS queue (n == number of vertices)
        # adj: adjacency lists of edge records
        self.lvl = [0] * n
        self.ptr = [0] * n
        self.q = [0] * n
        self.adj = [[] for _ in range(n)]

    def add_edge(self, a, b, c, rcap=0):
        """Add an edge a -> b with capacity ``c`` (and reverse capacity ``rcap``).

        Here we add our edges with the following parameters: vertex closest
        to source, vertex closest to sink and flow capacity through that edge.
        """
        self.adj[a].append([b, len(self.adj[b]), c, 0])
        self.adj[b].append([a, len(self.adj[a]) - 1, rcap, 0])

    # This is a sample depth first search to be used at max_flow
    def depth_first_search(self, vertex, sink, flow):
        # Push up to ``flow`` units along level-increasing edges; returns
        # the amount actually pushed (0 if no augmenting path from here).
        if vertex == sink or not flow:
            return flow

        for i in range(self.ptr[vertex], len(self.adj[vertex])):
            e = self.adj[vertex][i]
            if self.lvl[e[0]] == self.lvl[vertex] + 1:
                p = self.depth_first_search(e[0], sink, min(flow, e[2] - e[3]))
                if p:
                    # add flow on the forward edge, cancel it on the reverse
                    self.adj[vertex][i][3] += p
                    self.adj[e[0]][e[1]][3] -= p
                    return p
            # edge exhausted for this phase; never look at it again
            self.ptr[vertex] = self.ptr[vertex] + 1
        return 0

    # Here we calculate the flow that reaches the sink
    def max_flow(self, source, sink):
        flow, self.q[0] = 0, source
        # Capacity scaling: in round l only residual capacities of at least
        # 2**(30 - l) are considered when building the level graph.
        for l in range(31):  # l = 30 maybe faster for random data  # noqa: E741
            while True:
                # rebuild the level graph with a BFS from the source
                self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
                qi, qe, self.lvl[source] = 0, 1, 1
                while qi < qe and not self.lvl[sink]:
                    v = self.q[qi]
                    qi += 1
                    for e in self.adj[v]:
                        # only edges with residual capacity >= 2**(30 - l)
                        if not self.lvl[e[0]] and (e[2] - e[3]) >> (30 - l):
                            self.q[qe] = e[0]
                            qe += 1
                            self.lvl[e[0]] = self.lvl[v] + 1

                # saturate the level graph with blocking-flow DFS passes
                p = self.depth_first_search(source, sink, INF)
                while p:
                    flow += p
                    p = self.depth_first_search(source, sink, INF)

                # sink unreachable at this scale: move to the next round
                if not self.lvl[sink]:
                    break

        return flow


# Example to use:
# a bipartite graph with 4 vertices near the source and 4 near the sink.

# Here we make a graph with 10 vertices (source and sink included)
graph = Dinic(10)
source = 0
sink = 9

# connect the source to the source-side vertices with capacity 1
for vertex in range(1, 5):
    graph.add_edge(source, vertex, 1)

# same thing for the sink-side vertices, but from vertex to sink
for vertex in range(5, 9):
    graph.add_edge(vertex, sink, 1)

# finally connect the source-side vertices to the sink-side vertices
for vertex in range(1, 5):
    graph.add_edge(vertex, vertex + 4, 1)

# Now we can compute the maximum flow (source -> sink)
print(graph.max_flow(source, sink))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/greedy_min_vertex_cover.py
graphs/greedy_min_vertex_cover.py
""" * Author: Manuel Di Lullo (https://github.com/manueldilullo) * Description: Approximization algorithm for minimum vertex cover problem. Greedy Approach. Uses graphs represented with an adjacency list URL: https://mathworld.wolfram.com/MinimumVertexCover.html URL: https://cs.stackexchange.com/questions/129017/greedy-algorithm-for-vertex-cover """ import heapq def greedy_min_vertex_cover(graph: dict) -> set[int]: """ Greedy APX Algorithm for min Vertex Cover @input: graph (graph stored in an adjacency list where each vertex is represented with an integer) @example: >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} >>> greedy_min_vertex_cover(graph) {0, 1, 2, 4} """ # queue used to store nodes and their rank queue: list[list] = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(queue, [-1 * len(value), (key, value)]) # chosen_vertices = set of chosen vertices chosen_vertices = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices argmax = heapq.heappop(queue)[1][0] chosen_vertices.add(argmax) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: index = elem[1][1].index(argmax) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(queue) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/basic_graphs.py
graphs/basic_graphs.py
from collections import deque


def _input(message):
    # Prompt once on stdin and split the answer on single spaces.
    return input(message).strip().split(" ")


def initialize_unweighted_directed_graph(
    node_count: int, edge_count: int
) -> dict[int, list[int]]:
    """Interactively read directed edges into a 1-indexed adjacency list."""
    graph: dict[int, list[int]] = {}
    for i in range(node_count):
        graph[i + 1] = []

    for e in range(edge_count):
        x, y = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> "))
        graph[x].append(y)
    return graph


def initialize_unweighted_undirected_graph(
    node_count: int, edge_count: int
) -> dict[int, list[int]]:
    """Interactively read undirected edges into a 1-indexed adjacency list."""
    graph: dict[int, list[int]] = {}
    for i in range(node_count):
        graph[i + 1] = []

    for e in range(edge_count):
        x, y = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> "))
        graph[x].append(y)
        graph[y].append(x)
    return graph


def initialize_weighted_undirected_graph(
    node_count: int, edge_count: int
) -> dict[int, list[tuple[int, int]]]:
    """Interactively read weighted undirected edges; lists hold (node, weight)."""
    graph: dict[int, list[tuple[int, int]]] = {}
    for i in range(node_count):
        graph[i + 1] = []

    for e in range(edge_count):
        x, y, w = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> <weight> "))
        graph[x].append((y, w))
        graph[y].append((x, w))
    return graph


if __name__ == "__main__":
    n, m = (int(i) for i in _input("Number of nodes and edges: "))

    graph_choice = int(
        _input(
            "Press 1 or 2 or 3 \n"
            "1. Unweighted directed \n"
            "2. Unweighted undirected \n"
            "3. Weighted undirected \n"
        )[0]
    )

    g = {
        1: initialize_unweighted_directed_graph,
        2: initialize_unweighted_undirected_graph,
        3: initialize_weighted_undirected_graph,
    }[graph_choice](n, m)


# ----------------------------------------------------------------------------
# Depth First Search.
#   g: dictionary of adjacency lists, s: starting node.
# ----------------------------------------------------------------------------


def dfs(g, s):
    """
    Iterative DFS with an explicit stack; prints nodes in visit order.

    >>> dfs({1: [2, 3], 2: [4, 5], 3: [], 4: [], 5: []}, 1)
    1
    2
    4
    5
    3
    """
    vis, _s = {s}, [s]
    print(s)
    while _s:
        flag = 0
        for i in g[_s[-1]]:
            if i not in vis:
                _s.append(i)
                vis.add(i)
                flag = 1
                print(i)
                break
        if not flag:
            # dead end: backtrack
            _s.pop()


# ----------------------------------------------------------------------------
# Breadth First Search.
# ----------------------------------------------------------------------------


def bfs(g, s):
    """
    BFS from s; prints nodes in the order they are discovered.

    >>> bfs({1: [2, 3], 2: [4, 5], 3: [6, 7], 4: [], 5: [8], 6: [], 7: [], 8: []}, 1)
    1
    2
    3
    4
    5
    6
    7
    8
    """
    vis, q = {s}, deque([s])
    print(s)
    while q:
        u = q.popleft()
        for v in g[u]:
            if v not in vis:
                vis.add(v)
                q.append(v)
                print(v)


# ----------------------------------------------------------------------------
# Dijkstra's shortest path algorithm.
#   Prints the distance from s to every other node (edges are (node, weight)).
# ----------------------------------------------------------------------------


def dijk(g, s):
    """
    >>> dijk({1: [(2, 7), (3, 9), (6, 14)], 2: [(1, 7), (3, 10), (4, 15)],
    ...       3: [(1, 9), (2, 10), (4, 11), (6, 2)], 4: [(2, 15), (3, 11), (5, 6)],
    ...       5: [(4, 6), (6, 9)], 6: [(1, 14), (3, 2), (5, 9)]}, 1)
    7
    9
    11
    20
    20
    """
    dist, known, path = {s: 0}, set(), {s: 0}
    while True:
        if len(known) == len(g) - 1:
            break
        mini = 100000
        # BUG FIX: the original wrote ``for key, value in dist`` which tries
        # to unpack the dictionary *keys* and raises TypeError; use items().
        for key, value in dist.items():
            if key not in known and value < mini:
                mini = value
                u = key
        known.add(u)
        for v in g[u]:
            if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000):
                dist[v[0]] = dist[u] + v[1]
                path[v[0]] = u
    for key, value in dist.items():
        if key != s:
            print(value)


# ----------------------------------------------------------------------------
# Topological Sort (Kahn's algorithm); prints one vertex per line.
# ----------------------------------------------------------------------------


def topo(g, ind=None, q=None):
    """
    >>> topo({1: [2, 3], 2: [4], 3: [4], 4: []})
    1
    2
    3
    4
    """
    # BUG FIX: the original recursive version recomputed *all* in-degrees and
    # reset its queue on every call, so the counts were wrong after the first
    # vertex.  This iterative Kahn's algorithm keeps the same signature.
    if ind is None:
        ind = [0] * (len(g) + 1)  # 0th index ignored; vertices are 1-based
        for u in g:
            for v in g[u]:
                ind[v] += 1
    if q is None:
        q = deque(i for i in g if ind[i] == 0)
    while q:
        v = q.popleft()
        print(v)
        for w in g[v]:
            ind[w] -= 1
            if ind[w] == 0:
                q.append(w)


# ----------------------------------------------------------------------------
# Reading an adjacency matrix from stdin.
# ----------------------------------------------------------------------------


def adjm():
    r"""
    Reading an Adjacency matrix

    Returns:
        tuple: A tuple containing a list of matrix rows and the matrix size

    Example:
    >>> # Simulate user input for a 4x4 matrix
    >>> input_data = "4\n0 1 0 1\n1 0 1 0\n0 1 0 1\n1 0 1 0\n"
    >>> import sys,io
    >>> original_input = sys.stdin
    >>> sys.stdin = io.StringIO(input_data)  # Redirect stdin for testing
    >>> adjm()
    ([(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)], 4)
    >>> sys.stdin = original_input  # Restore original stdin
    """
    n = int(input().strip())
    a = []
    for _ in range(n):
        a.append(tuple(map(int, input().strip().split())))
    return a, n


# ----------------------------------------------------------------------------
# Floyd Warshall's algorithm: prints the all-pairs shortest-distance matrix.
# ----------------------------------------------------------------------------


def floy(a_and_n):
    """
    >>> floy(([[0, 1, 4], [1, 0, 2], [4, 2, 0]], 3))
    [[0, 1, 3], [1, 0, 2], [3, 2, 0]]
    """
    (a, n) = a_and_n
    # BUG FIX: ``dist = list(a)`` copied only the outer list, so the rows
    # stayed tuples (as produced by adjm) and the item assignment below
    # raised TypeError; copy every row into a mutable list.
    dist = [list(row) for row in a]
    path = [[0] * n for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][j] > dist[i][k] + dist[k][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    path[i][k] = k
    print(dist)


# ----------------------------------------------------------------------------
# Prim's MST algorithm.
#   Returns {vertex: weight of the edge connecting it to the tree} (s maps to 0).
# ----------------------------------------------------------------------------


def prim(g, s):
    """
    >>> prim({1: [(2, 1), (3, 4)], 2: [(1, 1), (3, 2)], 3: [(1, 4), (2, 2)]}, 1)
    {1: 0, 2: 1, 3: 2}
    """
    dist, known, path = {s: 0}, set(), {s: 0}
    while True:
        if len(known) == len(g) - 1:
            break
        mini = 100000
        for key, value in dist.items():
            if key not in known and value < mini:
                mini = value
                u = key
        known.add(u)
        for v in g[u]:
            if v[0] not in known and v[1] < dist.get(v[0], 100000):
                dist[v[0]] = v[1]
                path[v[0]] = u
    return dist


# ----------------------------------------------------------------------------
# Accepting an edge list from stdin.
# ----------------------------------------------------------------------------


def edglist():
    r"""
    Get the edges and number of nodes from the user

    Returns:
        tuple: A tuple containing a list of edges and number of nodes

    Example:
    >>> # Simulate user input for 3 edges and 4 vertices: (1, 2), (2, 3), (3, 4)
    >>> input_data = "4 3\n1 2\n2 3\n3 4\n"
    >>> import sys,io
    >>> original_input = sys.stdin
    >>> sys.stdin = io.StringIO(input_data)  # Redirect stdin for testing
    >>> edglist()
    ([(1, 2), (2, 3), (3, 4)], 4)
    >>> sys.stdin = original_input  # Restore original stdin
    """
    n, m = tuple(map(int, input().split(" ")))
    edges = []
    for _ in range(m):
        edges.append(tuple(map(int, input().split(" "))))
    return edges, n


# ----------------------------------------------------------------------------
# Kruskal's MST algorithm.
#   e_and_n = (edge list, number of nodes); NOTE(review): edges must be
#   (u, v, weight) triples — edglist() above produces unweighted pairs, so
#   its output is NOT directly usable here.
# ----------------------------------------------------------------------------


def krusk(e_and_n):
    """Merge disjoint vertex sets along the lightest remaining edge."""
    (e, n) = e_and_n
    # sort edges by weight, descending, so pop() yields the lightest edge
    e.sort(reverse=True, key=lambda x: x[2])
    s = [{i} for i in range(1, n + 1)]
    while True:
        if len(s) == 1:
            break
        # progress/debug output kept from the original implementation
        print(s)
        x = e.pop()
        for i in range(len(s)):
            if x[0] in s[i]:
                break
        for j in range(len(s)):
            if x[1] in s[j]:
                if i == j:
                    # both endpoints already in the same component: skip edge
                    break
                s[j].update(s[i])
                s.pop(i)
                break


def find_isolated_nodes(graph):
    """
    Find the isolated nodes (empty adjacency list) in the graph

    Parameters:
    graph (dict): A dictionary representing a graph.

    Returns:
    list: A list of isolated nodes.

    Examples:
    >>> graph1 = {1: [2, 3], 2: [1, 3], 3: [1, 2], 4: []}
    >>> find_isolated_nodes(graph1)
    [4]

    >>> graph2 = {'A': ['B', 'C'], 'B': ['A'], 'C': ['A'], 'D': []}
    >>> find_isolated_nodes(graph2)
    ['D']

    >>> graph3 = {'X': [], 'Y': [], 'Z': []}
    >>> find_isolated_nodes(graph3)
    ['X', 'Y', 'Z']

    >>> graph4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    >>> find_isolated_nodes(graph4)
    []

    >>> graph5 = {}
    >>> find_isolated_nodes(graph5)
    []
    """
    isolated = []
    for node in graph:
        if not graph[node]:
            isolated.append(node)
    return isolated
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/g_topological_sort.py
graphs/g_topological_sort.py
# Author: Phyllipe Bezerra (https://github.com/pmba) clothes = { 0: "underwear", 1: "pants", 2: "belt", 3: "suit", 4: "shoe", 5: "socks", 6: "shirt", 7: "tie", 8: "watch", } graph = [[1, 4], [2, 4], [3], [], [], [4], [2, 7], [3], []] visited = [0 for x in range(len(graph))] stack = [] def print_stack(stack, clothes): order = 1 while stack: current_clothing = stack.pop() print(order, clothes[current_clothing]) order += 1 def depth_first_search(u, visited, graph): visited[u] = 1 for v in graph[u]: if not visited[v]: depth_first_search(v, visited, graph) stack.append(u) def topological_sort(graph, visited): for v in range(len(graph)): if not visited[v]: depth_first_search(v, visited, graph) if __name__ == "__main__": topological_sort(graph, visited) print(stack) print_stack(stack, clothes)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/breadth_first_search_shortest_path.py
graphs/breadth_first_search_shortest_path.py
"""Breath First Search (BFS) can be used when finding the shortest path from a given source node to a target node in an unweighted graph. """ from __future__ import annotations graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } class Graph: def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None: """ Graph is implemented as dictionary of adjacency lists. Also, Source vertex have to be defined upon initialization. """ self.graph = graph # mapping node to its parent in resulting breadth first tree self.parent: dict[str, str | None] = {} self.source_vertex = source_vertex def breath_first_search(self) -> None: """ This function is a helper for running breath first search on this graph. >>> g = Graph(graph, "G") >>> g.breath_first_search() >>> g.parent {'G': None, 'C': 'G', 'A': 'C', 'F': 'C', 'B': 'A', 'E': 'A', 'D': 'B'} """ visited = {self.source_vertex} self.parent[self.source_vertex] = None queue = [self.source_vertex] # first in first out queue while queue: vertex = queue.pop(0) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(adjacent_vertex) self.parent[adjacent_vertex] = vertex queue.append(adjacent_vertex) def shortest_path(self, target_vertex: str) -> str: """ This shortest path function returns a string, describing the result: 1.) No path is found. The string is a human readable message to indicate this. 2.) The shortest path is found. The string is in the form `v1(->v2->v3->...->vn)`, where v1 is the source vertex and vn is the target vertex, if it exists separately. >>> g = Graph(graph, "G") >>> g.breath_first_search() Case 1 - No path is found. >>> g.shortest_path("Foo") Traceback (most recent call last): ... ValueError: No path from vertex: G to vertex: Foo Case 2 - The path is found. 
>>> g.shortest_path("D") 'G->C->A->B->D' >>> g.shortest_path("G") 'G' """ if target_vertex == self.source_vertex: return self.source_vertex target_vertex_parent = self.parent.get(target_vertex) if target_vertex_parent is None: msg = ( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) raise ValueError(msg) return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" if __name__ == "__main__": g = Graph(graph, "G") g.breath_first_search() print(g.shortest_path("D")) print(g.shortest_path("G")) print(g.shortest_path("Foo"))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/connected_components.py
graphs/connected_components.py
""" https://en.wikipedia.org/wiki/Component_(graph_theory) Finding connected components in graph """ test_graph_1 = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1], 4: [5, 6], 5: [4, 6], 6: [4, 5]} test_graph_2 = {0: [1, 2, 3], 1: [0, 3], 2: [0], 3: [0, 1], 4: [], 5: []} def dfs(graph: dict, vert: int, visited: list) -> list: """ Use depth first search to find all vertices being in the same component as initial vertex >>> dfs(test_graph_1, 0, 5 * [False]) [0, 1, 3, 2] >>> dfs(test_graph_2, 0, 6 * [False]) [0, 1, 3, 2] """ visited[vert] = True connected_verts = [] for neighbour in graph[vert]: if not visited[neighbour]: connected_verts += dfs(graph, neighbour, visited) return [vert, *connected_verts] def connected_components(graph: dict) -> list: """ This function takes graph as a parameter and then returns the list of connected components >>> connected_components(test_graph_1) [[0, 1, 3, 2], [4, 5, 6]] >>> connected_components(test_graph_2) [[0, 1, 3, 2], [4], [5]] """ graph_size = len(graph) visited = graph_size * [False] components_list = [] for i in range(graph_size): if not visited[i]: i_connected = dfs(graph, i, visited) components_list.append(i_connected) return components_list if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/gale_shapley_bigraph.py
graphs/gale_shapley_bigraph.py
from __future__ import annotations def stable_matching( donor_pref: list[list[int]], recipient_pref: list[list[int]] ) -> list[int]: """ Finds the stable match in any bipartite graph, i.e a pairing where no 2 objects prefer each other over their partner. The function accepts the preferences of oegan donors and recipients (where both are assigned numbers from 0 to n-1) and returns a list where the index position corresponds to the donor and value at the index is the organ recipient. To better understand the algorithm, see also: https://github.com/akashvshroff/Gale_Shapley_Stable_Matching (README). https://www.youtube.com/watch?v=Qcv1IqHWAzg&t=13s (Numberphile YouTube). >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]] >>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]] >>> stable_matching(donor_pref, recipient_pref) [1, 2, 3, 0] """ assert len(donor_pref) == len(recipient_pref) n = len(donor_pref) unmatched_donors = list(range(n)) donor_record = [-1] * n # who the donor has donated to rec_record = [-1] * n # who the recipient has received from num_donations = [0] * n while unmatched_donors: donor = unmatched_donors[0] donor_preference = donor_pref[donor] recipient = donor_preference[num_donations[donor]] num_donations[donor] += 1 rec_preference = recipient_pref[recipient] prev_donor = rec_record[recipient] if prev_donor != -1: if rec_preference.index(prev_donor) > rec_preference.index(donor): rec_record[recipient] = donor donor_record[donor] = recipient unmatched_donors.append(prev_donor) unmatched_donors.remove(donor) else: rec_record[recipient] = donor donor_record[donor] = recipient unmatched_donors.remove(donor) return donor_record
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/depth_first_search_2.py
graphs/depth_first_search_2.py
#!/usr/bin/python

"""Author: OMKAR PATHAK"""


class Graph:
    """Directed graph stored as an adjacency dict.

    NOTE(review): ``dfs`` assumes vertices are labelled 0..n-1 where n is
    the number of *source* vertices (keys), as in the examples below —
    confirm before using with sparse/arbitrary labels.
    """

    def __init__(self):
        # vertex -> list of vertices reachable by one outgoing edge
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        """
        Print the graph vertices.

        Example:
        >>> g = Graph()
        >>> g.add_edge(0, 1)
        >>> g.add_edge(0, 2)
        >>> g.add_edge(1, 2)
        >>> g.add_edge(2, 0)
        >>> g.add_edge(2, 3)
        >>> g.add_edge(3, 3)
        >>> g.print_graph()
        {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
        0  ->  1 -> 2
        1  ->  2
        2  ->  0 -> 3
        3  ->  3
        """
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """
        Add an edge between two vertices.

        :param from_vertex: The source vertex.
        :param to_vertex: The destination vertex.

        Example:
        >>> g = Graph()
        >>> g.add_edge(0, 1)
        >>> g.add_edge(0, 2)
        >>> g.print_graph()
        {0: [1, 2]}
        0  ->  1 -> 2
        """
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """
        Perform depth-first search (DFS) traversal on the graph
        and print the visited vertices.

        Example:
        >>> g = Graph()
        >>> g.add_edge(0, 1)
        >>> g.add_edge(0, 2)
        >>> g.add_edge(1, 2)
        >>> g.add_edge(2, 0)
        >>> g.add_edge(2, 3)
        >>> g.add_edge(3, 3)
        >>> g.dfs()
        0 1 2 3
        """
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function for every unreached vertex
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        """
        Perform a recursive depth-first search (DFS) traversal on the graph.

        :param start_vertex: The starting vertex for the traversal.
        :param visited: A list to track visited vertices.

        Example:
        >>> g = Graph()
        >>> g.add_edge(0, 1)
        >>> g.add_edge(0, 2)
        >>> g.add_edge(1, 2)
        >>> g.add_edge(2, 0)
        >>> g.add_edge(2, 3)
        >>> g.add_edge(3, 3)
        >>> visited = [False] * len(g.vertex)
        >>> g.dfs_recursive(0, visited)
        0 1 2 3
        """
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end="")

        # BUG FIX: the original iterated over *all* graph vertices
        # (``for i in self.vertex``) instead of the neighbours of
        # start_vertex, so the traversal ignored the edges entirely.
        # ``.get`` guards against vertices that have no outgoing edges.
        for i in self.vertex.get(start_vertex, []):
            if not visited[i]:
                print(" ", end="")
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/greedy_best_first.py
graphs/greedy_best_first.py
""" https://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS """ from __future__ import annotations Path = list[tuple[int, int]] # 0's are free path whereas 1's are obstacles TEST_GRIDS = [ [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ], [ [0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 0, 1], [0, 0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 1, 0, 0], [1, 0, 0, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0], ], [ [0, 0, 1, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 1], [1, 0, 0, 1, 1], [0, 0, 0, 0, 0], ], ] delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class Node: """ >>> k = Node(0, 0, 4, 5, 0, None) >>> k.calculate_heuristic() 9 >>> n = Node(1, 4, 3, 4, 2, None) >>> n.calculate_heuristic() 2 >>> l = [k, n] >>> n == l[0] False >>> l.sort() >>> n == l[0] True """ def __init__( self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None, ): self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) self.goal_x = goal_x self.goal_y = goal_y self.g_cost = g_cost self.parent = parent self.f_cost = self.calculate_heuristic() def calculate_heuristic(self) -> float: """ The heuristic here is the Manhattan Distance Could elaborate to offer more than one choice """ dx = abs(self.pos_x - self.goal_x) dy = abs(self.pos_y - self.goal_y) return dx + dy def __lt__(self, other) -> bool: return self.f_cost < other.f_cost def __eq__(self, other) -> bool: return self.pos == other.pos class GreedyBestFirst: """ >>> grid = TEST_GRIDS[2] >>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1)) >>> [x.pos for x in gbf.get_successors(gbf.start)] [(1, 0), (0, 1)] >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1]) (0, 1) >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1]) (1, 0) >>> gbf.retrace_path(gbf.start) [(0, 0)] >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE [(0, 0), (1, 0), (2, 0), (2, 1), (3, 
1), (4, 1), (4, 2), (4, 3), (4, 4)] """ def __init__( self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int] ): self.grid = grid self.start = Node(start[1], start[0], goal[1], goal[0], 0, None) self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None) self.open_nodes = [self.start] self.closed_nodes: list[Node] = [] self.reached = False def search(self) -> Path | None: """ Search for the path, if a path is not found, only the starting position is returned """ while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() current_node = self.open_nodes.pop(0) if current_node.pos == self.target.pos: self.reached = True return self.retrace_path(current_node) self.closed_nodes.append(current_node) successors = self.get_successors(current_node) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(child_node) if not self.reached: return [self.start.pos] return None def get_successors(self, parent: Node) -> list[Node]: """ Returns a list of successors (both in the grid and free spaces) """ return [ Node( pos_x, pos_y, self.target.pos_x, self.target.pos_y, parent.g_cost + 1, parent, ) for action in delta if ( 0 <= (pos_x := parent.pos_x + action[1]) < len(self.grid[0]) and 0 <= (pos_y := parent.pos_y + action[0]) < len(self.grid) and self.grid[pos_y][pos_x] == 0 ) ] def retrace_path(self, node: Node | None) -> Path: """ Retrace the path from parents to parents until start node """ current_node = node path = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) current_node = current_node.parent path.reverse() return path if __name__ == "__main__": for idx, grid in enumerate(TEST_GRIDS): print(f"==grid-{idx + 1}==") init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("------") greedy_bf = GreedyBestFirst(grid, init, goal) path = greedy_bf.search() if path: for pos_x, 
pos_y in path: grid[pos_x][pos_y] = 2 for elem in grid: print(elem)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/minimum_path_sum.py
graphs/minimum_path_sum.py
def min_path_sum(grid: list) -> int: """ Find the path from top left to bottom right of array of numbers with the lowest possible sum and return the sum along this path. >>> min_path_sum([ ... [1, 3, 1], ... [1, 5, 1], ... [4, 2, 1], ... ]) 7 >>> min_path_sum([ ... [1, 0, 5, 6, 7], ... [8, 9, 0, 4, 2], ... [4, 4, 4, 5, 1], ... [9, 6, 3, 1, 0], ... [8, 4, 3, 2, 7], ... ]) 20 >>> min_path_sum(None) Traceback (most recent call last): ... TypeError: The grid does not contain the appropriate information >>> min_path_sum([[]]) Traceback (most recent call last): ... TypeError: The grid does not contain the appropriate information """ if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information") for cell_n in range(1, len(grid[0])): grid[0][cell_n] += grid[0][cell_n - 1] row_above = grid[0] for row_n in range(1, len(grid)): current_row = grid[row_n] grid[row_n] = fill_row(current_row, row_above) row_above = grid[row_n] return grid[-1][-1] def fill_row(current_row: list, row_above: list) -> list: """ >>> fill_row([2, 2, 2], [1, 2, 3]) [3, 4, 5] """ current_row[0] += row_above[0] for cell_n in range(1, len(current_row)): current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n]) return current_row if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/bidirectional_search.py
graphs/bidirectional_search.py
""" Bidirectional Search Algorithm. This algorithm searches from both the source and target nodes simultaneously, meeting somewhere in the middle. This approach can significantly reduce the search space compared to a traditional one-directional search. Time Complexity: O(b^(d/2)) where b is the branching factor and d is the depth Space Complexity: O(b^(d/2)) https://en.wikipedia.org/wiki/Bidirectional_search """ from collections import deque def expand_search( graph: dict[int, list[int]], queue: deque[int], parents: dict[int, int | None], opposite_direction_parents: dict[int, int | None], ) -> int | None: if not queue: return None current = queue.popleft() for neighbor in graph[current]: if neighbor in parents: continue parents[neighbor] = current queue.append(neighbor) # Check if this creates an intersection if neighbor in opposite_direction_parents: return neighbor return None def construct_path(current: int | None, parents: dict[int, int | None]) -> list[int]: path: list[int] = [] while current is not None: path.append(current) current = parents[current] return path def bidirectional_search( graph: dict[int, list[int]], start: int, goal: int ) -> list[int] | None: """ Perform bidirectional search on a graph to find the shortest path. Args: graph: A dictionary where keys are nodes and values are lists of adjacent nodes start: The starting node goal: The target node Returns: A list representing the path from start to goal, or None if no path exists Examples: >>> graph = { ... 0: [1, 2], ... 1: [0, 3, 4], ... 2: [0, 5, 6], ... 3: [1, 7], ... 4: [1, 8], ... 5: [2, 9], ... 6: [2, 10], ... 7: [3, 11], ... 8: [4, 11], ... 9: [5, 11], ... 10: [6, 11], ... 11: [7, 8, 9, 10], ... } >>> bidirectional_search(graph=graph, start=0, goal=11) [0, 1, 3, 7, 11] >>> bidirectional_search(graph=graph, start=5, goal=5) [5] >>> disconnected_graph = { ... 0: [1, 2], ... 1: [0], ... 2: [0], ... 3: [4], ... 4: [3], ... 
} >>> bidirectional_search(graph=disconnected_graph, start=0, goal=3) is None True """ if start == goal: return [start] # Check if start and goal are in the graph if start not in graph or goal not in graph: return None # Initialize forward and backward search dictionaries # Each maps a node to its parent in the search forward_parents: dict[int, int | None] = {start: None} backward_parents: dict[int, int | None] = {goal: None} # Initialize forward and backward search queues forward_queue = deque([start]) backward_queue = deque([goal]) # Intersection node (where the two searches meet) intersection = None # Continue until both queues are empty or an intersection is found while forward_queue and backward_queue and intersection is None: # Expand forward search intersection = expand_search( graph=graph, queue=forward_queue, parents=forward_parents, opposite_direction_parents=backward_parents, ) # If no intersection found, expand backward search if intersection is not None: break intersection = expand_search( graph=graph, queue=backward_queue, parents=backward_parents, opposite_direction_parents=forward_parents, ) # If no intersection found, there's no path if intersection is None: return None # Construct path from start to intersection forward_path: list[int] = construct_path( current=intersection, parents=forward_parents ) forward_path.reverse() # Construct path from intersection to goal backward_path: list[int] = construct_path( current=backward_parents[intersection], parents=backward_parents ) # Return the complete path return forward_path + backward_path def main() -> None: """ Run example of bidirectional search algorithm. 
Examples: >>> main() # doctest: +NORMALIZE_WHITESPACE Path from 0 to 11: [0, 1, 3, 7, 11] Path from 5 to 5: [5] Path from 0 to 3: None """ # Example graph represented as an adjacency list example_graph = { 0: [1, 2], 1: [0, 3, 4], 2: [0, 5, 6], 3: [1, 7], 4: [1, 8], 5: [2, 9], 6: [2, 10], 7: [3, 11], 8: [4, 11], 9: [5, 11], 10: [6, 11], 11: [7, 8, 9, 10], } # Test case 1: Path exists start, goal = 0, 11 path = bidirectional_search(graph=example_graph, start=start, goal=goal) print(f"Path from {start} to {goal}: {path}") # Test case 2: Start and goal are the same start, goal = 5, 5 path = bidirectional_search(graph=example_graph, start=start, goal=goal) print(f"Path from {start} to {goal}: {path}") # Test case 3: No path exists (disconnected graph) disconnected_graph = { 0: [1, 2], 1: [0], 2: [0], 3: [4], 4: [3], } start, goal = 0, 3 path = bidirectional_search(graph=disconnected_graph, start=start, goal=goal) print(f"Path from {start} to {goal}: {path}") if __name__ == "__main__": main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/deep_clone_graph.py
graphs/deep_clone_graph.py
""" LeetCode 133. Clone Graph https://leetcode.com/problems/clone-graph/ Given a reference of a node in a connected undirected graph. Return a deep copy (clone) of the graph. Each node in the graph contains a value (int) and a list (List[Node]) of its neighbors. """ from dataclasses import dataclass @dataclass class Node: value: int = 0 neighbors: list["Node"] | None = None def __post_init__(self) -> None: """ >>> Node(3).neighbors [] """ self.neighbors = self.neighbors or [] def __hash__(self) -> int: """ >>> hash(Node(3)) != 0 True """ return id(self) def clone_graph(node: Node | None) -> Node | None: """ This function returns a clone of a connected undirected graph. >>> clone_graph(Node(1)) Node(value=1, neighbors=[]) >>> clone_graph(Node(1, [Node(2)])) Node(value=1, neighbors=[Node(value=2, neighbors=[])]) >>> clone_graph(None) is None True """ if not node: return None originals_to_clones = {} # map nodes to clones stack = [node] while stack: original = stack.pop() if original in originals_to_clones: continue originals_to_clones[original] = Node(original.value) stack.extend(original.neighbors or []) for original, clone in originals_to_clones.items(): for neighbor in original.neighbors or []: cloned_neighbor = originals_to_clones[neighbor] if not clone.neighbors: clone.neighbors = [] clone.neighbors.append(cloned_neighbor) return originals_to_clones[node] if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/bidirectional_breadth_first_search.py
graphs/bidirectional_breadth_first_search.py
""" https://en.wikipedia.org/wiki/Bidirectional_search """ from __future__ import annotations import time Path = list[tuple[int, int]] grid = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class Node: def __init__( self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None ): self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) self.goal_x = goal_x self.goal_y = goal_y self.parent = parent class BreadthFirstSearch: """ # Comment out slow pytests... # 9.15s call graphs/bidirectional_breadth_first_search.py:: \ # graphs.bidirectional_breadth_first_search.BreadthFirstSearch # >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1)) # >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1]) (0, 1) # >>> [x.pos for x in bfs.get_successors(bfs.start)] [(1, 0), (0, 1)] # >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1]) (1, 0) # >>> bfs.retrace_path(bfs.start) [(0, 0)] # >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)] """ def __init__(self, start: tuple[int, int], goal: tuple[int, int]): self.start = Node(start[1], start[0], goal[1], goal[0], None) self.target = Node(goal[1], goal[0], goal[1], goal[0], None) self.node_queue = [self.start] self.reached = False def search(self) -> Path | None: while self.node_queue: current_node = self.node_queue.pop(0) if current_node.pos == self.target.pos: self.reached = True return self.retrace_path(current_node) successors = self.get_successors(current_node) for node in successors: self.node_queue.append(node) if not self.reached: return [self.start.pos] return None def get_successors(self, parent: Node) -> list[Node]: """ Returns a list 
of successors (both in the grid and free spaces) """ successors = [] for action in delta: pos_x = parent.pos_x + action[1] pos_y = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent) ) return successors def retrace_path(self, node: Node | None) -> Path: """ Retrace the path from parents to parents until start node """ current_node = node path = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) current_node = current_node.parent path.reverse() return path class BidirectionalBreadthFirstSearch: """ >>> bd_bfs = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1, ... len(grid[0]) - 1)) >>> bd_bfs.fwd_bfs.start.pos == bd_bfs.bwd_bfs.target.pos True >>> bd_bfs.retrace_bidirectional_path(bd_bfs.fwd_bfs.start, ... bd_bfs.bwd_bfs.start) [(0, 0)] >>> bd_bfs.search() # doctest: +NORMALIZE_WHITESPACE [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 3), (2, 4), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (6, 6)] """ def __init__(self, start, goal): self.fwd_bfs = BreadthFirstSearch(start, goal) self.bwd_bfs = BreadthFirstSearch(goal, start) self.reached = False def search(self) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: current_fwd_node = self.fwd_bfs.node_queue.pop(0) current_bwd_node = self.bwd_bfs.node_queue.pop(0) if current_bwd_node.pos == current_fwd_node.pos: self.reached = True return self.retrace_bidirectional_path( current_fwd_node, current_bwd_node ) self.fwd_bfs.target = current_bwd_node self.bwd_bfs.target = current_fwd_node successors = { self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node), self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(node) if not self.reached: return [self.fwd_bfs.start.pos] return None def 
retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path: fwd_path = self.fwd_bfs.retrace_path(fwd_node) bwd_path = self.bwd_bfs.retrace_path(bwd_node) bwd_path.pop() bwd_path.reverse() path = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) start_bfs_time = time.time() bfs = BreadthFirstSearch(init, goal) path = bfs.search() bfs_time = time.time() - start_bfs_time print("Unidirectional BFS computation time : ", bfs_time) start_bd_bfs_time = time.time() bd_bfs = BidirectionalBreadthFirstSearch(init, goal) bd_path = bd_bfs.search() bd_bfs_time = time.time() - start_bd_bfs_time print("Bidirectional BFS computation time : ", bd_bfs_time)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/dijkstra_binary_grid.py
graphs/dijkstra_binary_grid.py
""" This script implements the Dijkstra algorithm on a binary grid. The grid consists of 0s and 1s, where 1 represents a walkable node and 0 represents an obstacle. The algorithm finds the shortest path from a start node to a destination node. Diagonal movement can be allowed or disallowed. """ from heapq import heappop, heappush import numpy as np def dijkstra( grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool, ) -> tuple[float | int, list[tuple[int, int]]]: """ Implements Dijkstra's algorithm on a binary grid. Args: grid (np.ndarray): A 2D numpy array representing the grid. 1 represents a walkable node and 0 represents an obstacle. source (Tuple[int, int]): A tuple representing the start node. destination (Tuple[int, int]): A tuple representing the destination node. allow_diagonal (bool): A boolean determining whether diagonal movements are allowed. Returns: Tuple[Union[float, int], List[Tuple[int, int]]]: The shortest distance from the start node to the destination node and the shortest path as a list of nodes. 
>>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False) (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]) >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), True) (2.0, [(0, 0), (1, 1), (2, 2)]) >>> dijkstra(np.array([[1, 1, 1], [0, 0, 1], [0, 1, 1]]), (0, 0), (2, 2), False) (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]) """ rows, cols = grid.shape dx = [-1, 1, 0, 0] dy = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] queue, visited = [(0, source)], set() matrix = np.full((rows, cols), np.inf) matrix[source] = 0 predecessors = np.empty((rows, cols), dtype=object) predecessors[source] = None while queue: (dist, (x, y)) = heappop(queue) if (x, y) in visited: continue visited.add((x, y)) if (x, y) == destination: path = [] while (x, y) != source: path.append((x, y)) x, y = predecessors[x, y] path.append(source) # add the source manually path.reverse() return float(matrix[destination]), path for i in range(len(dx)): nx, ny = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: next_node = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(queue, (dist + 1, (nx, ny))) matrix[nx, ny] = dist + 1 predecessors[nx, ny] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/frequent_pattern_graph_miner.py
graphs/frequent_pattern_graph_miner.py
""" FP-GraphMiner - A Fast Frequent Pattern Mining Algorithm for Network Graphs A novel Frequent Pattern Graph Mining algorithm, FP-GraphMiner, that compactly represents a set of network graphs as a Frequent Pattern Graph (or FP-Graph). This graph can be used to efficiently mine frequent subgraphs including maximal frequent subgraphs and maximum common subgraphs. URL: https://www.researchgate.net/publication/235255851 """ # fmt: off edge_array = [ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3'], ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'cd-e2', 'de-e1', 'df-e8', 'ef-e3', 'eg-e2', 'fg-e6'], ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2', 'eh-e12', 'fg-e6', 'fh-e10', 'gh-e6'], ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'], ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6'] ] # fmt: on def get_distinct_edge(edge_array): """ Return Distinct edges from edge array of multiple graphs >>> sorted(get_distinct_edge(edge_array)) ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] """ distinct_edge = set() for row in edge_array: for item in row: distinct_edge.add(item[0]) return list(distinct_edge) def get_bitcode(edge_array, distinct_edge): """ Return bitcode of distinct_edge """ bitcode = ["0"] * len(edge_array) for i, row in enumerate(edge_array): for item in row: if distinct_edge in item[0]: bitcode[i] = "1" break return "".join(bitcode) def get_frequency_table(edge_array): """ Returns Frequency Table """ distinct_edge = get_distinct_edge(edge_array) frequency_table = {} for item in distinct_edge: bit = get_bitcode(edge_array, item) # print('bit',bit) # bt=''.join(bit) s = bit.count("1") frequency_table[item] = [s, bit] # Store [Distinct edge, WT(Bitcode), Bitcode] in descending order sorted_frequency_table = [ [k, v[0], v[1]] 
for k, v in sorted(frequency_table.items(), key=lambda v: v[1][0], reverse=True) ] return sorted_frequency_table def get_nodes(frequency_table): """ Returns nodes format nodes={bitcode:edges that represent the bitcode} >>> get_nodes([['ab', 5, '11111'], ['ac', 5, '11111'], ['df', 5, '11111'], ... ['bd', 5, '11111'], ['bc', 5, '11111']]) {'11111': ['ab', 'ac', 'df', 'bd', 'bc']} """ nodes = {} for _, item in enumerate(frequency_table): nodes.setdefault(item[2], []).append(item[0]) return nodes def get_cluster(nodes): """ Returns cluster format cluster:{WT(bitcode):nodes with same WT} """ cluster = {} for key, value in nodes.items(): cluster.setdefault(key.count("1"), {})[key] = value return cluster def get_support(cluster): """ Returns support >>> get_support({5: {'11111': ['ab', 'ac', 'df', 'bd', 'bc']}, ... 4: {'11101': ['ef', 'eg', 'de', 'fg'], '11011': ['cd']}, ... 3: {'11001': ['ad'], '10101': ['dg']}, ... 2: {'10010': ['dh', 'bh'], '11000': ['be'], '10100': ['gh'], ... '10001': ['ce']}, ... 
1: {'00100': ['fh', 'eh'], '10000': ['hi']}}) [100.0, 80.0, 60.0, 40.0, 20.0] """ return [i * 100 / len(cluster) for i in cluster] def print_all() -> None: print("\nNodes\n") for key, value in nodes.items(): print(key, value) print("\nSupport\n") print(support) print("\n Cluster \n") for key, value in sorted(cluster.items(), reverse=True): print(key, value) print("\n Graph\n") for key, value in graph.items(): print(key, value) print("\n Edge List of Frequent subgraphs \n") for edge_list in freq_subgraph_edge_list: print(edge_list) def create_edge(nodes, graph, cluster, c1): """ create edge between the nodes """ for i in cluster[c1]: count = 0 c2 = c1 + 1 while c2 < max(cluster.keys()): for j in cluster[c2]: """ creates edge only if the condition satisfies """ if int(i, 2) & int(j, 2) == int(i, 2): if tuple(nodes[i]) in graph: graph[tuple(nodes[i])].append(nodes[j]) else: graph[tuple(nodes[i])] = [nodes[j]] count += 1 if count == 0: c2 = c2 + 1 else: break def construct_graph(cluster, nodes): x = cluster[max(cluster.keys())] cluster[max(cluster.keys()) + 1] = "Header" graph = {} for i in x: if (["Header"],) in graph: graph[(["Header"],)].append(x[i]) else: graph[(["Header"],)] = [x[i]] for i in x: graph[(x[i],)] = [["Header"]] i = 1 while i < max(cluster) - 1: create_edge(nodes, graph, cluster, i) i = i + 1 return graph def my_dfs(graph, start, end, path=None): """ find different DFS walk from given node to Header node """ path = (path or []) + [start] if start == end: paths.append(path) for node in graph[start]: if tuple(node) not in path: my_dfs(graph, tuple(node), end, path) def find_freq_subgraph_given_support(s, cluster, graph): """ find edges of multiple frequent subgraphs """ k = int(s / 100 * (len(cluster) - 1)) for i in cluster[k]: my_dfs(graph, tuple(cluster[k][i]), (["Header"],)) def freq_subgraphs_edge_list(paths): """ returns Edge list for frequent subgraphs """ freq_sub_el = [] for edges in paths: el = [] for j in range(len(edges) - 1): temp = 
list(edges[j]) for e in temp: edge = (e[0], e[1]) el.append(edge) freq_sub_el.append(el) return freq_sub_el def preprocess(edge_array): """ Preprocess the edge array >>> preprocess([['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', ... 'cd-e2', 'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', ... 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3']]) """ for i in range(len(edge_array)): for j in range(len(edge_array[i])): t = edge_array[i][j].split("-") edge_array[i][j] = t if __name__ == "__main__": preprocess(edge_array) frequency_table = get_frequency_table(edge_array) nodes = get_nodes(frequency_table) cluster = get_cluster(nodes) support = get_support(cluster) graph = construct_graph(cluster, nodes) find_freq_subgraph_given_support(60, cluster, graph) paths: list = [] freq_subgraph_edge_list = freq_subgraphs_edge_list(paths) print_all()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/check_cycle.py
graphs/check_cycle.py
""" Program to check if a cycle is present in a given graph """ def check_cycle(graph: dict) -> bool: """ Returns True if graph is cyclic else False >>> check_cycle(graph={0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]}) False >>> check_cycle(graph={0:[1, 2], 1:[2], 2:[0, 3], 3:[3]}) True """ # Keep track of visited nodes visited: set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack rec_stk: set[int] = set() return any( node not in visited and depth_first_search(graph, node, visited, rec_stk) for node in graph ) def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool: """ Recur for all neighbours. If any neighbour is visited and in rec_stk then graph is cyclic. >>> graph = {0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]} >>> vertex, visited, rec_stk = 0, set(), set() >>> depth_first_search(graph, vertex, visited, rec_stk) False """ # Mark current node as visited and add to recursion stack visited.add(vertex) rec_stk.add(vertex) for node in graph[vertex]: if node not in visited: if depth_first_search(graph, node, visited, rec_stk): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(vertex) return False if __name__ == "__main__": from doctest import testmod testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/tarjans_scc.py
graphs/tarjans_scc.py
from collections import deque def tarjan(g: list[list[int]]) -> list[list[int]]: """ Tarjan's algo for finding strongly connected components in a directed graph Uses two main attributes of each node to track reachability, the index of that node within a component(index), and the lowest index reachable from that node(lowlink). We then perform a dfs of the each component making sure to update these parameters for each node and saving the nodes we visit on the way. If ever we find that the lowest reachable node from a current node is equal to the index of the current node then it must be the root of a strongly connected component and so we save it and it's equireachable vertices as a strongly connected component. Complexity: strong_connect() is called at most once for each node and has a complexity of O(|E|) as it is DFS. Therefore this has complexity O(|V| + |E|) for a graph G = (V, E) >>> tarjan([[2, 3, 4], [2, 3, 4], [0, 1, 3], [0, 1, 2], [1]]) [[4, 3, 1, 2, 0]] >>> tarjan([[], [], [], []]) [[0], [1], [2], [3]] >>> a = [0, 1, 2, 3, 4, 5, 4] >>> b = [1, 0, 3, 2, 5, 4, 0] >>> n = 7 >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) == sorted( ... 
tarjan(create_graph(n, list(zip(a[::-1], b[::-1]))))) True >>> a = [0, 1, 2, 3, 4, 5, 6] >>> b = [0, 1, 2, 3, 4, 5, 6] >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) [[0], [1], [2], [3], [4], [5], [6]] """ n = len(g) stack: deque[int] = deque() on_stack = [False for _ in range(n)] index_of = [-1 for _ in range(n)] lowlink_of = index_of[:] def strong_connect(v: int, index: int, components: list[list[int]]) -> int: index_of[v] = index # the number when this node is seen lowlink_of[v] = index # lowest rank node reachable from here index += 1 stack.append(v) on_stack[v] = True for w in g[v]: if index_of[w] == -1: index = strong_connect(w, index, components) lowlink_of[v] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: lowlink_of[v] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: component = [] w = stack.pop() on_stack[w] = False component.append(w) while w != v: w = stack.pop() on_stack[w] = False component.append(w) components.append(component) return index components: list[list[int]] = [] for v in range(n): if index_of[v] == -1: strong_connect(v, 0, components) return components def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]: """ >>> n = 7 >>> source = [0, 0, 1, 2, 3, 3, 4, 4, 6] >>> target = [1, 3, 2, 0, 1, 4, 5, 6, 5] >>> edges = list(zip(source, target)) >>> create_graph(n, edges) [[1, 3], [2], [0], [1, 4], [5, 6], [], [5]] """ g: list[list[int]] = [[] for _ in range(n)] for u, v in edges: g[u].append(v) return g if __name__ == "__main__": # Test n_vertices = 7 source = [0, 0, 1, 2, 3, 3, 4, 4, 6] target = [1, 3, 2, 0, 1, 4, 5, 6, 5] edges = list(zip(source, target)) g = create_graph(n_vertices, edges) assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]]
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/prim.py
graphs/prim.py
"""Prim's Algorithm. Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm. Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm """ import heapq as hq import math from collections.abc import Iterator class Vertex: """Class Vertex.""" def __init__(self, id_): """ Arguments: id - input an id to identify the vertex Attributes: neighbors - a list of the vertices it is linked to edges - a dict to store the edges's weight """ self.id = str(id_) self.key = None self.pi = None self.neighbors = [] self.edges = {} # {vertex:distance} def __lt__(self, other): """Comparison rule to < operator.""" return self.key < other.key def __repr__(self): """Return the vertex id.""" return self.id def add_neighbor(self, vertex): """Add a pointer to a vertex at neighbor's list.""" self.neighbors.append(vertex) def add_edge(self, vertex, weight): """Destination vertex and weight.""" self.edges[vertex.id] = weight def connect(graph, a, b, edge): # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1]) graph[b - 1].add_neighbor(graph[a - 1]) # add the edges: graph[a - 1].add_edge(graph[b - 1], edge) graph[b - 1].add_edge(graph[a - 1], edge) def prim(graph: list, root: Vertex) -> list: """Prim's Algorithm. Runtime: O(mn) with `m` edges and `n` vertices Return: List with the edges of a Minimum Spanning Tree Usage: prim(graph, graph[0]) """ a = [] for u in graph: u.key = math.inf u.pi = None root.key = 0 q = graph[:] while q: u = min(q) q.remove(u) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): v.pi = u v.key = u.edges[v.id] for i in range(1, len(graph)): a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1)) return a def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]: """Prim's Algorithm with min heap. 
Runtime: O((m + n)log n) with `m` edges and `n` vertices Yield: Edges of a Minimum Spanning Tree Usage: prim(graph, graph[0]) """ for u in graph: u.key = math.inf u.pi = None root.key = 0 h = list(graph) hq.heapify(h) while h: u = hq.heappop(h) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): v.pi = u v.key = u.edges[v.id] hq.heapify(h) for i in range(1, len(graph)): yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1) def test_vector() -> None: """ # Creates a list to store x vertices. >>> x = 5 >>> G = [Vertex(n) for n in range(x)] >>> connect(G, 1, 2, 15) >>> connect(G, 1, 3, 12) >>> connect(G, 2, 4, 13) >>> connect(G, 2, 5, 5) >>> connect(G, 3, 2, 6) >>> connect(G, 3, 4, 6) >>> connect(G, 0, 0, 0) # Generate the minimum spanning tree: >>> G_heap = G[:] >>> MST = prim(G, G[0]) >>> MST_heap = prim_heap(G, G[0]) >>> for i in MST: ... print(i) (2, 3) (3, 1) (4, 3) (5, 2) >>> for i in MST_heap: ... print(i) (2, 3) (3, 1) (4, 3) (5, 2) """ if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/multi_heuristic_astar.py
graphs/multi_heuristic_astar.py
import heapq import sys import numpy as np TPos = tuple[int, int] class PriorityQueue: def __init__(self): self.elements = [] self.set = set() def minkey(self): if not self.empty(): return self.elements[0][0] else: return float("inf") def empty(self): return len(self.elements) == 0 def put(self, item, priority): if item not in self.set: heapq.heappush(self.elements, (priority, item)) self.set.add(item) else: # update # print("update", item) temp = [] (pri, x) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) (pri, x) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements, (pro, xxx)) def remove_element(self, item): if item in self.set: self.set.remove(item) temp = [] (pro, x) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) (pro, x) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements, (prito, yyy)) def top_show(self): return self.elements[0][1] def get(self): (priority, item) = heapq.heappop(self.elements) self.set.remove(item) return (priority, item) def consistent_heuristic(p: TPos, goal: TPos): # euclidean distance a = np.array(p) b = np.array(goal) return np.linalg.norm(a - b) def heuristic_2(p: TPos, goal: TPos): # integer division by time variable return consistent_heuristic(p, goal) // t def heuristic_1(p: TPos, goal: TPos): # manhattan distance return abs(p[0] - goal[0]) + abs(p[1] - goal[1]) def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]): ans = g_function[start] + W1 * heuristics[i](start, goal) return ans def do_something(back_pointer, goal, start): grid = np.char.chararray((n, n)) for i in range(n): for j in range(n): grid[i][j] = "*" for i in range(n): for j in range(n): if (j, (n - 1) - i) in blocks: grid[i][j] = "#" grid[0][(n - 1)] = "-" x = back_pointer[goal] while x != start: (x_c, y_c) = x # print(x) grid[(n - 1) - y_c][x_c] = "-" x = back_pointer[x] grid[(n - 1)][0] = "-" for i in range(n): for 
j in range(n): if (i, j) == (0, n - 1): print(grid[i][j], end=" ") print("<-- End position", end=" ") else: print(grid[i][j], end=" ") print() print("^") print("Start position") print() print("# is an obstacle") print("- is the path taken by algorithm") print("PATH TAKEN BY THE ALGORITHM IS:-") x = back_pointer[goal] while x != start: print(x, end=" ") x = back_pointer[x] print(x) sys.exit() def valid(p: TPos): if p[0] < 0 or p[0] > n - 1: return False return not (p[1] < 0 or p[1] > n - 1) def expand_state( s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ): for itera in range(n_heuristic): open_list[itera].remove_element(s) # print("s", s) # print("j", j) (x, y) = s left = (x - 1, y) right = (x + 1, y) up = (x, y + 1) down = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(neighbours) and neighbours not in visited: # print("neighbour", neighbours) visited.add(neighbours) back_pointer[neighbours] = -1 g_function[neighbours] = float("inf") if valid(neighbours) and g_function[neighbours] > g_function[s] + 1: g_function[neighbours] = g_function[s] + 1 back_pointer[neighbours] = s if neighbours not in close_list_anchor: open_list[0].put(neighbours, key(neighbours, 0, goal, g_function)) if neighbours not in close_list_inad: for var in range(1, n_heuristic): if key(neighbours, var, goal, g_function) <= W2 * key( neighbours, 0, goal, g_function ): open_list[j].put( neighbours, key(neighbours, var, goal, g_function) ) def make_common_ground(): some_list = [] for x in range(1, 5): for y in range(1, 6): some_list.append((x, y)) for x in range(15, 20): some_list.append((x, 17)) for x in range(10, 19): for y in range(1, 15): some_list.append((x, y)) # L block for x in range(1, 4): for y in range(12, 19): some_list.append((x, y)) for x in range(3, 13): for y in range(16, 19): some_list.append((x, y)) return some_list heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2} blocks_blk 
= [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] blocks_all = make_common_ground() blocks = blocks_blk # hyper parameters W1 = 1 W2 = 1 n = 20 n_heuristic = 3 # one consistent and two other inconsistent # start and end destination start = (0, 0) goal = (n - 1, n - 1) t = 1 def multi_a_star(start: TPos, goal: TPos, n_heuristic: int): g_function = {start: 0, goal: float("inf")} back_pointer = {start: -1, goal: -1} open_list = [] visited = set() for i in range(n_heuristic): open_list.append(PriorityQueue()) open_list[i].put(start, key(start, i, goal, g_function)) close_list_anchor: list[int] = [] close_list_inad: list[int] = [] while open_list[0].minkey() < float("inf"): for i in range(1, n_heuristic): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= W2 * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("inf"): do_something(back_pointer, goal, start) else: _, get_s = open_list[i].top_show() visited.add(get_s) expand_state( get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ) close_list_inad.append(get_s) elif g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("inf"): do_something(back_pointer, goal, start) else: get_s = open_list[0].top_show() visited.add(get_s) expand_state( get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ) close_list_anchor.append(get_s) print("No path found to goal") print() for i in range(n - 1, -1, -1): for j in range(n): if (j, i) in blocks: print("#", end=" ") elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("*", end=" ") else: print("-", end=" ") else: print("*", end=" ") if (j, i) == (n - 1, n - 1): print("<-- End position", end=" ") print() print("^") print("Start position") print() print("# is an 
obstacle") print("- is the path taken by algorithm") if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/articulation_points.py
graphs/articulation_points.py
# Finding Articulation Points in Undirected Graph def compute_ap(graph): n = len(graph) out_edge_count = 0 low = [0] * n visited = [False] * n is_art = [False] * n def dfs(root, at, parent, out_edge_count): if parent == root: out_edge_count += 1 visited[at] = True low[at] = at for to in graph[at]: if to == parent: pass elif not visited[to]: out_edge_count = dfs(root, to, at, out_edge_count) low[at] = min(low[at], low[to]) # AP found via bridge if at < low[to]: is_art[at] = True # AP found via cycle if at == low[to]: is_art[at] = True else: low[at] = min(low[at], to) return out_edge_count for i in range(n): if not visited[i]: out_edge_count = 0 out_edge_count = dfs(i, i, -1, out_edge_count) is_art[i] = out_edge_count > 1 for x in range(len(is_art)): if is_art[x] is True: print(x) # Adjacency list of graph graph = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(graph)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/breadth_first_search_shortest_path_2.py
graphs/breadth_first_search_shortest_path_2.py
"""Breadth-first search the shortest path implementations. doctest: python -m doctest -v breadth_first_search_shortest_path_2.py Manual test: python breadth_first_search_shortest_path_2.py """ from collections import deque demo_graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } def bfs_shortest_path(graph: dict, start, goal) -> list[str]: """Find the shortest path between `start` and `goal` nodes. Args: graph (dict): node/list of neighboring nodes key/value pairs. start: start node. goal: target node. Returns: Shortest path between `start` and `goal` nodes as a string of nodes. 'Not found' string if no path found. Example: >>> bfs_shortest_path(demo_graph, "G", "D") ['G', 'C', 'A', 'B', 'D'] >>> bfs_shortest_path(demo_graph, "G", "G") ['G'] >>> bfs_shortest_path(demo_graph, "G", "Unknown") [] """ # keep track of explored nodes explored = set() # keep track of all the paths to be checked queue = deque([[start]]) # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue path = queue.popleft() # get the last node from the path node = path[-1] if node not in explored: neighbours = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: new_path = list(path) new_path.append(neighbour) queue.append(new_path) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(node) # in case there's no path between the 2 nodes return [] def bfs_shortest_path_distance(graph: dict, start, target) -> int: """Find the shortest path distance between `start` and `target` nodes. Args: graph: node/list of neighboring nodes key/value pairs. start: node to start search from. target: node to search for. 
Returns: Number of edges in the shortest path between `start` and `target` nodes. -1 if no path exists. Example: >>> bfs_shortest_path_distance(demo_graph, "G", "D") 4 >>> bfs_shortest_path_distance(demo_graph, "A", "A") 0 >>> bfs_shortest_path_distance(demo_graph, "A", "Unknown") -1 """ if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 queue = deque([start]) visited = set(start) # Keep tab on distances from `start` node. dist = {start: 0, target: -1} while queue: node = queue.popleft() if node == target: dist[target] = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node]) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(adjacent) queue.append(adjacent) dist[adjacent] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/markov_chain.py
graphs/markov_chain.py
from __future__ import annotations from collections import Counter from random import random class MarkovChainGraphUndirectedUnweighted: """ Undirected Unweighted Graph for running Markov Chain Algorithm """ def __init__(self): self.connections = {} def add_node(self, node: str) -> None: self.connections[node] = {} def add_transition_probability( self, node1: str, node2: str, probability: float ) -> None: if node1 not in self.connections: self.add_node(node1) if node2 not in self.connections: self.add_node(node2) self.connections[node1][node2] = probability def get_nodes(self) -> list[str]: return list(self.connections) def transition(self, node: str) -> str: current_probability = 0 random_value = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def get_transitions( start: str, transitions: list[tuple[str, str, float]], steps: int ) -> dict[str, int]: """ Running Markov Chain algorithm and calculating the number of times each node is visited >>> transitions = [ ... ('a', 'a', 0.9), ... ('a', 'b', 0.075), ... ('a', 'c', 0.025), ... ('b', 'a', 0.15), ... ('b', 'b', 0.8), ... ('b', 'c', 0.05), ... ('c', 'a', 0.25), ... ('c', 'b', 0.25), ... ('c', 'c', 0.5) ... ] >>> result = get_transitions('a', transitions, 5000) >>> result['a'] > result['b'] > result['c'] True """ graph = MarkovChainGraphUndirectedUnweighted() for node1, node2, probability in transitions: graph.add_transition_probability(node1, node2, probability) visited = Counter(graph.get_nodes()) node = start for _ in range(steps): node = graph.transition(node) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/lanczos_eigenvectors.py
graphs/lanczos_eigenvectors.py
""" Lanczos Method for Finding Eigenvalues and Eigenvectors of a Graph. This module demonstrates the Lanczos method to approximate the largest eigenvalues and corresponding eigenvectors of a symmetric matrix represented as a graph's adjacency list. The method efficiently handles large, sparse matrices by converting the graph to a tridiagonal matrix, whose eigenvalues and eigenvectors are then computed. Key Functions: - `find_lanczos_eigenvectors`: Computes the k largest eigenvalues and vectors. - `lanczos_iteration`: Constructs the tridiagonal matrix and orthonormal basis vectors. - `multiply_matrix_vector`: Multiplies an adjacency list graph with a vector. Complexity: - Time: O(k * n), where k is the number of eigenvalues and n is the matrix size. - Space: O(n), due to sparse representation and tridiagonal matrix structure. Further Reading: - Lanczos Algorithm: https://en.wikipedia.org/wiki/Lanczos_algorithm - Eigenvector Centrality: https://en.wikipedia.org/wiki/Eigenvector_centrality Example Usage: Given a graph represented by an adjacency list, the `find_lanczos_eigenvectors` function returns the largest eigenvalues and eigenvectors. This can be used to analyze graph centrality. """ import numpy as np def validate_adjacency_list(graph: list[list[int | None]]) -> None: """Validates the adjacency list format for the graph. Args: graph: A list of lists where each sublist contains the neighbors of a node. Raises: ValueError: If the graph is not a list of lists, or if any node has invalid neighbors (e.g., out-of-range or non-integer values). >>> validate_adjacency_list([[1, 2], [0], [0, 1]]) >>> validate_adjacency_list([[]]) # No neighbors, valid case >>> validate_adjacency_list([[1], [2], [-1]]) # Invalid neighbor Traceback (most recent call last): ... ValueError: Invalid neighbor -1 in node 2 adjacency list. 
""" if not isinstance(graph, list): raise ValueError("Graph should be a list of lists.") for node_index, neighbors in enumerate(graph): if not isinstance(neighbors, list): no_neighbors_message: str = ( f"Node {node_index} should have a list of neighbors." ) raise ValueError(no_neighbors_message) for neighbor_index in neighbors: if ( not isinstance(neighbor_index, int) or neighbor_index < 0 or neighbor_index >= len(graph) ): invalid_neighbor_message: str = ( f"Invalid neighbor {neighbor_index} in node {node_index} " f"adjacency list." ) raise ValueError(invalid_neighbor_message) def lanczos_iteration( graph: list[list[int | None]], num_eigenvectors: int ) -> tuple[np.ndarray, np.ndarray]: """Constructs the tridiagonal matrix and orthonormal basis vectors using the Lanczos method. Args: graph: The graph represented as a list of adjacency lists. num_eigenvectors: The number of largest eigenvalues and eigenvectors to approximate. Returns: A tuple containing: - tridiagonal_matrix: A (num_eigenvectors x num_eigenvectors) symmetric matrix. - orthonormal_basis: A (num_nodes x num_eigenvectors) matrix of orthonormal basis vectors. Raises: ValueError: If num_eigenvectors is less than 1 or greater than the number of nodes. >>> graph = [[1, 2], [0, 2], [0, 1]] >>> T, Q = lanczos_iteration(graph, 2) >>> T.shape == (2, 2) and Q.shape == (3, 2) True """ num_nodes: int = len(graph) if not (1 <= num_eigenvectors <= num_nodes): raise ValueError( "Number of eigenvectors must be between 1 and the number of " "nodes in the graph." 
) orthonormal_basis: np.ndarray = np.zeros((num_nodes, num_eigenvectors)) tridiagonal_matrix: np.ndarray = np.zeros((num_eigenvectors, num_eigenvectors)) rng = np.random.default_rng() initial_vector: np.ndarray = rng.random(num_nodes) initial_vector /= np.sqrt(np.dot(initial_vector, initial_vector)) orthonormal_basis[:, 0] = initial_vector prev_beta: float = 0.0 for iter_index in range(num_eigenvectors): result_vector: np.ndarray = multiply_matrix_vector( graph, orthonormal_basis[:, iter_index] ) if iter_index > 0: result_vector -= prev_beta * orthonormal_basis[:, iter_index - 1] alpha_value: float = np.dot(orthonormal_basis[:, iter_index], result_vector) result_vector -= alpha_value * orthonormal_basis[:, iter_index] prev_beta = np.sqrt(np.dot(result_vector, result_vector)) if iter_index < num_eigenvectors - 1 and prev_beta > 1e-10: orthonormal_basis[:, iter_index + 1] = result_vector / prev_beta tridiagonal_matrix[iter_index, iter_index] = alpha_value if iter_index < num_eigenvectors - 1: tridiagonal_matrix[iter_index, iter_index + 1] = prev_beta tridiagonal_matrix[iter_index + 1, iter_index] = prev_beta return tridiagonal_matrix, orthonormal_basis def multiply_matrix_vector( graph: list[list[int | None]], vector: np.ndarray ) -> np.ndarray: """Performs multiplication of a graph's adjacency list representation with a vector. Args: graph: The adjacency list of the graph. vector: A 1D numpy array representing the vector to multiply. Returns: A numpy array representing the product of the adjacency list and the vector. Raises: ValueError: If the vector's length does not match the number of nodes in the graph. 
>>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([1, 1, 1])) array([2., 2., 2.]) >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([0, 1, 0])) array([1., 0., 1.]) """ num_nodes: int = len(graph) if vector.shape[0] != num_nodes: raise ValueError("Vector length must match the number of nodes in the graph.") result: np.ndarray = np.zeros(num_nodes) for node_index, neighbors in enumerate(graph): for neighbor_index in neighbors: result[node_index] += vector[neighbor_index] return result def find_lanczos_eigenvectors( graph: list[list[int | None]], num_eigenvectors: int ) -> tuple[np.ndarray, np.ndarray]: """Computes the largest eigenvalues and their corresponding eigenvectors using the Lanczos method. Args: graph: The graph as a list of adjacency lists. num_eigenvectors: Number of largest eigenvalues and eigenvectors to compute. Returns: A tuple containing: - eigenvalues: 1D array of the largest eigenvalues in descending order. - eigenvectors: 2D array where each column is an eigenvector corresponding to an eigenvalue. Raises: ValueError: If the graph format is invalid or num_eigenvectors is out of bounds. >>> eigenvalues, eigenvectors = find_lanczos_eigenvectors( ... [[1, 2], [0, 2], [0, 1]], 2 ... ) >>> len(eigenvalues) == 2 and eigenvectors.shape[1] == 2 True """ validate_adjacency_list(graph) tridiagonal_matrix, orthonormal_basis = lanczos_iteration(graph, num_eigenvectors) eigenvalues, eigenvectors = np.linalg.eigh(tridiagonal_matrix) return eigenvalues[::-1], np.dot(orthonormal_basis, eigenvectors[:, ::-1]) def main() -> None: """ Main driver function for testing the implementation with doctests. """ import doctest doctest.testmod() if __name__ == "__main__": main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/ant_colony_optimization_algorithms.py
graphs/ant_colony_optimization_algorithms.py
""" Use an ant colony optimization algorithm to solve the travelling salesman problem (TSP) which asks the following question: "Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city exactly once and returns to the origin city?" https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms https://en.wikipedia.org/wiki/Travelling_salesman_problem Author: Clark """ import copy import random cities = { 0: [0, 0], 1: [0, 5], 2: [3, 8], 3: [8, 10], 4: [12, 8], 5: [12, 4], 6: [8, 0], 7: [6, 2], } def main( cities: dict[int, list[int]], ants_num: int, iterations_num: int, pheromone_evaporation: float, alpha: float, beta: float, q: float, # Pheromone system parameters Q, which is a constant ) -> tuple[list[int], float]: """ Ant colony algorithm main function >>> main(cities=cities, ants_num=10, iterations_num=20, ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) ([0, 1, 2, 3, 4, 5, 6, 7, 0], 37.909778143828696) >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5, ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) ([0, 1, 0], 5.656854249492381) >>> main(cities={0: [0, 0], 1: [2, 2], 4: [4, 4]}, ants_num=5, iterations_num=5, ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) Traceback (most recent call last): ... IndexError: list index out of range >>> main(cities={}, ants_num=5, iterations_num=5, ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) Traceback (most recent call last): ... StopIteration >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=0, iterations_num=5, ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) ([], inf) >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=0, ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10) ([], inf) >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5, ... 
pheromone_evaporation=1, alpha=1.0, beta=5.0, q=10) ([0, 1, 0], 5.656854249492381) >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5, ... pheromone_evaporation=0, alpha=1.0, beta=5.0, q=10) ([0, 1, 0], 5.656854249492381) """ # Initialize the pheromone matrix cities_num = len(cities) pheromone = [[1.0] * cities_num] * cities_num best_path: list[int] = [] best_distance = float("inf") for _ in range(iterations_num): ants_route = [] for _ in range(ants_num): unvisited_cities = copy.deepcopy(cities) current_city = {next(iter(cities.keys())): next(iter(cities.values()))} del unvisited_cities[next(iter(current_city.keys()))] ant_route = [next(iter(current_city.keys()))] while unvisited_cities: current_city, unvisited_cities = city_select( pheromone, current_city, unvisited_cities, alpha, beta ) ant_route.append(next(iter(current_city.keys()))) ant_route.append(0) ants_route.append(ant_route) pheromone, best_path, best_distance = pheromone_update( pheromone, cities, pheromone_evaporation, ants_route, q, best_path, best_distance, ) return best_path, best_distance def distance(city1: list[int], city2: list[int]) -> float: """ Calculate the distance between two coordinate points >>> distance([0, 0], [3, 4] ) 5.0 >>> distance([0, 0], [-3, 4] ) 5.0 >>> distance([0, 0], [-3, -4] ) 5.0 """ return (((city1[0] - city2[0]) ** 2) + ((city1[1] - city2[1]) ** 2)) ** 0.5 def pheromone_update( pheromone: list[list[float]], cities: dict[int, list[int]], pheromone_evaporation: float, ants_route: list[list[int]], q: float, # Pheromone system parameters Q, which is a constant best_path: list[int], best_distance: float, ) -> tuple[list[list[float]], list[int], float]: """ Update pheromones on the route and update the best route >>> >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]], ... cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7, ... ants_route=[[0, 1, 0]], q=10, best_path=[], ... 
best_distance=float("inf")) ([[0.7, 4.235533905932737], [4.235533905932737, 0.7]], [0, 1, 0], 5.656854249492381) >>> pheromone_update(pheromone=[], ... cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7, ... ants_route=[[0, 1, 0]], q=10, best_path=[], ... best_distance=float("inf")) Traceback (most recent call last): ... IndexError: list index out of range >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]], ... cities={}, pheromone_evaporation=0.7, ... ants_route=[[0, 1, 0]], q=10, best_path=[], ... best_distance=float("inf")) Traceback (most recent call last): ... KeyError: 0 """ for a in range(len(cities)): # Update the volatilization of pheromone on all routes for b in range(len(cities)): pheromone[a][b] *= pheromone_evaporation for ant_route in ants_route: total_distance = 0.0 for i in range(len(ant_route) - 1): # Calculate total distance total_distance += distance(cities[ant_route[i]], cities[ant_route[i + 1]]) delta_pheromone = q / total_distance for i in range(len(ant_route) - 1): # Update pheromones pheromone[ant_route[i]][ant_route[i + 1]] += delta_pheromone pheromone[ant_route[i + 1]][ant_route[i]] = pheromone[ant_route[i]][ ant_route[i + 1] ] if total_distance < best_distance: best_path = ant_route best_distance = total_distance return pheromone, best_path, best_distance def city_select( pheromone: list[list[float]], current_city: dict[int, list[int]], unvisited_cities: dict[int, list[int]], alpha: float, beta: float, ) -> tuple[dict[int, list[int]], dict[int, list[int]]]: """ Choose the next city for ants >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]}, ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0) ({1: [2, 2]}, {}) >>> city_select(pheromone=[], current_city={0: [0,0]}, ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0) Traceback (most recent call last): ... IndexError: list index out of range >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={}, ... 
unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0) Traceback (most recent call last): ... StopIteration >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]}, ... unvisited_cities={}, alpha=1.0, beta=5.0) Traceback (most recent call last): ... IndexError: list index out of range """ probabilities = [] for city, value in unvisited_cities.items(): city_distance = distance(value, next(iter(current_city.values()))) probability = (pheromone[city][next(iter(current_city.keys()))] ** alpha) * ( (1 / city_distance) ** beta ) probabilities.append(probability) chosen_city_i = random.choices( list(unvisited_cities.keys()), weights=probabilities )[0] chosen_city = {chosen_city_i: unvisited_cities[chosen_city_i]} del unvisited_cities[next(iter(chosen_city.keys()))] return chosen_city, unvisited_cities if __name__ == "__main__": best_path, best_distance = main( cities=cities, ants_num=10, iterations_num=20, pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10, ) print(f"{best_path = }") print(f"{best_distance = }")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/bidirectional_a_star.py
graphs/bidirectional_a_star.py
""" https://en.wikipedia.org/wiki/Bidirectional_search """ from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean HEURISTIC = 0 grid = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right TPosition = tuple[int, int] class Node: """ >>> k = Node(0, 0, 4, 3, 0, None) >>> k.calculate_heuristic() 5.0 >>> n = Node(1, 4, 3, 4, 2, None) >>> n.calculate_heuristic() 2.0 >>> l = [k, n] >>> n == l[0] False >>> l.sort() >>> n == l[0] True """ def __init__( self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None, ) -> None: self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) self.goal_x = goal_x self.goal_y = goal_y self.g_cost = g_cost self.parent = parent self.h_cost = self.calculate_heuristic() self.f_cost = self.g_cost + self.h_cost def calculate_heuristic(self) -> float: """ Heuristic for the A* """ dy = self.pos_x - self.goal_x dx = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(dx) + abs(dy) else: return sqrt(dy**2 + dx**2) def __lt__(self, other: Node) -> bool: return self.f_cost < other.f_cost class AStar: """ >>> astar = AStar((0, 0), (len(grid) - 1, len(grid[0]) - 1)) >>> (astar.start.pos_y + delta[3][0], astar.start.pos_x + delta[3][1]) (0, 1) >>> [x.pos for x in astar.get_successors(astar.start)] [(1, 0), (0, 1)] >>> (astar.start.pos_y + delta[2][0], astar.start.pos_x + delta[2][1]) (1, 0) >>> astar.retrace_path(astar.start) [(0, 0)] >>> astar.search() # doctest: +NORMALIZE_WHITESPACE [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (2, 3), (3, 3), (4, 3), (4, 4), (5, 4), (5, 5), (6, 5), (6, 6)] """ def __init__(self, start: TPosition, goal: TPosition): self.start = Node(start[1], start[0], goal[1], goal[0], 0, None) self.target = Node(goal[1], goal[0], 
goal[1], goal[0], 99999, None) self.open_nodes = [self.start] self.closed_nodes: list[Node] = [] self.reached = False def search(self) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() current_node = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(current_node) self.closed_nodes.append(current_node) successors = self.get_successors(current_node) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(child_node) else: # retrieve the best current path better_node = self.open_nodes.pop(self.open_nodes.index(child_node)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(child_node) else: self.open_nodes.append(better_node) return [self.start.pos] def get_successors(self, parent: Node) -> list[Node]: """ Returns a list of successors (both in the grid and free spaces) """ successors = [] for action in delta: pos_x = parent.pos_x + action[1] pos_y = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) ) return successors def retrace_path(self, node: Node | None) -> list[TPosition]: """ Retrace the path from parents to parents until start node """ current_node = node path = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) current_node = current_node.parent path.reverse() return path class BidirectionalAStar: """ >>> bd_astar = BidirectionalAStar((0, 0), (len(grid) - 1, len(grid[0]) - 1)) >>> bd_astar.fwd_astar.start.pos == bd_astar.bwd_astar.target.pos True >>> bd_astar.retrace_bidirectional_path(bd_astar.fwd_astar.start, ... 
bd_astar.bwd_astar.start) [(0, 0)] >>> bd_astar.search() # doctest: +NORMALIZE_WHITESPACE [(0, 0), (0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (2, 4), (2, 5), (3, 5), (4, 5), (5, 5), (5, 6), (6, 6)] """ def __init__(self, start: TPosition, goal: TPosition) -> None: self.fwd_astar = AStar(start, goal) self.bwd_astar = AStar(goal, start) self.reached = False def search(self) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() current_fwd_node = self.fwd_astar.open_nodes.pop(0) current_bwd_node = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( current_fwd_node, current_bwd_node ) self.fwd_astar.closed_nodes.append(current_fwd_node) self.bwd_astar.closed_nodes.append(current_bwd_node) self.fwd_astar.target = current_bwd_node self.bwd_astar.target = current_fwd_node successors = { self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node), self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(child_node) else: # retrieve the best current path better_node = astar.open_nodes.pop( astar.open_nodes.index(child_node) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(child_node) else: astar.open_nodes.append(better_node) return [self.fwd_astar.start.pos] def retrace_bidirectional_path( self, fwd_node: Node, bwd_node: Node ) -> list[TPosition]: fwd_path = self.fwd_astar.retrace_path(fwd_node) bwd_path = self.bwd_astar.retrace_path(bwd_node) bwd_path.pop() bwd_path.reverse() path = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) start_time = 
time.time() a_star = AStar(init, goal) path = a_star.search() end_time = time.time() - start_time print(f"AStar execution time = {end_time:f} seconds") bd_start_time = time.time() bidir_astar = BidirectionalAStar(init, goal) bd_end_time = time.time() - bd_start_time print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/kahns_algorithm_topo.py
graphs/kahns_algorithm_topo.py
def topological_sort(graph: dict[int, list[int]]) -> list[int] | None: """ Perform topological sorting of a Directed Acyclic Graph (DAG) using Kahn's Algorithm via Breadth-First Search (BFS). Topological sorting is a linear ordering of vertices in a graph such that for every directed edge u → v, vertex u comes before vertex v in the ordering. Parameters: graph: Adjacency list representing the directed graph where keys are vertices, and values are lists of adjacent vertices. Returns: The topologically sorted order of vertices if the graph is a DAG. Returns None if the graph contains a cycle. Example: >>> graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} >>> topological_sort(graph) [0, 1, 2, 3, 4, 5] >>> graph_with_cycle = {0: [1], 1: [2], 2: [0]} >>> topological_sort(graph_with_cycle) """ indegree = [0] * len(graph) queue = [] topo_order = [] processed_vertices_count = 0 # Calculate the indegree of each vertex for values in graph.values(): for i in values: indegree[i] += 1 # Add all vertices with 0 indegree to the queue for i in range(len(indegree)): if indegree[i] == 0: queue.append(i) # Perform BFS while queue: vertex = queue.pop(0) processed_vertices_count += 1 topo_order.append(vertex) # Traverse neighbors for neighbor in graph[vertex]: indegree[neighbor] -= 1 if indegree[neighbor] == 0: queue.append(neighbor) if processed_vertices_count != len(graph): return None # no topological ordering exists due to cycle return topo_order # valid topological ordering if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/matching_min_vertex_cover.py
graphs/matching_min_vertex_cover.py
""" * Author: Manuel Di Lullo (https://github.com/manueldilullo) * Description: Approximization algorithm for minimum vertex cover problem. Matching Approach. Uses graphs represented with an adjacency list URL: https://mathworld.wolfram.com/MinimumVertexCover.html URL: https://www.princeton.edu/~aaa/Public/Teaching/ORF523/ORF523_Lec6.pdf """ def matching_min_vertex_cover(graph: dict) -> set: """ APX Algorithm for min Vertex Cover using Matching Approach @input: graph (graph stored in an adjacency list where each vertex is represented as an integer) @example: >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} >>> matching_min_vertex_cover(graph) {0, 1, 2, 4} """ # chosen_vertices = set of chosen vertices chosen_vertices = set() # edges = list of graph's edges edges = get_edges(graph) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: from_node, to_node = edges.pop() chosen_vertices.add(from_node) chosen_vertices.add(to_node) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(edge) return chosen_vertices def get_edges(graph: dict) -> set: """ Return a set of couples that represents all of the edges. @input: graph (graph stored in an adjacency list where each vertex is represented as an integer) @example: >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3], 3: [0, 1, 2]} >>> get_edges(graph) {(0, 1), (3, 1), (0, 3), (2, 0), (3, 0), (2, 3), (1, 0), (3, 2), (1, 3)} """ edges = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node)) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/dijkstra_2.py
graphs/dijkstra_2.py
def print_dist(dist, v):
    """Pretty-print the tentative distances of the first `v` vertices
    (INF for unreachable ones)."""
    print("\nVertex Distance")
    for i in range(v):
        if dist[i] != float("inf"):
            print(i, "\t", int(dist[i]), end="\t")
        else:
            print(i, "\t", "INF", end="\t")
    print()


def min_dist(mdist, vset, v):
    """
    Return the index of the unvisited vertex with the smallest tentative
    distance, or -1 when every unvisited vertex is still at infinity
    (i.e. unreachable).
    """
    min_val = float("inf")
    min_ind = -1
    for i in range(v):
        if (not vset[i]) and mdist[i] < min_val:
            min_ind = i
            min_val = mdist[i]
    return min_ind


def dijkstra(graph, v, src):
    """
    Single-source shortest paths on a dense `v` x `v` adjacency matrix
    `graph`, where float("inf") marks a missing edge.

    Prints the distance table and returns the list of tentative distances
    from `src`.  (Returning the list is new but backward-compatible: the
    original returned None implicitly and callers only used the printout.)
    """
    mdist = [float("inf") for _ in range(v)]
    vset = [False for _ in range(v)]
    mdist[src] = 0.0

    for _ in range(v - 1):
        u = min_dist(mdist, vset, v)
        # BUG FIX: min_dist returns -1 when all remaining vertices are
        # unreachable; the original then executed vset[-1] = True, silently
        # marking the LAST vertex as visited.  Nothing is left to relax, so
        # stop instead.
        if u == -1:
            break
        vset[u] = True

        for i in range(v):
            if (
                (not vset[i])
                and graph[u][i] != float("inf")
                and mdist[u] + graph[u][i] < mdist[i]
            ):
                mdist[i] = mdist[u] + graph[u][i]

    # BUG FIX: this used to be print_dist(mdist, i) -- `i` is the leftover
    # inner-loop index (v - 1), so the last vertex's distance was never
    # printed.  Pass the vertex count instead.
    print_dist(mdist, v)
    return mdist


if __name__ == "__main__":
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph = [[float("inf") for i in range(V)] for j in range(V)]

    for i in range(V):
        graph[i][i] = 0.0

    for i in range(E):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:").strip())
        dst = int(input("Enter destination:").strip())
        weight = float(input("Enter weight:").strip())
        graph[src][dst] = weight

    gsrc = int(input("\nEnter shortest path source:").strip())
    dijkstra(graph, V, gsrc)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/scc_kosaraju.py
graphs/scc_kosaraju.py
from __future__ import annotations

# Kosaraju's algorithm for strongly connected components (SCCs).
# All state (graph, reversed_graph, visit, stack, scc, component, n) is shared
# through module-level globals initialised in the __main__ block below, so the
# functions are meant to be run as a script, not imported as a library.


def dfs(u):
    """First pass: DFS on the original graph, appending each vertex to
    `stack` when its exploration finishes (post-order)."""
    global graph, reversed_graph, scc, component, visit, stack
    if visit[u]:
        return
    visit[u] = True
    for v in graph[u]:
        dfs(v)
    stack.append(u)


def dfs2(u):
    """Second pass: DFS on the reversed graph, collecting every vertex it
    reaches into the current `component`."""
    global graph, reversed_graph, scc, component, visit, stack
    if visit[u]:
        return
    visit[u] = True
    component.append(u)
    for v in reversed_graph[u]:
        dfs2(v)


def kosaraju():
    """Return the list of strongly connected components of the global graph.

    NOTE(review): both DFS passes are recursive, so deep graphs can exceed
    Python's default recursion limit -- confirm expected input sizes.
    """
    global graph, reversed_graph, scc, component, visit, stack
    # Pass 1: compute finishing order on the original graph.
    for i in range(n):
        dfs(i)
    # Pass 2: sweep vertices in decreasing finish time over the reversed
    # graph; each fresh DFS tree is exactly one SCC.
    visit = [False] * n
    for i in stack[::-1]:
        if visit[i]:
            continue
        component = []
        dfs2(i)
        scc.append(component)
    return scc


if __name__ == "__main__":
    # n - no of nodes, m - no of edges
    n, m = list(map(int, input().strip().split()))

    graph: list[list[int]] = [[] for _ in range(n)]  # graph
    reversed_graph: list[list[int]] = [[] for i in range(n)]  # reversed graph
    # input graph data (edges)
    for _ in range(m):
        u, v = list(map(int, input().strip().split()))
        graph[u].append(v)
        reversed_graph[v].append(u)

    stack: list[int] = []  # finish-order stack filled by dfs()
    visit: list[bool] = [False] * n
    # annotation fixed: kosaraju() appends list-of-vertex components here,
    # so scc is a list of lists, not a flat list of ints
    scc: list[list[int]] = []
    component: list[int] = []
    print(kosaraju())
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/page_rank.py
graphs/page_rank.py
""" Author: https://github.com/bhushan-borole """ """ The input graph for the algorithm is: A B C A 0 1 1 B 0 0 1 C 1 0 0 """ graph = [[0, 1, 1], [0, 0, 1], [1, 0, 0]] class Node: def __init__(self, name): self.name = name self.inbound = [] self.outbound = [] def add_inbound(self, node): self.inbound.append(node) def add_outbound(self, node): self.outbound.append(node) def __repr__(self): return f"<node={self.name} inbound={self.inbound} outbound={self.outbound}>" def page_rank(nodes, limit=3, d=0.85): ranks = {} for node in nodes: ranks[node.name] = 1 outbounds = {} for node in nodes: outbounds[node.name] = len(node.outbound) for i in range(limit): print(f"======= Iteration {i + 1} =======") for _, node in enumerate(nodes): ranks[node.name] = (1 - d) + d * sum( ranks[ib] / outbounds[ib] for ib in node.inbound ) print(ranks) def main(): names = list(input("Enter Names of the Nodes: ").split()) nodes = [Node(name) for name in names] for ri, row in enumerate(graph): for ci, col in enumerate(row): if col == 1: nodes[ci].add_inbound(names[ri]) nodes[ri].add_outbound(names[ci]) print("======= Nodes =======") for node in nodes: print(node) page_rank(nodes) if __name__ == "__main__": main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/graph_adjacency_matrix.py
graphs/graph_adjacency_matrix.py
#!/usr/bin/env python3 """ Author: Vikram Nithyanandam Description: The following implementation is a robust unweighted Graph data structure implemented using an adjacency matrix. This vertices and edges of this graph can be effectively initialized and modified while storing your chosen generic value in each vertex. Adjacency Matrix: https://mathworld.wolfram.com/AdjacencyMatrix.html Potential Future Ideas: - Add a flag to set edge weights on and set edge weights - Make edge weights and vertex values customizable to store whatever the client wants - Support multigraph functionality if the client wants it """ from __future__ import annotations import random import unittest from pprint import pformat from typing import TypeVar import pytest T = TypeVar("T") class GraphAdjacencyMatrix[T]: def __init__( self, vertices: list[T], edges: list[list[T]], directed: bool = True ) -> None: """ Parameters: - vertices: (list[T]) The list of vertex names the client wants to pass in. Default is empty. - edges: (list[list[T]]) The list of edges the client wants to pass in. Each edge is a 2-element list. Default is empty. - directed: (bool) Indicates if graph is directed or undirected. Default is True. """ self.directed = directed self.vertex_to_index: dict[T, int] = {} self.adj_matrix: list[list[int]] = [] # Falsey checks edges = edges or [] vertices = vertices or [] for vertex in vertices: self.add_vertex(vertex) for edge in edges: if len(edge) != 2: msg = f"Invalid input: {edge} must have length 2." raise ValueError(msg) self.add_edge(edge[0], edge[1]) def add_edge(self, source_vertex: T, destination_vertex: T) -> None: """ Creates an edge from source vertex to destination vertex. If any given vertex doesn't exist or the edge already exists, a ValueError will be thrown. 
""" if not ( self.contains_vertex(source_vertex) and self.contains_vertex(destination_vertex) ): msg = ( f"Incorrect input: Either {source_vertex} or " f"{destination_vertex} does not exist" ) raise ValueError(msg) if self.contains_edge(source_vertex, destination_vertex): msg = ( "Incorrect input: The edge already exists between " f"{source_vertex} and {destination_vertex}" ) raise ValueError(msg) # Get the indices of the corresponding vertices and set their edge value to 1. u: int = self.vertex_to_index[source_vertex] v: int = self.vertex_to_index[destination_vertex] self.adj_matrix[u][v] = 1 if not self.directed: self.adj_matrix[v][u] = 1 def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: """ Removes the edge between the two vertices. If any given vertex doesn't exist or the edge does not exist, a ValueError will be thrown. """ if not ( self.contains_vertex(source_vertex) and self.contains_vertex(destination_vertex) ): msg = ( f"Incorrect input: Either {source_vertex} or " f"{destination_vertex} does not exist" ) raise ValueError(msg) if not self.contains_edge(source_vertex, destination_vertex): msg = ( "Incorrect input: The edge does NOT exist between " f"{source_vertex} and {destination_vertex}" ) raise ValueError(msg) # Get the indices of the corresponding vertices and set their edge value to 0. u: int = self.vertex_to_index[source_vertex] v: int = self.vertex_to_index[destination_vertex] self.adj_matrix[u][v] = 0 if not self.directed: self.adj_matrix[v][u] = 0 def add_vertex(self, vertex: T) -> None: """ Adds a vertex to the graph. If the given vertex already exists, a ValueError will be thrown. """ if self.contains_vertex(vertex): msg = f"Incorrect input: {vertex} already exists in this graph." 
raise ValueError(msg) # build column for vertex for row in self.adj_matrix: row.append(0) # build row for vertex and update other data structures self.adj_matrix.append([0] * (len(self.adj_matrix) + 1)) self.vertex_to_index[vertex] = len(self.adj_matrix) - 1 def remove_vertex(self, vertex: T) -> None: """ Removes the given vertex from the graph and deletes all incoming and outgoing edges from the given vertex as well. If the given vertex does not exist, a ValueError will be thrown. """ if not self.contains_vertex(vertex): msg = f"Incorrect input: {vertex} does not exist in this graph." raise ValueError(msg) # first slide up the rows by deleting the row corresponding to # the vertex being deleted. start_index = self.vertex_to_index[vertex] self.adj_matrix.pop(start_index) # next, slide the columns to the left by deleting the values in # the column corresponding to the vertex being deleted for lst in self.adj_matrix: lst.pop(start_index) # final clean up self.vertex_to_index.pop(vertex) # decrement indices for vertices shifted by the deleted vertex in the adj matrix for inner_vertex in self.vertex_to_index: if self.vertex_to_index[inner_vertex] >= start_index: self.vertex_to_index[inner_vertex] = ( self.vertex_to_index[inner_vertex] - 1 ) def contains_vertex(self, vertex: T) -> bool: """ Returns True if the graph contains the vertex, False otherwise. """ return vertex in self.vertex_to_index def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: """ Returns True if the graph contains the edge from the source_vertex to the destination_vertex, False otherwise. If any given vertex doesn't exist, a ValueError will be thrown. """ if not ( self.contains_vertex(source_vertex) and self.contains_vertex(destination_vertex) ): msg = ( f"Incorrect input: Either {source_vertex} " f"or {destination_vertex} does not exist." 
) raise ValueError(msg) u = self.vertex_to_index[source_vertex] v = self.vertex_to_index[destination_vertex] return self.adj_matrix[u][v] == 1 def clear_graph(self) -> None: """ Clears all vertices and edges. """ self.vertex_to_index = {} self.adj_matrix = [] def __repr__(self) -> str: first = "Adj Matrix:\n" + pformat(self.adj_matrix) second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index) return first + second class TestGraphMatrix(unittest.TestCase): def __assert_graph_edge_exists_check( self, undirected_graph: GraphAdjacencyMatrix, directed_graph: GraphAdjacencyMatrix, edge: list[int], ) -> None: assert undirected_graph.contains_edge(edge[0], edge[1]) assert undirected_graph.contains_edge(edge[1], edge[0]) assert directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_edge_does_not_exist_check( self, undirected_graph: GraphAdjacencyMatrix, directed_graph: GraphAdjacencyMatrix, edge: list[int], ) -> None: assert not undirected_graph.contains_edge(edge[0], edge[1]) assert not undirected_graph.contains_edge(edge[1], edge[0]) assert not directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_vertex_exists_check( self, undirected_graph: GraphAdjacencyMatrix, directed_graph: GraphAdjacencyMatrix, vertex: int, ) -> None: assert undirected_graph.contains_vertex(vertex) assert directed_graph.contains_vertex(vertex) def __assert_graph_vertex_does_not_exist_check( self, undirected_graph: GraphAdjacencyMatrix, directed_graph: GraphAdjacencyMatrix, vertex: int, ) -> None: assert not undirected_graph.contains_vertex(vertex) assert not directed_graph.contains_vertex(vertex) def __generate_random_edges( self, vertices: list[int], edge_pick_count: int ) -> list[list[int]]: assert edge_pick_count <= len(vertices) random_source_vertices: list[int] = random.sample( vertices[0 : int(len(vertices) / 2)], edge_pick_count ) random_destination_vertices: list[int] = random.sample( vertices[int(len(vertices) / 2) :], edge_pick_count ) random_edges: 
list[list[int]] = [] for source in random_source_vertices: for dest in random_destination_vertices: random_edges.append([source, dest]) return random_edges def __generate_graphs( self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]: if max_val - min_val + 1 < vertex_count: raise ValueError( "Will result in duplicate vertices. Either increase " "range between min_val and max_val or decrease vertex count" ) # generate graph input random_vertices: list[int] = random.sample( range(min_val, max_val + 1), vertex_count ) random_edges: list[list[int]] = self.__generate_random_edges( random_vertices, edge_pick_count ) # build graphs undirected_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=random_edges, directed=False ) directed_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=random_edges, directed=True ) return undirected_graph, directed_graph, random_vertices, random_edges def test_init_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) # test graph initialization with vertices and edges for num in random_vertices: self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, num ) for edge in random_edges: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) assert not undirected_graph.directed assert directed_graph.directed def test_contains_vertex(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) # Build graphs WITHOUT edges undirected_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=[], directed=False ) directed_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=[], directed=True ) # Test contains_vertex for num in range(101): assert (num in random_vertices) == undirected_graph.contains_vertex(num) assert (num in random_vertices) == directed_graph.contains_vertex(num) def 
test_add_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) # build empty graphs undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( vertices=[], edges=[], directed=False ) directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix( vertices=[], edges=[], directed=True ) # run add_vertex for num in random_vertices: undirected_graph.add_vertex(num) for num in random_vertices: directed_graph.add_vertex(num) # test add_vertex worked for num in random_vertices: self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, num ) def test_remove_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) # build graphs WITHOUT edges undirected_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=[], directed=False ) directed_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=[], directed=True ) # test remove_vertex worked for num in random_vertices: self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, num ) undirected_graph.remove_vertex(num) directed_graph.remove_vertex(num) self.__assert_graph_vertex_does_not_exist_check( undirected_graph, directed_graph, num ) def test_add_and_remove_vertices_repeatedly(self) -> None: random_vertices1: list[int] = random.sample(range(51), 20) random_vertices2: list[int] = random.sample(range(51, 101), 20) # build graphs WITHOUT edges undirected_graph = GraphAdjacencyMatrix( vertices=random_vertices1, edges=[], directed=False ) directed_graph = GraphAdjacencyMatrix( vertices=random_vertices1, edges=[], directed=True ) # test adding and removing vertices for i, _ in enumerate(random_vertices1): undirected_graph.add_vertex(random_vertices2[i]) directed_graph.add_vertex(random_vertices2[i]) self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, random_vertices2[i] ) undirected_graph.remove_vertex(random_vertices1[i]) directed_graph.remove_vertex(random_vertices1[i]) 
self.__assert_graph_vertex_does_not_exist_check( undirected_graph, directed_graph, random_vertices1[i] ) # remove all vertices for i, _ in enumerate(random_vertices1): undirected_graph.remove_vertex(random_vertices2[i]) directed_graph.remove_vertex(random_vertices2[i]) self.__assert_graph_vertex_does_not_exist_check( undirected_graph, directed_graph, random_vertices2[i] ) def test_contains_edge(self) -> None: # generate graphs and graph input vertex_count = 20 ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(vertex_count, 0, 100, 4) # generate all possible edges for testing all_possible_edges: list[list[int]] = [] for i in range(vertex_count - 1): for j in range(i + 1, vertex_count): all_possible_edges.append([random_vertices[i], random_vertices[j]]) all_possible_edges.append([random_vertices[j], random_vertices[i]]) # test contains_edge function for edge in all_possible_edges: if edge in random_edges: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) elif [edge[1], edge[0]] in random_edges: # since this edge exists for undirected but the reverse may # not exist for directed self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, [edge[1], edge[0]] ) else: self.__assert_graph_edge_does_not_exist_check( undirected_graph, directed_graph, edge ) def test_add_edge(self) -> None: # generate graph input random_vertices: list[int] = random.sample(range(101), 15) random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) # build graphs WITHOUT edges undirected_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=[], directed=False ) directed_graph = GraphAdjacencyMatrix( vertices=random_vertices, edges=[], directed=True ) # run and test add_edge for edge in random_edges: undirected_graph.add_edge(edge[0], edge[1]) directed_graph.add_edge(edge[0], edge[1]) self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) def 
test_remove_edge(self) -> None: # generate graph input and graphs ( undirected_graph, directed_graph, _random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) # run and test remove_edge for edge in random_edges: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) undirected_graph.remove_edge(edge[0], edge[1]) directed_graph.remove_edge(edge[0], edge[1]) self.__assert_graph_edge_does_not_exist_check( undirected_graph, directed_graph, edge ) def test_add_and_remove_edges_repeatedly(self) -> None: ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) # make some more edge options! more_random_edges: list[list[int]] = [] while len(more_random_edges) != len(random_edges): edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) for edge in edges: if len(more_random_edges) == len(random_edges): break elif edge not in more_random_edges and edge not in random_edges: more_random_edges.append(edge) for i, _ in enumerate(random_edges): undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, more_random_edges[i] ) undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) self.__assert_graph_edge_does_not_exist_check( undirected_graph, directed_graph, random_edges[i] ) def test_add_vertex_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: with pytest.raises(ValueError): undirected_graph.add_vertex(vertex) with pytest.raises(ValueError): directed_graph.add_vertex(vertex) def test_remove_vertex_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, _random_edges, ) = 
self.__generate_graphs(20, 0, 100, 4) for i in range(101): if i not in random_vertices: with pytest.raises(ValueError): undirected_graph.remove_vertex(i) with pytest.raises(ValueError): directed_graph.remove_vertex(i) def test_add_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, _random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for edge in random_edges: with pytest.raises(ValueError): undirected_graph.add_edge(edge[0], edge[1]) with pytest.raises(ValueError): directed_graph.add_edge(edge[0], edge[1]) def test_remove_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) more_random_edges: list[list[int]] = [] while len(more_random_edges) != len(random_edges): edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) for edge in edges: if len(more_random_edges) == len(random_edges): break elif edge not in more_random_edges and edge not in random_edges: more_random_edges.append(edge) for edge in more_random_edges: with pytest.raises(ValueError): undirected_graph.remove_edge(edge[0], edge[1]) with pytest.raises(ValueError): directed_graph.remove_edge(edge[0], edge[1]) def test_contains_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: with pytest.raises(ValueError): undirected_graph.contains_edge(vertex, 102) with pytest.raises(ValueError): directed_graph.contains_edge(vertex, 102) with pytest.raises(ValueError): undirected_graph.contains_edge(103, 102) with pytest.raises(ValueError): directed_graph.contains_edge(103, 102) if __name__ == "__main__": unittest.main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/minimum_spanning_tree_kruskal.py
graphs/minimum_spanning_tree_kruskal.py
def kruskal(
    num_nodes: int, edges: list[tuple[int, int, int]]
) -> list[tuple[int, int, int]]:
    """
    Build a minimum spanning tree with Kruskal's algorithm: scan edges from
    lightest to heaviest and keep each one that connects two different trees.

    >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
    [(2, 3, 1), (0, 1, 3), (1, 2, 5)]

    >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
    [(2, 3, 1), (0, 2, 1), (0, 1, 3)]

    >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2),
    ...         (2, 1, 1)])
    [(2, 3, 1), (0, 2, 1), (2, 1, 1)]
    """
    # Union-find forest: parent[i] == i marks a root.
    parent = list(range(num_nodes))

    def find_root(node: int) -> int:
        # Path compression: point every visited node straight at the root.
        if parent[node] != node:
            parent[node] = find_root(parent[node])
        return parent[node]

    minimum_spanning_tree: list[tuple[int, int, int]] = []
    for node_a, node_b, weight in sorted(edges, key=lambda edge: edge[2]):
        root_a = find_root(node_a)
        root_b = find_root(node_b)
        if root_a != root_b:
            minimum_spanning_tree.append((node_a, node_b, weight))
            parent[root_a] = root_b  # merge the two trees
    return minimum_spanning_tree


if __name__ == "__main__":  # pragma: no cover
    num_nodes, num_edges = list(map(int, input().strip().split()))
    edges = []

    for _ in range(num_edges):
        node1, node2, cost = (int(x) for x in input().strip().split())
        edges.append((node1, node2, cost))

    kruskal(num_nodes, edges)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/kahns_algorithm_long.py
graphs/kahns_algorithm_long.py
# Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
def longest_distance(graph):
    """
    Print and return the number of vertices on the longest path in a DAG.

    graph: adjacency list mapping vertex -> list of successors; vertices are
    assumed to be labelled 0 .. len(graph) - 1 and the graph must be acyclic
    and non-empty.

    Returns the longest path length (counted in vertices).
    """
    # Local import keeps the module's import-free top level unchanged.
    from collections import deque

    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)  # longest path (in vertices) ending at each vertex

    for values in graph.values():
        for vertex in values:
            indegree[vertex] += 1

    # PERF FIX: deque.popleft() is O(1); the original list.pop(0) was O(n)
    # per dequeue.
    queue = deque(v for v in range(len(indegree)) if indegree[v] == 0)

    while queue:
        vertex = queue.popleft()
        for successor in graph[vertex]:
            indegree[successor] -= 1
            # Best chain ending at `successor` either keeps its old value or
            # extends the chain that ends at `vertex`.
            long_dist[successor] = max(long_dist[successor], long_dist[vertex] + 1)
            if indegree[successor] == 0:
                queue.append(successor)

    result = max(long_dist)
    print(result)  # preserve the original script's printed output
    return result  # NEW (backward-compatible): expose the value to callers


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/minimum_spanning_tree_prims.py
graphs/minimum_spanning_tree_prims.py
import sys
from collections import defaultdict


class Heap:
    """Hand-rolled binary min-heap with an external position table.

    `node_position[vertex]` tracks where each vertex currently sits inside the
    heap array, so decrease-key (bottom_to_top) can find a vertex in O(1).
    The heap array and its parallel `positions` array are owned by the caller
    and passed into every method.
    """

    def __init__(self):
        # node_position[vertex] == index of `vertex` in the caller's heap array
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently occupied by `vertex`."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that `vertex` now sits at heap index `pos`."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at index `start` down until the min-heap property
        holds, keeping `positions` and the position table in sync."""
        # Leaves (index > size // 2 - 1) have no children: nothing to sift.
        if start > size // 2 - 1:
            return
        else:
            # Pick the smaller child (or the only child, if the right one
            # would fall outside the heap).
            if 2 * start + 2 >= size:  # noqa: SIM114
                smallest_child = 2 * start + 1
            elif heap[2 * start + 1] < heap[2 * start + 2]:
                smallest_child = 2 * start + 1
            else:
                smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap keys and vertex ids...
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                # ...and mirror the swap in the vertex -> index table.
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        """Decrease-key: place `val` at `index` and bubble it up, shifting
        larger parents down until the heap property is restored."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                # Parent is larger: shift it down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                # Found the final slot for `val`.
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Bubbled all the way to the root (while-loop exited without break).
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place by sifting down every internal node."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop the minimum vertex: overwrite the root key with sys.maxsize
        (so it sinks to the bottom) and return the removed vertex id."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's MST on `adjacency_list` (vertex -> [[neighbor, weight], ...]);
    returns the tree as (parent, child) edge pairs, rooted at vertex 0.

    NOTE(review): vertices are assumed to be 0 .. len(adjacency_list) - 1 and
    the graph connected -- confirm before reusing on other inputs.

    >>> adjacency_list = {0: [[1, 1], [3, 3]],
    ...                   1: [[0, 1], [2, 6], [3, 5], [4, 1]],
    ...                   2: [[1, 6], [4, 5], [5, 2]],
    ...                   3: [[0, 3], [1, 5], [4, 1]],
    ...                   4: [[1, 1], [2, 5], [3, 1], [5, 4]],
    ...                   5: [[2, 2], [4, 4]]}
    >>> prisms_algorithm(adjacency_list)
    [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    # Seed the frontier with vertex 0's neighbors before building the heap.
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                # Decrease-key any frontier vertex that just got a cheaper
                # connection into the growing tree.
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])

    print(prisms_algorithm(adjacency_list))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/dijkstra_alternate.py
graphs/dijkstra_alternate.py
from __future__ import annotations


class Graph:
    """Dijkstra's shortest paths over an adjacency-matrix graph."""

    def __init__(self, vertices: int) -> None:
        """
        >>> graph = Graph(2)
        >>> graph.vertices
        2
        >>> len(graph.graph)
        2
        >>> len(graph.graph[0])
        2
        """
        self.vertices = vertices
        # vertices x vertices matrix; 0 means "no edge".
        self.graph = [[0] * vertices for _ in range(vertices)]

    def print_solution(self, distances_from_source: list[int]) -> None:
        """
        >>> Graph(0).print_solution([])  # doctest: +NORMALIZE_WHITESPACE
        Vertex   Distance from Source
        """
        print("Vertex \t Distance from Source")
        for vertex in range(self.vertices):
            print(vertex, "\t\t", distances_from_source[vertex])

    def minimum_distance(
        self, distances_from_source: list[int], visited: list[bool]
    ) -> int:
        """
        A utility function to find the vertex with minimum distance value, from the set
        of vertices not yet included in shortest path tree.

        >>> Graph(3).minimum_distance([1, 2, 3], [False, False, True])
        0
        """
        # Track the best (smallest) tentative distance seen so far.
        best = 1e7
        best_index = 0
        for vertex in range(self.vertices):
            if not visited[vertex] and distances_from_source[vertex] < best:
                best = distances_from_source[vertex]
                best_index = vertex
        return best_index

    def dijkstra(self, source: int) -> None:
        """
        Function that implements Dijkstra's single source shortest path algorithm for a
        graph represented using adjacency matrix representation.

        >>> Graph(4).dijkstra(1)  # doctest: +NORMALIZE_WHITESPACE
        Vertex  Distance from Source
        0       10000000
        1       0
        2       10000000
        3       10000000
        """
        distances = [int(1e7)] * self.vertices  # distances from the source
        distances[source] = 0
        visited = [False] * self.vertices

        for _ in range(self.vertices):
            # Greedily settle the closest unvisited vertex.
            nearest = self.minimum_distance(distances, visited)
            visited[nearest] = True

            # Relax every edge leaving the settled vertex.
            for neighbor, weight in enumerate(self.graph[nearest]):
                candidate = distances[nearest] + weight
                if weight > 0 and not visited[neighbor] and distances[neighbor] > candidate:
                    distances[neighbor] = candidate

        self.print_solution(distances)


if __name__ == "__main__":
    graph = Graph(9)
    graph.graph = [
        [0, 4, 0, 0, 0, 0, 0, 8, 0],
        [4, 0, 8, 0, 0, 0, 0, 11, 0],
        [0, 8, 0, 7, 0, 4, 0, 0, 2],
        [0, 0, 7, 0, 9, 14, 0, 0, 0],
        [0, 0, 0, 9, 0, 10, 0, 0, 0],
        [0, 0, 4, 14, 10, 0, 2, 0, 0],
        [0, 0, 0, 0, 0, 2, 0, 1, 6],
        [8, 11, 0, 0, 0, 0, 1, 0, 7],
        [0, 0, 2, 0, 0, 0, 6, 7, 0],
    ]
    graph.dijkstra(0)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/breadth_first_search_2.py
graphs/breadth_first_search_2.py
""" https://en.wikipedia.org/wiki/Breadth-first_search pseudo-code: breadth_first_search(graph G, start vertex s): // all nodes initially unexplored mark s as explored let Q = queue data structure, initialized with s while Q is non-empty: remove the first node of Q, call it v for each edge(v, w): // for w in graph[v] if w unexplored: mark w as explored add w to Q (at the end) """ from __future__ import annotations from collections import deque from queue import Queue from timeit import timeit G = { "A": ["B", "C"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B"], "E": ["B", "F"], "F": ["C", "E"], } def breadth_first_search(graph: dict, start: str) -> list[str]: """ Implementation of breadth first search using queue.Queue. >>> ''.join(breadth_first_search(G, 'A')) 'ABCDEF' """ explored = {start} result = [start] queue: Queue = Queue() queue.put(start) while not queue.empty(): v = queue.get() for w in graph[v]: if w not in explored: explored.add(w) result.append(w) queue.put(w) return result def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]: """ Implementation of breadth first search using collection.queue. >>> ''.join(breadth_first_search_with_deque(G, 'A')) 'ABCDEF' """ visited = {start} result = [start] queue = deque([start]) while queue: v = queue.popleft() for child in graph[v]: if child not in visited: visited.add(child) result.append(child) queue.append(child) return result def benchmark_function(name: str) -> None: setup = f"from __main__ import G, {name}" number = 10000 res = timeit(f"{name}(G, 'A')", setup=setup, number=number) print(f"{name:<35} finished {number} runs in {res:.5f} seconds") if __name__ == "__main__": import doctest doctest.testmod() benchmark_function("breadth_first_search") benchmark_function("breadth_first_search_with_deque") # breadth_first_search finished 10000 runs in 0.20999 seconds # breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/karger.py
graphs/karger.py
""" An implementation of Karger's Algorithm for partitioning a graph. """ from __future__ import annotations import random # Adjacency list representation of this graph: # https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg TEST_GRAPH = { "1": ["2", "3", "4", "5"], "2": ["1", "3", "4", "5"], "3": ["1", "2", "4", "5", "10"], "4": ["1", "2", "3", "5", "6"], "5": ["1", "2", "3", "4", "7"], "6": ["7", "8", "9", "10", "4"], "7": ["6", "8", "9", "10", "5"], "8": ["6", "7", "9", "10"], "9": ["6", "7", "8", "10"], "10": ["6", "7", "8", "9", "3"], } def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]: """ Partitions a graph using Karger's Algorithm. Implemented from pseudocode found here: https://en.wikipedia.org/wiki/Karger%27s_algorithm. This function involves random choices, meaning it will not give consistent outputs. Args: graph: A dictionary containing adacency lists for the graph. Nodes must be strings. Returns: The cutset of the cut found by Karger's Algorithm. >>> graph = {'0':['1'], '1':['0']} >>> partition_graph(graph) {('0', '1')} """ # Dict that maps contracted nodes to a list of all the nodes it "contains." contracted_nodes = {node: {node} for node in graph} graph_copy = {node: graph[node][:] for node in graph} while len(graph_copy) > 2: # Choose a random edge. u = random.choice(list(graph_copy.keys())) v = random.choice(graph_copy[u]) # Contract edge (u, v) to new node uv uv = u + v uv_neighbors = list(set(graph_copy[u] + graph_copy[v])) uv_neighbors.remove(u) uv_neighbors.remove(v) graph_copy[uv] = uv_neighbors for neighbor in uv_neighbors: graph_copy[neighbor].append(uv) contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v])) # Remove nodes u and v. del graph_copy[u] del graph_copy[v] for neighbor in uv_neighbors: if u in graph_copy[neighbor]: graph_copy[neighbor].remove(u) if v in graph_copy[neighbor]: graph_copy[neighbor].remove(v) # Find cutset. 
groups = [contracted_nodes[node] for node in graph_copy] return { (node, neighbor) for node in groups[0] for neighbor in graph[node] if neighbor in groups[1] } if __name__ == "__main__": print(partition_graph(TEST_GRAPH))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/depth_first_search.py
graphs/depth_first_search.py
"""Non recursive implementation of a DFS algorithm.""" from __future__ import annotations def depth_first_search(graph: dict, start: str) -> set[str]: """Depth First Search on Graph :param graph: directed graph in dictionary format :param start: starting vertex as a string :returns: the trace of the search >>> input_G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], ... "F": ["C", "E", "G"], "G": ["F"] } >>> output_G = list({'A', 'B', 'C', 'D', 'E', 'F', 'G'}) >>> all(x in output_G for x in list(depth_first_search(input_G, "A"))) True >>> all(x in output_G for x in list(depth_first_search(input_G, "G"))) True """ explored, stack = set(start), [start] while stack: v = stack.pop() explored.add(v) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v]): if adj not in explored: stack.append(adj) return explored G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/dijkstra_algorithm.py
graphs/dijkstra_algorithm.py
# Title: Dijkstra's Algorithm for finding single source shortest path from scratch
# Author: Shubham Malik
# References: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm

import math
import sys

# For storing the vertex set to retrieve node with the lowest distance


class PriorityQueue:
    # Based on Min Heap
    def __init__(self):
        """
        Priority queue class constructor method.

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.cur_size
        0
        >>> priority_queue_test.array
        []
        >>> priority_queue_test.pos
        {}
        """
        self.cur_size = 0
        self.array = []
        self.pos = {}  # To store the pos of node in array

    def is_empty(self):
        """
        Conditional boolean method to determine if the priority queue is empty or not.

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.is_empty()
        True
        >>> priority_queue_test.insert((2, 'A'))
        >>> priority_queue_test.is_empty()
        False
        """
        return self.cur_size == 0

    def min_heapify(self, idx):
        """
        Sorts the queue array so that the minimum element is root.

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.cur_size = 3
        >>> priority_queue_test.pos = {'A': 0, 'B': 1, 'C': 2}
        >>> priority_queue_test.array = [(5, 'A'), (10, 'B'), (15, 'C')]
        >>> priority_queue_test.min_heapify(0)
        >>> priority_queue_test.array
        [(5, 'A'), (10, 'B'), (15, 'C')]
        >>> priority_queue_test.array = [(10, 'A'), (5, 'B'), (15, 'C')]
        >>> priority_queue_test.min_heapify(0)
        >>> priority_queue_test.array
        [(5, 'B'), (10, 'A'), (15, 'C')]
        >>> priority_queue_test.array = [(10, 'A'), (15, 'B'), (5, 'C')]
        >>> priority_queue_test.min_heapify(0)
        >>> priority_queue_test.array
        [(5, 'C'), (15, 'B'), (10, 'A')]
        >>> priority_queue_test.array = [(10, 'A'), (5, 'B')]
        >>> priority_queue_test.cur_size = len(priority_queue_test.array)
        >>> priority_queue_test.pos = {'A': 0, 'B': 1}
        >>> priority_queue_test.min_heapify(0)
        >>> priority_queue_test.array
        [(5, 'B'), (10, 'A')]
        """
        lc = self.left(idx)
        rc = self.right(idx)
        if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]:
            smallest = lc
        else:
            smallest = idx
        if rc < self.cur_size and self.array[rc][0] < self.array[smallest][0]:
            smallest = rc
        if smallest != idx:
            self.swap(idx, smallest)
            self.min_heapify(smallest)

    def insert(self, tup):
        """
        Inserts a node into the Priority Queue.

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.insert((10, 'A'))
        >>> priority_queue_test.array
        [(10, 'A')]
        >>> priority_queue_test.insert((15, 'B'))
        >>> priority_queue_test.array
        [(10, 'A'), (15, 'B')]
        >>> priority_queue_test.insert((5, 'C'))
        >>> priority_queue_test.array
        [(5, 'C'), (15, 'B'), (10, 'A')]
        """
        # Append with an infinite key, then decrease it so the new node
        # bubbles up to its correct position.
        self.pos[tup[1]] = self.cur_size
        self.cur_size += 1
        self.array.append((sys.maxsize, tup[1]))
        self.decrease_key((sys.maxsize, tup[1]), tup[0])

    def extract_min(self):
        """
        Removes and returns the min element at top of priority queue.

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.array = [(10, 'A'), (15, 'B')]
        >>> priority_queue_test.cur_size = len(priority_queue_test.array)
        >>> priority_queue_test.pos = {'A': 0, 'B': 1}
        >>> priority_queue_test.insert((5, 'C'))
        >>> priority_queue_test.extract_min()
        'C'
        >>> priority_queue_test.array[0]
        (10, 'A')
        """
        min_node = self.array[0][1]
        # Move the last element to the root and restore the heap property.
        self.array[0] = self.array[self.cur_size - 1]
        self.cur_size -= 1
        self.min_heapify(0)
        del self.pos[min_node]
        return min_node

    def left(self, i):
        """
        Returns the index of left child

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.left(0)
        1
        >>> priority_queue_test.left(1)
        3
        """
        return 2 * i + 1

    def right(self, i):
        """
        Returns the index of right child

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.right(0)
        2
        >>> priority_queue_test.right(1)
        4
        """
        return 2 * i + 2

    def par(self, i):
        """
        Returns the index of parent

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.par(1)
        0
        >>> priority_queue_test.par(2)
        0
        >>> priority_queue_test.par(4)
        1
        """
        # In a 0-indexed heap with children 2*i+1 / 2*i+2 the parent is
        # (i - 1) // 2.  The previous math.floor(i / 2) returned a
        # non-ancestor for even indices (e.g. par(2) == 1, par(4) == 2),
        # which could break the heap invariant inside decrease_key.
        return (i - 1) // 2

    def swap(self, i, j):
        """
        Swaps array elements at indices i and j, update the pos{}

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.array = [(10, 'A'), (15, 'B')]
        >>> priority_queue_test.cur_size = len(priority_queue_test.array)
        >>> priority_queue_test.pos = {'A': 0, 'B': 1}
        >>> priority_queue_test.swap(0, 1)
        >>> priority_queue_test.array
        [(15, 'B'), (10, 'A')]
        >>> priority_queue_test.pos
        {'A': 1, 'B': 0}
        """
        self.pos[self.array[i][1]] = j
        self.pos[self.array[j][1]] = i
        temp = self.array[i]
        self.array[i] = self.array[j]
        self.array[j] = temp

    def decrease_key(self, tup, new_d):
        """
        Decrease the key value for a given tuple, assuming the new_d is at most old_d.

        Examples:
        >>> priority_queue_test = PriorityQueue()
        >>> priority_queue_test.array = [(10, 'A'), (15, 'B')]
        >>> priority_queue_test.cur_size = len(priority_queue_test.array)
        >>> priority_queue_test.pos = {'A': 0, 'B': 1}
        >>> priority_queue_test.decrease_key((10, 'A'), 5)
        >>> priority_queue_test.array
        [(5, 'A'), (15, 'B')]
        """
        idx = self.pos[tup[1]]
        # assuming the new_d is at most old_d
        self.array[idx] = (new_d, tup[1])
        # Bubble the decreased key up while it is smaller than its parent.
        while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]:
            self.swap(idx, self.par(idx))
            idx = self.par(idx)


class Graph:
    def __init__(self, num):
        """
        Graph class constructor

        Examples:
        >>> graph_test = Graph(1)
        >>> graph_test.num_nodes
        1
        >>> graph_test.dist
        [0]
        >>> graph_test.par
        [-1]
        >>> graph_test.adjList
        {}
        """
        self.adjList = {}  # To store graph: u -> (v,w)
        self.num_nodes = num  # Number of nodes in graph
        # To store the distance from source vertex
        self.dist = [0] * self.num_nodes
        self.par = [-1] * self.num_nodes  # To store the path

    def add_edge(self, u, v, w):
        """
        Add edge going from node u to v and v to u with weight w: u (w)-> v, v (w) -> u

        Examples:
        >>> graph_test = Graph(1)
        >>> graph_test.add_edge(1, 2, 1)
        >>> graph_test.add_edge(2, 3, 2)
        >>> graph_test.adjList
        {1: [(2, 1)], 2: [(1, 1), (3, 2)], 3: [(2, 2)]}
        """
        # Check if u already in graph
        if u in self.adjList:
            self.adjList[u].append((v, w))
        else:
            self.adjList[u] = [(v, w)]

        # Assuming undirected graph
        if v in self.adjList:
            self.adjList[v].append((u, w))
        else:
            self.adjList[v] = [(u, w)]

    def show_graph(self):
        """
        Show the graph: u -> v(w)

        Examples:
        >>> graph_test = Graph(1)
        >>> graph_test.add_edge(1, 2, 1)
        >>> graph_test.show_graph()
        1 -> 2(1)
        2 -> 1(1)
        >>> graph_test.add_edge(2, 3, 2)
        >>> graph_test.show_graph()
        1 -> 2(1)
        2 -> 1(1) -> 3(2)
        3 -> 2(2)
        """
        for u in self.adjList:
            print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]))

    def dijkstra(self, src):
        """
        Dijkstra algorithm

        Examples:
        >>> graph_test = Graph(3)
        >>> graph_test.add_edge(0, 1, 2)
        >>> graph_test.add_edge(1, 2, 2)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 2
        Node 2 has distance: 4
        >>> graph_test.dist
        [0, 2, 4]

        >>> graph_test = Graph(2)
        >>> graph_test.add_edge(0, 1, 2)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 2
        >>> graph_test.dist
        [0, 2]

        >>> graph_test = Graph(3)
        >>> graph_test.add_edge(0, 1, 2)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 2
        Node 2 has distance: 0
        >>> graph_test.dist
        [0, 2, 0]

        >>> graph_test = Graph(3)
        >>> graph_test.add_edge(0, 1, 2)
        >>> graph_test.add_edge(1, 2, 2)
        >>> graph_test.add_edge(0, 2, 1)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 2
        Node 2 has distance: 1
        >>> graph_test.dist
        [0, 2, 1]

        >>> graph_test = Graph(4)
        >>> graph_test.add_edge(0, 1, 4)
        >>> graph_test.add_edge(1, 2, 2)
        >>> graph_test.add_edge(2, 3, 1)
        >>> graph_test.add_edge(0, 2, 3)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 4
        Node 2 has distance: 3
        Node 3 has distance: 4
        >>> graph_test.dist
        [0, 4, 3, 4]

        >>> graph_test = Graph(4)
        >>> graph_test.add_edge(0, 1, 4)
        >>> graph_test.add_edge(1, 2, 2)
        >>> graph_test.add_edge(2, 3, 1)
        >>> graph_test.add_edge(0, 2, 7)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 4
        Node 2 has distance: 6
        Node 3 has distance: 7
        >>> graph_test.dist
        [0, 4, 6, 7]
        """
        # Flush old junk values in par[]
        self.par = [-1] * self.num_nodes
        # src is the source node
        self.dist[src] = 0
        q = PriorityQueue()
        q.insert((0, src))  # (dist from src, node)
        for u in self.adjList:
            if u != src:
                self.dist[u] = sys.maxsize  # Infinity
                self.par[u] = -1

        while not q.is_empty():
            u = q.extract_min()  # Returns node with the min dist from source
            # Update the distance of all the neighbours of u and
            # if their prev dist was INFINITY then push them in Q
            for v, w in self.adjList[u]:
                new_dist = self.dist[u] + w
                if self.dist[v] > new_dist:
                    if self.dist[v] == sys.maxsize:
                        q.insert((new_dist, v))
                    else:
                        q.decrease_key((self.dist[v], v), new_dist)
                    self.dist[v] = new_dist
                    self.par[v] = u

        # Show the shortest distances from src
        self.show_distances(src)

    def show_distances(self, src):
        """
        Show the distances from src to all other nodes in a graph

        Examples:
        >>> graph_test = Graph(1)
        >>> graph_test.show_distances(0)
        Distance from node: 0
        Node 0 has distance: 0
        """
        print(f"Distance from node: {src}")
        for u in range(self.num_nodes):
            print(f"Node {u} has distance: {self.dist[u]}")

    def show_path(self, src, dest):
        """
        Shows the shortest path from src to dest.
        WARNING: Use it *after* calling dijkstra.

        Examples:
        >>> graph_test = Graph(4)
        >>> graph_test.add_edge(0, 1, 1)
        >>> graph_test.add_edge(1, 2, 2)
        >>> graph_test.add_edge(2, 3, 3)
        >>> graph_test.dijkstra(0)
        Distance from node: 0
        Node 0 has distance: 0
        Node 1 has distance: 1
        Node 2 has distance: 3
        Node 3 has distance: 6
        >>> graph_test.show_path(0, 3)  # doctest: +NORMALIZE_WHITESPACE
        ----Path to reach 3 from 0----
        0 -> 1 -> 2 -> 3
        Total cost of path:  6
        """
        path = []
        cost = 0
        temp = dest
        # Backtracking from dest to src
        while self.par[temp] != -1:
            path.append(temp)
            if temp != src:
                for v, w in self.adjList[temp]:
                    if v == self.par[temp]:
                        cost += w
                        break
            temp = self.par[temp]
        path.append(src)
        path.reverse()

        print(f"----Path to reach {dest} from {src}----")
        for u in path:
            print(f"{u}", end=" ")
            if u != dest:
                print("-> ", end="")

        print("\nTotal cost of path: ", cost)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    graph = Graph(9)
    graph.add_edge(0, 1, 4)
    graph.add_edge(0, 7, 8)
    graph.add_edge(1, 2, 8)
    graph.add_edge(1, 7, 11)
    graph.add_edge(2, 3, 7)
    graph.add_edge(2, 8, 2)
    graph.add_edge(2, 5, 4)
    graph.add_edge(3, 4, 9)
    graph.add_edge(3, 5, 14)
    graph.add_edge(4, 5, 10)
    graph.add_edge(5, 6, 2)
    graph.add_edge(6, 7, 1)
    graph.add_edge(6, 8, 6)
    graph.add_edge(7, 8, 7)
    graph.show_graph()
    graph.dijkstra(0)
    graph.show_path(0, 4)

# OUTPUT
# 0 -> 1(4) -> 7(8)
# 1 -> 0(4) -> 2(8) -> 7(11)
# 7 -> 0(8) -> 1(11) -> 6(1) -> 8(7)
# 2 -> 1(8) -> 3(7) -> 8(2) -> 5(4)
# 3 -> 2(7) -> 4(9) -> 5(14)
# 8 -> 2(2) -> 6(6) -> 7(7)
# 5 -> 2(4) -> 3(14) -> 4(10) -> 6(2)
# 4 -> 3(9) -> 5(10)
# 6 -> 5(2) -> 7(1) -> 8(6)
# Distance from node: 0
# Node 0 has distance: 0
# Node 1 has distance: 4
# Node 2 has distance: 12
# Node 3 has distance: 19
# Node 4 has distance: 21
# Node 5 has distance: 11
# Node 6 has distance: 9
# Node 7 has distance: 8
# Node 8 has distance: 14
# ----Path to reach 4 from 0----
# 0 -> 7 -> 6 -> 5 -> 4
# Total cost of path:  21
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/bellman_ford.py
graphs/bellman_ford.py
from __future__ import annotations


def print_distance(distance: list[float], src):
    """Print each vertex's shortest distance from ``src``."""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(
    graph: list[dict[str, int]], distance: list[float], edge_count: int
):
    """Return True if relaxing any edge would still improve a distance,
    which after V-1 Bellman-Ford passes proves a negative cycle exists."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    """
    Returns shortest paths from a vertex src to all
    other vertices.

    :param graph: edge list; each edge is a dict with "src", "dst", "weight"
    :param vertex_count: number of vertices
    :param edge_count: number of edges in ``graph``
    :param src: source vertex
    :raises Exception: if the graph contains a negative-weight cycle

    >>> edges = [(2, 1, -10), (3, 2, 3), (0, 3, 5), (0, 1, 4)]
    >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges]
    >>> bellman_ford(g, 4, 4, 0)
    [0.0, -2.0, 8.0, 5.0]
    >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges + [(1, 3, 5)]]
    >>> bellman_ford(g, 4, 5, 0)
    Traceback (most recent call last):
     ...
    Exception: Negative cycle found
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge V-1 times; afterwards all shortest paths are final
    # unless a negative cycle is reachable.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    # Bug fix: report distances from the user-chosen source vertex,
    # not the hard-coded vertex 0.
    print_distance(shortest_distance, source)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/random_graph_generator.py
graphs/random_graph_generator.py
""" * Author: Manuel Di Lullo (https://github.com/manueldilullo) * Description: Random graphs generator. Uses graphs represented with an adjacency list. URL: https://en.wikipedia.org/wiki/Random_graph """ import random def random_graph( vertices_number: int, probability: float, directed: bool = False ) -> dict: """ Generate a random graph @input: vertices_number (number of vertices), probability (probability that a generic edge (u,v) exists), directed (if True: graph will be a directed graph, otherwise it will be an undirected graph) @examples: >>> random.seed(1) >>> random_graph(4, 0.5) {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]} >>> random.seed(1) >>> random_graph(4, 0.5, True) {0: [1], 1: [2, 3], 2: [3], 3: []} """ graph: dict = {i: [] for i in range(vertices_number)} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(vertices_number) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(vertices_number): for j in range(i + 1, vertices_number): if random.random() < probability: graph[i].append(j) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(i) return graph def complete_graph(vertices_number: int) -> dict: """ Generate a complete graph with vertices_number vertices. @input: vertices_number (number of vertices), directed (False if the graph is undirected, True otherwise) @example: >>> complete_graph(3) {0: [1, 2], 1: [0, 2], 2: [0, 1]} """ return { i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number) } if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/check_bipatrite.py
graphs/check_bipatrite.py
from collections import defaultdict, deque


def is_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """
    Check if a graph is bipartite using depth-first search (DFS).

    Args:
        `graph`: Adjacency list representing the graph.

    Returns:
        ``True`` if bipartite, ``False`` otherwise.

    Checks if the graph can be divided into two sets of vertices, such that no two
    vertices within the same set are connected by an edge.

    Examples:

    >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 4]})
    True
    >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 1]})
    False
    >>> is_bipartite_dfs({})
    True
    >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
    True
    >>> is_bipartite_dfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]})
    False
    >>> is_bipartite_dfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]})
    True
    >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
    False
    >>> is_bipartite_dfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
    False
    >>> # FIXME: This test should fails with KeyError: 4.
    >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]})
    False
    >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]})
    False
    >>> is_bipartite_dfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]})
    True
    >>> is_bipartite_dfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
    True
    >>> # FIXME: This test should fails with
    >>> # TypeError: list indices must be integers or...
    >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]})
    True
    >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]})
    True
    >>> is_bipartite_dfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]})
    True
    """

    def depth_first_search(node: int, color: int) -> bool:
        """
        Perform Depth-First Search (DFS) on the graph starting from a node.

        Args:
            node: The current node being visited.
            color: The color assigned to the current node.

        Returns:
            True if the graph is bipartite starting from the current node,
            False otherwise.
        """
        # -1 is the "uncolored" sentinel supplied by the defaultdict below.
        if visited[node] == -1:
            visited[node] = color
            # Nodes that only appear as neighbors have no adjacency entry;
            # nothing further to explore from them.
            if node not in graph:
                return True
            for neighbor in graph[node]:
                # Neighbors must take the opposite color (1 - color).
                if not depth_first_search(neighbor, 1 - color):
                    return False
        # Already-colored node: bipartite only if its color matches the one
        # this path expects.
        return visited[node] == color

    visited: defaultdict[int, int] = defaultdict(lambda: -1)
    for node in graph:
        # Start a fresh 2-coloring from every still-uncolored component.
        if visited[node] == -1 and not depth_first_search(node, 0):
            return False
    return True


def is_bipartite_bfs(graph: dict[int, list[int]]) -> bool:
    """
    Check if a graph is bipartite using a breadth-first search (BFS).

    Args:
        `graph`: Adjacency list representing the graph.

    Returns:
        ``True`` if bipartite, ``False`` otherwise.

    Check if the graph can be divided into two sets of vertices, such that no two
    vertices within the same set are connected by an edge.

    Examples:

    >>> is_bipartite_bfs({0: [1, 2], 1: [0, 3], 2: [0, 4]})
    True
    >>> is_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
    False
    >>> is_bipartite_bfs({})
    True
    >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
    True
    >>> is_bipartite_bfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]})
    False
    >>> is_bipartite_bfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]})
    True
    >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
    False
    >>> is_bipartite_bfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
    False
    >>> # FIXME: This test should fails with KeyError: 4.
    >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]})
    False
    >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]})
    False
    >>> is_bipartite_bfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]})
    True
    >>> is_bipartite_bfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
    True
    >>> # FIXME: This test should fails with
    >>> # TypeError: list indices must be integers or...
    >>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]})
    True
    >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]})
    True
    >>> is_bipartite_bfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]})
    True
    """
    # -1 is the "uncolored" sentinel; colors are 0 and 1.
    visited: defaultdict[int, int] = defaultdict(lambda: -1)
    for node in graph:
        if visited[node] == -1:
            # BFS a fresh component, coloring level by level.
            queue: deque[int] = deque()
            queue.append(node)
            visited[node] = 0
            while queue:
                curr_node = queue.popleft()
                # Nodes without an adjacency entry contribute no edges.
                if curr_node not in graph:
                    continue
                for neighbor in graph[curr_node]:
                    if visited[neighbor] == -1:
                        visited[neighbor] = 1 - visited[curr_node]
                        queue.append(neighbor)
                    elif visited[neighbor] == visited[curr_node]:
                        # Edge between same-colored nodes: not bipartite.
                        return False
    return True


if __name__ == "__main__":
    import doctest

    result = doctest.testmod()
    if result.failed:
        print(f"{result.failed} test(s) failed.")
    else:
        print("All tests passed!")
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/breadth_first_search.py
graphs/breadth_first_search.py
#!/usr/bin/python """Author: OMKAR PATHAK""" from __future__ import annotations from queue import Queue class Graph: def __init__(self) -> None: self.vertices: dict[int, list[int]] = {} def print_graph(self) -> None: """ prints adjacency list representation of graaph >>> g = Graph() >>> g.print_graph() >>> g.add_edge(0, 1) >>> g.print_graph() 0 : 1 """ for i in self.vertices: print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]])) def add_edge(self, from_vertex: int, to_vertex: int) -> None: """ adding the edge between two vertices >>> g = Graph() >>> g.print_graph() >>> g.add_edge(0, 1) >>> g.print_graph() 0 : 1 """ if from_vertex in self.vertices: self.vertices[from_vertex].append(to_vertex) else: self.vertices[from_vertex] = [to_vertex] def bfs(self, start_vertex: int) -> set[int]: """ >>> g = Graph() >>> g.add_edge(0, 1) >>> g.add_edge(0, 1) >>> g.add_edge(0, 2) >>> g.add_edge(1, 2) >>> g.add_edge(2, 0) >>> g.add_edge(2, 3) >>> g.add_edge(3, 3) >>> sorted(g.bfs(2)) [0, 1, 2, 3] """ # initialize set for storing already visited vertices visited = set() # create a first in first out queue to store all the vertices for BFS queue: Queue = Queue() # mark the source node as visited and enqueue it visited.add(start_vertex) queue.put(start_vertex) while not queue.empty(): vertex = queue.get() # loop through all adjacent vertex and enqueue it if not yet visited for adjacent_vertex in self.vertices[vertex]: if adjacent_vertex not in visited: queue.put(adjacent_vertex) visited.add(adjacent_vertex) return visited if __name__ == "__main__": from doctest import testmod testmod(verbose=True) g = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() # 0 : 1 -> 2 # 1 : 2 # 2 : 0 -> 3 # 3 : 3 assert sorted(g.bfs(2)) == [0, 1, 2, 3]
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/minimum_spanning_tree_prims2.py
graphs/minimum_spanning_tree_prims2.py
"""
Prim's (also known as Jarník's) algorithm is a greedy algorithm that finds a minimum
spanning tree for a weighted undirected graph. This means it finds a subset of the
edges that forms a tree that includes every vertex, where the total weight of all the
edges in the tree is minimized. The algorithm operates by building this tree one
vertex at a time, from an arbitrary starting vertex, at each step adding the cheapest
possible connection from the tree to another vertex.
"""

from __future__ import annotations

from sys import maxsize
from typing import TypeVar

# NOTE(review): this module-level TypeVar is redundant with the PEP 695 `[T]`
# syntax used on the classes/function below, which introduces its own scoped T.
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """
    heap helper function get the position of the parent of the current node

    >>> get_parent_position(1)
    0
    >>> get_parent_position(2)
    0
    """
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """
    heap helper function get the position of the left child of the current node

    >>> get_child_left_position(0)
    1
    """
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """
    heap helper function get the position of the right child of the current node

    >>> get_child_right_position(0)
    2
    """
    return (2 * position) + 2


class MinPriorityQueue[T]:
    """
    Minimum Priority Queue Class

    Functions:
    is_empty: function to check if the priority queue is empty
    push: function to add an element with given priority to the queue
    extract_min: function to remove and return the element with lowest weight
                 (highest priority)
    update_key: function to update the weight of the given key
    _bubble_up: helper function to place a node at the proper position
                (upward movement)
    _bubble_down: helper function to place a node at the proper position
                  (downward movement)
    _swap_nodes: helper function to swap the nodes at the given positions

    >>> queue = MinPriorityQueue()
    >>> queue.push(1, 1000)
    >>> queue.push(2, 100)
    >>> queue.push(3, 4000)
    >>> queue.push(4, 3000)
    >>> queue.extract_min()
    2
    >>> queue.update_key(4, 50)
    >>> queue.extract_min()
    4
    >>> queue.extract_min()
    1
    >>> queue.extract_min()
    3
    """

    def __init__(self) -> None:
        # heap: array-backed binary min-heap of (element, weight) pairs
        self.heap: list[tuple[T, int]] = []
        # position_map: element -> index in self.heap; kept in sync by
        # _swap_nodes so update_key can find any element in O(1)
        self.position_map: dict[T, int] = {}
        # elements: current number of entries stored in the heap
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            # move the last leaf to the root so the pop below removes the minimum
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            # restore the heap invariant from the new root downwards
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            # the new weight may violate the heap invariant in either
            # direction, so compare against the parent to pick one
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            # root has no parent; it can only need to sink
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            # both children exist: if the right child is the smaller of the two
            # and smaller than the node, swap with it and keep sinking
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        # otherwise fall through to the left child (also covers the
        # left-is-smaller case when both children exist)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions and keep position_map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted[T]:
    """
    Graph Undirected Weighted Class

    Functions:
    add_node: function to add a node in the graph
    add_edge: function to add an edge between 2 nodes in the graph
    """

    def __init__(self) -> None:
        # connections: adjacency map, node -> {neighbour: edge weight}
        self.connections: dict[T, dict[T, int]] = {}
        # nodes: count of distinct nodes added so far
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph (stored in both directions)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo[T](
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """
    Build a spanning tree of `graph`, returning (dist, parent) where parent
    maps each node to its predecessor in the tree (None for the start node).

    NOTE(review): the relaxation below uses ``dist[node] + w`` (a shortest-
    path-tree / Dijkstra-style key) rather than the plain edge weight ``w``
    used by textbook Prim. The doctest passes with this behavior, but the
    resulting tree may not be a true MST for every graph — confirm intent.

    >>> graph = GraphUndirectedWeighted()
    >>> graph.add_edge("a", "b", 3)
    >>> graph.add_edge("b", "c", 10)
    >>> graph.add_edge("c", "d", 5)
    >>> graph.add_edge("a", "c", 15)
    >>> graph.add_edge("b", "d", 100)
    >>> dist, parent = prims_algo(graph)
    >>> abs(dist["a"] - dist["b"])
    3
    >>> abs(dist["d"] - dist["b"])
    15
    >>> abs(dist["a"] - dist["c"])
    13
    """
    # prim's algorithm for minimum spanning tree
    # every node starts "infinitely" far away and with no parent
    dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize)
    parent: dict[T, T | None] = dict.fromkeys(graph.connections)

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization: the first extracted node becomes the tree's root
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm: repeatedly pull the closest fringe node and
    # relax its neighbours
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/__init__.py
graphs/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/eulerian_path_and_circuit_for_undirected_graph.py
graphs/eulerian_path_and_circuit_for_undirected_graph.py
# Eulerian Path is a path in graph that visits every edge exactly once.
# Eulerian Circuit is an Eulerian Path which starts and ends on the same
# vertex.
# time complexity is O(V+E)
# space: O(max_node**2) for the visited-edge adjacency matrix


def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal from ``u`` that consumes each undirected edge at
    most once and returns the list of vertices in visit order."""
    path = [*(path or []), u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # mark the edge used in both directions before descending
            visited_edge[u][v] = True
            visited_edge[v][u] = True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its count of odd-degree vertices.

    Returns (1, -1) when every vertex has even degree (Euler circuit),
    (2, odd_node) when exactly two vertices are odd (Euler path), and
    (3, odd_node) otherwise. ``odd_node`` is the last odd vertex seen.
    """
    odd_node = -1
    odd_degree_nodes = 0
    for node in range(max_node):
        if node not in graph:
            continue
        if len(graph[node]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = node
    if odd_degree_nodes == 0:
        return 1, odd_node
    return (2, odd_node) if odd_degree_nodes == 2 else (3, odd_node)


def check_euler(graph, max_node):
    """Print whether the graph has an Euler path/cycle and, if so, one traversal."""
    size = max_node + 1
    visited_edge = [[False] * size for _ in range(size)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # an Euler path must start at one of the two odd-degree vertices
        start_node = odd_node
        print("graph has a Euler path")
    elif check == 1:
        print("graph has a Euler cycle")
    print(dfs(start_node, graph, visited_edge))


def main():
    """Run the checker against a handful of sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {1: [], 2: []}  # every vertex has degree zero
    max_node = 10
    for graph in (g1, g2, g3, g4, g5):
        check_euler(graph, max_node)


if __name__ == "__main__":
    main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/dijkstra.py
graphs/dijkstra.py
"""
pseudo-code

DIJKSTRA(graph G, start vertex s, destination vertex d):

//all nodes initially unexplored

1 -  let H = min heap data structure, initialized with 0 and s [here 0 indicates
     the distance from start vertex s]
2 -  while H is non-empty:
3 -    remove the first node and cost of H, call it U and cost
4 -    if U has been previously explored:
5 -      go to the while loop, line 2 //Once a node is explored there is no need
         to make it again
6 -    mark U as explored
7 -    if U is d:
8 -      return cost // total cost from start to destination vertex
9 -    for each edge(U, V): c=cost of edge(U,V) // for V in graph[U]
10 -     if V explored:
11 -       go to next V in line 9
12 -     total_cost = cost + c
13 -     add (total_cost,V) to H

You can think at cost as a distance where Dijkstra finds the shortest distance
between vertices s and v in a graph G. The use of a min heap as H guarantees
that if a vertex has already been explored there will be no other path with
shortest distance, that happens because heapq.heappop will always return the
next vertex with the shortest distance, considering that the heap stores not
only the distance between previous vertex and current vertex but the entire
distance between each vertex that makes up the path from start vertex to
target vertex.
"""

import heapq


def dijkstra(graph, start, end):
    """Return the cost of the shortest path between vertices start and end.

    Returns -1 when ``end`` is unreachable from ``start``.

    >>> dijkstra(G, "E", "C")
    6
    >>> dijkstra(G2, "E", "F")
    3
    >>> dijkstra(G3, "E", "F")
    3
    """
    heap = [(0, start)]  # (cost from start node, end node)
    visited = set()
    while heap:
        (cost, u) = heapq.heappop(heap)
        if u in visited:
            # a cheaper path to u was already finalized; skip the stale entry
            continue
        visited.add(u)
        if u == end:
            return cost
        for v, c in graph[u]:
            if v in visited:
                continue
            next_item = cost + c
            heapq.heappush(heap, (next_item, v))
    # heap exhausted without reaching end
    return -1


G = {
    "A": [["B", 2], ["C", 5]],
    "B": [["A", 2], ["D", 3], ["E", 1], ["F", 1]],
    "C": [["A", 5], ["F", 3]],
    "D": [["B", 3]],
    "E": [["B", 4], ["F", 3]],
    "F": [["C", 3], ["E", 3]],
}

r"""
Layout of G2:

E -- 1 --> B -- 1 --> C -- 1 --> D -- 1 --> F
 \                                         /\
  \                                        ||
    ----------------- 3 --------------------
"""
G2 = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["F", 3]],
    "F": [],
}

r"""
Layout of G3:

E -- 1 --> B -- 1 --> C -- 1 --> D -- 1 --> F
 \                                         /\
  \                                        ||
    -------- 2 ---------> G ------- 1 ------
"""
G3 = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}

if __name__ == "__main__":
    # The demo calls previously ran at import time; keeping them under the
    # main guard makes importing this module side-effect free.
    print(dijkstra(G, "E", "C"))  # E -- 3 --> F -- 3 --> C == 6
    print(dijkstra(G2, "E", "F"))  # E -- 3 --> F == 3
    print(dijkstra(G3, "E", "F"))  # E -- 2 --> G -- 1 --> F == 3

    import doctest

    doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/boruvka.py
graphs/boruvka.py
"""Borůvka's algorithm.

Determines the minimum spanning tree (MST) of a graph using the Borůvka's
algorithm. Borůvka's algorithm is a greedy algorithm for finding a minimum
spanning tree in a connected graph, or a minimum spanning forest if a graph
that is not connected.

The time complexity of this algorithm is O(ELogV), where E represents the
number of edges, while V represents the number of nodes.
O(number_of_edges Log number_of_nodes)

The space complexity of this algorithm is O(V + E), since we have to keep a
couple of lists whose sizes are equal to the number of nodes, as well as keep
all the edges of a graph inside of the data structure itself.

Borůvka's algorithm gives us pretty much the same result as other MST
Algorithms - they all find the minimum spanning tree, and the time complexity
is approximately the same.

One advantage that Borůvka's algorithm has compared to the alternatives is
that it doesn't need to presort the edges or maintain a priority queue in
order to find the minimum spanning tree.
Even though that doesn't help its complexity, since it still passes the edges
logE times, it is a bit simpler to code.

Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm
"""

from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - the dictionary which stores the index of the
            component which a node belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root (representative) of the component u_node belongs to,
        following parent links recursively until a self-parented node is found.

        NOTE(review): the original docstrings of find_component and
        set_component were swapped; corrected here to match the code.
        """
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the component roots: if u_node is not its own root,
        re-points EVERY node in the structure directly at its root
        (full path compression over the whole map)."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Union finds the roots of components for two nodes, compares the
        components in terms of size, and attaches the smaller one to the larger
        one to form single component"""
        # NOTE(review): callers pass component roots for u_node/v_node. The two
        # branches are asymmetric (the first assigns v_node directly, the second
        # goes through find_component) — behavior matches the doctest below, but
        # confirm the asymmetry is intentional.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find MST, printing each edge added
        and the final total weight."""

        # Initialize additional lists required to algorithm.
        component_size = []
        mst_weight = 0

        # minimum_weight_edge[c] is either -1 (no candidate yet) or the
        # cheapest [u, v, w] edge leaving component c found this round
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # pass 1: record, per component, the cheapest edge leaving it
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    """If the current minimum weight edge of component u doesn't
                    exist (is -1), or if it's greater than the edge we're
                    observing right now, we will assign the value of the edge
                    we're observing to it.

                    If the current minimum weight edge of component v doesn't
                    exist (is -1), or if it's greater than the edge we're
                    observing right now, we will assign the value of the edge
                    we're observing to it"""

                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # pass 2: add each candidate edge whose endpoints are still in
            # different components, merging those components
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            # reset candidates for the next round
            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """
    >>> g = Graph(8)
    >>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
    ...               (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15),
    ...               (6, 7, 4)):
    ...     g.add_edge(*u_v_w)
    >>> g.boruvka()
    Added edge [0 - 3]
    Added weight: 5
    <BLANKLINE>
    Added edge [0 - 1]
    Added weight: 10
    <BLANKLINE>
    Added edge [2 - 3]
    Added weight: 4
    <BLANKLINE>
    Added edge [4 - 7]
    Added weight: 5
    <BLANKLINE>
    Added edge [4 - 5]
    Added weight: 10
    <BLANKLINE>
    Added edge [6 - 7]
    Added weight: 4
    <BLANKLINE>
    Added edge [3 - 4]
    Added weight: 8
    <BLANKLINE>
    The total weight of the minimal spanning tree is: 46
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/minimum_spanning_tree_kruskal2.py
graphs/minimum_spanning_tree_kruskal2.py
from __future__ import annotations from typing import TypeVar T = TypeVar("T") class DisjointSetTreeNode[T]: # Disjoint Set Node to store the parent and rank def __init__(self, data: T) -> None: self.data = data self.parent = self self.rank = 0 class DisjointSetTree[T]: # Disjoint Set DataStructure def __init__(self) -> None: # map from node name to the node object self.map: dict[T, DisjointSetTreeNode[T]] = {} def make_set(self, data: T) -> None: # create a new set with x as its member self.map[data] = DisjointSetTreeNode(data) def find_set(self, data: T) -> DisjointSetTreeNode[T]: # find the set x belongs to (with path-compression) elem_ref = self.map[data] if elem_ref != elem_ref.parent: elem_ref.parent = self.find_set(elem_ref.parent.data) return elem_ref.parent def link( self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T] ) -> None: # helper function for union operation if node1.rank > node2.rank: node2.parent = node1 else: node1.parent = node2 if node1.rank == node2.rank: node2.rank += 1 def union(self, data1: T, data2: T) -> None: # merge 2 disjoint sets self.link(self.find_set(data1), self.find_set(data2)) class GraphUndirectedWeighted[T]: def __init__(self) -> None: # connections: map from the node to the neighbouring nodes (with weights) self.connections: dict[T, dict[T, int]] = {} def add_node(self, node: T) -> None: # add a node ONLY if its not present in the graph if node not in self.connections: self.connections[node] = {} def add_edge(self, node1: T, node2: T, weight: int) -> None: # add an edge with the given weight self.add_node(node1) self.add_node(node2) self.connections[node1][node2] = weight self.connections[node2][node1] = weight def kruskal(self) -> GraphUndirectedWeighted[T]: # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph """ Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm Example: >>> g1 = GraphUndirectedWeighted[int]() >>> g1.add_edge(1, 2, 1) >>> g1.add_edge(2, 3, 2) >>> 
g1.add_edge(3, 4, 1) >>> g1.add_edge(3, 5, 100) # Removed in MST >>> g1.add_edge(4, 5, 5) >>> assert 5 in g1.connections[3] >>> mst = g1.kruskal() >>> assert 5 not in mst.connections[3] >>> g2 = GraphUndirectedWeighted[str]() >>> g2.add_edge('A', 'B', 1) >>> g2.add_edge('B', 'C', 2) >>> g2.add_edge('C', 'D', 1) >>> g2.add_edge('C', 'E', 100) # Removed in MST >>> g2.add_edge('D', 'E', 5) >>> assert 'E' in g2.connections["C"] >>> mst = g2.kruskal() >>> assert 'E' not in mst.connections['C'] """ # getting the edges in ascending order of weights edges = [] seen = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start)) edges.append((start, end, self.connections[start][end])) edges.sort(key=lambda x: x[2]) # creating the disjoint set disjoint_set = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(node) # MST generation num_edges = 0 index = 0 graph = GraphUndirectedWeighted[T]() while num_edges < len(self.connections) - 1: u, v, w = edges[index] index += 1 parent_u = disjoint_set.find_set(u) parent_v = disjoint_set.find_set(v) if parent_u != parent_v: num_edges += 1 graph.add_edge(u, v, w) disjoint_set.union(u, v) return graph
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/directed_and_undirected_weighted_graph.py
graphs/directed_and_undirected_weighted_graph.py
from collections import deque
from math import floor
from random import random
from time import time

# the default weight is 1 if not assigned but all the implementation is weighted
# Edges are stored as [weight, destination] pairs in adjacency lists.


class DirectedGraph:
    """Directed weighted graph backed by adjacency lists of [weight, vertex] pairs."""

    def __init__(self):
        # vertex -> list of [weight, destination] pairs
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        """Add edge u -> v with weight ``w``; exact duplicates are ignored."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # ensure the destination exists as a key so later traversals don't KeyError
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return a list of every vertex currently in the graph."""
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        """Remove u -> v edges regardless of weight.

        NOTE(review): removes from the list while iterating it; with several
        parallel u -> v edges (different weights) some may be skipped — verify.
        """
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first inserted vertex).

        Returns the list of visited vertices, returning early as soon as ``d``
        is appended. The sentinels -2 ("no start") and -1 ("no destination")
        assume those values are not real vertex labels.
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        # ``ss`` tracks the next vertex to descend into; if it is still equal
        # to ``s`` after the scan below, every child of ``s`` was visited.
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want and if you leave it or pass -1 to the function
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from ``s`` (default: first inserted vertex)."""
        d = deque()
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Count edges arriving at ``u`` (scans every adjacency list)."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Count edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """Return vertices in DFS post-order starting from ``s``.

        NOTE(review): despite the name this only covers vertices reachable
        from ``s`` and does not detect cycles — confirm against callers.
        """
        stack = []
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                # vertex is fully explored: emit it (post-order)
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return vertices that appear to lie on a cycle reachable from the
        first vertex.

        NOTE(review): heuristic back-edge detection driven by ``parent`` /
        ``indirect_parents`` / ``on_the_way_back`` bookkeeping; verify results
        on graphs with multiple overlapping cycles.
        """
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # walk back down the stack collecting the cycle members
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if a cycle is detected from the first vertex, else False.

        NOTE(review): unlike ``cycle_nodes`` the scan loop below never
        decrements ``len_stack_minus_one``; any back-edge candidate that is not
        at the top of the stack returns True immediately — likely a bug, verify.
        """
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Return the wall-clock seconds one ``dfs(s, e)`` call takes."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Return the wall-clock seconds one ``bfs(s)`` call takes."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Undirected weighted graph: every edge is mirrored in both adjacency lists."""

    def __init__(self):
        # vertex -> list of [weight, neighbour] pairs
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        """Add an undirected edge u <-> v with weight ``w`` (duplicates ignored)."""
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        """Remove the u <-> v edge from both adjacency lists.

        NOTE(review): same remove-while-iterating concern as
        ``DirectedGraph.remove_pair`` — verify with parallel edges.
        """
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        """Iterative DFS from ``s``; returns visit order, stopping early at ``d``.

        Same traversal scheme as ``DirectedGraph.dfs`` (``ss`` holds the next
        vertex to descend into).
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want and if you leave it or pass -1 to the function
    # the count will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        """Populate the graph with ``c`` vertices (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from ``s`` (default: first inserted vertex)."""
        d = deque()
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Degree of ``u`` (undirected, so in-degree == out-degree)."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return vertices that appear to lie on a cycle; same heuristic and
        caveats as ``DirectedGraph.cycle_nodes``."""
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # walk back down the stack collecting the cycle members
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if a cycle is detected; same non-decrementing scan-loop
        concern as ``DirectedGraph.has_cycle`` (NOTE(review): verify)."""
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return a list of every vertex currently in the graph."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Return the wall-clock seconds one ``dfs(s, e)`` call takes."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Return the wall-clock seconds one ``bfs(s)`` call takes."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/bi_directional_dijkstra.py
graphs/bi_directional_dijkstra.py
"""
Bi-directional Dijkstra's algorithm.

A bi-directional approach is an efficient and
less time consuming optimization for Dijkstra's
searching algorithm

Reference: https://en.wikipedia.org/wiki/Bidirectional_search
"""

# Author: Swayam Singh (https://github.com/practice404)


from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """
    Relax every edge leaving ``v`` for one search direction and return the
    (possibly improved) length of the best complete path found so far.

    ``cst_fwd``/``parent``/``queue`` belong to the direction being expanded;
    ``cst_bwd``/``visited_backward`` belong to the opposite direction.
    """
    for neighbour, edge_cost in graph[v]:
        if neighbour in visited_forward:
            continue
        candidate_cost = cst_fwd[v] + edge_cost
        # standard Dijkstra relaxation for this direction
        if candidate_cost < cst_fwd.get(neighbour, np.inf):
            queue.put((candidate_cost, neighbour))
            cst_fwd[neighbour] = candidate_cost
            parent[neighbour] = v
        # if the opposite search already settled this node, the frontiers meet
        # here and may form a shorter complete source->destination path
        if neighbour in visited_backward:
            shortest_distance = min(
                shortest_distance, candidate_cost + cst_bwd[neighbour]
            )
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    Warnings:
        If the destination is not reachable, function returns -1

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    if source == destination:
        return 0

    visited_forward: set = set()
    visited_backward: set = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    shortest_distance = np.inf

    # alternately settle one node on each frontier until the frontiers cross
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # termination: the two settled frontiers can no longer improve the path
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    return -1 if shortest_distance == np.inf else shortest_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/strongly_connected_components.py
graphs/strongly_connected_components.py
"""
https://en.wikipedia.org/wiki/Strongly_connected_component

Finding strongly connected components in directed graph
"""

test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(
    graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """
    Depth-first post-order traversal of ``graph`` starting at ``vert``.

    ``visited`` is updated in place; the returned list ends with ``vert``.

    >>> topology_sort(test_graph_1, 0, 5 * [False])
    [1, 2, 4, 3, 0]
    >>> topology_sort(test_graph_2, 0, 6 * [False])
    [2, 1, 5, 4, 3, 0]
    """
    visited[vert] = True
    result: list[int] = []
    for successor in graph[vert]:
        if not visited[successor]:
            result.extend(topology_sort(graph, successor, visited))
    # emit the vertex after all its descendants (post-order)
    result.append(vert)
    return result


def find_components(
    reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
    """
    Collect every vertex reachable from ``vert`` in the reversed graph —
    one strongly connected component of the original graph.

    >>> find_components({0: [1], 1: [2], 2: [0]}, 0, 5 * [False])
    [0, 1, 2]
    >>> find_components({0: [2], 1: [0], 2: [0, 1]}, 0, 6 * [False])
    [0, 2, 1]
    """
    visited[vert] = True
    component = [vert]
    for successor in reversed_graph[vert]:
        if not visited[successor]:
            component.extend(find_components(reversed_graph, successor, visited))
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """
    Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed
    graph in reverse finishing order; each second-pass tree is one component.

    >>> strongly_connected_components(test_graph_1)
    [[0, 1, 2], [3], [4]]
    >>> strongly_connected_components(test_graph_2)
    [[0, 2, 1], [3, 5, 4]]
    """
    n = len(graph)

    # build the graph with every edge reversed
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(n)}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    # first pass: record DFS finishing order over all vertices
    visited = [False] * n
    order: list[int] = []
    for vert in range(n):
        if not visited[vert]:
            order.extend(topology_sort(graph, vert, visited))

    # second pass: peel components off in reverse finishing order
    visited = [False] * n
    components_list: list[list[int]] = []
    for vert in reversed(order):
        if not visited[vert]:
            components_list.append(find_components(reversed_graph, vert, visited))
    return components_list
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/minimum_spanning_tree_boruvka.py
graphs/minimum_spanning_tree_boruvka.py
class Graph: """ Data structure to store graphs (based on adjacency lists) """ def __init__(self): self.num_vertices = 0 self.num_edges = 0 self.adjacency = {} def add_vertex(self, vertex): """ Adds a vertex to the graph """ if vertex not in self.adjacency: self.adjacency[vertex] = {} self.num_vertices += 1 def add_edge(self, head, tail, weight): """ Adds an edge to the graph """ self.add_vertex(head) self.add_vertex(tail) if head == tail: return self.adjacency[head][tail] = weight self.adjacency[tail][head] = weight def distinct_weight(self): """ For Boruvks's algorithm the weights should be distinct Converts the weights to be distinct """ edges = self.get_edges() for edge in edges: head, tail, weight = edge edges.remove((tail, head, weight)) for i in range(len(edges)): edges[i] = list(edges[i]) edges.sort(key=lambda e: e[2]) for i in range(len(edges) - 1): if edges[i][2] >= edges[i + 1][2]: edges[i + 1][2] = edges[i][2] + 1 for edge in edges: head, tail, weight = edge self.adjacency[head][tail] = weight self.adjacency[tail][head] = weight def __str__(self): """ Returns string representation of the graph """ string = "" for tail in self.adjacency: for head in self.adjacency[tail]: weight = self.adjacency[head][tail] string += f"{head} -> {tail} == {weight}\n" return string.rstrip("\n") def get_edges(self): """ Returna all edges in the graph """ output = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail])) return output def get_vertices(self): """ Returns all vertices in the graph """ return self.adjacency.keys() @staticmethod def build(vertices=None, edges=None): """ Builds a graph from the given set of vertices and edges """ g = Graph() if vertices is None: vertices = [] if edges is None: edge = [] for vertex in vertices: g.add_vertex(vertex) for edge in edges: g.add_edge(*edge) return g class UnionFind: """ Disjoint set Union and Find for Boruvka's algorithm """ def __init__(self): self.parent = 
{} self.rank = {} def __len__(self): return len(self.parent) def make_set(self, item): if item in self.parent: return self.find(item) self.parent[item] = item self.rank[item] = 0 return item def find(self, item): if item not in self.parent: return self.make_set(item) if item != self.parent[item]: self.parent[item] = self.find(self.parent[item]) return self.parent[item] def union(self, item1, item2): root1 = self.find(item1) root2 = self.find(item2) if root1 == root2: return root1 if self.rank[root1] > self.rank[root2]: self.parent[root2] = root1 return root1 if self.rank[root1] < self.rank[root2]: self.parent[root1] = root2 return root2 if self.rank[root1] == self.rank[root2]: self.rank[root1] += 1 self.parent[root2] = root1 return root1 return None @staticmethod def boruvka_mst(graph): """ Implementation of Boruvka's algorithm >>> g = Graph() >>> g = Graph.build([0, 1, 2, 3], [[0, 1, 1], [0, 2, 1],[2, 3, 1]]) >>> g.distinct_weight() >>> bg = Graph.boruvka_mst(g) >>> print(bg) 1 -> 0 == 1 2 -> 0 == 2 0 -> 1 == 1 0 -> 2 == 2 3 -> 2 == 3 2 -> 3 == 3 """ num_components = graph.num_vertices union_find = Graph.UnionFind() mst_edges = [] while num_components > 1: cheap_edge = {} for vertex in graph.get_vertices(): cheap_edge[vertex] = -1 edges = graph.get_edges() for edge in edges: head, tail, weight = edge edges.remove((tail, head, weight)) for edge in edges: head, tail, weight = edge set1 = union_find.find(head) set2 = union_find.find(tail) if set1 != set2: if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight: cheap_edge[set1] = [head, tail, weight] if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight: cheap_edge[set2] = [head, tail, weight] for head_tail_weight in cheap_edge.values(): if head_tail_weight != -1: head, tail, weight = head_tail_weight if union_find.find(head) != union_find.find(tail): union_find.union(head, tail) mst_edges.append(head_tail_weight) num_components = num_components - 1 mst = Graph.build(edges=mst_edges) return mst
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/graph_adjacency_list.py
graphs/graph_adjacency_list.py
#!/usr/bin/env python3 """ Author: Vikram Nithyanandam Description: The following implementation is a robust unweighted Graph data structure implemented using an adjacency list. This vertices and edges of this graph can be effectively initialized and modified while storing your chosen generic value in each vertex. Adjacency List: https://en.wikipedia.org/wiki/Adjacency_list Potential Future Ideas: - Add a flag to set edge weights on and set edge weights - Make edge weights and vertex values customizable to store whatever the client wants - Support multigraph functionality if the client wants it """ from __future__ import annotations import random import unittest from pprint import pformat from typing import TypeVar import pytest T = TypeVar("T") class GraphAdjacencyList[T]: def __init__( self, vertices: list[T], edges: list[list[T]], directed: bool = True ) -> None: """ Parameters: - vertices: (list[T]) The list of vertex names the client wants to pass in. Default is empty. - edges: (list[list[T]]) The list of edges the client wants to pass in. Each edge is a 2-element list. Default is empty. - directed: (bool) Indicates if graph is directed or undirected. Default is True. """ self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T self.directed = directed # Falsey checks edges = edges or [] vertices = vertices or [] for vertex in vertices: self.add_vertex(vertex) for edge in edges: if len(edge) != 2: msg = f"Invalid input: {edge} is the wrong length." raise ValueError(msg) self.add_edge(edge[0], edge[1]) def add_vertex(self, vertex: T) -> None: """ Adds a vertex to the graph. If the given vertex already exists, a ValueError will be thrown. >>> g = GraphAdjacencyList(vertices=[], edges=[], directed=False) >>> g.add_vertex("A") >>> g.adj_list {'A': []} >>> g.add_vertex("A") Traceback (most recent call last): ... ValueError: Incorrect input: A is already in the graph. 
""" if self.contains_vertex(vertex): msg = f"Incorrect input: {vertex} is already in the graph." raise ValueError(msg) self.adj_list[vertex] = [] def add_edge(self, source_vertex: T, destination_vertex: T) -> None: """ Creates an edge from source vertex to destination vertex. If any given vertex doesn't exist or the edge already exists, a ValueError will be thrown. """ if not ( self.contains_vertex(source_vertex) and self.contains_vertex(destination_vertex) ): msg = ( f"Incorrect input: Either {source_vertex} or " f"{destination_vertex} does not exist" ) raise ValueError(msg) if self.contains_edge(source_vertex, destination_vertex): msg = ( "Incorrect input: The edge already exists between " f"{source_vertex} and {destination_vertex}" ) raise ValueError(msg) # add the destination vertex to the list associated with the source vertex # and vice versa if not directed self.adj_list[source_vertex].append(destination_vertex) if not self.directed: self.adj_list[destination_vertex].append(source_vertex) def remove_vertex(self, vertex: T) -> None: """ Removes the given vertex from the graph and deletes all incoming and outgoing edges from the given vertex as well. If the given vertex does not exist, a ValueError will be thrown. """ if not self.contains_vertex(vertex): msg = f"Incorrect input: {vertex} does not exist in this graph." 
raise ValueError(msg) if not self.directed: # If not directed, find all neighboring vertices and delete all references # of edges connecting to the given vertex for neighbor in self.adj_list[vertex]: self.adj_list[neighbor].remove(vertex) else: # If directed, search all neighbors of all vertices and delete all # references of edges connecting to the given vertex for edge_list in self.adj_list.values(): if vertex in edge_list: edge_list.remove(vertex) # Finally, delete the given vertex and all of its outgoing edge references self.adj_list.pop(vertex) def remove_edge(self, source_vertex: T, destination_vertex: T) -> None: """ Removes the edge between the two vertices. If any given vertex doesn't exist or the edge does not exist, a ValueError will be thrown. """ if not ( self.contains_vertex(source_vertex) and self.contains_vertex(destination_vertex) ): msg = ( f"Incorrect input: Either {source_vertex} or " f"{destination_vertex} does not exist" ) raise ValueError(msg) if not self.contains_edge(source_vertex, destination_vertex): msg = ( "Incorrect input: The edge does NOT exist between " f"{source_vertex} and {destination_vertex}" ) raise ValueError(msg) # remove the destination vertex from the list associated with the source # vertex and vice versa if not directed self.adj_list[source_vertex].remove(destination_vertex) if not self.directed: self.adj_list[destination_vertex].remove(source_vertex) def contains_vertex(self, vertex: T) -> bool: """ Returns True if the graph contains the vertex, False otherwise. """ return vertex in self.adj_list def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool: """ Returns True if the graph contains the edge from the source_vertex to the destination_vertex, False otherwise. If any given vertex doesn't exist, a ValueError will be thrown. 
""" if not ( self.contains_vertex(source_vertex) and self.contains_vertex(destination_vertex) ): msg = ( f"Incorrect input: Either {source_vertex} " f"or {destination_vertex} does not exist." ) raise ValueError(msg) return destination_vertex in self.adj_list[source_vertex] def clear_graph(self) -> None: """ Clears all vertices and edges. """ self.adj_list = {} def __repr__(self) -> str: return pformat(self.adj_list) class TestGraphAdjacencyList(unittest.TestCase): def __assert_graph_edge_exists_check( self, undirected_graph: GraphAdjacencyList, directed_graph: GraphAdjacencyList, edge: list[int], ) -> None: assert undirected_graph.contains_edge(edge[0], edge[1]) assert undirected_graph.contains_edge(edge[1], edge[0]) assert directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_edge_does_not_exist_check( self, undirected_graph: GraphAdjacencyList, directed_graph: GraphAdjacencyList, edge: list[int], ) -> None: assert not undirected_graph.contains_edge(edge[0], edge[1]) assert not undirected_graph.contains_edge(edge[1], edge[0]) assert not directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_vertex_exists_check( self, undirected_graph: GraphAdjacencyList, directed_graph: GraphAdjacencyList, vertex: int, ) -> None: assert undirected_graph.contains_vertex(vertex) assert directed_graph.contains_vertex(vertex) def __assert_graph_vertex_does_not_exist_check( self, undirected_graph: GraphAdjacencyList, directed_graph: GraphAdjacencyList, vertex: int, ) -> None: assert not undirected_graph.contains_vertex(vertex) assert not directed_graph.contains_vertex(vertex) def __generate_random_edges( self, vertices: list[int], edge_pick_count: int ) -> list[list[int]]: assert edge_pick_count <= len(vertices) random_source_vertices: list[int] = random.sample( vertices[0 : int(len(vertices) / 2)], edge_pick_count ) random_destination_vertices: list[int] = random.sample( vertices[int(len(vertices) / 2) :], edge_pick_count ) random_edges: list[list[int]] = [] for 
source in random_source_vertices: for dest in random_destination_vertices: random_edges.append([source, dest]) return random_edges def __generate_graphs( self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int ) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]: if max_val - min_val + 1 < vertex_count: raise ValueError( "Will result in duplicate vertices. Either increase range " "between min_val and max_val or decrease vertex count." ) # generate graph input random_vertices: list[int] = random.sample( range(min_val, max_val + 1), vertex_count ) random_edges: list[list[int]] = self.__generate_random_edges( random_vertices, edge_pick_count ) # build graphs undirected_graph = GraphAdjacencyList( vertices=random_vertices, edges=random_edges, directed=False ) directed_graph = GraphAdjacencyList( vertices=random_vertices, edges=random_edges, directed=True ) return undirected_graph, directed_graph, random_vertices, random_edges def test_init_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) # test graph initialization with vertices and edges for num in random_vertices: self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, num ) for edge in random_edges: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) assert not undirected_graph.directed assert directed_graph.directed def test_contains_vertex(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) # Build graphs WITHOUT edges undirected_graph = GraphAdjacencyList( vertices=random_vertices, edges=[], directed=False ) directed_graph = GraphAdjacencyList( vertices=random_vertices, edges=[], directed=True ) # Test contains_vertex for num in range(101): assert (num in random_vertices) == undirected_graph.contains_vertex(num) assert (num in random_vertices) == directed_graph.contains_vertex(num) def test_add_vertices(self) -> None: 
random_vertices: list[int] = random.sample(range(101), 20) # build empty graphs undirected_graph: GraphAdjacencyList = GraphAdjacencyList( vertices=[], edges=[], directed=False ) directed_graph: GraphAdjacencyList = GraphAdjacencyList( vertices=[], edges=[], directed=True ) # run add_vertex for num in random_vertices: undirected_graph.add_vertex(num) for num in random_vertices: directed_graph.add_vertex(num) # test add_vertex worked for num in random_vertices: self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, num ) def test_remove_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) # build graphs WITHOUT edges undirected_graph = GraphAdjacencyList( vertices=random_vertices, edges=[], directed=False ) directed_graph = GraphAdjacencyList( vertices=random_vertices, edges=[], directed=True ) # test remove_vertex worked for num in random_vertices: self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, num ) undirected_graph.remove_vertex(num) directed_graph.remove_vertex(num) self.__assert_graph_vertex_does_not_exist_check( undirected_graph, directed_graph, num ) def test_add_and_remove_vertices_repeatedly(self) -> None: random_vertices1: list[int] = random.sample(range(51), 20) random_vertices2: list[int] = random.sample(range(51, 101), 20) # build graphs WITHOUT edges undirected_graph = GraphAdjacencyList( vertices=random_vertices1, edges=[], directed=False ) directed_graph = GraphAdjacencyList( vertices=random_vertices1, edges=[], directed=True ) # test adding and removing vertices for i, _ in enumerate(random_vertices1): undirected_graph.add_vertex(random_vertices2[i]) directed_graph.add_vertex(random_vertices2[i]) self.__assert_graph_vertex_exists_check( undirected_graph, directed_graph, random_vertices2[i] ) undirected_graph.remove_vertex(random_vertices1[i]) directed_graph.remove_vertex(random_vertices1[i]) self.__assert_graph_vertex_does_not_exist_check( undirected_graph, directed_graph, 
random_vertices1[i] ) # remove all vertices for i, _ in enumerate(random_vertices1): undirected_graph.remove_vertex(random_vertices2[i]) directed_graph.remove_vertex(random_vertices2[i]) self.__assert_graph_vertex_does_not_exist_check( undirected_graph, directed_graph, random_vertices2[i] ) def test_contains_edge(self) -> None: # generate graphs and graph input vertex_count = 20 ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(vertex_count, 0, 100, 4) # generate all possible edges for testing all_possible_edges: list[list[int]] = [] for i in range(vertex_count - 1): for j in range(i + 1, vertex_count): all_possible_edges.append([random_vertices[i], random_vertices[j]]) all_possible_edges.append([random_vertices[j], random_vertices[i]]) # test contains_edge function for edge in all_possible_edges: if edge in random_edges: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) elif [edge[1], edge[0]] in random_edges: # since this edge exists for undirected but the reverse # may not exist for directed self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, [edge[1], edge[0]] ) else: self.__assert_graph_edge_does_not_exist_check( undirected_graph, directed_graph, edge ) def test_add_edge(self) -> None: # generate graph input random_vertices: list[int] = random.sample(range(101), 15) random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) # build graphs WITHOUT edges undirected_graph = GraphAdjacencyList( vertices=random_vertices, edges=[], directed=False ) directed_graph = GraphAdjacencyList( vertices=random_vertices, edges=[], directed=True ) # run and test add_edge for edge in random_edges: undirected_graph.add_edge(edge[0], edge[1]) directed_graph.add_edge(edge[0], edge[1]) self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) def test_remove_edge(self) -> None: # generate graph input and graphs ( undirected_graph, directed_graph, 
_random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) # run and test remove_edge for edge in random_edges: self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) undirected_graph.remove_edge(edge[0], edge[1]) directed_graph.remove_edge(edge[0], edge[1]) self.__assert_graph_edge_does_not_exist_check( undirected_graph, directed_graph, edge ) def test_add_and_remove_edges_repeatedly(self) -> None: ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) # make some more edge options! more_random_edges: list[list[int]] = [] while len(more_random_edges) != len(random_edges): edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) for edge in edges: if len(more_random_edges) == len(random_edges): break elif edge not in more_random_edges and edge not in random_edges: more_random_edges.append(edge) for i, _ in enumerate(random_edges): undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1]) self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, more_random_edges[i] ) undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1]) directed_graph.remove_edge(random_edges[i][0], random_edges[i][1]) self.__assert_graph_edge_does_not_exist_check( undirected_graph, directed_graph, random_edges[i] ) def test_add_vertex_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: with pytest.raises(ValueError): undirected_graph.add_vertex(vertex) with pytest.raises(ValueError): directed_graph.add_vertex(vertex) def test_remove_vertex_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for i in range(101): if i not in random_vertices: with 
pytest.raises(ValueError): undirected_graph.remove_vertex(i) with pytest.raises(ValueError): directed_graph.remove_vertex(i) def test_add_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, _random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for edge in random_edges: with pytest.raises(ValueError): undirected_graph.add_edge(edge[0], edge[1]) with pytest.raises(ValueError): directed_graph.add_edge(edge[0], edge[1]) def test_remove_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) more_random_edges: list[list[int]] = [] while len(more_random_edges) != len(random_edges): edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4) for edge in edges: if len(more_random_edges) == len(random_edges): break elif edge not in more_random_edges and edge not in random_edges: more_random_edges.append(edge) for edge in more_random_edges: with pytest.raises(ValueError): undirected_graph.remove_edge(edge[0], edge[1]) with pytest.raises(ValueError): directed_graph.remove_edge(edge[0], edge[1]) def test_contains_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, random_vertices, _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: with pytest.raises(ValueError): undirected_graph.contains_edge(vertex, 102) with pytest.raises(ValueError): directed_graph.contains_edge(vertex, 102) with pytest.raises(ValueError): undirected_graph.contains_edge(103, 102) with pytest.raises(ValueError): directed_graph.contains_edge(103, 102) if __name__ == "__main__": unittest.main()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/graphs_floyd_warshall.py
graphs/graphs_floyd_warshall.py
# floyd_warshall.py """ The problem is to find the shortest distance between all pairs of vertices in a weighted directed graph that can have negative edge weights. """ def _print_dist(dist, v): print("\nThe shortest path matrix using Floyd Warshall algorithm\n") for i in range(v): for j in range(v): if dist[i][j] != float("inf"): print(int(dist[i][j]), end="\t") else: print("INF", end="\t") print() def floyd_warshall(graph, v): """ :param graph: 2D array calculated from weight[edge[i, j]] :type graph: List[List[float]] :param v: number of vertices :type v: int :return: shortest distance between all vertex pairs distance[u][v] will contain the shortest distance from vertex u to v. 1. For all edges from v to n, distance[i][j] = weight(edge(i, j)). 3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] + distance[k][j]) for each possible pair i, j of vertices. 4. The above is repeated for each vertex k in the graph. 5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is updated to the next vertex[i][k]. 
""" dist = [[float("inf") for _ in range(v)] for _ in range(v)] for i in range(v): for j in range(v): dist[i][j] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(v): # looping through rows of graph array for i in range(v): # looping through columns of graph array for j in range(v): if ( dist[i][k] != float("inf") and dist[k][j] != float("inf") and dist[i][k] + dist[k][j] < dist[i][j] ): dist[i][j] = dist[i][k] + dist[k][j] _print_dist(dist, v) return dist, v if __name__ == "__main__": v = int(input("Enter number of vertices: ")) e = int(input("Enter number of edges: ")) graph = [[float("inf") for i in range(v)] for j in range(v)] for i in range(v): graph[i][i] = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print("\nEdge ", i + 1) src = int(input("Enter source:")) dst = int(input("Enter destination:")) weight = float(input("Enter weight:")) graph[src][dst] = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/finding_bridges.py
graphs/finding_bridges.py
""" An edge is a bridge if, after removing it count of connected components in graph will be increased by one. Bridges represent vulnerabilities in a connected network and are useful for designing reliable networks. For example, in a wired computer network, an articulation point indicates the critical computers and a bridge indicates the critical wires or connections. For more details, refer this article: https://www.geeksforgeeks.org/bridge-in-a-graph/ """ def __get_demo_graph(index): return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]: """ Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi >>> compute_bridges(__get_demo_graph(0)) [(3, 4), (2, 3), (2, 5)] >>> compute_bridges(__get_demo_graph(1)) [(6, 7), (0, 6), (1, 9), (3, 4), (2, 4), (2, 5)] >>> compute_bridges(__get_demo_graph(2)) [(1, 6), (4, 6), (0, 4)] >>> compute_bridges(__get_demo_graph(3)) [] >>> compute_bridges({}) [] """ id_ = 0 n = len(graph) # No of vertices in graph low = [0] * n visited = [False] * n def dfs(at, parent, bridges, id_): visited[at] = True low[at] = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(to, at, bridges, id_) low[at] = min(low[at], low[to]) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at)) else: # This edge is a back edge and cannot be a bridge low[at] = min(low[at], low[to]) bridges: list[tuple[int, int]] = [] for i in range(n): if not visited[i]: dfs(i, -1, bridges, id_) return bridges if __name__ == "__main__": import doctest doctest.testmod()
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/a_star.py
graphs/a_star.py
from __future__ import annotations DIRECTIONS = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] # function to search the path def search( grid: list[list[int]], init: list[int], goal: list[int], cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]: """ Search for a path on a grid avoiding obstacles. >>> grid = [[0, 1, 0, 0, 0, 0], ... [0, 1, 0, 0, 0, 0], ... [0, 1, 0, 0, 0, 0], ... [0, 1, 0, 0, 1, 0], ... [0, 0, 0, 0, 1, 0]] >>> init = [0, 0] >>> goal = [len(grid) - 1, len(grid[0]) - 1] >>> cost = 1 >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))] >>> heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] >>> for i in range(len(grid)): ... for j in range(len(grid[0])): ... heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) ... if grid[i][j] == 1: ... heuristic[i][j] = 99 >>> path, action = search(grid, init, goal, cost, heuristic) >>> path # doctest: +NORMALIZE_WHITESPACE [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3], [3, 3], [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]] >>> action # doctest: +NORMALIZE_WHITESPACE [[0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0, 0], [2, 0, 0, 0, 3, 3], [2, 0, 0, 0, 0, 2], [2, 3, 3, 3, 0, 2]] """ closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the reference grid closed[init[0]][init[1]] = 1 action = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the action grid x = init[0] y = init[1] g = 0 f = g + heuristic[x][y] # cost from starting cell to destination cell cell = [[f, g, x, y]] found = False # flag that is set when search is complete resign = False # flag set if we can't find expand while not found and not resign: if len(cell) == 0: raise ValueError("Algorithm is unable to find solution") else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() next_cell = cell.pop() x = next_cell[2] y = next_cell[3] g = next_cell[1] if x == goal[0] and y == 
goal[1]: found = True else: for i in range(len(DIRECTIONS)): # to try out different valid actions x2 = x + DIRECTIONS[i][0] y2 = y + DIRECTIONS[i][1] if ( x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]) and closed[x2][y2] == 0 and grid[x2][y2] == 0 ): g2 = g + cost f2 = g2 + heuristic[x2][y2] cell.append([f2, g2, x2, y2]) closed[x2][y2] = 1 action[x2][y2] = i invpath = [] x = goal[0] y = goal[1] invpath.append([x, y]) # we get the reverse path from here while x != init[0] or y != init[1]: x2 = x - DIRECTIONS[action[x][y]][0] y2 = y - DIRECTIONS[action[x][y]][1] x = x2 y = y2 invpath.append([x, y]) path = [] for i in range(len(invpath)): path.append(invpath[len(invpath) - 1 - i]) return path, action if __name__ == "__main__": grid = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] init = [0, 0] # all coordinates are given in format [y,x] goal = [len(grid) - 1, len(grid[0]) - 1] cost = 1 # the cost map which pushes the path closer to the goal heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map heuristic[i][j] = 99 path, action = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/tests/test_min_spanning_tree_prim.py
graphs/tests/test_min_spanning_tree_prim.py
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def test_prim_successful_result(): num_nodes, num_edges = 9, 14 # noqa: F841 edges = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] adjacency = defaultdict(list) for node1, node2, cost in edges: adjacency[node1].append([node2, cost]) adjacency[node2].append([node1, cost]) result = mst(adjacency) expected = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: edge = tuple(answer[:2]) reverse = tuple(edge[::-1]) assert edge in result or reverse in result
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/tests/__init__.py
graphs/tests/__init__.py
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false
TheAlgorithms/Python
https://github.com/TheAlgorithms/Python/blob/2c15b8c54eb8130e83640fe1d911c10eb6cd70d4/graphs/tests/test_min_spanning_tree_kruskal.py
graphs/tests/test_min_spanning_tree_kruskal.py
from graphs.minimum_spanning_tree_kruskal import kruskal def test_kruskal_successful_result(): num_nodes = 9 edges = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] result = kruskal(num_nodes, edges) expected = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] assert sorted(expected) == sorted(result)
python
MIT
2c15b8c54eb8130e83640fe1d911c10eb6cd70d4
2026-01-04T14:38:15.231112Z
false