instruction stringclasses 100
values | code stringlengths 78 193k | response stringlengths 259 170k | file stringlengths 59 203 |
|---|---|---|---|
Improve my code by adding docstrings | # https://www.investopedia.com
from __future__ import annotations
def simple_interest(
principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0")
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return principal * daily_interest_rate * days_between_payments
def compound_interest(
principal: float,
nominal_annual_interest_rate_percentage: float,
number_of_compounding_periods: float,
) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0")
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(
principal: float,
nominal_annual_percentage_rate: float,
number_of_years: float,
) -> float:
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0")
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0")
if principal <= 0:
raise ValueError("principal must be > 0")
return compound_interest(
principal, nominal_annual_percentage_rate / 365, number_of_years * 365
)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -6,6 +6,30 @@ def simple_interest(
principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
+ """
+ >>> simple_interest(18000.0, 0.06, 3)
+ 3240.0
+ >>> simple_interest(0.5, 0.06, 3)
+ 0.09
+ >>> simple_interest(18000.0, 0.01, 10)
+ 1800.0
+ >>> simple_interest(18000.0, 0.0, 3)
+ 0.0
+ >>> simple_interest(5500.0, 0.01, 100)
+ 5500.0
+ >>> simple_interest(10000.0, -0.06, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: daily_interest_rate must be >= 0
+ >>> simple_interest(-10000.0, 0.06, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: principal must be > 0
+ >>> simple_interest(5500.0, 0.01, -5)
+ Traceback (most recent call last):
+ ...
+ ValueError: days_between_payments must be > 0
+ """
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0")
if daily_interest_rate < 0:
@@ -20,6 +44,26 @@ nominal_annual_interest_rate_percentage: float,
number_of_compounding_periods: float,
) -> float:
+ """
+ >>> compound_interest(10000.0, 0.05, 3)
+ 1576.2500000000014
+ >>> compound_interest(10000.0, 0.05, 1)
+ 500.00000000000045
+ >>> compound_interest(0.5, 0.05, 3)
+ 0.07881250000000006
+ >>> compound_interest(10000.0, 0.06, -4)
+ Traceback (most recent call last):
+ ...
+ ValueError: number_of_compounding_periods must be > 0
+ >>> compound_interest(10000.0, -3.5, 3.0)
+ Traceback (most recent call last):
+ ...
+ ValueError: nominal_annual_interest_rate_percentage must be >= 0
+ >>> compound_interest(-5500.0, 0.01, 5)
+ Traceback (most recent call last):
+ ...
+ ValueError: principal must be > 0
+ """
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0")
if nominal_annual_interest_rate_percentage < 0:
@@ -38,6 +82,26 @@ nominal_annual_percentage_rate: float,
number_of_years: float,
) -> float:
+ """
+ >>> apr_interest(10000.0, 0.05, 3)
+ 1618.223072263547
+ >>> apr_interest(10000.0, 0.05, 1)
+ 512.6749646744732
+ >>> apr_interest(0.5, 0.05, 3)
+ 0.08091115361317736
+ >>> apr_interest(10000.0, 0.06, -4)
+ Traceback (most recent call last):
+ ...
+ ValueError: number_of_years must be > 0
+ >>> apr_interest(10000.0, -3.5, 3.0)
+ Traceback (most recent call last):
+ ...
+ ValueError: nominal_annual_percentage_rate must be >= 0
+ >>> apr_interest(-5500.0, 0.01, 5)
+ Traceback (most recent call last):
+ ...
+ ValueError: principal must be > 0
+ """
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0")
if nominal_annual_percentage_rate < 0:
@@ -53,4 +117,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/financial/interest.py |
Add docstrings including usage examples |
from __future__ import annotations
import matplotlib.pyplot as plt
import numpy as np
# initial triangle of Koch snowflake
VECTOR_1 = np.array([0, 0])
VECTOR_2 = np.array([0.5, 0.8660254])
VECTOR_3 = np.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
# uncomment for simple Koch curve instead of Koch snowflake
# INITIAL_VECTORS = [VECTOR_1, VECTOR_3]
def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]:
vectors = initial_vectors
for _ in range(steps):
vectors = iteration_step(vectors)
return vectors
def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]:
new_vectors = []
for i, start_vector in enumerate(vectors[:-1]):
end_vector = vectors[i + 1]
new_vectors.append(start_vector)
difference_vector = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3)
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
)
new_vectors.append(start_vector + difference_vector * 2 / 3)
new_vectors.append(vectors[-1])
return new_vectors
def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray:
theta = np.radians(angle_in_degrees)
c, s = np.cos(theta), np.sin(theta)
rotation_matrix = np.array(((c, -s), (s, c)))
return np.dot(rotation_matrix, vector)
def plot(vectors: list[np.ndarray]) -> None:
# avoid stretched display of graph
axes = plt.gca()
axes.set_aspect("equal")
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
x_coordinates, y_coordinates = zip(*vectors)
plt.plot(x_coordinates, y_coordinates)
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors) | --- +++ @@ -1,3 +1,24 @@+"""
+Description
+ The Koch snowflake is a fractal curve and one of the earliest fractals to
+ have been described. The Koch snowflake can be built up iteratively, in a
+ sequence of stages. The first stage is an equilateral triangle, and each
+ successive stage is formed by adding outward bends to each side of the
+ previous stage, making smaller equilateral triangles.
+ This can be achieved through the following steps for each line:
+ 1. divide the line segment into three segments of equal length.
+ 2. draw an equilateral triangle that has the middle segment from step 1
+ as its base and points outward.
+ 3. remove the line segment that is the base of the triangle from step 2.
+ (description adapted from https://en.wikipedia.org/wiki/Koch_snowflake )
+ (for a more detailed explanation and an implementation in the
+ Processing language, see https://natureofcode.com/book/chapter-8-fractals/
+ #84-the-koch-curve-and-the-arraylist-technique )
+
+Requirements (pip):
+ - matplotlib
+ - numpy
+"""
from __future__ import annotations
@@ -15,6 +36,14 @@
def iterate(initial_vectors: list[np.ndarray], steps: int) -> list[np.ndarray]:
+ """
+ Go through the number of iterations determined by the argument "steps".
+ Be careful with high values (above 5) since the time to calculate increases
+ exponentially.
+ >>> iterate([np.array([0, 0]), np.array([1, 0])], 1)
+ [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \
+0.28867513]), array([0.66666667, 0. ]), array([1, 0])]
+ """
vectors = initial_vectors
for _ in range(steps):
vectors = iteration_step(vectors)
@@ -22,6 +51,15 @@
def iteration_step(vectors: list[np.ndarray]) -> list[np.ndarray]:
+ """
+ Loops through each pair of adjacent vectors. Each line between two adjacent
+ vectors is divided into 4 segments by adding 3 additional vectors in-between
+ the original two vectors. The vector in the middle is constructed through a
+ 60 degree rotation so it is bent outwards.
+ >>> iteration_step([np.array([0, 0]), np.array([1, 0])])
+ [array([0, 0]), array([0.33333333, 0. ]), array([0.5 , \
+0.28867513]), array([0.66666667, 0. ]), array([1, 0])]
+ """
new_vectors = []
for i, start_vector in enumerate(vectors[:-1]):
end_vector = vectors[i + 1]
@@ -37,6 +75,14 @@
def rotate(vector: np.ndarray, angle_in_degrees: float) -> np.ndarray:
+ """
+ Standard rotation of a 2D vector with a rotation matrix
+ (see https://en.wikipedia.org/wiki/Rotation_matrix )
+ >>> rotate(np.array([1, 0]), 60)
+ array([0.5 , 0.8660254])
+ >>> rotate(np.array([1, 0]), 90)
+ array([6.123234e-17, 1.000000e+00])
+ """
theta = np.radians(angle_in_degrees)
c, s = np.cos(theta), np.sin(theta)
rotation_matrix = np.array(((c, -s), (s, c)))
@@ -44,6 +90,10 @@
def plot(vectors: list[np.ndarray]) -> None:
+ """
+ Utility function to plot the vectors using matplotlib.pyplot
+ No doctest was implemented since this function does not have a return value
+ """
# avoid stretched display of graph
axes = plt.gca()
axes.set_aspect("equal")
@@ -62,4 +112,4 @@ doctest.testmod()
processed_vectors = iterate(INITIAL_VECTORS, 5)
- plot(processed_vectors)+ plot(processed_vectors)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/fractals/koch_snowflake.py |
Fully document this Python code with docstrings |
def price_plus_tax(price: float, tax_rate: float) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }") | --- +++ @@ -1,9 +1,18 @@+"""
+Calculate price plus tax of a good or service given its price and a tax rate.
+"""
def price_plus_tax(price: float, tax_rate: float) -> float:
+ """
+ >>> price_plus_tax(100, 0.25)
+ 125.0
+ >>> price_plus_tax(125.50, 0.05)
+ 131.775
+ """
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.25) = }")
- print(f"{price_plus_tax(125.50, 0.05) = }")+ print(f"{price_plus_tax(125.50, 0.05) = }")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/financial/price_plus_tax.py |
Add documentation for all methods | from __future__ import annotations
import math
from dataclasses import dataclass, field
from types import NoneType
from typing import Self
# Building block classes
@dataclass
class Angle:
degrees: float = 90
def __post_init__(self) -> None:
if not isinstance(self.degrees, (int, float)) or not 0 <= self.degrees <= 360:
raise TypeError("degrees must be a numeric value between 0 and 360.")
@dataclass
class Side:
length: float
angle: Angle = field(default_factory=Angle)
next_side: Side | None = None
def __post_init__(self) -> None:
if not isinstance(self.length, (int, float)) or self.length <= 0:
raise TypeError("length must be a positive numeric value.")
if not isinstance(self.angle, Angle):
raise TypeError("angle must be an Angle object.")
if not isinstance(self.next_side, (Side, NoneType)):
raise TypeError("next_side must be a Side or None.")
@dataclass
class Ellipse:
major_radius: float
minor_radius: float
@property
def area(self) -> float:
return math.pi * self.major_radius * self.minor_radius
@property
def perimeter(self) -> float:
return math.pi * (self.major_radius + self.minor_radius)
class Circle(Ellipse):
def __init__(self, radius: float) -> None:
super().__init__(radius, radius)
self.radius = radius
def __repr__(self) -> str:
return f"Circle(radius={self.radius})"
@property
def diameter(self) -> float:
return self.radius * 2
def max_parts(self, num_cuts: float) -> float:
if not isinstance(num_cuts, (int, float)) or num_cuts < 0:
raise TypeError("num_cuts must be a positive numeric value.")
return (num_cuts + 2 + num_cuts**2) * 0.5
@dataclass
class Polygon:
sides: list[Side] = field(default_factory=list)
def add_side(self, side: Side) -> Self:
self.sides.append(side)
return self
def get_side(self, index: int) -> Side:
return self.sides[index]
def set_side(self, index: int, side: Side) -> Self:
self.sides[index] = side
return self
class Rectangle(Polygon):
def __init__(self, short_side_length: float, long_side_length: float) -> None:
super().__init__()
self.short_side_length = short_side_length
self.long_side_length = long_side_length
self.post_init()
def post_init(self) -> None:
self.short_side = Side(self.short_side_length)
self.long_side = Side(self.long_side_length)
super().add_side(self.short_side)
super().add_side(self.long_side)
def perimeter(self) -> float:
return (self.short_side.length + self.long_side.length) * 2
def area(self) -> float:
return self.short_side.length * self.long_side.length
@dataclass
class Square(Rectangle):
def __init__(self, side_length: float) -> None:
super().__init__(side_length, side_length)
def perimeter(self) -> float:
return super().perimeter()
def area(self) -> float:
return super().area()
if __name__ == "__main__":
__import__("doctest").testmod() | --- +++ @@ -10,6 +10,22 @@
@dataclass
class Angle:
+ """
+ An Angle in degrees (unit of measurement)
+
+ >>> Angle()
+ Angle(degrees=90)
+ >>> Angle(45.5)
+ Angle(degrees=45.5)
+ >>> Angle(-1)
+ Traceback (most recent call last):
+ ...
+ TypeError: degrees must be a numeric value between 0 and 360.
+ >>> Angle(361)
+ Traceback (most recent call last):
+ ...
+ TypeError: degrees must be a numeric value between 0 and 360.
+ """
degrees: float = 90
@@ -20,6 +36,31 @@
@dataclass
class Side:
+ """
+ A side of a two dimensional Shape such as Polygon, etc.
+ adjacent_sides: a list of sides which are adjacent to the current side
+ angle: the angle in degrees between each adjacent side
+ length: the length of the current side in meters
+
+ >>> Side(5)
+ Side(length=5, angle=Angle(degrees=90), next_side=None)
+ >>> Side(5, Angle(45.6))
+ Side(length=5, angle=Angle(degrees=45.6), next_side=None)
+ >>> Side(5, Angle(45.6), Side(1, Angle(2))) # doctest: +ELLIPSIS
+ Side(length=5, angle=Angle(degrees=45.6), next_side=Side(length=1, angle=Angle(d...
+ >>> Side(-1)
+ Traceback (most recent call last):
+ ...
+ TypeError: length must be a positive numeric value.
+ >>> Side(5, None)
+ Traceback (most recent call last):
+ ...
+ TypeError: angle must be an Angle object.
+ >>> Side(5, Angle(90), "Invalid next_side")
+ Traceback (most recent call last):
+ ...
+ TypeError: next_side must be a Side or None.
+ """
length: float
angle: Angle = field(default_factory=Angle)
@@ -36,20 +77,52 @@
@dataclass
class Ellipse:
+ """
+ A geometric Ellipse on a 2D surface
+
+ >>> Ellipse(5, 10)
+ Ellipse(major_radius=5, minor_radius=10)
+ >>> Ellipse(5, 10) is Ellipse(5, 10)
+ False
+ >>> Ellipse(5, 10) == Ellipse(5, 10)
+ True
+ """
major_radius: float
minor_radius: float
@property
def area(self) -> float:
+ """
+ >>> Ellipse(5, 10).area
+ 157.07963267948966
+ """
return math.pi * self.major_radius * self.minor_radius
@property
def perimeter(self) -> float:
+ """
+ >>> Ellipse(5, 10).perimeter
+ 47.12388980384689
+ """
return math.pi * (self.major_radius + self.minor_radius)
class Circle(Ellipse):
+ """
+ A geometric Circle on a 2D surface
+
+ >>> Circle(5)
+ Circle(radius=5)
+ >>> Circle(5) is Circle(5)
+ False
+ >>> Circle(5) == Circle(5)
+ True
+ >>> Circle(5).area
+ 78.53981633974483
+ >>> Circle(5).perimeter
+ 31.41592653589793
+ """
def __init__(self, radius: float) -> None:
super().__init__(radius, radius)
@@ -60,9 +133,35 @@
@property
def diameter(self) -> float:
+ """
+ >>> Circle(5).diameter
+ 10
+ """
return self.radius * 2
def max_parts(self, num_cuts: float) -> float:
+ """
+ Return the maximum number of parts that circle can be divided into if cut
+ 'num_cuts' times.
+
+ >>> circle = Circle(5)
+ >>> circle.max_parts(0)
+ 1.0
+ >>> circle.max_parts(7)
+ 29.0
+ >>> circle.max_parts(54)
+ 1486.0
+ >>> circle.max_parts(22.5)
+ 265.375
+ >>> circle.max_parts(-222)
+ Traceback (most recent call last):
+ ...
+ TypeError: num_cuts must be a positive numeric value.
+ >>> circle.max_parts("-222")
+ Traceback (most recent call last):
+ ...
+ TypeError: num_cuts must be a positive numeric value.
+ """
if not isinstance(num_cuts, (int, float)) or num_cuts < 0:
raise TypeError("num_cuts must be a positive numeric value.")
return (num_cuts + 2 + num_cuts**2) * 0.5
@@ -70,22 +169,74 @@
@dataclass
class Polygon:
+ """
+ An abstract class which represents Polygon on a 2D surface.
+
+ >>> Polygon()
+ Polygon(sides=[])
+ >>> polygon = Polygon()
+ >>> polygon.add_side(Side(5)).get_side(0)
+ Side(length=5, angle=Angle(degrees=90), next_side=None)
+ >>> polygon.get_side(1)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> polygon.set_side(0, Side(10)).get_side(0)
+ Side(length=10, angle=Angle(degrees=90), next_side=None)
+ >>> polygon.set_side(1, Side(10))
+ Traceback (most recent call last):
+ ...
+ IndexError: list assignment index out of range
+ """
sides: list[Side] = field(default_factory=list)
def add_side(self, side: Side) -> Self:
+ """
+ >>> Polygon().add_side(Side(5))
+ Polygon(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None)])
+ """
self.sides.append(side)
return self
def get_side(self, index: int) -> Side:
+ """
+ >>> Polygon().get_side(0)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> Polygon().add_side(Side(5)).get_side(-1)
+ Side(length=5, angle=Angle(degrees=90), next_side=None)
+ """
return self.sides[index]
def set_side(self, index: int, side: Side) -> Self:
+ """
+ >>> Polygon().set_side(0, Side(5))
+ Traceback (most recent call last):
+ ...
+ IndexError: list assignment index out of range
+ >>> Polygon().add_side(Side(5)).set_side(0, Side(10))
+ Polygon(sides=[Side(length=10, angle=Angle(degrees=90), next_side=None)])
+ """
self.sides[index] = side
return self
class Rectangle(Polygon):
+ """
+ A geometric rectangle on a 2D surface.
+
+ >>> rectangle_one = Rectangle(5, 10)
+ >>> rectangle_one.perimeter()
+ 30
+ >>> rectangle_one.area()
+ 50
+ >>> Rectangle(-5, 10)
+ Traceback (most recent call last):
+ ...
+ TypeError: length must be a positive numeric value.
+ """
def __init__(self, short_side_length: float, long_side_length: float) -> None:
super().__init__()
@@ -94,6 +245,11 @@ self.post_init()
def post_init(self) -> None:
+ """
+ >>> Rectangle(5, 10) # doctest: +NORMALIZE_WHITESPACE
+ Rectangle(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None),
+ Side(length=10, angle=Angle(degrees=90), next_side=None)])
+ """
self.short_side = Side(self.short_side_length)
self.long_side = Side(self.long_side_length)
super().add_side(self.short_side)
@@ -108,6 +264,15 @@
@dataclass
class Square(Rectangle):
+ """
+ a structure which represents a
+ geometrical square on a 2D surface
+ >>> square_one = Square(5)
+ >>> square_one.perimeter()
+ 20
+ >>> square_one.area()
+ 25
+ """
def __init__(self, side_length: float) -> None:
super().__init__(side_length, side_length)
@@ -120,4 +285,4 @@
if __name__ == "__main__":
- __import__("doctest").testmod()+ __import__("doctest").testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/geometry/geometry.py |
Add docstrings to improve code quality |
from dataclasses import dataclass
@dataclass
class Node:
value: int = 0
neighbors: list["Node"] | None = None
def __post_init__(self) -> None:
self.neighbors = self.neighbors or []
def __hash__(self) -> int:
return id(self)
def clone_graph(node: Node | None) -> Node | None:
if not node:
return None
originals_to_clones = {} # map nodes to clones
stack = [node]
while stack:
original = stack.pop()
if original in originals_to_clones:
continue
originals_to_clones[original] = Node(original.value)
stack.extend(original.neighbors or [])
for original, clone in originals_to_clones.items():
for neighbor in original.neighbors or []:
cloned_neighbor = originals_to_clones[neighbor]
if not clone.neighbors:
clone.neighbors = []
clone.neighbors.append(cloned_neighbor)
return originals_to_clones[node]
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,14 @@+"""
+LeetCode 133. Clone Graph
+https://leetcode.com/problems/clone-graph/
+
+Given a reference of a node in a connected undirected graph.
+
+Return a deep copy (clone) of the graph.
+
+Each node in the graph contains a value (int) and a list (List[Node]) of its
+neighbors.
+"""
from dataclasses import dataclass
@@ -8,13 +19,30 @@ neighbors: list["Node"] | None = None
def __post_init__(self) -> None:
+ """
+ >>> Node(3).neighbors
+ []
+ """
self.neighbors = self.neighbors or []
def __hash__(self) -> int:
+ """
+ >>> hash(Node(3)) != 0
+ True
+ """
return id(self)
def clone_graph(node: Node | None) -> Node | None:
+ """
+ This function returns a clone of a connected undirected graph.
+ >>> clone_graph(Node(1))
+ Node(value=1, neighbors=[])
+ >>> clone_graph(Node(1, [Node(2)]))
+ Node(value=1, neighbors=[Node(value=2, neighbors=[])])
+ >>> clone_graph(None) is None
+ True
+ """
if not node:
return None
@@ -47,4 +75,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/deep_clone_graph.py |
Help me comply with documentation standards |
import warnings
from collections.abc import Callable
from typing import Any
import matplotlib.pyplot as plt
import numpy as np
c_cauliflower = 0.25 + 0.0j
c_polynomial_1 = -0.4 + 0.6j
c_polynomial_2 = -0.1 + 0.651j
c_exponential = -2.0
nb_iterations = 56
window_size = 2.0
nb_pixels = 666
def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
return np.exp(z_values) + c_parameter
def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
return z_values * z_values + c_parameter
def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray:
x = np.linspace(-window_size, window_size, nb_pixels)
x = x.reshape((nb_pixels, 1))
y = np.linspace(-window_size, window_size, nb_pixels)
y = y.reshape((1, nb_pixels))
return x + 1.0j * y
def iterate_function(
eval_function: Callable[[Any, np.ndarray], np.ndarray],
function_params: Any,
nb_iterations: int,
z_0: np.ndarray,
infinity: float | None = None,
) -> np.ndarray:
z_n = z_0.astype("complex64")
for _ in range(nb_iterations):
z_n = eval_function(function_params, z_n)
if infinity is not None:
np.nan_to_num(z_n, copy=False, nan=infinity)
z_n[abs(z_n) == np.inf] = infinity
return z_n
def show_results(
function_label: str,
function_params: Any,
escape_radius: float,
z_final: np.ndarray,
) -> None:
abs_z_final = (abs(z_final)).transpose()
abs_z_final[:, :] = abs_z_final[::-1, :]
plt.matshow(abs_z_final < escape_radius)
plt.title(f"Julia set of ${function_label}$, $c={function_params}$")
plt.show()
def ignore_overflow_warnings() -> None:
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in multiply"
)
warnings.filterwarnings(
"ignore",
category=RuntimeWarning,
message="invalid value encountered in multiply",
)
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in absolute"
)
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in exp"
)
if __name__ == "__main__":
z_0 = prepare_grid(window_size, nb_pixels)
ignore_overflow_warnings() # See file header for explanations
nb_iterations = 24
escape_radius = 2 * abs(c_cauliflower) + 1
z_final = iterate_function(
eval_quadratic_polynomial,
c_cauliflower,
nb_iterations,
z_0,
infinity=1.1 * escape_radius,
)
show_results("z^2+c", c_cauliflower, escape_radius, z_final)
nb_iterations = 64
escape_radius = 2 * abs(c_polynomial_1) + 1
z_final = iterate_function(
eval_quadratic_polynomial,
c_polynomial_1,
nb_iterations,
z_0,
infinity=1.1 * escape_radius,
)
show_results("z^2+c", c_polynomial_1, escape_radius, z_final)
nb_iterations = 161
escape_radius = 2 * abs(c_polynomial_2) + 1
z_final = iterate_function(
eval_quadratic_polynomial,
c_polynomial_2,
nb_iterations,
z_0,
infinity=1.1 * escape_radius,
)
show_results("z^2+c", c_polynomial_2, escape_radius, z_final)
nb_iterations = 12
escape_radius = 10000.0
z_final = iterate_function(
eval_exponential,
c_exponential,
nb_iterations,
z_0 + 2,
infinity=1.0e10,
)
show_results("e^z+c", c_exponential, escape_radius, z_final) | --- +++ @@ -1,3 +1,25 @@+"""Author Alexandre De Zotti
+
+Draws Julia sets of quadratic polynomials and exponential maps.
+ More specifically, this iterates the function a fixed number of times
+ then plots whether the absolute value of the last iterate is greater than
+ a fixed threshold (named "escape radius"). For the exponential map this is not
+ really an escape radius but rather a convenient way to approximate the Julia
+ set with bounded orbits.
+
+The examples presented here are:
+- The Cauliflower Julia set, see e.g.
+https://en.wikipedia.org/wiki/File:Julia_z2%2B0,25.png
+- Other examples from https://en.wikipedia.org/wiki/Julia_set
+- An exponential map Julia set, ambiantly homeomorphic to the examples in
+https://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html
+ and
+https://ddd.uab.cat/pub/pubmat/02141493v43n1/02141493v43n1p27.pdf
+
+Remark: Some overflow runtime warnings are suppressed. This is because of the
+ way the iteration loop is implemented, using numpy's efficient computations.
+ Overflows and infinites are replaced after each step by a large number.
+"""
import warnings
from collections.abc import Callable
@@ -16,14 +38,43 @@
def eval_exponential(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
+ """
+ Evaluate $e^z + c$.
+ >>> float(eval_exponential(0, 0))
+ 1.0
+ >>> bool(abs(eval_exponential(1, np.pi*1.j)) < 1e-15)
+ True
+ >>> bool(abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15)
+ True
+ """
return np.exp(z_values) + c_parameter
def eval_quadratic_polynomial(c_parameter: complex, z_values: np.ndarray) -> np.ndarray:
+ """
+ >>> eval_quadratic_polynomial(0, 2)
+ 4
+ >>> eval_quadratic_polynomial(-1, 1)
+ 0
+ >>> round(eval_quadratic_polynomial(1.j, 0).imag)
+ 1
+ >>> round(eval_quadratic_polynomial(1.j, 0).real)
+ 0
+ """
return z_values * z_values + c_parameter
def prepare_grid(window_size: float, nb_pixels: int) -> np.ndarray:
+ """
+ Create a grid of complex values of size nb_pixels*nb_pixels with real and
+ imaginary parts ranging from -window_size to window_size (inclusive).
+ Returns a numpy array.
+
+ >>> prepare_grid(1,3)
+ array([[-1.-1.j, -1.+0.j, -1.+1.j],
+ [ 0.-1.j, 0.+0.j, 0.+1.j],
+ [ 1.-1.j, 1.+0.j, 1.+1.j]])
+ """
x = np.linspace(-window_size, window_size, nb_pixels)
x = x.reshape((nb_pixels, 1))
y = np.linspace(-window_size, window_size, nb_pixels)
@@ -38,6 +89,31 @@ z_0: np.ndarray,
infinity: float | None = None,
) -> np.ndarray:
+ """
+ Iterate the function "eval_function" exactly nb_iterations times.
+ The first argument of the function is a parameter which is contained in
+ function_params. The variable z_0 is an array that contains the initial
+ values to iterate from.
+ This function returns the final iterates.
+
+ >>> iterate_function(eval_quadratic_polynomial, 0, 3, np.array([0,1,2])).shape
+ (3,)
+ >>> complex(np.round(iterate_function(eval_quadratic_polynomial,
+ ... 0,
+ ... 3,
+ ... np.array([0,1,2]))[0]))
+ 0j
+ >>> complex(np.round(iterate_function(eval_quadratic_polynomial,
+ ... 0,
+ ... 3,
+ ... np.array([0,1,2]))[1]))
+ (1+0j)
+ >>> complex(np.round(iterate_function(eval_quadratic_polynomial,
+ ... 0,
+ ... 3,
+ ... np.array([0,1,2]))[2]))
+ (256+0j)
+ """
z_n = z_0.astype("complex64")
for _ in range(nb_iterations):
@@ -54,6 +130,13 @@ escape_radius: float,
z_final: np.ndarray,
) -> None:
+ """
+ Plots of whether the absolute value of z_final is greater than
+ the value of escape_radius. Adds the function_label and function_params to
+ the title.
+
+ >>> show_results('80', 0, 1, np.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]]))
+ """
abs_z_final = (abs(z_final)).transpose()
abs_z_final[:, :] = abs_z_final[::-1, :]
@@ -63,6 +146,11 @@
def ignore_overflow_warnings() -> None:
+ """
+ Ignore some overflow and invalid value warnings.
+
+ >>> ignore_overflow_warnings()
+ """
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in multiply"
)
@@ -126,4 +214,4 @@ z_0 + 2,
infinity=1.0e10,
)
- show_results("e^z+c", c_exponential, escape_radius, z_final)+ show_results("e^z+c", c_exponential, escape_radius, z_final)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/fractals/julia_sets.py |
Generate NumPy-style docstrings |
from collections import deque
def expand_search(
graph: dict[int, list[int]],
queue: deque[int],
parents: dict[int, int | None],
opposite_direction_parents: dict[int, int | None],
) -> int | None:
if not queue:
return None
current = queue.popleft()
for neighbor in graph[current]:
if neighbor in parents:
continue
parents[neighbor] = current
queue.append(neighbor)
# Check if this creates an intersection
if neighbor in opposite_direction_parents:
return neighbor
return None
def construct_path(current: int | None, parents: dict[int, int | None]) -> list[int]:
path: list[int] = []
while current is not None:
path.append(current)
current = parents[current]
return path
def bidirectional_search(
graph: dict[int, list[int]], start: int, goal: int
) -> list[int] | None:
if start == goal:
return [start]
# Check if start and goal are in the graph
if start not in graph or goal not in graph:
return None
# Initialize forward and backward search dictionaries
# Each maps a node to its parent in the search
forward_parents: dict[int, int | None] = {start: None}
backward_parents: dict[int, int | None] = {goal: None}
# Initialize forward and backward search queues
forward_queue = deque([start])
backward_queue = deque([goal])
# Intersection node (where the two searches meet)
intersection = None
# Continue until both queues are empty or an intersection is found
while forward_queue and backward_queue and intersection is None:
# Expand forward search
intersection = expand_search(
graph=graph,
queue=forward_queue,
parents=forward_parents,
opposite_direction_parents=backward_parents,
)
# If no intersection found, expand backward search
if intersection is not None:
break
intersection = expand_search(
graph=graph,
queue=backward_queue,
parents=backward_parents,
opposite_direction_parents=forward_parents,
)
# If no intersection found, there's no path
if intersection is None:
return None
# Construct path from start to intersection
forward_path: list[int] = construct_path(
current=intersection, parents=forward_parents
)
forward_path.reverse()
# Construct path from intersection to goal
backward_path: list[int] = construct_path(
current=backward_parents[intersection], parents=backward_parents
)
# Return the complete path
return forward_path + backward_path
def main() -> None:
# Example graph represented as an adjacency list
example_graph = {
0: [1, 2],
1: [0, 3, 4],
2: [0, 5, 6],
3: [1, 7],
4: [1, 8],
5: [2, 9],
6: [2, 10],
7: [3, 11],
8: [4, 11],
9: [5, 11],
10: [6, 11],
11: [7, 8, 9, 10],
}
# Test case 1: Path exists
start, goal = 0, 11
path = bidirectional_search(graph=example_graph, start=start, goal=goal)
print(f"Path from {start} to {goal}: {path}")
# Test case 2: Start and goal are the same
start, goal = 5, 5
path = bidirectional_search(graph=example_graph, start=start, goal=goal)
print(f"Path from {start} to {goal}: {path}")
# Test case 3: No path exists (disconnected graph)
disconnected_graph = {
0: [1, 2],
1: [0],
2: [0],
3: [4],
4: [3],
}
start, goal = 0, 3
path = bidirectional_search(graph=disconnected_graph, start=start, goal=goal)
print(f"Path from {start} to {goal}: {path}")
if __name__ == "__main__":
main() | --- +++ @@ -1,3 +1,15 @@+"""
+Bidirectional Search Algorithm.
+
+This algorithm searches from both the source and target nodes simultaneously,
+meeting somewhere in the middle. This approach can significantly reduce the
+search space compared to a traditional one-directional search.
+
+Time Complexity: O(b^(d/2)) where b is the branching factor and d is the depth
+Space Complexity: O(b^(d/2))
+
+https://en.wikipedia.org/wiki/Bidirectional_search
+"""
from collections import deque
@@ -37,6 +49,46 @@ def bidirectional_search(
graph: dict[int, list[int]], start: int, goal: int
) -> list[int] | None:
+ """
+ Perform bidirectional search on a graph to find the shortest path.
+
+ Args:
+ graph: A dictionary where keys are nodes and values are lists of adjacent nodes
+ start: The starting node
+ goal: The target node
+
+ Returns:
+ A list representing the path from start to goal, or None if no path exists
+
+ Examples:
+ >>> graph = {
+ ... 0: [1, 2],
+ ... 1: [0, 3, 4],
+ ... 2: [0, 5, 6],
+ ... 3: [1, 7],
+ ... 4: [1, 8],
+ ... 5: [2, 9],
+ ... 6: [2, 10],
+ ... 7: [3, 11],
+ ... 8: [4, 11],
+ ... 9: [5, 11],
+ ... 10: [6, 11],
+ ... 11: [7, 8, 9, 10],
+ ... }
+ >>> bidirectional_search(graph=graph, start=0, goal=11)
+ [0, 1, 3, 7, 11]
+ >>> bidirectional_search(graph=graph, start=5, goal=5)
+ [5]
+ >>> disconnected_graph = {
+ ... 0: [1, 2],
+ ... 1: [0],
+ ... 2: [0],
+ ... 3: [4],
+ ... 4: [3],
+ ... }
+ >>> bidirectional_search(graph=disconnected_graph, start=0, goal=3) is None
+ True
+ """
if start == goal:
return [start]
@@ -97,6 +149,15 @@
def main() -> None:
+ """
+ Run example of bidirectional search algorithm.
+
+ Examples:
+ >>> main() # doctest: +NORMALIZE_WHITESPACE
+ Path from 0 to 11: [0, 1, 3, 7, 11]
+ Path from 5 to 5: [5]
+ Path from 0 to 3: None
+ """
# Example graph represented as an adjacency list
example_graph = {
0: [1, 2],
@@ -137,4 +198,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/bidirectional_search.py |
Create documentation for each function signature |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
score = len([g for position, g in enumerate(item) if g == main_target[position]])
return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
random_slice = random.randint(0, len(parent_1) - 1)
child_1 = parent_1[:random_slice] + parent_2[random_slice:]
child_2 = parent_2[:random_slice] + parent_1[random_slice:]
return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
child_list = list(child)
if random.uniform(0, 1) < MUTATION_PROBABILITY:
child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
return "".join(child_list)
# Select, crossover and mutate a new population.
def select(
parent_1: tuple[str, float],
population_score: list[tuple[str, float]],
genes: list[str],
) -> list[str]:
pop = []
# Generate more children proportionally to the fitness score.
child_n = int(parent_1[1] * 100) + 1
child_n = 10 if child_n >= 10 else child_n
for _ in range(child_n):
parent_2 = population_score[random.randint(0, N_SELECTED)][0]
child_1, child_2 = crossover(parent_1[0], parent_2)
# Append new string to the population list.
pop.append(mutate(child_1, genes))
pop.append(mutate(child_2, genes))
return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
raise ValueError(msg)
# Verify that the target contains no genes besides the ones inside genes variable.
not_in_genes_list = sorted({c for c in target if c not in genes})
if not_in_genes_list:
msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
raise ValueError(msg)
# Generate random starting population.
population = []
for _ in range(N_POPULATION):
population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing.
generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(population)
# Random population created. Now it's time to evaluate.
# (Option 1) Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item, target) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# (Option 2) We just need to call evaluate for every item inside the population.
population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}"
)
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
population_best = population[: int(N_POPULATION / 3)]
population.clear()
population.extend(population_best)
# Normalize population score to be between 0 and 1.
population_score = [
(item, score / len(target)) for item, score in population_score
]
# This is selection
for i in range(N_SELECTED):
population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(population) > N_POPULATION:
break
if __name__ == "__main__":
target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
generation, population, target = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
) | --- +++ @@ -1,3 +1,9 @@+"""
+Simple multithreaded algorithm to show how the 4 phases of a genetic algorithm works
+(Evaluation, Selection, Crossover and Mutation)
+https://en.wikipedia.org/wiki/Genetic_algorithm
+Author: D4rkia
+"""
from __future__ import annotations
@@ -16,11 +22,23 @@
def evaluate(item: str, main_target: str) -> tuple[str, float]:
+ """
+ Evaluate how similar the item is with the target by just
+ counting each char in the right position
+ >>> evaluate("Helxo Worlx", "Hello World")
+ ('Helxo Worlx', 9.0)
+ """
score = len([g for position, g in enumerate(item) if g == main_target[position]])
return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
+ """
+ Slice and combine two strings at a random point.
+ >>> random.seed(42)
+ >>> crossover("123456", "abcdef")
+ ('12345f', 'abcde6')
+ """
random_slice = random.randint(0, len(parent_1) - 1)
child_1 = parent_1[:random_slice] + parent_2[random_slice:]
child_2 = parent_2[:random_slice] + parent_1[random_slice:]
@@ -28,6 +46,12 @@
def mutate(child: str, genes: list[str]) -> str:
+ """
+ Mutate a random gene of a child with another one from the list.
+ >>> random.seed(123)
+ >>> mutate("123456", list("ABCDEF"))
+ '12345A'
+ """
child_list = list(child)
if random.uniform(0, 1) < MUTATION_PROBABILITY:
child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
@@ -40,6 +64,22 @@ population_score: list[tuple[str, float]],
genes: list[str],
) -> list[str]:
+ """
+ Select the second parent and generate new population
+
+ >>> random.seed(42)
+ >>> parent_1 = ("123456", 8.0)
+ >>> population_score = [("abcdef", 4.0), ("ghijkl", 5.0), ("mnopqr", 7.0)]
+ >>> genes = list("ABCDEF")
+ >>> child_n = int(min(parent_1[1] + 1, 10))
+ >>> population = []
+ >>> for _ in range(child_n):
+ ... parent_2 = population_score[random.randrange(len(population_score))][0]
+ ... child_1, child_2 = crossover(parent_1[0], parent_2)
+ ... population.extend((mutate(child_1, genes), mutate(child_2, genes)))
+ >>> len(population) == (int(parent_1[1]) + 1) * 2
+ True
+ """
pop = []
# Generate more children proportionally to the fitness score.
child_n = int(parent_1[1] * 100) + 1
@@ -55,6 +95,29 @@
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
+ """
+ Verify that the target contains no genes besides the ones inside genes variable.
+
+ >>> from string import ascii_lowercase
+ >>> basic("doctest", ascii_lowercase, debug=False)[2]
+ 'doctest'
+ >>> genes = list(ascii_lowercase)
+ >>> genes.remove("e")
+ >>> basic("test", genes)
+ Traceback (most recent call last):
+ ...
+ ValueError: ['e'] is not in genes list, evolution cannot converge
+ >>> genes.remove("s")
+ >>> basic("test", genes)
+ Traceback (most recent call last):
+ ...
+ ValueError: ['e', 's'] is not in genes list, evolution cannot converge
+ >>> genes.remove("t")
+ >>> basic("test", genes)
+ Traceback (most recent call last):
+ ...
+ ValueError: ['e', 's', 't'] is not in genes list, evolution cannot converge
+ """
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
@@ -142,4 +205,4 @@ generation, population, target = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
- )+ )
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/genetic_algorithm/basic_string.py |
Document my Python code with docstrings |
from __future__ import annotations
from dataclasses import dataclass
import matplotlib.pyplot as plt
import numpy as np
@dataclass
class FuzzySet:
name: str
left_boundary: float
peak: float
right_boundary: float
def __str__(self) -> str:
return (
f"{self.name}: [{self.left_boundary}, {self.peak}, {self.right_boundary}]"
)
def complement(self) -> FuzzySet:
return FuzzySet(
f"¬{self.name}",
1 - self.right_boundary,
1 - self.left_boundary,
1 - self.peak,
)
def intersection(self, other) -> FuzzySet:
return FuzzySet(
f"{self.name} ∩ {other.name}",
max(self.left_boundary, other.left_boundary),
min(self.right_boundary, other.right_boundary),
(self.peak + other.peak) / 2,
)
def membership(self, x: float) -> float:
if x <= self.left_boundary or x >= self.right_boundary:
return 0.0
elif self.left_boundary < x <= self.peak:
return (x - self.left_boundary) / (self.peak - self.left_boundary)
elif self.peak < x < self.right_boundary:
return (self.right_boundary - x) / (self.right_boundary - self.peak)
msg = f"Invalid value {x} for fuzzy set {self}"
raise ValueError(msg)
def union(self, other) -> FuzzySet:
return FuzzySet(
f"{self.name} U {other.name}",
min(self.left_boundary, other.left_boundary),
max(self.right_boundary, other.right_boundary),
(self.peak + other.peak) / 2,
)
def plot(self):
x = np.linspace(0, 1, 1000)
y = [self.membership(xi) for xi in x]
plt.plot(x, y, label=self.name)
if __name__ == "__main__":
from doctest import testmod
testmod()
a = FuzzySet("A", 0, 0.5, 1)
b = FuzzySet("B", 0.2, 0.7, 1)
a.plot()
b.plot()
plt.xlabel("x")
plt.ylabel("Membership")
plt.legend()
plt.show()
union_ab = a.union(b)
intersection_ab = a.intersection(b)
complement_a = a.complement()
union_ab.plot()
intersection_ab.plot()
complement_a.plot()
plt.xlabel("x")
plt.ylabel("Membership")
plt.legend()
plt.show() | --- +++ @@ -1,3 +1,8 @@+"""
+By @Shreya123714
+
+https://en.wikipedia.org/wiki/Fuzzy_set
+"""
from __future__ import annotations
@@ -9,6 +14,51 @@
@dataclass
class FuzzySet:
+ """
+ A class for representing and manipulating triangular fuzzy sets.
+ Attributes:
+ name: The name or label of the fuzzy set.
+ left_boundary: The left boundary of the fuzzy set.
+ peak: The peak (central) value of the fuzzy set.
+ right_boundary: The right boundary of the fuzzy set.
+ Methods:
+ membership(x): Calculate the membership value of an input 'x' in the fuzzy set.
+ union(other): Calculate the union of this fuzzy set with another fuzzy set.
+ intersection(other): Calculate the intersection of this fuzzy set with another.
+ complement(): Calculate the complement (negation) of this fuzzy set.
+ plot(): Plot the membership function of the fuzzy set.
+
+ >>> sheru = FuzzySet("Sheru", 0.4, 1, 0.6)
+ >>> sheru
+ FuzzySet(name='Sheru', left_boundary=0.4, peak=1, right_boundary=0.6)
+ >>> str(sheru)
+ 'Sheru: [0.4, 1, 0.6]'
+
+ >>> siya = FuzzySet("Siya", 0.5, 1, 0.7)
+ >>> siya
+ FuzzySet(name='Siya', left_boundary=0.5, peak=1, right_boundary=0.7)
+
+ # Complement Operation
+ >>> sheru.complement()
+ FuzzySet(name='¬Sheru', left_boundary=0.4, peak=0.6, right_boundary=0)
+ >>> siya.complement() # doctest: +NORMALIZE_WHITESPACE
+ FuzzySet(name='¬Siya', left_boundary=0.30000000000000004, peak=0.5,
+ right_boundary=0)
+
+ # Intersection Operation
+ >>> siya.intersection(sheru)
+ FuzzySet(name='Siya ∩ Sheru', left_boundary=0.5, peak=0.6, right_boundary=1.0)
+
+ # Membership Operation
+ >>> sheru.membership(0.5)
+ 0.16666666666666663
+ >>> sheru.membership(0.6)
+ 0.0
+
+ # Union Operations
+ >>> siya.union(sheru)
+ FuzzySet(name='Siya U Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0)
+ """
name: str
left_boundary: float
@@ -16,11 +66,23 @@ right_boundary: float
def __str__(self) -> str:
+ """
+ >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3)
+ FuzzySet(name='fuzzy_set', left_boundary=0.1, peak=0.2, right_boundary=0.3)
+ """
return (
f"{self.name}: [{self.left_boundary}, {self.peak}, {self.right_boundary}]"
)
def complement(self) -> FuzzySet:
+ """
+ Calculate the complement (negation) of this fuzzy set.
+ Returns:
+ FuzzySet: A new fuzzy set representing the complement.
+
+ >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3).complement()
+ FuzzySet(name='¬fuzzy_set', left_boundary=0.7, peak=0.9, right_boundary=0.8)
+ """
return FuzzySet(
f"¬{self.name}",
1 - self.right_boundary,
@@ -29,6 +91,17 @@ )
def intersection(self, other) -> FuzzySet:
+ """
+ Calculate the intersection of this fuzzy set
+ with another fuzzy set.
+ Args:
+ other: Another fuzzy set to intersect with.
+ Returns:
+ A new fuzzy set representing the intersection.
+
+ >>> FuzzySet("a", 0.1, 0.2, 0.3).intersection(FuzzySet("b", 0.4, 0.5, 0.6))
+ FuzzySet(name='a ∩ b', left_boundary=0.4, peak=0.3, right_boundary=0.35)
+ """
return FuzzySet(
f"{self.name} ∩ {other.name}",
max(self.left_boundary, other.left_boundary),
@@ -37,6 +110,25 @@ )
def membership(self, x: float) -> float:
+ """
+ Calculate the membership value of an input 'x' in the fuzzy set.
+ Returns:
+ The membership value of 'x' in the fuzzy set.
+
+ >>> a = FuzzySet("a", 0.1, 0.2, 0.3)
+ >>> a.membership(0.09)
+ 0.0
+ >>> a.membership(0.1)
+ 0.0
+ >>> a.membership(0.11)
+ 0.09999999999999995
+ >>> a.membership(0.4)
+ 0.0
+ >>> FuzzySet("A", 0, 0.5, 1).membership(0.1)
+ 0.2
+ >>> FuzzySet("B", 0.2, 0.7, 1).membership(0.6)
+ 0.8
+ """
if x <= self.left_boundary or x >= self.right_boundary:
return 0.0
elif self.left_boundary < x <= self.peak:
@@ -47,6 +139,16 @@ raise ValueError(msg)
def union(self, other) -> FuzzySet:
+ """
+ Calculate the union of this fuzzy set with another fuzzy set.
+ Args:
+ other (FuzzySet): Another fuzzy set to union with.
+ Returns:
+ FuzzySet: A new fuzzy set representing the union.
+
+ >>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6))
+ FuzzySet(name='a U b', left_boundary=0.1, peak=0.6, right_boundary=0.35)
+ """
return FuzzySet(
f"{self.name} U {other.name}",
min(self.left_boundary, other.left_boundary),
@@ -55,6 +157,9 @@ )
def plot(self):
+ """
+ Plot the membership function of the fuzzy set.
+ """
x = np.linspace(0, 1, 1000)
y = [self.membership(xi) for xi in x]
@@ -87,4 +192,4 @@ plt.xlabel("x")
plt.ylabel("Membership")
plt.legend()
- plt.show()+ plt.show()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/fuzzy_logic/fuzzy_operations.py |
Help me comply with documentation standards |
from __future__ import annotations
class Point:
def __init__(self, x_coordinate: float, y_coordinate: float) -> None:
self.x = x_coordinate
self.y = y_coordinate
def __eq__(self, other: object) -> bool:
if not isinstance(other, Point):
return NotImplemented
return self.x == other.x and self.y == other.y
def __repr__(self) -> str:
return f"Point({self.x}, {self.y})"
def __hash__(self) -> int:
return hash((self.x, self.y))
def _cross_product(origin: Point, point_a: Point, point_b: Point) -> float:
return (point_a.x - origin.x) * (point_b.y - origin.y) - (point_a.y - origin.y) * (
point_b.x - origin.x
)
def _is_point_on_segment(p1: Point, p2: Point, point: Point) -> bool:
# Check if point is collinear with segment endpoints
cross = (point.y - p1.y) * (p2.x - p1.x) - (point.x - p1.x) * (p2.y - p1.y)
if abs(cross) > 1e-9:
return False
# Check if point is within the bounding box of the segment
return min(p1.x, p2.x) <= point.x <= max(p1.x, p2.x) and min(
p1.y, p2.y
) <= point.y <= max(p1.y, p2.y)
def _find_leftmost_point(points: list[Point]) -> int:
left_idx = 0
for i in range(1, len(points)):
if points[i].x < points[left_idx].x or (
points[i].x == points[left_idx].x and points[i].y < points[left_idx].y
):
left_idx = i
return left_idx
def _find_next_hull_point(points: list[Point], current_idx: int) -> int:
next_idx = (current_idx + 1) % len(points)
# Ensure next_idx is not the same as current_idx
while next_idx == current_idx:
next_idx = (next_idx + 1) % len(points)
for i in range(len(points)):
if i == current_idx:
continue
cross = _cross_product(points[current_idx], points[i], points[next_idx])
if cross > 0:
next_idx = i
return next_idx
def _is_valid_polygon(hull: list[Point]) -> bool:
for i in range(len(hull)):
p1 = hull[i]
p2 = hull[(i + 1) % len(hull)]
p3 = hull[(i + 2) % len(hull)]
if abs(_cross_product(p1, p2, p3)) > 1e-9:
return True
return False
def _add_point_to_hull(hull: list[Point], point: Point) -> None:
last = len(hull) - 1
if len(hull) > 1 and _is_point_on_segment(hull[last - 1], hull[last], point):
hull[last] = Point(point.x, point.y)
else:
hull.append(Point(point.x, point.y))
def jarvis_march(points: list[Point]) -> list[Point]:
if len(points) <= 2:
return []
# Remove duplicate points to avoid infinite loops
unique_points = list(set(points))
if len(unique_points) <= 2:
return []
convex_hull: list[Point] = []
# Find the leftmost point
left_point_idx = _find_leftmost_point(unique_points)
convex_hull.append(
Point(unique_points[left_point_idx].x, unique_points[left_point_idx].y)
)
current_idx = left_point_idx
while True:
# Find the next counter-clockwise point
next_idx = _find_next_hull_point(unique_points, current_idx)
if next_idx == left_point_idx:
break
if next_idx == current_idx:
break
current_idx = next_idx
_add_point_to_hull(convex_hull, unique_points[current_idx])
# Check for degenerate cases
if len(convex_hull) <= 2:
return []
# Check if last point is collinear with first and second-to-last
last = len(convex_hull) - 1
if _is_point_on_segment(convex_hull[last - 1], convex_hull[last], convex_hull[0]):
convex_hull.pop()
if len(convex_hull) == 2:
return []
# Verify the hull forms a valid polygon
if not _is_valid_polygon(convex_hull):
return []
return convex_hull
if __name__ == "__main__":
# Example usage
points = [Point(0, 0), Point(1, 1), Point(0, 1), Point(1, 0), Point(0.5, 0.5)]
hull = jarvis_march(points)
print(f"Convex hull: {hull}") | --- +++ @@ -1,8 +1,29 @@+"""
+Jarvis March (Gift Wrapping) algorithm for finding the convex hull of a set of points.
+
+The convex hull is the smallest convex polygon that contains all the points.
+
+Time Complexity: O(n*h) where n is the number of points and h is the number of
+hull points.
+Space Complexity: O(h) where h is the number of hull points.
+
+USAGE:
+ -> Import this file into your project.
+ -> Use the jarvis_march() function to find the convex hull of a set of points.
+ -> Parameters:
+ -> points: A list of Point objects representing 2D coordinates
+
+REFERENCES:
+ -> Wikipedia reference: https://en.wikipedia.org/wiki/Gift_wrapping_algorithm
+ -> GeeksforGeeks:
+ https://www.geeksforgeeks.org/convex-hull-set-1-jarviss-algorithm-or-wrapping/
+"""
from __future__ import annotations
class Point:
+ """Represents a 2D point with x and y coordinates."""
def __init__(self, x_coordinate: float, y_coordinate: float) -> None:
self.x = x_coordinate
@@ -21,12 +42,21 @@
def _cross_product(origin: Point, point_a: Point, point_b: Point) -> float:
+ """
+ Calculate the cross product of vectors OA and OB.
+
+ Returns:
+ > 0: Counter-clockwise turn (left turn)
+ = 0: Collinear
+ < 0: Clockwise turn (right turn)
+ """
return (point_a.x - origin.x) * (point_b.y - origin.y) - (point_a.y - origin.y) * (
point_b.x - origin.x
)
def _is_point_on_segment(p1: Point, p2: Point, point: Point) -> bool:
+ """Check if a point lies on the line segment between p1 and p2."""
# Check if point is collinear with segment endpoints
cross = (point.y - p1.y) * (p2.x - p1.x) - (point.x - p1.x) * (p2.y - p1.y)
@@ -40,6 +70,7 @@
def _find_leftmost_point(points: list[Point]) -> int:
+ """Find index of leftmost point (and bottom-most in case of tie)."""
left_idx = 0
for i in range(1, len(points)):
if points[i].x < points[left_idx].x or (
@@ -50,6 +81,7 @@
def _find_next_hull_point(points: list[Point], current_idx: int) -> int:
+ """Find the next point on the convex hull."""
next_idx = (current_idx + 1) % len(points)
# Ensure next_idx is not the same as current_idx
while next_idx == current_idx:
@@ -66,6 +98,7 @@
def _is_valid_polygon(hull: list[Point]) -> bool:
+ """Check if hull forms a valid polygon (has at least one non-collinear turn)."""
for i in range(len(hull)):
p1 = hull[i]
p2 = hull[(i + 1) % len(hull)]
@@ -76,6 +109,7 @@
def _add_point_to_hull(hull: list[Point], point: Point) -> None:
+ """Add a point to hull, removing collinear intermediate points."""
last = len(hull) - 1
if len(hull) > 1 and _is_point_on_segment(hull[last - 1], hull[last], point):
hull[last] = Point(point.x, point.y)
@@ -84,6 +118,19 @@
def jarvis_march(points: list[Point]) -> list[Point]:
+ """
+ Find the convex hull of a set of points using the Jarvis March algorithm.
+
+ The algorithm starts with the leftmost point and wraps around the set of
+ points, selecting the most counter-clockwise point at each step.
+
+ Args:
+ points: List of Point objects representing 2D coordinates
+
+ Returns:
+ List of Points that form the convex hull in counter-clockwise order.
+ Returns empty list if there are fewer than 3 non-collinear points.
+ """
if len(points) <= 2:
return []
@@ -137,4 +184,4 @@ # Example usage
points = [Point(0, 0), Point(1, 1), Point(0, 1), Point(1, 0), Point(0.5, 0.5)]
hull = jarvis_march(points)
- print(f"Convex hull: {hull}")+ print(f"Convex hull: {hull}")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/geometry/jarvis_march.py |
Write docstrings describing each step |
from __future__ import annotations
from collections.abc import Sequence
from dataclasses import dataclass
from typing import TypeVar
T = TypeVar("T", bound="Point")
@dataclass
class Point:
x: float
y: float
def __init__(self, x_coordinate: float, y_coordinate: float) -> None:
self.x = float(x_coordinate)
self.y = float(y_coordinate)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Point):
return NotImplemented
return self.x == other.x and self.y == other.y
def __lt__(self, other: Point) -> bool:
if self.y == other.y:
return self.x < other.x
return self.y < other.y
def euclidean_distance(self, other: Point) -> float:
return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5
def consecutive_orientation(self, point_a: Point, point_b: Point) -> float:
return (point_a.x - self.x) * (point_b.y - point_a.y) - (point_a.y - self.y) * (
point_b.x - point_a.x
)
def graham_scan(points: Sequence[Point]) -> list[Point]:
if len(points) <= 2:
return []
# Find the bottom-most point (left-most in case of tie)
min_point = min(points)
# Remove the min_point from the list
points_list = [p for p in points if p != min_point]
if not points_list:
# Edge case where all points are the same
return []
def polar_angle_key(point: Point) -> tuple[float, float, float]:
# We use a dummy third point (min_point itself) to calculate relative angles
# Instead, we'll compute the angle between points
dx = point.x - min_point.x
dy = point.y - min_point.y
# Use atan2 for angle, but we can also use cross product for comparison
# For sorting, we compare orientations between consecutive points
distance = min_point.euclidean_distance(point)
return (dx, dy, -distance) # Negative distance to sort farther points first
# Sort by polar angle using a comparison based on cross product
def compare_points(point_a: Point, point_b: Point) -> int:
orientation = min_point.consecutive_orientation(point_a, point_b)
if orientation < 0.0:
return 1 # point_a comes after point_b (clockwise)
elif orientation > 0.0:
return -1 # point_a comes before point_b (counter-clockwise)
else:
# Collinear: farther point should come first
dist_a = min_point.euclidean_distance(point_a)
dist_b = min_point.euclidean_distance(point_b)
if dist_b < dist_a:
return -1
elif dist_b > dist_a:
return 1
else:
return 0
from functools import cmp_to_key
points_list.sort(key=cmp_to_key(compare_points))
# Build the convex hull
convex_hull: list[Point] = [min_point, points_list[0]]
for point in points_list[1:]:
# Skip consecutive points with the same angle (collinear with min_point)
if min_point.consecutive_orientation(point, convex_hull[-1]) == 0.0:
continue
# Remove points that create a clockwise turn (or are collinear)
while len(convex_hull) >= 2:
orientation = convex_hull[-2].consecutive_orientation(
convex_hull[-1], point
)
if orientation <= 0.0:
convex_hull.pop()
else:
break
convex_hull.append(point)
# Need at least 3 points for a valid convex hull
if len(convex_hull) <= 2:
return []
return convex_hull
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example usage
points = [
Point(0, 0),
Point(1, 0),
Point(2, 0),
Point(2, 1),
Point(2, 2),
Point(1, 2),
Point(0, 2),
Point(0, 1),
Point(1, 1), # Interior point
]
hull = graham_scan(points)
print("Convex hull vertices:")
for point in hull:
print(f" ({point.x}, {point.y})") | --- +++ @@ -1,3 +1,19 @@+"""
+Graham Scan algorithm for finding the convex hull of a set of points.
+
+The Graham scan is a method of computing the convex hull of a finite set of points
+in the plane with time complexity O(n log n). It is named after Ronald Graham, who
+published the original algorithm in 1972.
+
+The algorithm finds all vertices of the convex hull ordered along its boundary.
+It uses a stack to efficiently identify and remove points that would create
+non-convex angles.
+
+References:
+- https://en.wikipedia.org/wiki/Graham_scan
+- Graham, R.L. (1972). "An Efficient Algorithm for Determining the Convex Hull of a
+ Finite Planar Set"
+"""
from __future__ import annotations
@@ -10,34 +26,123 @@
@dataclass
class Point:
+ """
+ A point in 2D space.
+
+ >>> Point(0, 0)
+ Point(x=0.0, y=0.0)
+ >>> Point(1.5, 2.5)
+ Point(x=1.5, y=2.5)
+ """
x: float
y: float
def __init__(self, x_coordinate: float, y_coordinate: float) -> None:
+ """
+ Initialize a 2D point.
+
+ Args:
+ x_coordinate: The x-coordinate (horizontal position) of the point
+ y_coordinate: The y-coordinate (vertical position) of the point
+ """
self.x = float(x_coordinate)
self.y = float(y_coordinate)
def __eq__(self, other: object) -> bool:
+ """
+ Check if two points are equal.
+
+ >>> Point(1, 2) == Point(1, 2)
+ True
+ >>> Point(1, 2) == Point(2, 1)
+ False
+ """
if not isinstance(other, Point):
return NotImplemented
return self.x == other.x and self.y == other.y
def __lt__(self, other: Point) -> bool:
+ """
+ Compare two points for sorting (bottom-most, then left-most).
+
+ >>> Point(1, 2) < Point(1, 3)
+ True
+ >>> Point(1, 2) < Point(2, 2)
+ True
+ >>> Point(2, 2) < Point(1, 2)
+ False
+ """
if self.y == other.y:
return self.x < other.x
return self.y < other.y
def euclidean_distance(self, other: Point) -> float:
+ """
+ Calculate Euclidean distance between two points.
+
+ >>> Point(0, 0).euclidean_distance(Point(3, 4))
+ 5.0
+ >>> Point(1, 1).euclidean_distance(Point(4, 5))
+ 5.0
+ """
return ((self.x - other.x) ** 2 + (self.y - other.y) ** 2) ** 0.5
def consecutive_orientation(self, point_a: Point, point_b: Point) -> float:
+ """
+ Calculate the cross product of vectors (self -> point_a) and
+ (point_a -> point_b).
+
+ Returns:
+ - Positive value: counter-clockwise turn
+ - Negative value: clockwise turn
+ - Zero: collinear points
+
+ >>> Point(0, 0).consecutive_orientation(Point(1, 0), Point(1, 1))
+ 1.0
+ >>> Point(0, 0).consecutive_orientation(Point(1, 0), Point(1, -1))
+ -1.0
+ >>> Point(0, 0).consecutive_orientation(Point(1, 0), Point(2, 0))
+ 0.0
+ """
return (point_a.x - self.x) * (point_b.y - point_a.y) - (point_a.y - self.y) * (
point_b.x - point_a.x
)
def graham_scan(points: Sequence[Point]) -> list[Point]:
+ """
+ Find the convex hull of a set of points using the Graham scan algorithm.
+
+ The algorithm works as follows:
+ 1. Find the bottom-most point (or left-most in case of tie)
+ 2. Sort all other points by polar angle with respect to the bottom-most point
+ 3. Process points in order, maintaining a stack of hull candidates
+ 4. Remove points that would create a clockwise turn
+
+ Args:
+ points: A sequence of Point objects
+
+ Returns:
+ A list of Point objects representing the convex hull in counter-clockwise order.
+ Returns an empty list if there are fewer than 3 distinct points or if all
+ points are collinear.
+
+ Time Complexity: O(n log n) due to sorting
+ Space Complexity: O(n) for the output hull
+
+ >>> graham_scan([])
+ []
+ >>> graham_scan([Point(0, 0)])
+ []
+ >>> graham_scan([Point(0, 0), Point(1, 1)])
+ []
+ >>> hull = graham_scan([Point(0, 0), Point(1, 0), Point(0.5, 1)])
+ >>> len(hull)
+ 3
+ >>> Point(0, 0) in hull and Point(1, 0) in hull and Point(0.5, 1) in hull
+ True
+ """
if len(points) <= 2:
return []
@@ -51,6 +156,12 @@ return []
def polar_angle_key(point: Point) -> tuple[float, float, float]:
+ """
+ Key function for sorting points by polar angle relative to min_point.
+
+ Points are sorted counter-clockwise. When two points have the same angle,
+ the farther point comes first (we'll remove duplicates later).
+ """
# We use a dummy third point (min_point itself) to calculate relative angles
# Instead, we'll compute the angle between points
dx = point.x - min_point.x
@@ -63,6 +174,7 @@
# Sort by polar angle using a comparison based on cross product
def compare_points(point_a: Point, point_b: Point) -> int:
+ """Compare two points by polar angle relative to min_point."""
orientation = min_point.consecutive_orientation(point_a, point_b)
if orientation < 0.0:
return 1 # point_a comes after point_b (clockwise)
@@ -131,4 +243,4 @@ hull = graham_scan(points)
print("Convex hull vertices:")
for point in hull:
- print(f" ({point.x}, {point.y})")+ print(f" ({point.x}, {point.y})")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/geometry/graham_scan.py |
Add docstrings to improve collaboration |
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(
x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
if not all(isinstance(val, (float, int)) for val in locals().values()):
msg = f"Input values must either be float or int: {list(locals().values())}"
raise TypeError(msg)
projected_x = ((x * distance) / (z + distance)) * scale
projected_y = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate(
x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
if not isinstance(axis, str):
raise TypeError("Axis must be a str")
input_variables = locals()
del input_variables["axis"]
if not all(isinstance(val, (float, int)) for val in input_variables.values()):
msg = (
"Input values except axis must either be float or int: "
f"{list(input_variables.values())}"
)
raise TypeError(msg)
angle = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
new_x = x * math.cos(angle) - y * math.sin(angle)
new_y = y * math.cos(angle) + x * math.sin(angle)
new_z = z
elif axis == "x":
new_y = y * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + y * math.sin(angle)
new_x = x
elif axis == "y":
new_x = x * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + x * math.sin(angle)
new_y = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }") | --- +++ @@ -1,3 +1,6 @@+"""
+render 3d points for 2d surfaces.
+"""
from __future__ import annotations
@@ -10,6 +13,20 @@ def convert_to_2d(
x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
+ """
+ Converts 3d point to a 2d drawable point
+
+ >>> convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
+ (7.6923076923076925, 15.384615384615385)
+
+ >>> convert_to_2d(1, 2, 3, 10, 10)
+ (7.6923076923076925, 15.384615384615385)
+
+ >>> convert_to_2d("1", 2, 3, 10, 10) # '1' is str
+ Traceback (most recent call last):
+ ...
+ TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10]
+ """
if not all(isinstance(val, (float, int)) for val in locals().values()):
msg = f"Input values must either be float or int: {list(locals().values())}"
raise TypeError(msg)
@@ -21,6 +38,33 @@ def rotate(
x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
+ """
+ rotate a point around a certain axis with a certain angle
+ angle can be any integer between 1, 360 and axis can be any one of
+ 'x', 'y', 'z'
+
+ >>> rotate(1.0, 2.0, 3.0, 'y', 90.0)
+ (3.130524675073759, 2.0, 0.4470070007889556)
+
+ >>> rotate(1, 2, 3, "z", 180)
+ (0.999736015495891, -2.0001319704760485, 3)
+
+ >>> rotate('1', 2, 3, "z", 90.0) # '1' is str
+ Traceback (most recent call last):
+ ...
+ TypeError: Input values except axis must either be float or int: ['1', 2, 3, 90.0]
+
+ >>> rotate(1, 2, 3, "n", 90) # 'n' is not a valid axis
+ Traceback (most recent call last):
+ ...
+ ValueError: not a valid axis, choose one of 'x', 'y', 'z'
+
+ >>> rotate(1, 2, 3, "x", -90)
+ (1, -2.5049096187183877, -2.5933429780983657)
+
+ >>> rotate(1, 2, 3, "x", 450) # 450 wrap around to 90
+ (1, 3.5776792428178217, -0.44744970165427644)
+ """
if not isinstance(axis, str):
raise TypeError("Axis must be a str")
input_variables = locals()
@@ -55,4 +99,4 @@
doctest.testmod()
print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
- print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")+ print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphics/vector3_for_2d_rendering.py |
Add docstrings explaining edge cases |
import copy
import random
cities = {
0: [0, 0],
1: [0, 5],
2: [3, 8],
3: [8, 10],
4: [12, 8],
5: [12, 4],
6: [8, 0],
7: [6, 2],
}
def main(
cities: dict[int, list[int]],
ants_num: int,
iterations_num: int,
pheromone_evaporation: float,
alpha: float,
beta: float,
q: float, # Pheromone system parameters Q, which is a constant
) -> tuple[list[int], float]:
# Initialize the pheromone matrix
cities_num = len(cities)
pheromone = [[1.0] * cities_num] * cities_num
best_path: list[int] = []
best_distance = float("inf")
for _ in range(iterations_num):
ants_route = []
for _ in range(ants_num):
unvisited_cities = copy.deepcopy(cities)
current_city = {next(iter(cities.keys())): next(iter(cities.values()))}
del unvisited_cities[next(iter(current_city.keys()))]
ant_route = [next(iter(current_city.keys()))]
while unvisited_cities:
current_city, unvisited_cities = city_select(
pheromone, current_city, unvisited_cities, alpha, beta
)
ant_route.append(next(iter(current_city.keys())))
ant_route.append(0)
ants_route.append(ant_route)
pheromone, best_path, best_distance = pheromone_update(
pheromone,
cities,
pheromone_evaporation,
ants_route,
q,
best_path,
best_distance,
)
return best_path, best_distance
def distance(city1: list[int], city2: list[int]) -> float:
return (((city1[0] - city2[0]) ** 2) + ((city1[1] - city2[1]) ** 2)) ** 0.5
def pheromone_update(
pheromone: list[list[float]],
cities: dict[int, list[int]],
pheromone_evaporation: float,
ants_route: list[list[int]],
q: float, # Pheromone system parameters Q, which is a constant
best_path: list[int],
best_distance: float,
) -> tuple[list[list[float]], list[int], float]:
for a in range(len(cities)): # Update the volatilization of pheromone on all routes
for b in range(len(cities)):
pheromone[a][b] *= pheromone_evaporation
for ant_route in ants_route:
total_distance = 0.0
for i in range(len(ant_route) - 1): # Calculate total distance
total_distance += distance(cities[ant_route[i]], cities[ant_route[i + 1]])
delta_pheromone = q / total_distance
for i in range(len(ant_route) - 1): # Update pheromones
pheromone[ant_route[i]][ant_route[i + 1]] += delta_pheromone
pheromone[ant_route[i + 1]][ant_route[i]] = pheromone[ant_route[i]][
ant_route[i + 1]
]
if total_distance < best_distance:
best_path = ant_route
best_distance = total_distance
return pheromone, best_path, best_distance
def city_select(
pheromone: list[list[float]],
current_city: dict[int, list[int]],
unvisited_cities: dict[int, list[int]],
alpha: float,
beta: float,
) -> tuple[dict[int, list[int]], dict[int, list[int]]]:
probabilities = []
for city, value in unvisited_cities.items():
city_distance = distance(value, next(iter(current_city.values())))
probability = (pheromone[city][next(iter(current_city.keys()))] ** alpha) * (
(1 / city_distance) ** beta
)
probabilities.append(probability)
chosen_city_i = random.choices(
list(unvisited_cities.keys()), weights=probabilities
)[0]
chosen_city = {chosen_city_i: unvisited_cities[chosen_city_i]}
del unvisited_cities[next(iter(chosen_city.keys()))]
return chosen_city, unvisited_cities
if __name__ == "__main__":
best_path, best_distance = main(
cities=cities,
ants_num=10,
iterations_num=20,
pheromone_evaporation=0.7,
alpha=1.0,
beta=5.0,
q=10,
)
print(f"{best_path = }")
print(f"{best_distance = }") | --- +++ @@ -1,3 +1,15 @@+"""
+Use an ant colony optimization algorithm to solve the travelling salesman problem (TSP)
+which asks the following question:
+"Given a list of cities and the distances between each pair of cities, what is the
+ shortest possible route that visits each city exactly once and returns to the origin
+ city?"
+
+https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms
+https://en.wikipedia.org/wiki/Travelling_salesman_problem
+
+Author: Clark
+"""
import copy
import random
@@ -23,6 +35,37 @@ beta: float,
q: float, # Pheromone system parameters Q, which is a constant
) -> tuple[list[int], float]:
+ """
+ Ant colony algorithm main function
+ >>> main(cities=cities, ants_num=10, iterations_num=20,
+ ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
+ ([0, 1, 2, 3, 4, 5, 6, 7, 0], 37.909778143828696)
+ >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
+ ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
+ ([0, 1, 0], 5.656854249492381)
+ >>> main(cities={0: [0, 0], 1: [2, 2], 4: [4, 4]}, ants_num=5, iterations_num=5,
+ ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> main(cities={}, ants_num=5, iterations_num=5,
+ ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=0, iterations_num=5,
+ ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
+ ([], inf)
+ >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=0,
+ ... pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
+ ([], inf)
+ >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
+ ... pheromone_evaporation=1, alpha=1.0, beta=5.0, q=10)
+ ([0, 1, 0], 5.656854249492381)
+ >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
+ ... pheromone_evaporation=0, alpha=1.0, beta=5.0, q=10)
+ ([0, 1, 0], 5.656854249492381)
+ """
# Initialize the pheromone matrix
cities_num = len(cities)
pheromone = [[1.0] * cities_num] * cities_num
@@ -57,6 +100,15 @@
def distance(city1: list[int], city2: list[int]) -> float:
+ """
+ Calculate the distance between two coordinate points
+ >>> distance([0, 0], [3, 4] )
+ 5.0
+ >>> distance([0, 0], [-3, 4] )
+ 5.0
+ >>> distance([0, 0], [-3, -4] )
+ 5.0
+ """
return (((city1[0] - city2[0]) ** 2) + ((city1[1] - city2[1]) ** 2)) ** 0.5
@@ -69,6 +121,29 @@ best_path: list[int],
best_distance: float,
) -> tuple[list[list[float]], list[int], float]:
+ """
+ Update pheromones on the route and update the best route
+ >>>
+ >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]],
+ ... cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7,
+ ... ants_route=[[0, 1, 0]], q=10, best_path=[],
+ ... best_distance=float("inf"))
+ ([[0.7, 4.235533905932737], [4.235533905932737, 0.7]], [0, 1, 0], 5.656854249492381)
+ >>> pheromone_update(pheromone=[],
+ ... cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7,
+ ... ants_route=[[0, 1, 0]], q=10, best_path=[],
+ ... best_distance=float("inf"))
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]],
+ ... cities={}, pheromone_evaporation=0.7,
+ ... ants_route=[[0, 1, 0]], q=10, best_path=[],
+ ... best_distance=float("inf"))
+ Traceback (most recent call last):
+ ...
+ KeyError: 0
+ """
for a in range(len(cities)): # Update the volatilization of pheromone on all routes
for b in range(len(cities)):
pheromone[a][b] *= pheromone_evaporation
@@ -97,6 +172,27 @@ alpha: float,
beta: float,
) -> tuple[dict[int, list[int]], dict[int, list[int]]]:
+ """
+ Choose the next city for ants
+ >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]},
+ ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
+ ({1: [2, 2]}, {})
+ >>> city_select(pheromone=[], current_city={0: [0,0]},
+ ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={},
+ ... unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]},
+ ... unvisited_cities={}, alpha=1.0, beta=5.0)
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ """
probabilities = []
for city, value in unvisited_cities.items():
city_distance = distance(value, next(iter(current_city.values())))
@@ -125,4 +221,4 @@ )
print(f"{best_path = }")
- print(f"{best_distance = }")+ print(f"{best_distance = }")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/ant_colony_optimization_algorithms.py |
Turn comments into proper docstrings | from collections import deque
def _input(message):
return input(message).strip().split(" ")
def initialize_unweighted_directed_graph(
node_count: int, edge_count: int
) -> dict[int, list[int]]:
graph: dict[int, list[int]] = {}
for i in range(node_count):
graph[i + 1] = []
for e in range(edge_count):
x, y = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> "))
graph[x].append(y)
return graph
def initialize_unweighted_undirected_graph(
node_count: int, edge_count: int
) -> dict[int, list[int]]:
graph: dict[int, list[int]] = {}
for i in range(node_count):
graph[i + 1] = []
for e in range(edge_count):
x, y = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> "))
graph[x].append(y)
graph[y].append(x)
return graph
def initialize_weighted_undirected_graph(
node_count: int, edge_count: int
) -> dict[int, list[tuple[int, int]]]:
graph: dict[int, list[tuple[int, int]]] = {}
for i in range(node_count):
graph[i + 1] = []
for e in range(edge_count):
x, y, w = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> <weight> "))
graph[x].append((y, w))
graph[y].append((x, w))
return graph
if __name__ == "__main__":
n, m = (int(i) for i in _input("Number of nodes and edges: "))
graph_choice = int(
_input(
"Press 1 or 2 or 3 \n"
"1. Unweighted directed \n"
"2. Unweighted undirected \n"
"3. Weighted undirected \n"
)[0]
)
g = {
1: initialize_unweighted_directed_graph,
2: initialize_unweighted_undirected_graph,
3: initialize_weighted_undirected_graph,
}[graph_choice](n, m)
"""
--------------------------------------------------------------------------------
Depth First Search.
Args : G - Dictionary of edges
s - Starting Node
Vars : vis - Set of visited nodes
S - Traversal Stack
--------------------------------------------------------------------------------
"""
def dfs(g, s):
vis, _s = {s}, [s]
print(s)
while _s:
flag = 0
for i in g[_s[-1]]:
if i not in vis:
_s.append(i)
vis.add(i)
flag = 1
print(i)
break
if not flag:
_s.pop()
"""
--------------------------------------------------------------------------------
Breadth First Search.
Args : G - Dictionary of edges
s - Starting Node
Vars : vis - Set of visited nodes
Q - Traversal Stack
--------------------------------------------------------------------------------
"""
def bfs(g, s):
vis, q = {s}, deque([s])
print(s)
while q:
u = q.popleft()
for v in g[u]:
if v not in vis:
vis.add(v)
q.append(v)
print(v)
"""
--------------------------------------------------------------------------------
Dijkstra's shortest path Algorithm
Args : G - Dictionary of edges
s - Starting Node
Vars : dist - Dictionary storing shortest distance from s to every other node
known - Set of knows nodes
path - Preceding node in path
--------------------------------------------------------------------------------
"""
def dijk(g, s):
dist, known, path = {s: 0}, set(), {s: 0}
while True:
if len(known) == len(g) - 1:
break
mini = 100000
for key, value in dist:
if key not in known and value < mini:
mini = value
u = key
known.add(u)
for v in g[u]:
if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000):
dist[v[0]] = dist[u] + v[1]
path[v[0]] = u
for key, value in dist.items():
if key != s:
print(value)
"""
--------------------------------------------------------------------------------
Topological Sort
--------------------------------------------------------------------------------
"""
def topo(g, ind=None, q=None):
if q is None:
q = [1]
if ind is None:
ind = [0] * (len(g) + 1) # SInce oth Index is ignored
for u in g:
for v in g[u]:
ind[v] += 1
q = deque()
for i in g:
if ind[i] == 0:
q.append(i)
if len(q) == 0:
return
v = q.popleft()
print(v)
for w in g[v]:
ind[w] -= 1
if ind[w] == 0:
q.append(w)
topo(g, ind, q)
"""
--------------------------------------------------------------------------------
Reading an Adjacency matrix
--------------------------------------------------------------------------------
"""
def adjm():
n = int(input().strip())
a = []
for _ in range(n):
a.append(tuple(map(int, input().strip().split())))
return a, n
"""
--------------------------------------------------------------------------------
Floyd Warshall's algorithm
Args : G - Dictionary of edges
s - Starting Node
Vars : dist - Dictionary storing shortest distance from s to every other node
known - Set of knows nodes
path - Preceding node in path
--------------------------------------------------------------------------------
"""
def floy(a_and_n):
(a, n) = a_and_n
dist = list(a)
path = [[0] * n for i in range(n)]
for k in range(n):
for i in range(n):
for j in range(n):
if dist[i][j] > dist[i][k] + dist[k][j]:
dist[i][j] = dist[i][k] + dist[k][j]
path[i][k] = k
print(dist)
"""
--------------------------------------------------------------------------------
Prim's MST Algorithm
Args : G - Dictionary of edges
s - Starting Node
Vars : dist - Dictionary storing shortest distance from s to nearest node
known - Set of knows nodes
path - Preceding node in path
--------------------------------------------------------------------------------
"""
def prim(g, s):
dist, known, path = {s: 0}, set(), {s: 0}
while True:
if len(known) == len(g) - 1:
break
mini = 100000
for key, value in dist.items():
if key not in known and value < mini:
mini = value
u = key
known.add(u)
for v in g[u]:
if v[0] not in known and v[1] < dist.get(v[0], 100000):
dist[v[0]] = v[1]
path[v[0]] = u
return dist
"""
--------------------------------------------------------------------------------
Accepting Edge list
Vars : n - Number of nodes
m - Number of edges
Returns : l - Edge list
n - Number of Nodes
--------------------------------------------------------------------------------
"""
def edglist():
n, m = tuple(map(int, input().split(" ")))
edges = []
for _ in range(m):
edges.append(tuple(map(int, input().split(" "))))
return edges, n
"""
--------------------------------------------------------------------------------
Kruskal's MST Algorithm
Args : E - Edge list
n - Number of Nodes
Vars : s - Set of all nodes as unique disjoint sets (initially)
--------------------------------------------------------------------------------
"""
def krusk(e_and_n):
(e, n) = e_and_n
e.sort(reverse=True, key=lambda x: x[2])
s = [{i} for i in range(1, n + 1)]
while True:
if len(s) == 1:
break
print(s)
x = e.pop()
for i in range(len(s)):
if x[0] in s[i]:
break
for j in range(len(s)):
if x[1] in s[j]:
if i == j:
break
s[j].update(s[i])
s.pop(i)
break
def find_isolated_nodes(graph):
isolated = []
for node in graph:
if not graph[node]:
isolated.append(node)
return isolated | --- +++ @@ -77,6 +77,14 @@
def dfs(g, s):
+ """
+ >>> dfs({1: [2, 3], 2: [4, 5], 3: [], 4: [], 5: []}, 1)
+ 1
+ 2
+ 4
+ 5
+ 3
+ """
vis, _s = {s}, [s]
print(s)
while _s:
@@ -104,6 +112,17 @@
def bfs(g, s):
+ """
+ >>> bfs({1: [2, 3], 2: [4, 5], 3: [6, 7], 4: [], 5: [8], 6: [], 7: [], 8: []}, 1)
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ """
vis, q = {s}, deque([s])
print(s)
while q:
@@ -128,6 +147,19 @@
def dijk(g, s):
+ """
+ dijk({1: [(2, 7), (3, 9), (6, 14)],
+ 2: [(1, 7), (3, 10), (4, 15)],
+ 3: [(1, 9), (2, 10), (4, 11), (6, 2)],
+ 4: [(2, 15), (3, 11), (5, 6)],
+ 5: [(4, 6), (6, 9)],
+ 6: [(1, 14), (3, 2), (5, 9)]}, 1)
+ 7
+ 9
+ 11
+ 20
+ 20
+ """
dist, known, path = {s: 0}, set(), {s: 0}
while True:
if len(known) == len(g) - 1:
@@ -185,6 +217,25 @@
def adjm():
+ r"""
+ Reading an Adjacency matrix
+
+ Parameters:
+ None
+
+ Returns:
+ tuple: A tuple containing a list of edges and number of edges
+
+ Example:
+ >>> # Simulate user input for 3 nodes
+ >>> input_data = "4\n0 1 0 1\n1 0 1 0\n0 1 0 1\n1 0 1 0\n"
+ >>> import sys,io
+ >>> original_input = sys.stdin
+ >>> sys.stdin = io.StringIO(input_data) # Redirect stdin for testing
+ >>> adjm()
+ ([(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)], 4)
+ >>> sys.stdin = original_input # Restore original stdin
+ """
n = int(input().strip())
a = []
for _ in range(n):
@@ -260,6 +311,25 @@
def edglist():
+ r"""
+ Get the edges and number of edges from the user
+
+ Parameters:
+ None
+
+ Returns:
+ tuple: A tuple containing a list of edges and number of edges
+
+ Example:
+ >>> # Simulate user input for 3 edges and 4 vertices: (1, 2), (2, 3), (3, 4)
+ >>> input_data = "4 3\n1 2\n2 3\n3 4\n"
+ >>> import sys,io
+ >>> original_input = sys.stdin
+ >>> sys.stdin = io.StringIO(input_data) # Redirect stdin for testing
+ >>> edglist()
+ ([(1, 2), (2, 3), (3, 4)], 4)
+ >>> sys.stdin = original_input # Restore original stdin
+ """
n, m = tuple(map(int, input().split(" ")))
edges = []
for _ in range(m):
@@ -278,6 +348,9 @@
def krusk(e_and_n):
+ """
+ Sort edges on the basis of distance
+ """
(e, n) = e_and_n
e.sort(reverse=True, key=lambda x: x[2])
s = [{i} for i in range(1, n + 1)]
@@ -299,8 +372,38 @@
def find_isolated_nodes(graph):
+ """
+ Find the isolated node in the graph
+
+ Parameters:
+ graph (dict): A dictionary representing a graph.
+
+ Returns:
+ list: A list of isolated nodes.
+
+ Examples:
+ >>> graph1 = {1: [2, 3], 2: [1, 3], 3: [1, 2], 4: []}
+ >>> find_isolated_nodes(graph1)
+ [4]
+
+ >>> graph2 = {'A': ['B', 'C'], 'B': ['A'], 'C': ['A'], 'D': []}
+ >>> find_isolated_nodes(graph2)
+ ['D']
+
+ >>> graph3 = {'X': [], 'Y': [], 'Z': []}
+ >>> find_isolated_nodes(graph3)
+ ['X', 'Y', 'Z']
+
+ >>> graph4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
+ >>> find_isolated_nodes(graph4)
+ []
+
+ >>> graph5 = {}
+ >>> find_isolated_nodes(graph5)
+ []
+ """
isolated = []
for node in graph:
if not graph[node]:
isolated.append(node)
- return isolated+ return isolated
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/basic_graphs.py |
Add docstrings that explain inputs and outputs | #!/usr/bin/python
from __future__ import annotations
from queue import Queue
class Graph:
def __init__(self) -> None:
self.vertices: dict[int, list[int]] = {}
def print_graph(self) -> None:
for i in self.vertices:
print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]]))
def add_edge(self, from_vertex: int, to_vertex: int) -> None:
if from_vertex in self.vertices:
self.vertices[from_vertex].append(to_vertex)
else:
self.vertices[from_vertex] = [to_vertex]
def bfs(self, start_vertex: int) -> set[int]:
# initialize set for storing already visited vertices
visited = set()
# create a first in first out queue to store all the vertices for BFS
queue: Queue = Queue()
# mark the source node as visited and enqueue it
visited.add(start_vertex)
queue.put(start_vertex)
while not queue.empty():
vertex = queue.get()
# loop through all adjacent vertex and enqueue it if not yet visited
for adjacent_vertex in self.vertices[vertex]:
if adjacent_vertex not in visited:
queue.put(adjacent_vertex)
visited.add(adjacent_vertex)
return visited
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
# 0 : 1 -> 2
# 1 : 2
# 2 : 0 -> 3
# 3 : 3
assert sorted(g.bfs(2)) == [0, 1, 2, 3] | --- +++ @@ -1,5 +1,6 @@ #!/usr/bin/python
+"""Author: OMKAR PATHAK"""
from __future__ import annotations
@@ -11,16 +12,44 @@ self.vertices: dict[int, list[int]] = {}
def print_graph(self) -> None:
+ """
+ prints adjacency list representation of graaph
+ >>> g = Graph()
+ >>> g.print_graph()
+ >>> g.add_edge(0, 1)
+ >>> g.print_graph()
+ 0 : 1
+ """
for i in self.vertices:
print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]]))
def add_edge(self, from_vertex: int, to_vertex: int) -> None:
+ """
+ adding the edge between two vertices
+ >>> g = Graph()
+ >>> g.print_graph()
+ >>> g.add_edge(0, 1)
+ >>> g.print_graph()
+ 0 : 1
+ """
if from_vertex in self.vertices:
self.vertices[from_vertex].append(to_vertex)
else:
self.vertices[from_vertex] = [to_vertex]
def bfs(self, start_vertex: int) -> set[int]:
+ """
+ >>> g = Graph()
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 2)
+ >>> g.add_edge(1, 2)
+ >>> g.add_edge(2, 0)
+ >>> g.add_edge(2, 3)
+ >>> g.add_edge(3, 3)
+ >>> sorted(g.bfs(2))
+ [0, 1, 2, 3]
+ """
# initialize set for storing already visited vertices
visited = set()
@@ -61,4 +90,4 @@ # 2 : 0 -> 3
# 3 : 3
- assert sorted(g.bfs(2)) == [0, 1, 2, 3]+ assert sorted(g.bfs(2)) == [0, 1, 2, 3]
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/breadth_first_search.py |
Write docstrings describing functionality |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(
vertex1: tuple[float, float],
vertex2: tuple[float, float],
vertex3: tuple[float, float],
depth: int,
) -> None:
my_pen.up()
my_pen.goto(vertex1[0], vertex1[1])
my_pen.down()
my_pen.goto(vertex2[0], vertex2[1])
my_pen.goto(vertex3[0], vertex3[1])
my_pen.goto(vertex1[0], vertex1[1])
if depth == 0:
return
triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
turtle.Screen().exitonclick() | --- +++ @@ -1,9 +1,47 @@+"""
+Author Anurag Kumar | anuragkumarak95@gmail.com | git/anuragkumarak95
+
+Simple example of fractal generation using recursion.
+
+What is the Sierpiński Triangle?
+ The Sierpiński triangle (sometimes spelled Sierpinski), also called the
+Sierpiński gasket or Sierpiński sieve, is a fractal attractive fixed set with
+the overall shape of an equilateral triangle, subdivided recursively into
+smaller equilateral triangles. Originally constructed as a curve, this is one of
+the basic examples of self-similar sets—that is, it is a mathematically
+generated pattern that is reproducible at any magnification or reduction. It is
+named after the Polish mathematician Wacław Sierpiński, but appeared as a
+decorative pattern many centuries before the work of Sierpiński.
+
+
+Usage: python sierpinski_triangle.py <int:depth_for_fractal>
+
+Credits:
+ The above description is taken from
+ https://en.wikipedia.org/wiki/Sierpi%C5%84ski_triangle
+ This code was written by editing the code from
+ https://www.riannetrujillo.com/blog/python-fractal/
+"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
+ """
+ Find the midpoint of two points
+
+ >>> get_mid((0, 0), (2, 2))
+ (1.0, 1.0)
+ >>> get_mid((-3, -3), (3, 3))
+ (0.0, 0.0)
+ >>> get_mid((1, 0), (3, 2))
+ (2.0, 1.0)
+ >>> get_mid((0, 0), (1, 1))
+ (0.5, 0.5)
+ >>> get_mid((0, 0), (0, 0))
+ (0.0, 0.0)
+ """
return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
@@ -13,6 +51,10 @@ vertex3: tuple[float, float],
depth: int,
) -> None:
+ """
+ Recursively draw the Sierpinski triangle given the vertices of the triangle
+ and the recursion depth
+ """
my_pen.up()
my_pen.goto(vertex1[0], vertex1[1])
my_pen.down()
@@ -41,4 +83,4 @@
vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
- turtle.Screen().exitonclick()+ turtle.Screen().exitonclick()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/fractals/sierpinski_triangle.py |
Help me write clear docstrings |
import numpy as np
def validate_adjacency_list(graph: list[list[int | None]]) -> None:
if not isinstance(graph, list):
raise ValueError("Graph should be a list of lists.")
for node_index, neighbors in enumerate(graph):
if not isinstance(neighbors, list):
no_neighbors_message: str = (
f"Node {node_index} should have a list of neighbors."
)
raise ValueError(no_neighbors_message)
for neighbor_index in neighbors:
if (
not isinstance(neighbor_index, int)
or neighbor_index < 0
or neighbor_index >= len(graph)
):
invalid_neighbor_message: str = (
f"Invalid neighbor {neighbor_index} in node {node_index} "
f"adjacency list."
)
raise ValueError(invalid_neighbor_message)
def lanczos_iteration(
graph: list[list[int | None]], num_eigenvectors: int
) -> tuple[np.ndarray, np.ndarray]:
num_nodes: int = len(graph)
if not (1 <= num_eigenvectors <= num_nodes):
raise ValueError(
"Number of eigenvectors must be between 1 and the number of "
"nodes in the graph."
)
orthonormal_basis: np.ndarray = np.zeros((num_nodes, num_eigenvectors))
tridiagonal_matrix: np.ndarray = np.zeros((num_eigenvectors, num_eigenvectors))
rng = np.random.default_rng()
initial_vector: np.ndarray = rng.random(num_nodes)
initial_vector /= np.sqrt(np.dot(initial_vector, initial_vector))
orthonormal_basis[:, 0] = initial_vector
prev_beta: float = 0.0
for iter_index in range(num_eigenvectors):
result_vector: np.ndarray = multiply_matrix_vector(
graph, orthonormal_basis[:, iter_index]
)
if iter_index > 0:
result_vector -= prev_beta * orthonormal_basis[:, iter_index - 1]
alpha_value: float = np.dot(orthonormal_basis[:, iter_index], result_vector)
result_vector -= alpha_value * orthonormal_basis[:, iter_index]
prev_beta = np.sqrt(np.dot(result_vector, result_vector))
if iter_index < num_eigenvectors - 1 and prev_beta > 1e-10:
orthonormal_basis[:, iter_index + 1] = result_vector / prev_beta
tridiagonal_matrix[iter_index, iter_index] = alpha_value
if iter_index < num_eigenvectors - 1:
tridiagonal_matrix[iter_index, iter_index + 1] = prev_beta
tridiagonal_matrix[iter_index + 1, iter_index] = prev_beta
return tridiagonal_matrix, orthonormal_basis
def multiply_matrix_vector(
graph: list[list[int | None]], vector: np.ndarray
) -> np.ndarray:
num_nodes: int = len(graph)
if vector.shape[0] != num_nodes:
raise ValueError("Vector length must match the number of nodes in the graph.")
result: np.ndarray = np.zeros(num_nodes)
for node_index, neighbors in enumerate(graph):
for neighbor_index in neighbors:
result[node_index] += vector[neighbor_index]
return result
def find_lanczos_eigenvectors(
graph: list[list[int | None]], num_eigenvectors: int
) -> tuple[np.ndarray, np.ndarray]:
validate_adjacency_list(graph)
tridiagonal_matrix, orthonormal_basis = lanczos_iteration(graph, num_eigenvectors)
eigenvalues, eigenvectors = np.linalg.eigh(tridiagonal_matrix)
return eigenvalues[::-1], np.dot(orthonormal_basis, eigenvectors[:, ::-1])
def main() -> None:
import doctest
doctest.testmod()
if __name__ == "__main__":
main() | --- +++ @@ -1,8 +1,51 @@+"""
+Lanczos Method for Finding Eigenvalues and Eigenvectors of a Graph.
+
+This module demonstrates the Lanczos method to approximate the largest eigenvalues
+and corresponding eigenvectors of a symmetric matrix represented as a graph's
+adjacency list. The method efficiently handles large, sparse matrices by converting
+the graph to a tridiagonal matrix, whose eigenvalues and eigenvectors are then
+computed.
+
+Key Functions:
+- `find_lanczos_eigenvectors`: Computes the k largest eigenvalues and vectors.
+- `lanczos_iteration`: Constructs the tridiagonal matrix and orthonormal basis vectors.
+- `multiply_matrix_vector`: Multiplies an adjacency list graph with a vector.
+
+Complexity:
+- Time: O(k * n), where k is the number of eigenvalues and n is the matrix size.
+- Space: O(n), due to sparse representation and tridiagonal matrix structure.
+
+Further Reading:
+- Lanczos Algorithm: https://en.wikipedia.org/wiki/Lanczos_algorithm
+- Eigenvector Centrality: https://en.wikipedia.org/wiki/Eigenvector_centrality
+
+Example Usage:
+Given a graph represented by an adjacency list, the `find_lanczos_eigenvectors`
+function returns the largest eigenvalues and eigenvectors. This can be used to
+analyze graph centrality.
+"""
import numpy as np
def validate_adjacency_list(graph: list[list[int | None]]) -> None:
+ """Validates the adjacency list format for the graph.
+
+ Args:
+ graph: A list of lists where each sublist contains the neighbors of a node.
+
+ Raises:
+ ValueError: If the graph is not a list of lists, or if any node has
+ invalid neighbors (e.g., out-of-range or non-integer values).
+
+ >>> validate_adjacency_list([[1, 2], [0], [0, 1]])
+ >>> validate_adjacency_list([[]]) # No neighbors, valid case
+ >>> validate_adjacency_list([[1], [2], [-1]]) # Invalid neighbor
+ Traceback (most recent call last):
+ ...
+ ValueError: Invalid neighbor -1 in node 2 adjacency list.
+ """
if not isinstance(graph, list):
raise ValueError("Graph should be a list of lists.")
@@ -28,6 +71,30 @@ def lanczos_iteration(
graph: list[list[int | None]], num_eigenvectors: int
) -> tuple[np.ndarray, np.ndarray]:
+ """Constructs the tridiagonal matrix and orthonormal basis vectors using the
+ Lanczos method.
+
+ Args:
+ graph: The graph represented as a list of adjacency lists.
+ num_eigenvectors: The number of largest eigenvalues and eigenvectors
+ to approximate.
+
+ Returns:
+ A tuple containing:
+ - tridiagonal_matrix: A (num_eigenvectors x num_eigenvectors) symmetric
+ matrix.
+ - orthonormal_basis: A (num_nodes x num_eigenvectors) matrix of orthonormal
+ basis vectors.
+
+ Raises:
+ ValueError: If num_eigenvectors is less than 1 or greater than the number of
+ nodes.
+
+ >>> graph = [[1, 2], [0, 2], [0, 1]]
+ >>> T, Q = lanczos_iteration(graph, 2)
+ >>> T.shape == (2, 2) and Q.shape == (3, 2)
+ True
+ """
num_nodes: int = len(graph)
if not (1 <= num_eigenvectors <= num_nodes):
raise ValueError(
@@ -66,6 +133,24 @@ def multiply_matrix_vector(
graph: list[list[int | None]], vector: np.ndarray
) -> np.ndarray:
+ """Performs multiplication of a graph's adjacency list representation with a vector.
+
+ Args:
+ graph: The adjacency list of the graph.
+ vector: A 1D numpy array representing the vector to multiply.
+
+ Returns:
+ A numpy array representing the product of the adjacency list and the vector.
+
+ Raises:
+ ValueError: If the vector's length does not match the number of nodes in the
+ graph.
+
+ >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([1, 1, 1]))
+ array([2., 2., 2.])
+ >>> multiply_matrix_vector([[1, 2], [0, 2], [0, 1]], np.array([0, 1, 0]))
+ array([1., 0., 1.])
+ """
num_nodes: int = len(graph)
if vector.shape[0] != num_nodes:
raise ValueError("Vector length must match the number of nodes in the graph.")
@@ -80,6 +165,28 @@ def find_lanczos_eigenvectors(
graph: list[list[int | None]], num_eigenvectors: int
) -> tuple[np.ndarray, np.ndarray]:
+ """Computes the largest eigenvalues and their corresponding eigenvectors using the
+ Lanczos method.
+
+ Args:
+ graph: The graph as a list of adjacency lists.
+ num_eigenvectors: Number of largest eigenvalues and eigenvectors to compute.
+
+ Returns:
+ A tuple containing:
+ - eigenvalues: 1D array of the largest eigenvalues in descending order.
+ - eigenvectors: 2D array where each column is an eigenvector corresponding
+ to an eigenvalue.
+
+ Raises:
+ ValueError: If the graph format is invalid or num_eigenvectors is out of bounds.
+
+ >>> eigenvalues, eigenvectors = find_lanczos_eigenvectors(
+ ... [[1, 2], [0, 2], [0, 1]], 2
+ ... )
+ >>> len(eigenvalues) == 2 and eigenvectors.shape[1] == 2
+ True
+ """
validate_adjacency_list(graph)
tridiagonal_matrix, orthonormal_basis = lanczos_iteration(graph, num_eigenvectors)
eigenvalues, eigenvectors = np.linalg.eigh(tridiagonal_matrix)
@@ -87,10 +194,13 @@
def main() -> None:
+ """
+ Main driver function for testing the implementation with doctests.
+ """
import doctest
doctest.testmod()
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/lanczos_eigenvectors.py |
Add well-formatted docstrings |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
TPosition = tuple[int, int]
class Node:
def __init__(
self,
pos_x: int,
pos_y: int,
goal_x: int,
goal_y: int,
g_cost: int,
parent: Node | None,
) -> None:
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.h_cost = self.calculate_heuristic()
self.f_cost = self.g_cost + self.h_cost
def calculate_heuristic(self) -> float:
dy = self.pos_x - self.goal_x
dx = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(dx) + abs(dy)
else:
return sqrt(dy**2 + dx**2)
def __lt__(self, other: Node) -> bool:
return self.f_cost < other.f_cost
class AStar:
def __init__(self, start: TPosition, goal: TPosition):
self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
self.open_nodes = [self.start]
self.closed_nodes: list[Node] = []
self.reached = False
def search(self) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(current_node)
self.closed_nodes.append(current_node)
successors = self.get_successors(current_node)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node)
else:
# retrieve the best current path
better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(child_node)
else:
self.open_nodes.append(better_node)
return [self.start.pos]
def get_successors(self, parent: Node) -> list[Node]:
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
pos_x,
pos_y,
self.target.pos_y,
self.target.pos_x,
parent.g_cost + 1,
parent,
)
)
return successors
def retrace_path(self, node: Node | None) -> list[TPosition]:
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
current_node = current_node.parent
path.reverse()
return path
class BidirectionalAStar:
def __init__(self, start: TPosition, goal: TPosition) -> None:
self.fwd_astar = AStar(start, goal)
self.bwd_astar = AStar(goal, start)
self.reached = False
def search(self) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
current_fwd_node = self.fwd_astar.open_nodes.pop(0)
current_bwd_node = self.bwd_astar.open_nodes.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
current_fwd_node, current_bwd_node
)
self.fwd_astar.closed_nodes.append(current_fwd_node)
self.bwd_astar.closed_nodes.append(current_bwd_node)
self.fwd_astar.target = current_bwd_node
self.bwd_astar.target = current_fwd_node
successors = {
self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(child_node)
else:
# retrieve the best current path
better_node = astar.open_nodes.pop(
astar.open_nodes.index(child_node)
)
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(child_node)
else:
astar.open_nodes.append(better_node)
return [self.fwd_astar.start.pos]
def retrace_bidirectional_path(
self, fwd_node: Node, bwd_node: Node
) -> list[TPosition]:
fwd_path = self.fwd_astar.retrace_path(fwd_node)
bwd_path = self.bwd_astar.retrace_path(bwd_node)
bwd_path.pop()
bwd_path.reverse()
path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_time = time.time()
a_star = AStar(init, goal)
path = a_star.search()
end_time = time.time() - start_time
print(f"AStar execution time = {end_time:f} seconds")
bd_start_time = time.time()
bidir_astar = BidirectionalAStar(init, goal)
bd_end_time = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds") | --- +++ @@ -1,3 +1,6 @@+"""
+https://en.wikipedia.org/wiki/Bidirectional_search
+"""
from __future__ import annotations
@@ -23,6 +26,20 @@
class Node:
+ """
+ >>> k = Node(0, 0, 4, 3, 0, None)
+ >>> k.calculate_heuristic()
+ 5.0
+ >>> n = Node(1, 4, 3, 4, 2, None)
+ >>> n.calculate_heuristic()
+ 2.0
+ >>> l = [k, n]
+ >>> n == l[0]
+ False
+ >>> l.sort()
+ >>> n == l[0]
+ True
+ """
def __init__(
self,
@@ -44,6 +61,9 @@ self.f_cost = self.g_cost + self.h_cost
def calculate_heuristic(self) -> float:
+ """
+ Heuristic for the A*
+ """
dy = self.pos_x - self.goal_x
dx = self.pos_y - self.goal_y
if HEURISTIC == 1:
@@ -56,6 +76,20 @@
class AStar:
+ """
+ >>> astar = AStar((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> (astar.start.pos_y + delta[3][0], astar.start.pos_x + delta[3][1])
+ (0, 1)
+ >>> [x.pos for x in astar.get_successors(astar.start)]
+ [(1, 0), (0, 1)]
+ >>> (astar.start.pos_y + delta[2][0], astar.start.pos_x + delta[2][1])
+ (1, 0)
+ >>> astar.retrace_path(astar.start)
+ [(0, 0)]
+ >>> astar.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (2, 3), (3, 3),
+ (4, 3), (4, 4), (5, 4), (5, 5), (6, 5), (6, 6)]
+ """
def __init__(self, start: TPosition, goal: TPosition):
self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
@@ -96,6 +130,9 @@ return [self.start.pos]
def get_successors(self, parent: Node) -> list[Node]:
+ """
+ Returns a list of successors (both in the grid and free spaces)
+ """
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
@@ -119,6 +156,9 @@ return successors
def retrace_path(self, node: Node | None) -> list[TPosition]:
+ """
+ Retrace the path from parents to parents until start node
+ """
current_node = node
path = []
while current_node is not None:
@@ -129,6 +169,17 @@
class BidirectionalAStar:
+ """
+ >>> bd_astar = BidirectionalAStar((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> bd_astar.fwd_astar.start.pos == bd_astar.bwd_astar.target.pos
+ True
+ >>> bd_astar.retrace_bidirectional_path(bd_astar.fwd_astar.start,
+ ... bd_astar.bwd_astar.start)
+ [(0, 0)]
+ >>> bd_astar.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (2, 4),
+ (2, 5), (3, 5), (4, 5), (5, 5), (5, 6), (6, 6)]
+ """
def __init__(self, start: TPosition, goal: TPosition) -> None:
self.fwd_astar = AStar(start, goal)
@@ -205,4 +256,4 @@ bd_start_time = time.time()
bidir_astar = BidirectionalAStar(init, goal)
bd_end_time = time.time() - bd_start_time
- print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")+ print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/bidirectional_a_star.py |
Generate docstrings for this script | #!/usr/bin/env python3
from __future__ import annotations
import random
import unittest
from pprint import pformat
from typing import TypeVar
import pytest
T = TypeVar("T")
class GraphAdjacencyList[T]:
def __init__(
self, vertices: list[T], edges: list[list[T]], directed: bool = True
) -> None:
self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T
self.directed = directed
# Falsey checks
edges = edges or []
vertices = vertices or []
for vertex in vertices:
self.add_vertex(vertex)
for edge in edges:
if len(edge) != 2:
msg = f"Invalid input: {edge} is the wrong length."
raise ValueError(msg)
self.add_edge(edge[0], edge[1])
def add_vertex(self, vertex: T) -> None:
if self.contains_vertex(vertex):
msg = f"Incorrect input: {vertex} is already in the graph."
raise ValueError(msg)
self.adj_list[vertex] = []
def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
):
msg = (
f"Incorrect input: Either {source_vertex} or "
f"{destination_vertex} does not exist"
)
raise ValueError(msg)
if self.contains_edge(source_vertex, destination_vertex):
msg = (
"Incorrect input: The edge already exists between "
f"{source_vertex} and {destination_vertex}"
)
raise ValueError(msg)
# add the destination vertex to the list associated with the source vertex
# and vice versa if not directed
self.adj_list[source_vertex].append(destination_vertex)
if not self.directed:
self.adj_list[destination_vertex].append(source_vertex)
def remove_vertex(self, vertex: T) -> None:
if not self.contains_vertex(vertex):
msg = f"Incorrect input: {vertex} does not exist in this graph."
raise ValueError(msg)
if not self.directed:
# If not directed, find all neighboring vertices and delete all references
# of edges connecting to the given vertex
for neighbor in self.adj_list[vertex]:
self.adj_list[neighbor].remove(vertex)
else:
# If directed, search all neighbors of all vertices and delete all
# references of edges connecting to the given vertex
for edge_list in self.adj_list.values():
if vertex in edge_list:
edge_list.remove(vertex)
# Finally, delete the given vertex and all of its outgoing edge references
self.adj_list.pop(vertex)
def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
):
msg = (
f"Incorrect input: Either {source_vertex} or "
f"{destination_vertex} does not exist"
)
raise ValueError(msg)
if not self.contains_edge(source_vertex, destination_vertex):
msg = (
"Incorrect input: The edge does NOT exist between "
f"{source_vertex} and {destination_vertex}"
)
raise ValueError(msg)
# remove the destination vertex from the list associated with the source
# vertex and vice versa if not directed
self.adj_list[source_vertex].remove(destination_vertex)
if not self.directed:
self.adj_list[destination_vertex].remove(source_vertex)
def contains_vertex(self, vertex: T) -> bool:
return vertex in self.adj_list
def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
):
msg = (
f"Incorrect input: Either {source_vertex} "
f"or {destination_vertex} does not exist."
)
raise ValueError(msg)
return destination_vertex in self.adj_list[source_vertex]
def clear_graph(self) -> None:
self.adj_list = {}
def __repr__(self) -> str:
return pformat(self.adj_list)
class TestGraphAdjacencyList(unittest.TestCase):
def __assert_graph_edge_exists_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
edge: list[int],
) -> None:
assert undirected_graph.contains_edge(edge[0], edge[1])
assert undirected_graph.contains_edge(edge[1], edge[0])
assert directed_graph.contains_edge(edge[0], edge[1])
def __assert_graph_edge_does_not_exist_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
edge: list[int],
) -> None:
assert not undirected_graph.contains_edge(edge[0], edge[1])
assert not undirected_graph.contains_edge(edge[1], edge[0])
assert not directed_graph.contains_edge(edge[0], edge[1])
def __assert_graph_vertex_exists_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
vertex: int,
) -> None:
assert undirected_graph.contains_vertex(vertex)
assert directed_graph.contains_vertex(vertex)
def __assert_graph_vertex_does_not_exist_check(
self,
undirected_graph: GraphAdjacencyList,
directed_graph: GraphAdjacencyList,
vertex: int,
) -> None:
assert not undirected_graph.contains_vertex(vertex)
assert not directed_graph.contains_vertex(vertex)
def __generate_random_edges(
self, vertices: list[int], edge_pick_count: int
) -> list[list[int]]:
assert edge_pick_count <= len(vertices)
random_source_vertices: list[int] = random.sample(
vertices[0 : int(len(vertices) / 2)], edge_pick_count
)
random_destination_vertices: list[int] = random.sample(
vertices[int(len(vertices) / 2) :], edge_pick_count
)
random_edges: list[list[int]] = []
for source in random_source_vertices:
for dest in random_destination_vertices:
random_edges.append([source, dest])
return random_edges
def __generate_graphs(
self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]:
if max_val - min_val + 1 < vertex_count:
raise ValueError(
"Will result in duplicate vertices. Either increase range "
"between min_val and max_val or decrease vertex count."
)
# generate graph input
random_vertices: list[int] = random.sample(
range(min_val, max_val + 1), vertex_count
)
random_edges: list[list[int]] = self.__generate_random_edges(
random_vertices, edge_pick_count
)
# build graphs
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=random_edges, directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=random_edges, directed=True
)
return undirected_graph, directed_graph, random_vertices, random_edges
def test_init_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# test graph initialization with vertices and edges
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
for edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
assert not undirected_graph.directed
assert directed_graph.directed
def test_contains_vertex(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# Build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=True
)
# Test contains_vertex
for num in range(101):
assert (num in random_vertices) == undirected_graph.contains_vertex(num)
assert (num in random_vertices) == directed_graph.contains_vertex(num)
def test_add_vertices(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# build empty graphs
undirected_graph: GraphAdjacencyList = GraphAdjacencyList(
vertices=[], edges=[], directed=False
)
directed_graph: GraphAdjacencyList = GraphAdjacencyList(
vertices=[], edges=[], directed=True
)
# run add_vertex
for num in random_vertices:
undirected_graph.add_vertex(num)
for num in random_vertices:
directed_graph.add_vertex(num)
# test add_vertex worked
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
def test_remove_vertices(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=True
)
# test remove_vertex worked
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
undirected_graph.remove_vertex(num)
directed_graph.remove_vertex(num)
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, num
)
def test_add_and_remove_vertices_repeatedly(self) -> None:
random_vertices1: list[int] = random.sample(range(51), 20)
random_vertices2: list[int] = random.sample(range(51, 101), 20)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices1, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices1, edges=[], directed=True
)
# test adding and removing vertices
for i, _ in enumerate(random_vertices1):
undirected_graph.add_vertex(random_vertices2[i])
directed_graph.add_vertex(random_vertices2[i])
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, random_vertices2[i]
)
undirected_graph.remove_vertex(random_vertices1[i])
directed_graph.remove_vertex(random_vertices1[i])
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, random_vertices1[i]
)
# remove all vertices
for i, _ in enumerate(random_vertices1):
undirected_graph.remove_vertex(random_vertices2[i])
directed_graph.remove_vertex(random_vertices2[i])
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, random_vertices2[i]
)
def test_contains_edge(self) -> None:
# generate graphs and graph input
vertex_count = 20
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(vertex_count, 0, 100, 4)
# generate all possible edges for testing
all_possible_edges: list[list[int]] = []
for i in range(vertex_count - 1):
for j in range(i + 1, vertex_count):
all_possible_edges.append([random_vertices[i], random_vertices[j]])
all_possible_edges.append([random_vertices[j], random_vertices[i]])
# test contains_edge function
for edge in all_possible_edges:
if edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
elif [edge[1], edge[0]] in random_edges:
# since this edge exists for undirected but the reverse
# may not exist for directed
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, [edge[1], edge[0]]
)
else:
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, edge
)
def test_add_edge(self) -> None:
# generate graph input
random_vertices: list[int] = random.sample(range(101), 15)
random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyList(
vertices=random_vertices, edges=[], directed=True
)
# run and test add_edge
for edge in random_edges:
undirected_graph.add_edge(edge[0], edge[1])
directed_graph.add_edge(edge[0], edge[1])
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
def test_remove_edge(self) -> None:
# generate graph input and graphs
(
undirected_graph,
directed_graph,
_random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# run and test remove_edge
for edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
undirected_graph.remove_edge(edge[0], edge[1])
directed_graph.remove_edge(edge[0], edge[1])
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, edge
)
def test_add_and_remove_edges_repeatedly(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# make some more edge options!
more_random_edges: list[list[int]] = []
while len(more_random_edges) != len(random_edges):
edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
for edge in edges:
if len(more_random_edges) == len(random_edges):
break
elif edge not in more_random_edges and edge not in random_edges:
more_random_edges.append(edge)
for i, _ in enumerate(random_edges):
undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, more_random_edges[i]
)
undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, random_edges[i]
)
def test_add_vertex_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for vertex in random_vertices:
with pytest.raises(ValueError):
undirected_graph.add_vertex(vertex)
with pytest.raises(ValueError):
directed_graph.add_vertex(vertex)
def test_remove_vertex_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for i in range(101):
if i not in random_vertices:
with pytest.raises(ValueError):
undirected_graph.remove_vertex(i)
with pytest.raises(ValueError):
directed_graph.remove_vertex(i)
def test_add_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
_random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for edge in random_edges:
with pytest.raises(ValueError):
undirected_graph.add_edge(edge[0], edge[1])
with pytest.raises(ValueError):
directed_graph.add_edge(edge[0], edge[1])
def test_remove_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
more_random_edges: list[list[int]] = []
while len(more_random_edges) != len(random_edges):
edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
for edge in edges:
if len(more_random_edges) == len(random_edges):
break
elif edge not in more_random_edges and edge not in random_edges:
more_random_edges.append(edge)
for edge in more_random_edges:
with pytest.raises(ValueError):
undirected_graph.remove_edge(edge[0], edge[1])
with pytest.raises(ValueError):
directed_graph.remove_edge(edge[0], edge[1])
def test_contains_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for vertex in random_vertices:
with pytest.raises(ValueError):
undirected_graph.contains_edge(vertex, 102)
with pytest.raises(ValueError):
directed_graph.contains_edge(vertex, 102)
with pytest.raises(ValueError):
undirected_graph.contains_edge(103, 102)
with pytest.raises(ValueError):
directed_graph.contains_edge(103, 102)
if __name__ == "__main__":
unittest.main() | --- +++ @@ -1,4 +1,20 @@ #!/usr/bin/env python3
+"""
+Author: Vikram Nithyanandam
+
+Description:
+The following implementation is a robust unweighted Graph data structure
+implemented using an adjacency list. This vertices and edges of this graph can be
+effectively initialized and modified while storing your chosen generic
+value in each vertex.
+
+Adjacency List: https://en.wikipedia.org/wiki/Adjacency_list
+
+Potential Future Ideas:
+- Add a flag to set edge weights on and set edge weights
+- Make edge weights and vertex values customizable to store whatever the client wants
+- Support multigraph functionality if the client wants it
+"""
from __future__ import annotations
@@ -16,6 +32,15 @@ def __init__(
self, vertices: list[T], edges: list[list[T]], directed: bool = True
) -> None:
+ """
+ Parameters:
+ - vertices: (list[T]) The list of vertex names the client wants to
+ pass in. Default is empty.
+ - edges: (list[list[T]]) The list of edges the client wants to
+ pass in. Each edge is a 2-element list. Default is empty.
+ - directed: (bool) Indicates if graph is directed or undirected.
+ Default is True.
+ """
self.adj_list: dict[T, list[T]] = {} # dictionary of lists of T
self.directed = directed
@@ -33,12 +58,30 @@ self.add_edge(edge[0], edge[1])
def add_vertex(self, vertex: T) -> None:
+ """
+ Adds a vertex to the graph. If the given vertex already exists,
+ a ValueError will be thrown.
+
+ >>> g = GraphAdjacencyList(vertices=[], edges=[], directed=False)
+ >>> g.add_vertex("A")
+ >>> g.adj_list
+ {'A': []}
+ >>> g.add_vertex("A")
+ Traceback (most recent call last):
+ ...
+ ValueError: Incorrect input: A is already in the graph.
+ """
if self.contains_vertex(vertex):
msg = f"Incorrect input: {vertex} is already in the graph."
raise ValueError(msg)
self.adj_list[vertex] = []
def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Creates an edge from source vertex to destination vertex. If any
+ given vertex doesn't exist or the edge already exists, a ValueError
+ will be thrown.
+ """
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
@@ -62,6 +105,11 @@ self.adj_list[destination_vertex].append(source_vertex)
def remove_vertex(self, vertex: T) -> None:
+ """
+ Removes the given vertex from the graph and deletes all incoming and
+ outgoing edges from the given vertex as well. If the given vertex
+ does not exist, a ValueError will be thrown.
+ """
if not self.contains_vertex(vertex):
msg = f"Incorrect input: {vertex} does not exist in this graph."
raise ValueError(msg)
@@ -82,6 +130,10 @@ self.adj_list.pop(vertex)
def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Removes the edge between the two vertices. If any given vertex
+ doesn't exist or the edge does not exist, a ValueError will be thrown.
+ """
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
@@ -105,9 +157,17 @@ self.adj_list[destination_vertex].remove(source_vertex)
def contains_vertex(self, vertex: T) -> bool:
+ """
+ Returns True if the graph contains the vertex, False otherwise.
+ """
return vertex in self.adj_list
def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
+ """
+ Returns True if the graph contains the edge from the source_vertex to the
+ destination_vertex, False otherwise. If any given vertex doesn't exist, a
+ ValueError will be thrown.
+ """
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
@@ -121,6 +181,9 @@ return destination_vertex in self.adj_list[source_vertex]
def clear_graph(self) -> None:
+ """
+ Clears all vertices and edges.
+ """
self.adj_list = {}
def __repr__(self) -> str:
@@ -531,4 +594,4 @@
if __name__ == "__main__":
- unittest.main()+ unittest.main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/graph_adjacency_list.py |
Add documentation for all methods |
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class Node:
def __init__(
self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None
):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.parent = parent
class BreadthFirstSearch:
def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
self.start = Node(start[1], start[0], goal[1], goal[0], None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
self.node_queue = [self.start]
self.reached = False
def search(self) -> Path | None:
while self.node_queue:
current_node = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
self.reached = True
return self.retrace_path(current_node)
successors = self.get_successors(current_node)
for node in successors:
self.node_queue.append(node)
if not self.reached:
return [self.start.pos]
return None
def get_successors(self, parent: Node) -> list[Node]:
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
)
return successors
def retrace_path(self, node: Node | None) -> Path:
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
current_node = current_node.parent
path.reverse()
return path
class BidirectionalBreadthFirstSearch:
def __init__(self, start, goal):
self.fwd_bfs = BreadthFirstSearch(start, goal)
self.bwd_bfs = BreadthFirstSearch(goal, start)
self.reached = False
def search(self) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
current_fwd_node = self.fwd_bfs.node_queue.pop(0)
current_bwd_node = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
self.reached = True
return self.retrace_bidirectional_path(
current_fwd_node, current_bwd_node
)
self.fwd_bfs.target = current_bwd_node
self.bwd_bfs.target = current_fwd_node
successors = {
self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(node)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
fwd_path = self.fwd_bfs.retrace_path(fwd_node)
bwd_path = self.bwd_bfs.retrace_path(bwd_node)
bwd_path.pop()
bwd_path.reverse()
path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
start_bfs_time = time.time()
bfs = BreadthFirstSearch(init, goal)
path = bfs.search()
bfs_time = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
start_bd_bfs_time = time.time()
bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
bd_path = bd_bfs.search()
bd_bfs_time = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time) | --- +++ @@ -1,3 +1,6 @@+"""
+https://en.wikipedia.org/wiki/Bidirectional_search
+"""
from __future__ import annotations
@@ -31,6 +34,23 @@
class BreadthFirstSearch:
+ """
+ # Comment out slow pytests...
+ # 9.15s call graphs/bidirectional_breadth_first_search.py:: \
+ # graphs.bidirectional_breadth_first_search.BreadthFirstSearch
+ # >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ # >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1])
+ (0, 1)
+ # >>> [x.pos for x in bfs.get_successors(bfs.start)]
+ [(1, 0), (0, 1)]
+ # >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1])
+ (1, 0)
+ # >>> bfs.retrace_path(bfs.start)
+ [(0, 0)]
+ # >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1),
+ (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
+ """
def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
self.start = Node(start[1], start[0], goal[1], goal[0], None)
@@ -57,6 +77,9 @@ return None
def get_successors(self, parent: Node) -> list[Node]:
+ """
+ Returns a list of successors (both in the grid and free spaces)
+ """
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
@@ -73,6 +96,9 @@ return successors
def retrace_path(self, node: Node | None) -> Path:
+ """
+ Retrace the path from parents to parents until start node
+ """
current_node = node
path = []
while current_node is not None:
@@ -83,6 +109,18 @@
class BidirectionalBreadthFirstSearch:
+ """
+ >>> bd_bfs = BidirectionalBreadthFirstSearch((0, 0), (len(grid) - 1,
+ ... len(grid[0]) - 1))
+ >>> bd_bfs.fwd_bfs.start.pos == bd_bfs.bwd_bfs.target.pos
+ True
+ >>> bd_bfs.retrace_bidirectional_path(bd_bfs.fwd_bfs.start,
+ ... bd_bfs.bwd_bfs.start)
+ [(0, 0)]
+ >>> bd_bfs.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 3),
+ (2, 4), (3, 4), (3, 5), (3, 6), (4, 6), (5, 6), (6, 6)]
+ """
def __init__(self, start, goal):
self.fwd_bfs = BreadthFirstSearch(start, goal)
@@ -147,4 +185,4 @@ bd_path = bd_bfs.search()
bd_bfs_time = time.time() - start_bd_bfs_time
- print("Bidirectional BFS computation time : ", bd_bfs_time)+ print("Bidirectional BFS computation time : ", bd_bfs_time)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/bidirectional_breadth_first_search.py |
Fill in missing docstrings in my code | # Title: Dijkstra's Algorithm for finding single source shortest path from scratch
# Author: Shubham Malik
# References: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
import math
import sys
# For storing the vertex set to retrieve node with the lowest distance
class PriorityQueue:
# Based on Min Heap
def __init__(self):
self.cur_size = 0
self.array = []
self.pos = {} # To store the pos of node in array
def is_empty(self):
return self.cur_size == 0
def min_heapify(self, idx):
lc = self.left(idx)
rc = self.right(idx)
if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]:
smallest = lc
else:
smallest = idx
if rc < self.cur_size and self.array[rc][0] < self.array[smallest][0]:
smallest = rc
if smallest != idx:
self.swap(idx, smallest)
self.min_heapify(smallest)
def insert(self, tup):
self.pos[tup[1]] = self.cur_size
self.cur_size += 1
self.array.append((sys.maxsize, tup[1]))
self.decrease_key((sys.maxsize, tup[1]), tup[0])
def extract_min(self):
min_node = self.array[0][1]
self.array[0] = self.array[self.cur_size - 1]
self.cur_size -= 1
self.min_heapify(0)
del self.pos[min_node]
return min_node
def left(self, i):
return 2 * i + 1
def right(self, i):
return 2 * i + 2
def par(self, i):
return math.floor(i / 2)
def swap(self, i, j):
self.pos[self.array[i][1]] = j
self.pos[self.array[j][1]] = i
temp = self.array[i]
self.array[i] = self.array[j]
self.array[j] = temp
def decrease_key(self, tup, new_d):
idx = self.pos[tup[1]]
# assuming the new_d is at most old_d
self.array[idx] = (new_d, tup[1])
while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]:
self.swap(idx, self.par(idx))
idx = self.par(idx)
class Graph:
def __init__(self, num):
self.adjList = {} # To store graph: u -> (v,w)
self.num_nodes = num # Number of nodes in graph
# To store the distance from source vertex
self.dist = [0] * self.num_nodes
self.par = [-1] * self.num_nodes # To store the path
def add_edge(self, u, v, w):
# Check if u already in graph
if u in self.adjList:
self.adjList[u].append((v, w))
else:
self.adjList[u] = [(v, w)]
# Assuming undirected graph
if v in self.adjList:
self.adjList[v].append((u, w))
else:
self.adjList[v] = [(u, w)]
def show_graph(self):
for u in self.adjList:
print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]))
def dijkstra(self, src):
# Flush old junk values in par[]
self.par = [-1] * self.num_nodes
# src is the source node
self.dist[src] = 0
q = PriorityQueue()
q.insert((0, src)) # (dist from src, node)
for u in self.adjList:
if u != src:
self.dist[u] = sys.maxsize # Infinity
self.par[u] = -1
while not q.is_empty():
u = q.extract_min() # Returns node with the min dist from source
# Update the distance of all the neighbours of u and
# if their prev dist was INFINITY then push them in Q
for v, w in self.adjList[u]:
new_dist = self.dist[u] + w
if self.dist[v] > new_dist:
if self.dist[v] == sys.maxsize:
q.insert((new_dist, v))
else:
q.decrease_key((self.dist[v], v), new_dist)
self.dist[v] = new_dist
self.par[v] = u
# Show the shortest distances from src
self.show_distances(src)
def show_distances(self, src):
print(f"Distance from node: {src}")
for u in range(self.num_nodes):
print(f"Node {u} has distance: {self.dist[u]}")
def show_path(self, src, dest):
path = []
cost = 0
temp = dest
# Backtracking from dest to src
while self.par[temp] != -1:
path.append(temp)
if temp != src:
for v, w in self.adjList[temp]:
if v == self.par[temp]:
cost += w
break
temp = self.par[temp]
path.append(src)
path.reverse()
print(f"----Path to reach {dest} from {src}----")
for u in path:
print(f"{u}", end=" ")
if u != dest:
print("-> ", end="")
print("\nTotal cost of path: ", cost)
if __name__ == "__main__":
from doctest import testmod
testmod()
graph = Graph(9)
graph.add_edge(0, 1, 4)
graph.add_edge(0, 7, 8)
graph.add_edge(1, 2, 8)
graph.add_edge(1, 7, 11)
graph.add_edge(2, 3, 7)
graph.add_edge(2, 8, 2)
graph.add_edge(2, 5, 4)
graph.add_edge(3, 4, 9)
graph.add_edge(3, 5, 14)
graph.add_edge(4, 5, 10)
graph.add_edge(5, 6, 2)
graph.add_edge(6, 7, 1)
graph.add_edge(6, 8, 6)
graph.add_edge(7, 8, 7)
graph.show_graph()
graph.dijkstra(0)
graph.show_path(0, 4)
# OUTPUT
# 0 -> 1(4) -> 7(8)
# 1 -> 0(4) -> 2(8) -> 7(11)
# 7 -> 0(8) -> 1(11) -> 6(1) -> 8(7)
# 2 -> 1(8) -> 3(7) -> 8(2) -> 5(4)
# 3 -> 2(7) -> 4(9) -> 5(14)
# 8 -> 2(2) -> 6(6) -> 7(7)
# 5 -> 2(4) -> 3(14) -> 4(10) -> 6(2)
# 4 -> 3(9) -> 5(10)
# 6 -> 5(2) -> 7(1) -> 8(6)
# Distance from node: 0
# Node 0 has distance: 0
# Node 1 has distance: 4
# Node 2 has distance: 12
# Node 3 has distance: 19
# Node 4 has distance: 21
# Node 5 has distance: 11
# Node 6 has distance: 9
# Node 7 has distance: 8
# Node 8 has distance: 14
# ----Path to reach 4 from 0----
# 0 -> 7 -> 6 -> 5 -> 4
# Total cost of path: 21 | --- +++ @@ -11,14 +11,67 @@ class PriorityQueue:
# Based on Min Heap
def __init__(self):
+ """
+ Priority queue class constructor method.
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.cur_size
+ 0
+ >>> priority_queue_test.array
+ []
+ >>> priority_queue_test.pos
+ {}
+ """
self.cur_size = 0
self.array = []
self.pos = {} # To store the pos of node in array
def is_empty(self):
+ """
+ Conditional boolean method to determine if the priority queue is empty or not.
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.is_empty()
+ True
+ >>> priority_queue_test.insert((2, 'A'))
+ >>> priority_queue_test.is_empty()
+ False
+ """
return self.cur_size == 0
def min_heapify(self, idx):
+ """
+ Sorts the queue array so that the minimum element is root.
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.cur_size = 3
+ >>> priority_queue_test.pos = {'A': 0, 'B': 1, 'C': 2}
+
+ >>> priority_queue_test.array = [(5, 'A'), (10, 'B'), (15, 'C')]
+ >>> priority_queue_test.min_heapify(0)
+ >>> priority_queue_test.array
+ [(5, 'A'), (10, 'B'), (15, 'C')]
+
+ >>> priority_queue_test.array = [(10, 'A'), (5, 'B'), (15, 'C')]
+ >>> priority_queue_test.min_heapify(0)
+ >>> priority_queue_test.array
+ [(5, 'B'), (10, 'A'), (15, 'C')]
+
+ >>> priority_queue_test.array = [(10, 'A'), (15, 'B'), (5, 'C')]
+ >>> priority_queue_test.min_heapify(0)
+ >>> priority_queue_test.array
+ [(5, 'C'), (15, 'B'), (10, 'A')]
+
+ >>> priority_queue_test.array = [(10, 'A'), (5, 'B')]
+ >>> priority_queue_test.cur_size = len(priority_queue_test.array)
+ >>> priority_queue_test.pos = {'A': 0, 'B': 1}
+ >>> priority_queue_test.min_heapify(0)
+ >>> priority_queue_test.array
+ [(5, 'B'), (10, 'A')]
+ """
lc = self.left(idx)
rc = self.right(idx)
if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]:
@@ -32,12 +85,41 @@ self.min_heapify(smallest)
def insert(self, tup):
+ """
+ Inserts a node into the Priority Queue.
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.insert((10, 'A'))
+ >>> priority_queue_test.array
+ [(10, 'A')]
+ >>> priority_queue_test.insert((15, 'B'))
+ >>> priority_queue_test.array
+ [(10, 'A'), (15, 'B')]
+ >>> priority_queue_test.insert((5, 'C'))
+ >>> priority_queue_test.array
+ [(5, 'C'), (10, 'A'), (15, 'B')]
+ """
self.pos[tup[1]] = self.cur_size
self.cur_size += 1
self.array.append((sys.maxsize, tup[1]))
self.decrease_key((sys.maxsize, tup[1]), tup[0])
def extract_min(self):
+ """
+ Removes and returns the min element at top of priority queue.
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.array = [(10, 'A'), (15, 'B')]
+ >>> priority_queue_test.cur_size = len(priority_queue_test.array)
+ >>> priority_queue_test.pos = {'A': 0, 'B': 1}
+ >>> priority_queue_test.insert((5, 'C'))
+ >>> priority_queue_test.extract_min()
+ 'C'
+ >>> priority_queue_test.array[0]
+ (10, 'A')
+ """
min_node = self.array[0][1]
self.array[0] = self.array[self.cur_size - 1]
self.cur_size -= 1
@@ -46,15 +128,61 @@ return min_node
def left(self, i):
+ """
+ Returns the index of left child
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.left(0)
+ 1
+ >>> priority_queue_test.left(1)
+ 3
+ """
return 2 * i + 1
def right(self, i):
+ """
+ Returns the index of right child
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.right(0)
+ 2
+ >>> priority_queue_test.right(1)
+ 4
+ """
return 2 * i + 2
def par(self, i):
+ """
+ Returns the index of parent
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.par(1)
+ 0
+ >>> priority_queue_test.par(2)
+ 1
+ >>> priority_queue_test.par(4)
+ 2
+ """
return math.floor(i / 2)
def swap(self, i, j):
+ """
+ Swaps array elements at indices i and j, update the pos{}
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.array = [(10, 'A'), (15, 'B')]
+ >>> priority_queue_test.cur_size = len(priority_queue_test.array)
+ >>> priority_queue_test.pos = {'A': 0, 'B': 1}
+ >>> priority_queue_test.swap(0, 1)
+ >>> priority_queue_test.array
+ [(15, 'B'), (10, 'A')]
+ >>> priority_queue_test.pos
+ {'A': 1, 'B': 0}
+ """
self.pos[self.array[i][1]] = j
self.pos[self.array[j][1]] = i
temp = self.array[i]
@@ -62,6 +190,18 @@ self.array[j] = temp
def decrease_key(self, tup, new_d):
+ """
+ Decrease the key value for a given tuple, assuming the new_d is at most old_d.
+
+ Examples:
+ >>> priority_queue_test = PriorityQueue()
+ >>> priority_queue_test.array = [(10, 'A'), (15, 'B')]
+ >>> priority_queue_test.cur_size = len(priority_queue_test.array)
+ >>> priority_queue_test.pos = {'A': 0, 'B': 1}
+ >>> priority_queue_test.decrease_key((10, 'A'), 5)
+ >>> priority_queue_test.array
+ [(5, 'A'), (15, 'B')]
+ """
idx = self.pos[tup[1]]
# assuming the new_d is at most old_d
self.array[idx] = (new_d, tup[1])
@@ -72,6 +212,20 @@
class Graph:
def __init__(self, num):
+ """
+ Graph class constructor
+
+ Examples:
+ >>> graph_test = Graph(1)
+ >>> graph_test.num_nodes
+ 1
+ >>> graph_test.dist
+ [0]
+ >>> graph_test.par
+ [-1]
+ >>> graph_test.adjList
+ {}
+ """
self.adjList = {} # To store graph: u -> (v,w)
self.num_nodes = num # Number of nodes in graph
# To store the distance from source vertex
@@ -79,6 +233,16 @@ self.par = [-1] * self.num_nodes # To store the path
def add_edge(self, u, v, w):
+ """
+ Add edge going from node u to v and v to u with weight w: u (w)-> v, v (w) -> u
+
+ Examples:
+ >>> graph_test = Graph(1)
+ >>> graph_test.add_edge(1, 2, 1)
+ >>> graph_test.add_edge(2, 3, 2)
+ >>> graph_test.adjList
+ {1: [(2, 1)], 2: [(1, 1), (3, 2)], 3: [(2, 2)]}
+ """
# Check if u already in graph
if u in self.adjList:
self.adjList[u].append((v, w))
@@ -92,10 +256,99 @@ self.adjList[v] = [(u, w)]
def show_graph(self):
+ """
+ Show the graph: u -> v(w)
+
+ Examples:
+ >>> graph_test = Graph(1)
+ >>> graph_test.add_edge(1, 2, 1)
+ >>> graph_test.show_graph()
+ 1 -> 2(1)
+ 2 -> 1(1)
+ >>> graph_test.add_edge(2, 3, 2)
+ >>> graph_test.show_graph()
+ 1 -> 2(1)
+ 2 -> 1(1) -> 3(2)
+ 3 -> 2(2)
+ """
for u in self.adjList:
print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in self.adjList[u]))
def dijkstra(self, src):
+ """
+ Dijkstra algorithm
+
+ Examples:
+ >>> graph_test = Graph(3)
+ >>> graph_test.add_edge(0, 1, 2)
+ >>> graph_test.add_edge(1, 2, 2)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 2
+ Node 2 has distance: 4
+ >>> graph_test.dist
+ [0, 2, 4]
+
+ >>> graph_test = Graph(2)
+ >>> graph_test.add_edge(0, 1, 2)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 2
+ >>> graph_test.dist
+ [0, 2]
+
+ >>> graph_test = Graph(3)
+ >>> graph_test.add_edge(0, 1, 2)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 2
+ Node 2 has distance: 0
+ >>> graph_test.dist
+ [0, 2, 0]
+
+ >>> graph_test = Graph(3)
+ >>> graph_test.add_edge(0, 1, 2)
+ >>> graph_test.add_edge(1, 2, 2)
+ >>> graph_test.add_edge(0, 2, 1)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 2
+ Node 2 has distance: 1
+ >>> graph_test.dist
+ [0, 2, 1]
+
+ >>> graph_test = Graph(4)
+ >>> graph_test.add_edge(0, 1, 4)
+ >>> graph_test.add_edge(1, 2, 2)
+ >>> graph_test.add_edge(2, 3, 1)
+ >>> graph_test.add_edge(0, 2, 3)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 4
+ Node 2 has distance: 3
+ Node 3 has distance: 4
+ >>> graph_test.dist
+ [0, 4, 3, 4]
+
+ >>> graph_test = Graph(4)
+ >>> graph_test.add_edge(0, 1, 4)
+ >>> graph_test.add_edge(1, 2, 2)
+ >>> graph_test.add_edge(2, 3, 1)
+ >>> graph_test.add_edge(0, 2, 7)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 4
+ Node 2 has distance: 6
+ Node 3 has distance: 7
+ >>> graph_test.dist
+ [0, 4, 6, 7]
+ """
# Flush old junk values in par[]
self.par = [-1] * self.num_nodes
# src is the source node
@@ -125,11 +378,40 @@ self.show_distances(src)
def show_distances(self, src):
+ """
+ Show the distances from src to all other nodes in a graph
+
+ Examples:
+ >>> graph_test = Graph(1)
+ >>> graph_test.show_distances(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ """
print(f"Distance from node: {src}")
for u in range(self.num_nodes):
print(f"Node {u} has distance: {self.dist[u]}")
def show_path(self, src, dest):
+ """
+ Shows the shortest path from src to dest.
+ WARNING: Use it *after* calling dijkstra.
+
+ Examples:
+ >>> graph_test = Graph(4)
+ >>> graph_test.add_edge(0, 1, 1)
+ >>> graph_test.add_edge(1, 2, 2)
+ >>> graph_test.add_edge(2, 3, 3)
+ >>> graph_test.dijkstra(0)
+ Distance from node: 0
+ Node 0 has distance: 0
+ Node 1 has distance: 1
+ Node 2 has distance: 3
+ Node 3 has distance: 6
+ >>> graph_test.show_path(0, 3) # doctest: +NORMALIZE_WHITESPACE
+ ----Path to reach 3 from 0----
+ 0 -> 1 -> 2 -> 3
+ Total cost of path: 6
+ """
path = []
cost = 0
temp = dest
@@ -199,4 +481,4 @@ # Node 8 has distance: 14
# ----Path to reach 4 from 0----
# 0 -> 7 -> 6 -> 5 -> 4
-# Total cost of path: 21+# Total cost of path: 21
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/dijkstra_algorithm.py |
Create docstrings for each class method |
from __future__ import annotations
Path = list[tuple[int, int]]
# 0's are free path whereas 1's are obstacles
TEST_GRIDS = [
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
],
[
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0],
],
[
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 0, 1, 1],
[0, 0, 0, 0, 0],
],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class Node:
def __init__(
self,
pos_x: int,
pos_y: int,
goal_x: int,
goal_y: int,
g_cost: float,
parent: Node | None,
):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.f_cost = self.calculate_heuristic()
def calculate_heuristic(self) -> float:
dx = abs(self.pos_x - self.goal_x)
dy = abs(self.pos_y - self.goal_y)
return dx + dy
def __lt__(self, other) -> bool:
return self.f_cost < other.f_cost
def __eq__(self, other) -> bool:
return self.pos == other.pos
class GreedyBestFirst:
def __init__(
self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]
):
self.grid = grid
self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
self.open_nodes = [self.start]
self.closed_nodes: list[Node] = []
self.reached = False
def search(self) -> Path | None:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
self.reached = True
return self.retrace_path(current_node)
self.closed_nodes.append(current_node)
successors = self.get_successors(current_node)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node)
if not self.reached:
return [self.start.pos]
return None
def get_successors(self, parent: Node) -> list[Node]:
return [
Node(
pos_x,
pos_y,
self.target.pos_x,
self.target.pos_y,
parent.g_cost + 1,
parent,
)
for action in delta
if (
0 <= (pos_x := parent.pos_x + action[1]) < len(self.grid[0])
and 0 <= (pos_y := parent.pos_y + action[0]) < len(self.grid)
and self.grid[pos_y][pos_x] == 0
)
]
def retrace_path(self, node: Node | None) -> Path:
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
current_node = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
for idx, grid in enumerate(TEST_GRIDS):
print(f"==grid-{idx + 1}==")
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
greedy_bf = GreedyBestFirst(grid, init, goal)
path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
grid[pos_x][pos_y] = 2
for elem in grid:
print(elem) | --- +++ @@ -1,3 +1,6 @@+"""
+https://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS
+"""
from __future__ import annotations
@@ -35,6 +38,20 @@
class Node:
+ """
+ >>> k = Node(0, 0, 4, 5, 0, None)
+ >>> k.calculate_heuristic()
+ 9
+ >>> n = Node(1, 4, 3, 4, 2, None)
+ >>> n.calculate_heuristic()
+ 2
+ >>> l = [k, n]
+ >>> n == l[0]
+ False
+ >>> l.sort()
+ >>> n == l[0]
+ True
+ """
def __init__(
self,
@@ -55,6 +72,10 @@ self.f_cost = self.calculate_heuristic()
def calculate_heuristic(self) -> float:
+ """
+ The heuristic here is the Manhattan Distance
+ Could elaborate to offer more than one choice
+ """
dx = abs(self.pos_x - self.goal_x)
dy = abs(self.pos_y - self.goal_y)
return dx + dy
@@ -67,6 +88,21 @@
class GreedyBestFirst:
+ """
+ >>> grid = TEST_GRIDS[2]
+ >>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1))
+ >>> [x.pos for x in gbf.get_successors(gbf.start)]
+ [(1, 0), (0, 1)]
+ >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])
+ (0, 1)
+ >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])
+ (1, 0)
+ >>> gbf.retrace_path(gbf.start)
+ [(0, 0)]
+ >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE
+ [(0, 0), (1, 0), (2, 0), (2, 1), (3, 1), (4, 1), (4, 2), (4, 3),
+ (4, 4)]
+ """
def __init__(
self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]
@@ -81,6 +117,10 @@ self.reached = False
def search(self) -> Path | None:
+ """
+ Search for the path,
+ if a path is not found, only the starting position is returned
+ """
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
@@ -105,6 +145,9 @@ return None
def get_successors(self, parent: Node) -> list[Node]:
+ """
+ Returns a list of successors (both in the grid and free spaces)
+ """
return [
Node(
pos_x,
@@ -123,6 +166,9 @@ ]
def retrace_path(self, node: Node | None) -> Path:
+ """
+ Retrace the path from parents to parents until start node
+ """
current_node = node
path = []
while current_node is not None:
@@ -150,4 +196,4 @@ grid[pos_x][pos_y] = 2
for elem in grid:
- print(elem)+ print(elem)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/greedy_best_first.py |
Turn comments into proper docstrings | # https://en.wikipedia.org/wiki/B%C3%A9zier_curve
# https://www.tutorialspoint.com/computer_graphics/computer_graphics_curves.htm
from __future__ import annotations
from scipy.special import comb
class BezierCurve:
def __init__(self, list_of_points: list[tuple[float, float]]):
self.list_of_points = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
self.degree = len(list_of_points) - 1
def basis_function(self, t: float) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
output_values: list[float] = []
for i in range(len(self.list_of_points)):
# basis function for each i
output_values.append(
comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
)
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(output_values), 5) == 1
return output_values
def bezier_curve_function(self, t: float) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
basis_function = self.basis_function(t)
x = 0.0
y = 0.0
for i in range(len(self.list_of_points)):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def plot_curve(self, step_size: float = 0.01):
from matplotlib import pyplot as plt
to_plot_x: list[float] = [] # x coordinates of points to plot
to_plot_y: list[float] = [] # y coordinates of points to plot
t = 0.0
while t <= 1:
value = self.bezier_curve_function(t)
to_plot_x.append(value[0])
to_plot_y.append(value[1])
t += step_size
x = [i[0] for i in self.list_of_points]
y = [i[1] for i in self.list_of_points]
plt.plot(
to_plot_x,
to_plot_y,
color="blue",
label="Curve of Degree " + str(self.degree),
)
plt.scatter(x, y, color="red", label="Control Points")
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3 | --- +++ @@ -6,14 +6,35 @@
class BezierCurve:
+ """
+ Bezier curve is a weighted sum of a set of control points.
+ Generate Bezier curves from a given set of control points.
+ This implementation works only for 2d coordinates in the xy plane.
+ """
def __init__(self, list_of_points: list[tuple[float, float]]):
+ """
+ list_of_points: Control points in the xy plane on which to interpolate. These
+ points control the behavior (shape) of the Bezier curve.
+ """
self.list_of_points = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
self.degree = len(list_of_points) - 1
def basis_function(self, t: float) -> list[float]:
+ """
+ The basis function determines the weight of each control point at time t.
+ t: time value between 0 and 1 inclusive at which to evaluate the basis of
+ the curve.
+ returns the x, y values of basis function at time t
+
+ >>> curve = BezierCurve([(1,1), (1,2)])
+ >>> [float(x) for x in curve.basis_function(0)]
+ [1.0, 0.0]
+ >>> [float(x) for x in curve.basis_function(1)]
+ [0.0, 1.0]
+ """
assert 0 <= t <= 1, "Time t must be between 0 and 1."
output_values: list[float] = []
for i in range(len(self.list_of_points)):
@@ -26,6 +47,19 @@ return output_values
def bezier_curve_function(self, t: float) -> tuple[float, float]:
+ """
+ The function to produce the values of the Bezier curve at time t.
+ t: the value of time t at which to evaluate the Bezier function
+ Returns the x, y coordinates of the Bezier curve at time t.
+ The first point in the curve is when t = 0.
+ The last point in the curve is when t = 1.
+
+ >>> curve = BezierCurve([(1,1), (1,2)])
+ >>> tuple(float(x) for x in curve.bezier_curve_function(0))
+ (1.0, 1.0)
+ >>> tuple(float(x) for x in curve.bezier_curve_function(1))
+ (1.0, 2.0)
+ """
assert 0 <= t <= 1, "Time t must be between 0 and 1."
@@ -39,6 +73,11 @@ return (x, y)
def plot_curve(self, step_size: float = 0.01):
+ """
+ Plots the Bezier curve using matplotlib plotting capabilities.
+ step_size: defines the step(s) at which to evaluate the Bezier curve.
+ The smaller the step size, the finer the curve produced.
+ """
from matplotlib import pyplot as plt
to_plot_x: list[float] = [] # x coordinates of points to plot
@@ -72,4 +111,4 @@
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
- BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3+ BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphics/bezier_curve.py |
Write proper docstrings for these functions |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
# queue used to store nodes and their rank
queue: list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(queue, [-1 * len(value), (key, value)])
# chosen_vertices = set of chosen vertices
chosen_vertices = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
argmax = heapq.heappop(queue)[1][0]
chosen_vertices.add(argmax)
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
index = elem[1][1].index(argmax)
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(queue)
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") | --- +++ @@ -1,8 +1,24 @@+"""
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Approximization algorithm for minimum vertex cover problem.
+ Greedy Approach. Uses graphs represented with an adjacency list
+URL: https://mathworld.wolfram.com/MinimumVertexCover.html
+URL: https://cs.stackexchange.com/questions/129017/greedy-algorithm-for-vertex-cover
+"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
+ """
+ Greedy APX Algorithm for min Vertex Cover
+ @input: graph (graph stored in an adjacency list where each vertex
+ is represented with an integer)
+ @example:
+ >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
+ >>> greedy_min_vertex_cover(graph)
+ {0, 1, 2, 4}
+ """
# queue used to store nodes and their rank
queue: list[list] = []
@@ -45,4 +61,4 @@ doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
- print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")+ print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/greedy_min_vertex_cover.py |
Add verbose docstrings with examples | #!/usr/bin/env python3
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import TypeVar
T = TypeVar("T")
class GraphAdjacencyList[T]:
def __init__(self, directed: bool = True) -> None:
self.adj_list: dict[T, list[T]] = {} # dictionary of lists
self.directed = directed
def add_edge(
self, source_vertex: T, destination_vertex: T
) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(source_vertex)
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = [source_vertex]
# For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
elif source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(destination_vertex)
self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
self.adj_list[source_vertex] = [destination_vertex]
self.adj_list[destination_vertex] = []
return self
def __repr__(self) -> str:
    """Return a pretty-printed view of the adjacency list (via pformat)."""
    return pformat(self.adj_list)
class GraphAdjacencyList[T]:
+ """
+ Adjacency List type Graph Data Structure that accounts for directed and undirected
+ Graphs. Initialize graph object indicating whether it's directed or undirected.
+
+ Directed graph example:
+ >>> d_graph = GraphAdjacencyList()
+ >>> print(d_graph)
+ {}
+ >>> d_graph.add_edge(0, 1)
+ {0: [1], 1: []}
+ >>> d_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5)
+ {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []}
+ >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
+ {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
+ >>> d_graph
+ {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
+ >>> print(repr(d_graph))
+ {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
+
+ Undirected graph example:
+ >>> u_graph = GraphAdjacencyList(directed=False)
+ >>> u_graph.add_edge(0, 1)
+ {0: [1], 1: [0]}
+ >>> u_graph.add_edge(1, 2).add_edge(1, 4).add_edge(1, 5)
+ {0: [1], 1: [0, 2, 4, 5], 2: [1], 4: [1], 5: [1]}
+ >>> u_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
+ {0: [1, 2], 1: [0, 2, 4, 5], 2: [1, 0, 6, 7], 4: [1], 5: [1], 6: [2], 7: [2]}
+ >>> u_graph.add_edge(4, 5)
+ {0: [1, 2],
+ 1: [0, 2, 4, 5],
+ 2: [1, 0, 6, 7],
+ 4: [1, 5],
+ 5: [1, 4],
+ 6: [2],
+ 7: [2]}
+ >>> print(u_graph)
+ {0: [1, 2],
+ 1: [0, 2, 4, 5],
+ 2: [1, 0, 6, 7],
+ 4: [1, 5],
+ 5: [1, 4],
+ 6: [2],
+ 7: [2]}
+ >>> print(repr(u_graph))
+ {0: [1, 2],
+ 1: [0, 2, 4, 5],
+ 2: [1, 0, 6, 7],
+ 4: [1, 5],
+ 5: [1, 4],
+ 6: [2],
+ 7: [2]}
+ >>> char_graph = GraphAdjacencyList(directed=False)
+ >>> char_graph.add_edge('a', 'b')
+ {'a': ['b'], 'b': ['a']}
+ >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f')
+ {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
+ >>> char_graph
+ {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
+ """
def __init__(self, directed: bool = True) -> None:
+ """
+ Parameters:
+ directed: (bool) Indicates if graph is directed or undirected. Default is True.
+ """
self.adj_list: dict[T, list[T]] = {} # dictionary of lists
self.directed = directed
@@ -21,6 +84,11 @@ def add_edge(
self, source_vertex: T, destination_vertex: T
) -> GraphAdjacencyList[T]:
+ """
+ Connects vertices together. Creates and Edge from source vertex to destination
+ vertex.
+ Vertices will be created if not found in graph
+ """
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
@@ -79,4 +147,4 @@ return self
def __repr__(self) -> str:
- return pformat(self.adj_list)+ return pformat(self.adj_list)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/graph_list.py |
Generate docstrings for exported functions |
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX algorithm for the minimum vertex cover problem (matching approach).

    @input: graph (graph stored in an adjacency list where each vertex
            is represented as an integer)
    @example:
    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> matching_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges (built by the sibling helper get_edges)
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add both extremities to chosen_vertices, then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        # iterate over a copy because we mutate `edges` while scanning it
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """
    Return the set of (from_node, to_node) couples representing every edge.

    @input: graph (graph stored in an adjacency list where each vertex is
            represented as an integer)
    >>> get_edges({0: [1, 2], 1: [0], 2: [0]})
    {(0, 1), (2, 0), (0, 2), (1, 0)}
    """
    return {
        (source, destination)
        for source, destinations in graph.items()
        for destination in destinations
    }
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
    # Example run (kept for reference):
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Approximization algorithm for minimum vertex cover problem.
+ Matching Approach. Uses graphs represented with an adjacency list
+
+URL: https://mathworld.wolfram.com/MinimumVertexCover.html
+URL: https://www.princeton.edu/~aaa/Public/Teaching/ORF523/ORF523_Lec6.pdf
+"""
def matching_min_vertex_cover(graph: dict) -> set:
+ """
+ APX Algorithm for min Vertex Cover using Matching Approach
+ @input: graph (graph stored in an adjacency list where each vertex
+ is represented as an integer)
+ @example:
+ >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
+ >>> matching_min_vertex_cover(graph)
+ {0, 1, 2, 4}
+ """
# chosen_vertices = set of chosen vertices
chosen_vertices = set()
# edges = list of graph's edges
@@ -20,6 +37,15 @@
def get_edges(graph: dict) -> set:
+ """
+ Return a set of couples that represents all of the edges.
+ @input: graph (graph stored in an adjacency list where each vertex is
+ represented as an integer)
+ @example:
+ >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3], 3: [0, 1, 2]}
+ >>> get_edges(graph)
+ {(0, 1), (3, 1), (0, 3), (2, 0), (3, 0), (2, 3), (1, 0), (3, 2), (1, 3)}
+ """
edges = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
@@ -33,4 +59,4 @@ doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
- # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")+ # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/matching_min_vertex_cover.py |
Add inline docstrings for readability |
from __future__ import annotations
import random
# Adjacency list representation of this graph:
# https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
# 10 nodes keyed by string labels; each undirected edge appears in the
# adjacency lists of both of its endpoints.
TEST_GRAPH = {
    "1": ["2", "3", "4", "5"],
    "2": ["1", "3", "4", "5"],
    "3": ["1", "2", "4", "5", "10"],
    "4": ["1", "2", "3", "5", "6"],
    "5": ["1", "2", "3", "4", "7"],
    "6": ["7", "8", "9", "10", "4"],
    "7": ["6", "8", "9", "10", "5"],
    "8": ["6", "7", "9", "10"],
    "9": ["6", "7", "8", "10"],
    "10": ["6", "7", "8", "9", "3"],
}
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
    """
    Run one pass of Karger's randomized contraction algorithm and return the
    cutset of the resulting 2-way partition.

    The algorithm repeatedly contracts a random edge until only two
    super-nodes remain; the edges crossing between them form the cut.
    Because random edges are chosen, different runs can return different cuts.

    >>> partition_graph({'0': ['1'], '1': ['0']})
    {('0', '1')}
    """
    # Map every (possibly contracted) node to the set of original nodes it holds.
    membership = {node: {node} for node in graph}
    work_graph = {node: graph[node][:] for node in graph}

    while len(work_graph) > 2:
        # Pick a random edge (u, v).
        u = random.choice(list(work_graph.keys()))
        v = random.choice(work_graph[u])

        # Contract (u, v) into a single node named u+v.
        merged = u + v
        merged_neighbors = list(set(work_graph[u] + work_graph[v]))
        merged_neighbors.remove(u)
        merged_neighbors.remove(v)
        work_graph[merged] = merged_neighbors
        for neighbor in merged_neighbors:
            work_graph[neighbor].append(merged)

        membership[merged] = set(membership[u].union(membership[v]))

        # Remove the contracted endpoints and every reference to them.
        del work_graph[u]
        del work_graph[v]
        for neighbor in merged_neighbors:
            if u in work_graph[neighbor]:
                work_graph[neighbor].remove(u)
            if v in work_graph[neighbor]:
                work_graph[neighbor].remove(v)

    # The two surviving super-nodes define the partition; collect the edges
    # of the ORIGINAL graph that cross between them.
    groups = [membership[node] for node in work_graph]
    return {
        (node, neighbor)
        for node in groups[0]
        for neighbor in graph[node]
        if neighbor in groups[1]
    }
if __name__ == "__main__":
    # Print the cutset from a single randomized run on the example graph.
    print(partition_graph(TEST_GRAPH))
+An implementation of Karger's Algorithm for partitioning a graph.
+"""
from __future__ import annotations
@@ -20,6 +23,24 @@
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
+ """
+ Partitions a graph using Karger's Algorithm. Implemented from
+ pseudocode found here:
+ https://en.wikipedia.org/wiki/Karger%27s_algorithm.
+ This function involves random choices, meaning it will not give
+ consistent outputs.
+
+ Args:
+ graph: A dictionary containing adacency lists for the graph.
+ Nodes must be strings.
+
+ Returns:
+ The cutset of the cut found by Karger's Algorithm.
+
+ >>> graph = {'0':['1'], '1':['0']}
+ >>> partition_graph(graph)
+ {('0', '1')}
+ """
# Dict that maps contracted nodes to a list of all the nodes it "contains."
contracted_nodes = {node: {node} for node in graph}
@@ -61,4 +82,4 @@
if __name__ == "__main__":
- print(partition_graph(TEST_GRAPH))+ print(partition_graph(TEST_GRAPH))
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/karger.py |
Add structured docstrings to improve clarity |
from __future__ import annotations
from typing import Any
class Graph:
    """Undirected weighted graph used to run Borůvka's MST algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes: number of nodes in the graph.
        Attributes:
            m_num_of_nodes: the number of nodes in the graph.
            m_edges: list of edges stored as [u, v, weight].
            m_component: maps each node to the index of its component.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the root (representative) of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagate component roots: if u_node is not a root, recompute the
        representative for every node in the mapping."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of u_node and v_node, attaching the smaller
        one to the larger one (ties go to the first branch)."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        # NOTE(review): both branches are true on equality; only the first runs.
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute an MST with Borůvka's algorithm, printing each added edge
        and finally the total MST weight."""
        # Initialize additional lists required to algorithm.
        component_size = []
        mst_weight = 0

        # minimum_weight_edge[c] is -1 or the lightest edge [u, v, w] seen
        # for component c in the current round.
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Round 1: find the minimum-weight outgoing edge of every component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    """If the current minimum weight edge of component u doesn't
                    exist (is -1), or if it's greater than the edge we're
                    observing right now, we will assign the value of the edge
                    we're observing to it.
                    If the current minimum weight edge of component v doesn't
                    exist (is -1), or if it's greater than the edge we're
                    observing right now, we will assign the value of the edge
                    we're observing to it"""
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Round 2: add each selected edge (if it still joins two distinct
            # components) and merge the components it connects.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            # Reset the per-component minima for the next round.
            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    """
    >>> g = Graph(8)
    >>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
    ...     (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4)):
    ...     g.add_edge(*u_v_w)
    >>> g.boruvka()
    Added edge [0 - 3]
    Added weight: 5
    <BLANKLINE>
    Added edge [0 - 1]
    Added weight: 10
    <BLANKLINE>
    Added edge [2 - 3]
    Added weight: 4
    <BLANKLINE>
    Added edge [4 - 7]
    Added weight: 5
    <BLANKLINE>
    Added edge [4 - 5]
    Added weight: 10
    <BLANKLINE>
    Added edge [6 - 7]
    Added weight: 4
    <BLANKLINE>
    Added edge [3 - 4]
    Added weight: 8
    <BLANKLINE>
    The total weight of the minimal spanning tree is: 46
    """
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()
+
+Determines the minimum spanning tree (MST) of a graph using the Borůvka's algorithm.
+Borůvka's algorithm is a greedy algorithm for finding a minimum spanning tree in a
+connected graph, or a minimum spanning forest if a graph that is not connected.
+
+The time complexity of this algorithm is O(ELogV), where E represents the number
+of edges, while V represents the number of nodes.
+O(number_of_edges Log number_of_nodes)
+
+The space complexity of this algorithm is O(V + E), since we have to keep a couple
+of lists whose sizes are equal to the number of nodes, as well as keep all the
+edges of a graph inside of the data structure itself.
+
+Borůvka's algorithm gives us pretty much the same result as other MST Algorithms -
+they all find the minimum spanning tree, and the time complexity is approximately
+the same.
+
+One advantage that Borůvka's algorithm has compared to the alternatives is that it
+doesn't need to presort the edges or maintain a priority queue in order to find the
+minimum spanning tree.
+Even though that doesn't help its complexity, since it still passes the edges logE
+times, it is a bit simpler to code.
+
+Details: https://en.wikipedia.org/wiki/Bor%C5%AFvka%27s_algorithm
+"""
from __future__ import annotations
@@ -6,28 +32,43 @@
class Graph:
def __init__(self, num_of_nodes: int) -> None:
+ """
+ Arguments:
+ num_of_nodes - the number of nodes in the graph
+ Attributes:
+ m_num_of_nodes - the number of nodes in the graph.
+ m_edges - the list of edges.
+ m_component - the dictionary which stores the index of the component which
+ a node belongs to.
+ """
self.m_num_of_nodes = num_of_nodes
self.m_edges: list[list[int]] = []
self.m_component: dict[int, int] = {}
def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
+ """Adds an edge in the format [first, second, edge weight] to graph."""
self.m_edges.append([u_node, v_node, weight])
def find_component(self, u_node: int) -> int:
+ """Propagates a new component throughout a given component."""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def set_component(self, u_node: int) -> None:
+ """Finds the component index of a given node"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
self.m_component[k] = self.find_component(k)
def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
+ """Union finds the roots of components for two nodes, compares the components
+ in terms of size, and attaches the smaller one to the larger one to form
+ single component"""
if component_size[u_node] <= component_size[v_node]:
self.m_component[u_node] = v_node
@@ -40,6 +81,7 @@ self.set_component(v_node)
def boruvka(self) -> None:
+ """Performs Borůvka's algorithm to find MST."""
# Initialize additional lists required to algorithm.
component_size = []
@@ -97,9 +139,38 @@
def test_vector() -> None:
+ """
+ >>> g = Graph(8)
+ >>> for u_v_w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4),
+ ... (3, 4, 8), (4, 5, 10), (4, 6, 6), (4, 7, 5), (5, 7, 15), (6, 7, 4)):
+ ... g.add_edge(*u_v_w)
+ >>> g.boruvka()
+ Added edge [0 - 3]
+ Added weight: 5
+ <BLANKLINE>
+ Added edge [0 - 1]
+ Added weight: 10
+ <BLANKLINE>
+ Added edge [2 - 3]
+ Added weight: 4
+ <BLANKLINE>
+ Added edge [4 - 7]
+ Added weight: 5
+ <BLANKLINE>
+ Added edge [4 - 5]
+ Added weight: 10
+ <BLANKLINE>
+ Added edge [6 - 7]
+ Added weight: 4
+ <BLANKLINE>
+ Added edge [3 - 4]
+ Added weight: 8
+ <BLANKLINE>
+ The total weight of the minimal spanning tree is: 46
+ """
if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/boruvka.py |
Add detailed docstrings explaining each function | from collections import defaultdict, deque
def is_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """
    Check whether a graph is bipartite using depth-first 2-coloring.

    Args:
        graph: adjacency list; keys are nodes, values their neighbors.

    Returns:
        True if the nodes can be split into two sets with no edge inside a
        set, False otherwise.

    >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 4]})
    True
    >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 1]})
    False
    """

    def paint(node: int, color: int) -> bool:
        # First visit: record the color, then recurse into the neighbors
        # with the opposite color. A revisited node merely has to match
        # the color expected for it.
        if color_of[node] == -1:
            color_of[node] = color
            if node not in graph:
                # Node appears only as a neighbor: nothing further to check.
                return True
            for neighbor in graph[node]:
                if not paint(neighbor, 1 - color):
                    return False
        return color_of[node] == color

    # -1 marks "not yet colored"; valid colors are 0 and 1.
    color_of: defaultdict[int, int] = defaultdict(lambda: -1)
    # Start a fresh coloring from every still-uncolored node.
    return all(color_of[node] != -1 or paint(node, 0) for node in graph)
def is_bipartite_bfs(graph: dict[int, list[int]]) -> bool:
    """
    Check whether a graph is bipartite using breadth-first 2-coloring.

    Args:
        graph: adjacency list; keys are nodes, values their neighbors.

    Returns:
        True if the nodes can be split into two sets with no edge inside a
        set, False otherwise.

    >>> is_bipartite_bfs({0: [1, 2], 1: [0, 3], 2: [0, 4]})
    True
    >>> is_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
    False
    """
    # -1 marks "not yet colored"; valid colors are 0 and 1.
    color_of: defaultdict[int, int] = defaultdict(lambda: -1)
    for start in graph:
        if color_of[start] != -1:
            continue  # already colored while exploring an earlier component
        color_of[start] = 0
        pending = deque([start])
        while pending:
            current = pending.popleft()
            if current not in graph:
                continue  # node has no adjacency entry of its own
            for neighbor in graph[current]:
                if color_of[neighbor] == -1:
                    # Uncolored neighbor gets the opposite color.
                    color_of[neighbor] = 1 - color_of[current]
                    pending.append(neighbor)
                elif color_of[neighbor] == color_of[current]:
                    # Same color on both endpoints -> odd cycle -> not bipartite.
                    return False
    return True
if __name__ == "__main__":
    import doctest

    # Report the doctest outcome explicitly instead of doctest's quiet default.
    result = doctest.testmod()
    if result.failed:
        print(f"{result.failed} test(s) failed.")
    else:
        print("All tests passed!")
def is_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
+ """
+ Check if a graph is bipartite using depth-first search (DFS).
+
+ Args:
+ `graph`: Adjacency list representing the graph.
+
+ Returns:
+ ``True`` if bipartite, ``False`` otherwise.
+
+ Checks if the graph can be divided into two sets of vertices, such that no two
+ vertices within the same set are connected by an edge.
+
+ Examples:
+
+ >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 4]})
+ True
+ >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 1]})
+ False
+ >>> is_bipartite_dfs({})
+ True
+ >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
+ True
+ >>> is_bipartite_dfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]})
+ False
+ >>> is_bipartite_dfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]})
+ True
+ >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
+ False
+ >>> is_bipartite_dfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
+ False
+
+ >>> # FIXME: This test should fails with KeyError: 4.
+ >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]})
+ False
+ >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]})
+ False
+ >>> is_bipartite_dfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]})
+ True
+ >>> is_bipartite_dfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
+ True
+
+ >>> # FIXME: This test should fails with
+ >>> # TypeError: list indices must be integers or...
+ >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]})
+ True
+ >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]})
+ True
+ >>> is_bipartite_dfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]})
+ True
+ """
def depth_first_search(node: int, color: int) -> bool:
+ """
+ Perform Depth-First Search (DFS) on the graph starting from a node.
+
+ Args:
+ node: The current node being visited.
+ color: The color assigned to the current node.
+
+ Returns:
+ True if the graph is bipartite starting from the current node,
+ False otherwise.
+ """
if visited[node] == -1:
visited[node] = color
if node not in graph:
@@ -21,6 +82,56 @@
def is_bipartite_bfs(graph: dict[int, list[int]]) -> bool:
+ """
+ Check if a graph is bipartite using a breadth-first search (BFS).
+
+ Args:
+ `graph`: Adjacency list representing the graph.
+
+ Returns:
+ ``True`` if bipartite, ``False`` otherwise.
+
+ Check if the graph can be divided into two sets of vertices, such that no two
+ vertices within the same set are connected by an edge.
+
+ Examples:
+
+ >>> is_bipartite_bfs({0: [1, 2], 1: [0, 3], 2: [0, 4]})
+ True
+ >>> is_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
+ False
+ >>> is_bipartite_bfs({})
+ True
+ >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
+ True
+ >>> is_bipartite_bfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]})
+ False
+ >>> is_bipartite_bfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]})
+ True
+ >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
+ False
+ >>> is_bipartite_bfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]})
+ False
+
+ >>> # FIXME: This test should fails with KeyError: 4.
+ >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]})
+ False
+ >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]})
+ False
+ >>> is_bipartite_bfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]})
+ True
+ >>> is_bipartite_bfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})
+ True
+
+ >>> # FIXME: This test should fails with
+ >>> # TypeError: list indices must be integers or...
+ >>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]})
+ True
+ >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]})
+ True
+ >>> is_bipartite_bfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]})
+ True
+ """
visited: defaultdict[int, int] = defaultdict(lambda: -1)
for node in graph:
if visited[node] == -1:
@@ -47,4 +158,4 @@ if result.failed:
print(f"{result.failed} test(s) failed.")
else:
- print("All tests passed!")+ print("All tests passed!")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/check_bipatrite.py |
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of array of numbers
    with the lowest possible sum and return the sum along this path.

    NOTE: mutates ``grid`` in place while accumulating the running sums.

    >>> min_path_sum([
    ...     [1, 3, 1],
    ...     [1, 5, 1],
    ...     [4, 2, 1],
    ... ])
    7
    >>> min_path_sum(None)
    Traceback (most recent call last):
        ...
    TypeError: The grid does not contain the appropriate information
    >>> min_path_sum([[]])
    Traceback (most recent call last):
        ...
    TypeError: The grid does not contain the appropriate information
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # The first row can only be reached by moving right: plain prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    # Every later row: each cell is reached from the left or from above.
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    # Bottom-right cell now holds the minimal path sum.
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    """
    Update ``current_row`` in place so each cell holds the minimal path sum
    reaching it, given the already-accumulated ``row_above``.

    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    # Leftmost cell can only be entered from above.
    current_row[0] += row_above[0]
    # Every other cell: cheaper of arriving from the left or from above.
    for col in range(1, len(current_row)):
        current_row[col] += min(current_row[col - 1], row_above[col])
    return current_row
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
+ """
+ Find the path from top left to bottom right of array of numbers
+ with the lowest possible sum and return the sum along this path.
+ >>> min_path_sum([
+ ... [1, 3, 1],
+ ... [1, 5, 1],
+ ... [4, 2, 1],
+ ... ])
+ 7
+
+ >>> min_path_sum([
+ ... [1, 0, 5, 6, 7],
+ ... [8, 9, 0, 4, 2],
+ ... [4, 4, 4, 5, 1],
+ ... [9, 6, 3, 1, 0],
+ ... [8, 4, 3, 2, 7],
+ ... ])
+ 20
+
+ >>> min_path_sum(None)
+ Traceback (most recent call last):
+ ...
+ TypeError: The grid does not contain the appropriate information
+
+ >>> min_path_sum([[]])
+ Traceback (most recent call last):
+ ...
+ TypeError: The grid does not contain the appropriate information
+ """
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information")
@@ -16,6 +45,10 @@
def fill_row(current_row: list, row_above: list) -> list:
+ """
+ >>> fill_row([2, 2, 2], [1, 2, 3])
+ [3, 4, 5]
+ """
current_row[0] += row_above[0]
for cell_n in range(1, len(current_row)):
@@ -27,4 +60,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/minimum_path_sum.py |
Document this code for team use |
# Author: Swayam Singh (https://github.com/practice404)
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """
    Relax every outgoing edge of ``v`` for one search direction of the
    bi-directional Dijkstra (the caller swaps the fwd/bwd arguments for the
    opposite direction), and return the possibly improved meeting distance.

    Side effects: updates ``cst_fwd``, ``parent`` and pushes improved
    neighbors onto ``queue``.
    """
    for neighbor, edge_cost in graph[v]:
        if neighbor in visited_forward:
            continue  # already settled in this direction
        candidate_cost = cst_fwd[v] + edge_cost
        # Standard Dijkstra relaxation: keep the cheaper tentative cost.
        if candidate_cost < cst_fwd.get(neighbor, np.inf):
            queue.put((candidate_cost, neighbor))
            cst_fwd[neighbor] = candidate_cost
            parent[neighbor] = v
        # If the opposite search already settled this neighbor, the two
        # frontiers meet here; record the best combined distance seen.
        if (
            neighbor in visited_backward
            and cst_fwd[v] + edge_cost + cst_bwd[neighbor] < shortest_distance
        ):
            shortest_distance = cst_fwd[v] + edge_cost + cst_bwd[neighbor]
    return shortest_distance
def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Runs one Dijkstra search from ``source`` over ``graph_forward`` and one
    from ``destination`` over ``graph_backward`` in lockstep, stopping once
    the two frontiers have provably met.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    Warnings:
        If the destination is not reachable, function returns -1

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    # Tentative costs from each end; parents allow path reconstruction.
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    # Best meeting distance found so far (inf until the frontiers touch).
    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Settle one node per direction per iteration.
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        # Relax the forward frontier node.
        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        # Relax the backward frontier node (fwd/bwd roles swapped).
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Stopping criterion: no shorter meeting path can still exist.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Example adjacency lists (node -> [[neighbor, edge_weight], ...]).
# graph_bwd holds the reversed edges of graph_fwd; "E" has no incoming
# forward edge, so its backward entry is a [None, inf] placeholder.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
+Bi-directional Dijkstra's algorithm.
+
+A bi-directional approach is an efficient and
+less time consuming optimization for Dijkstra's
+searching algorithm
+
+Reference: shorturl.at/exHM7
+"""
# Author: Swayam Singh (https://github.com/practice404)
@@ -38,6 +47,18 @@ def bidirectional_dij(
source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
+ """
+ Bi-directional Dijkstra's algorithm.
+
+ Returns:
+ shortest_path_distance (int): length of the shortest path.
+
+ Warnings:
+ If the destination is not reachable, function returns -1
+
+ >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
+ 3
+ """
shortest_path_distance = -1
visited_forward = set()
@@ -116,4 +137,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/bi_directional_dijkstra.py |
Add well-formatted docstrings | from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running Markov Chain Algorithm
    """

    def __init__(self):
        # Maps node -> {destination: transition probability}.
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register a node with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(
        self, node1: str, node2: str, probability: float
    ) -> None:
        """Record the probability of moving from node1 to node2, creating
        either node on first use."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        """Return the labels of all known nodes."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from ``node``'s transition distribution.

        Returns "" if the stored probabilities sum to less than the drawn
        random value (i.e. the distribution is incomplete).
        """
        current_probability = 0
        random_value = random()
        # Walk the cumulative distribution until it exceeds the draw.
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """
    Run the Markov Chain for ``steps`` transitions starting from ``start``
    and count how many times each node is visited.

    Args:
        start: label of the starting node.
        transitions: (from_node, to_node, probability) triples.
        steps: number of transitions to simulate.

    Returns:
        A Counter mapping each node to its visit count (every node starts
        with a count of 1 from the Counter over the node list).
    """
    chain = MarkovChainGraphUndirectedUnweighted()
    for origin, target, probability in transitions:
        chain.add_transition_probability(origin, target, probability)

    visit_counts = Counter(chain.get_nodes())
    current = start
    for _ in range(steps):
        current = chain.transition(current)
        visit_counts[current] += 1
    return visit_counts
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
class MarkovChainGraphUndirectedUnweighted:
+ """
+ Undirected Unweighted Graph for running Markov Chain Algorithm
+ """
def __init__(self):
self.connections = {}
@@ -38,6 +41,27 @@ def get_transitions(
start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
+ """
+ Running Markov Chain algorithm and calculating the number of times each node is
+ visited
+
+ >>> transitions = [
+ ... ('a', 'a', 0.9),
+ ... ('a', 'b', 0.075),
+ ... ('a', 'c', 0.025),
+ ... ('b', 'a', 0.15),
+ ... ('b', 'b', 0.8),
+ ... ('b', 'c', 0.05),
+ ... ('c', 'a', 0.25),
+ ... ('c', 'b', 0.25),
+ ... ('c', 'c', 0.5)
+ ... ]
+
+ >>> result = get_transitions('a', transitions, 5000)
+
+ >>> result['a'] > result['b'] > result['c']
+ True
+ """
graph = MarkovChainGraphUndirectedUnweighted()
@@ -57,4 +81,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/markov_chain.py |
Add docstrings with type hints explained |
def max_profit(prices: list[int]) -> int:
    """
    Return the best profit from a single buy followed by a single sell.

    Greedy single pass: track the cheapest price seen so far and the best
    profit achievable by selling at the current price.

    >>> max_profit([7, 1, 5, 3, 6, 4])
    5
    >>> max_profit([7, 6, 4, 3, 1])
    0
    """
    if not prices:
        return 0

    cheapest_so_far = prices[0]
    best_profit = 0
    for current_price in prices:
        cheapest_so_far = min(cheapest_so_far, current_price)
        best_profit = max(best_profit, current_price - cheapest_so_far)
    return best_profit
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: the classic example yields 5 (buy at 1, sell at 6).
    print(max_profit([7, 1, 5, 3, 6, 4]))
+Given a list of stock prices calculate the maximum profit that can be made from a
+single buy and sell of one share of stock. We only allowed to complete one buy
+transaction and one sell transaction but must buy before we sell.
+
+Example : prices = [7, 1, 5, 3, 6, 4]
+max_profit will return 5 - which is by buying at price 1 and selling at price 6.
+
+This problem can be solved using the concept of "GREEDY ALGORITHM".
+
+We iterate over the price array once, keeping track of the lowest price point
+(buy) and the maximum profit we can get at each point. The greedy choice at each point
+is to either buy at the current price if it's less than our current buying price, or
+sell at the current price if the profit is more than our current maximum profit.
+"""
def max_profit(prices: list[int]) -> int:
+ """
+ >>> max_profit([7, 1, 5, 3, 6, 4])
+ 5
+ >>> max_profit([7, 6, 4, 3, 1])
+ 0
+ """
if not prices:
return 0
@@ -18,4 +39,4 @@ import doctest
doctest.testmod()
- print(max_profit([7, 1, 5, 3, 6, 4]))+ print(max_profit([7, 1, 5, 3, 6, 4]))
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/best_time_to_buy_and_sell_stock.py |
Document this script properly |
import random
def random_graph(
    vertices_number: int, probability: float, directed: bool = False
) -> dict:
    """
    Generate a random graph over ``vertices_number`` vertices, where each
    possible edge (u, v) with u < v is drawn independently with the given
    probability.

    @input: vertices_number (number of vertices),
            probability (probability that a generic edge (u,v) exists),
            directed (True for a directed graph, False for undirected)
    @examples:
    >>> random.seed(1)
    >>> random_graph(4, 0.5)
    {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
    >>> random.seed(1)
    >>> random_graph(4, 0.5, True)
    {0: [1], 1: [2, 3], 2: [3], 3: []}
    """
    adjacency: dict = {i: [] for i in range(vertices_number)}

    # Degenerate probabilities short-circuit to the extreme graphs.
    if probability >= 1:
        return complete_graph(vertices_number)
    if probability <= 0:
        return adjacency

    # One random draw per unordered vertex pair; keep the edge on success.
    for u in range(vertices_number):
        for v in range(u + 1, vertices_number):
            if random.random() < probability:
                adjacency[u].append(v)
                if not directed:
                    # Undirected graphs mirror the edge on the other endpoint.
                    adjacency[v].append(u)
    return adjacency
def complete_graph(vertices_number: int) -> dict:
    """
    Generate the complete graph on ``vertices_number`` vertices as an
    adjacency list (every vertex is connected to every other vertex).

    @example:
    >>> complete_graph(3)
    {0: [1, 2], 1: [0, 2], 2: [0, 1]}
    """
    adjacency: dict = {}
    for vertex in range(vertices_number):
        adjacency[vertex] = [other for other in range(vertices_number) if other != vertex]
    return adjacency
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
+* Author: Manuel Di Lullo (https://github.com/manueldilullo)
+* Description: Random graphs generator.
+ Uses graphs represented with an adjacency list.
+
+URL: https://en.wikipedia.org/wiki/Random_graph
+"""
import random
@@ -5,6 +12,20 @@ def random_graph(
vertices_number: int, probability: float, directed: bool = False
) -> dict:
+ """
+ Generate a random graph
+ @input: vertices_number (number of vertices),
+ probability (probability that a generic edge (u,v) exists),
+ directed (if True: graph will be a directed graph,
+ otherwise it will be an undirected graph)
+ @examples:
+ >>> random.seed(1)
+ >>> random_graph(4, 0.5)
+ {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
+ >>> random.seed(1)
+ >>> random_graph(4, 0.5, True)
+ {0: [1], 1: [2, 3], 2: [3], 3: []}
+ """
graph: dict = {i: [] for i in range(vertices_number)}
# if probability is greater or equal than 1, then generate a complete graph
@@ -27,6 +48,14 @@
def complete_graph(vertices_number: int) -> dict:
+ """
+ Generate a complete graph with vertices_number vertices.
+ @input: vertices_number (number of vertices),
+ directed (False if the graph is undirected, True otherwise)
+ @example:
+ >>> complete_graph(3)
+ {0: [1, 2], 1: [0, 2], 2: [0, 1]}
+ """
return {
i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
}
@@ -35,4 +64,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/random_graph_generator.py |
Insert docstrings into my code |
def minimum_waiting_time(queries: list[int]) -> int:
    """
    Greedy minimum total waiting time: serve the shortest queries first.

    After sorting, each query's duration is waited through by every query
    scheduled after it, so the i-th shortest query contributes
    duration * (remaining queries) to the total.

    Args:
        queries: query durations (e.g. in picoseconds).

    Returns:
        The minimum total waiting time.

    >>> minimum_waiting_time([3, 2, 1, 2, 6])
    17
    >>> minimum_waiting_time([])
    0
    """
    total_queries = len(queries)
    if total_queries in (0, 1):
        return 0

    waiting_time = 0
    for position, duration in enumerate(sorted(queries)):
        # `duration` is waited by each of the queries scheduled afterwards.
        waiting_time += duration * (total_queries - position - 1)
    return waiting_time
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
+Calculate the minimum waiting time using a greedy algorithm.
+reference: https://www.youtube.com/watch?v=Sf3eiO12eJs
+
+For doctests run following command:
+python -m doctest -v minimum_waiting_time.py
+
+The minimum_waiting_time function uses a greedy algorithm to calculate the minimum
+time for queries to complete. It sorts the list in non-decreasing order, calculates
+the waiting time for each query by multiplying its position in the list with the
+sum of all remaining query times, and returns the total waiting time. A doctest
+ensures that the function produces the correct output.
+"""
def minimum_waiting_time(queries: list[int]) -> int:
+ """
+ This function takes a list of query times and returns the minimum waiting time
+ for all queries to be completed.
+
+ Args:
+ queries: A list of queries measured in picoseconds
+
+ Returns:
+ total_waiting_time: Minimum waiting time measured in picoseconds
+
+ Examples:
+ >>> minimum_waiting_time([3, 2, 1, 2, 6])
+ 17
+ >>> minimum_waiting_time([3, 2, 1])
+ 4
+ >>> minimum_waiting_time([1, 2, 3, 4])
+ 10
+ >>> minimum_waiting_time([5, 5, 5, 5])
+ 30
+ >>> minimum_waiting_time([])
+ 0
+ """
n = len(queries)
if n in (0, 1):
return 0
@@ -10,4 +45,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/minimum_waiting_time.py |
Add docstrings that explain purpose and usage |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
def __init__(self, id_):
self.id = str(id_)
self.key = None
self.pi = None
self.neighbors = []
self.edges = {} # {vertex:distance}
def __lt__(self, other):
return self.key < other.key
def __repr__(self):
return self.id
def add_neighbor(self, vertex):
self.neighbors.append(vertex)
def add_edge(self, vertex, weight):
self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
# add the edges:
graph[a - 1].add_edge(graph[b - 1], edge)
graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q)
q.remove(u)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1, len(graph)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = list(graph)
hq.heapify(h)
while h:
u = hq.heappop(h)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h)
for i in range(1, len(graph)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,9 @@+"""Prim's Algorithm.
+
+Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm.
+
+Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm
+"""
import heapq as hq
import math
@@ -5,8 +11,16 @@
class Vertex:
+ """Class Vertex."""
def __init__(self, id_):
+ """
+ Arguments:
+ id - input an id to identify the vertex
+ Attributes:
+ neighbors - a list of the vertices it is linked to
+ edges - a dict to store the edges's weight
+ """
self.id = str(id_)
self.key = None
self.pi = None
@@ -14,15 +28,19 @@ self.edges = {} # {vertex:distance}
def __lt__(self, other):
+ """Comparison rule to < operator."""
return self.key < other.key
def __repr__(self):
+ """Return the vertex id."""
return self.id
def add_neighbor(self, vertex):
+ """Add a pointer to a vertex at neighbor's list."""
self.neighbors.append(vertex)
def add_edge(self, vertex, weight):
+ """Destination vertex and weight."""
self.edges[vertex.id] = weight
@@ -36,6 +54,17 @@
def prim(graph: list, root: Vertex) -> list:
+ """Prim's Algorithm.
+
+ Runtime:
+ O(mn) with `m` edges and `n` vertices
+
+ Return:
+ List with the edges of a Minimum Spanning Tree
+
+ Usage:
+ prim(graph, graph[0])
+ """
a = []
for u in graph:
u.key = math.inf
@@ -55,6 +84,17 @@
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
+ """Prim's Algorithm with min heap.
+
+ Runtime:
+ O((m + n)log n) with `m` edges and `n` vertices
+
+ Yield:
+ Edges of a Minimum Spanning Tree
+
+ Usage:
+ prim(graph, graph[0])
+ """
for u in graph:
u.key = math.inf
u.pi = None
@@ -76,9 +116,37 @@
def test_vector() -> None:
+ """
+ # Creates a list to store x vertices.
+ >>> x = 5
+ >>> G = [Vertex(n) for n in range(x)]
+
+ >>> connect(G, 1, 2, 15)
+ >>> connect(G, 1, 3, 12)
+ >>> connect(G, 2, 4, 13)
+ >>> connect(G, 2, 5, 5)
+ >>> connect(G, 3, 2, 6)
+ >>> connect(G, 3, 4, 6)
+ >>> connect(G, 0, 0, 0) # Generate the minimum spanning tree:
+ >>> G_heap = G[:]
+ >>> MST = prim(G, G[0])
+ >>> MST_heap = prim_heap(G, G[0])
+ >>> for i in MST:
+ ... print(i)
+ (2, 3)
+ (3, 1)
+ (4, 3)
+ (5, 2)
+ >>> for i in MST_heap:
+ ... print(i)
+ (2, 3)
+ (3, 1)
+ (4, 3)
+ (5, 2)
+ """
if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/prim.py |
Document all public functions with docstrings | class Graph:
def __init__(self):
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
def add_vertex(self, vertex):
if vertex not in self.adjacency:
self.adjacency[vertex] = {}
self.num_vertices += 1
def add_edge(self, head, tail, weight):
self.add_vertex(head)
self.add_vertex(tail)
if head == tail:
return
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def distinct_weight(self):
edges = self.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for i in range(len(edges)):
edges[i] = list(edges[i])
edges.sort(key=lambda e: e[2])
for i in range(len(edges) - 1):
if edges[i][2] >= edges[i + 1][2]:
edges[i + 1][2] = edges[i][2] + 1
for edge in edges:
head, tail, weight = edge
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def __str__(self):
string = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def get_edges(self):
output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def get_vertices(self):
return self.adjacency.keys()
@staticmethod
def build(vertices=None, edges=None):
g = Graph()
if vertices is None:
vertices = []
if edges is None:
edge = []
for vertex in vertices:
g.add_vertex(vertex)
for edge in edges:
g.add_edge(*edge)
return g
class UnionFind:
def __init__(self):
self.parent = {}
self.rank = {}
def __len__(self):
return len(self.parent)
def make_set(self, item):
if item in self.parent:
return self.find(item)
self.parent[item] = item
self.rank[item] = 0
return item
def find(self, item):
if item not in self.parent:
return self.make_set(item)
if item != self.parent[item]:
self.parent[item] = self.find(self.parent[item])
return self.parent[item]
def union(self, item1, item2):
root1 = self.find(item1)
root2 = self.find(item2)
if root1 == root2:
return root1
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
return root1
if self.rank[root1] < self.rank[root2]:
self.parent[root1] = root2
return root2
if self.rank[root1] == self.rank[root2]:
self.rank[root1] += 1
self.parent[root2] = root1
return root1
return None
@staticmethod
def boruvka_mst(graph):
num_components = graph.num_vertices
union_find = Graph.UnionFind()
mst_edges = []
while num_components > 1:
cheap_edge = {}
for vertex in graph.get_vertices():
cheap_edge[vertex] = -1
edges = graph.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for edge in edges:
head, tail, weight = edge
set1 = union_find.find(head)
set2 = union_find.find(tail)
if set1 != set2:
if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
cheap_edge[set1] = [head, tail, weight]
if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
cheap_edge[set2] = [head, tail, weight]
for head_tail_weight in cheap_edge.values():
if head_tail_weight != -1:
head, tail, weight = head_tail_weight
if union_find.find(head) != union_find.find(tail):
union_find.union(head, tail)
mst_edges.append(head_tail_weight)
num_components = num_components - 1
mst = Graph.build(edges=mst_edges)
return mst | --- +++ @@ -1,150 +1,196 @@-class Graph:
-
- def __init__(self):
- self.num_vertices = 0
- self.num_edges = 0
- self.adjacency = {}
-
- def add_vertex(self, vertex):
- if vertex not in self.adjacency:
- self.adjacency[vertex] = {}
- self.num_vertices += 1
-
- def add_edge(self, head, tail, weight):
-
- self.add_vertex(head)
- self.add_vertex(tail)
-
- if head == tail:
- return
-
- self.adjacency[head][tail] = weight
- self.adjacency[tail][head] = weight
-
- def distinct_weight(self):
- edges = self.get_edges()
- for edge in edges:
- head, tail, weight = edge
- edges.remove((tail, head, weight))
- for i in range(len(edges)):
- edges[i] = list(edges[i])
-
- edges.sort(key=lambda e: e[2])
- for i in range(len(edges) - 1):
- if edges[i][2] >= edges[i + 1][2]:
- edges[i + 1][2] = edges[i][2] + 1
- for edge in edges:
- head, tail, weight = edge
- self.adjacency[head][tail] = weight
- self.adjacency[tail][head] = weight
-
- def __str__(self):
- string = ""
- for tail in self.adjacency:
- for head in self.adjacency[tail]:
- weight = self.adjacency[head][tail]
- string += f"{head} -> {tail} == {weight}\n"
- return string.rstrip("\n")
-
- def get_edges(self):
- output = []
- for tail in self.adjacency:
- for head in self.adjacency[tail]:
- output.append((tail, head, self.adjacency[head][tail]))
- return output
-
- def get_vertices(self):
- return self.adjacency.keys()
-
- @staticmethod
- def build(vertices=None, edges=None):
- g = Graph()
- if vertices is None:
- vertices = []
- if edges is None:
- edge = []
- for vertex in vertices:
- g.add_vertex(vertex)
- for edge in edges:
- g.add_edge(*edge)
- return g
-
- class UnionFind:
-
- def __init__(self):
- self.parent = {}
- self.rank = {}
-
- def __len__(self):
- return len(self.parent)
-
- def make_set(self, item):
- if item in self.parent:
- return self.find(item)
-
- self.parent[item] = item
- self.rank[item] = 0
- return item
-
- def find(self, item):
- if item not in self.parent:
- return self.make_set(item)
- if item != self.parent[item]:
- self.parent[item] = self.find(self.parent[item])
- return self.parent[item]
-
- def union(self, item1, item2):
- root1 = self.find(item1)
- root2 = self.find(item2)
-
- if root1 == root2:
- return root1
-
- if self.rank[root1] > self.rank[root2]:
- self.parent[root2] = root1
- return root1
-
- if self.rank[root1] < self.rank[root2]:
- self.parent[root1] = root2
- return root2
-
- if self.rank[root1] == self.rank[root2]:
- self.rank[root1] += 1
- self.parent[root2] = root1
- return root1
- return None
-
- @staticmethod
- def boruvka_mst(graph):
- num_components = graph.num_vertices
-
- union_find = Graph.UnionFind()
- mst_edges = []
- while num_components > 1:
- cheap_edge = {}
- for vertex in graph.get_vertices():
- cheap_edge[vertex] = -1
-
- edges = graph.get_edges()
- for edge in edges:
- head, tail, weight = edge
- edges.remove((tail, head, weight))
- for edge in edges:
- head, tail, weight = edge
- set1 = union_find.find(head)
- set2 = union_find.find(tail)
- if set1 != set2:
- if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
- cheap_edge[set1] = [head, tail, weight]
-
- if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
- cheap_edge[set2] = [head, tail, weight]
- for head_tail_weight in cheap_edge.values():
- if head_tail_weight != -1:
- head, tail, weight = head_tail_weight
- if union_find.find(head) != union_find.find(tail):
- union_find.union(head, tail)
- mst_edges.append(head_tail_weight)
- num_components = num_components - 1
- mst = Graph.build(edges=mst_edges)
- return mst+class Graph:
+ """
+ Data structure to store graphs (based on adjacency lists)
+ """
+
+ def __init__(self):
+ self.num_vertices = 0
+ self.num_edges = 0
+ self.adjacency = {}
+
+ def add_vertex(self, vertex):
+ """
+ Adds a vertex to the graph
+
+ """
+ if vertex not in self.adjacency:
+ self.adjacency[vertex] = {}
+ self.num_vertices += 1
+
+ def add_edge(self, head, tail, weight):
+ """
+ Adds an edge to the graph
+
+ """
+
+ self.add_vertex(head)
+ self.add_vertex(tail)
+
+ if head == tail:
+ return
+
+ self.adjacency[head][tail] = weight
+ self.adjacency[tail][head] = weight
+
+ def distinct_weight(self):
+ """
+ For Boruvks's algorithm the weights should be distinct
+ Converts the weights to be distinct
+
+ """
+ edges = self.get_edges()
+ for edge in edges:
+ head, tail, weight = edge
+ edges.remove((tail, head, weight))
+ for i in range(len(edges)):
+ edges[i] = list(edges[i])
+
+ edges.sort(key=lambda e: e[2])
+ for i in range(len(edges) - 1):
+ if edges[i][2] >= edges[i + 1][2]:
+ edges[i + 1][2] = edges[i][2] + 1
+ for edge in edges:
+ head, tail, weight = edge
+ self.adjacency[head][tail] = weight
+ self.adjacency[tail][head] = weight
+
+ def __str__(self):
+ """
+ Returns string representation of the graph
+ """
+ string = ""
+ for tail in self.adjacency:
+ for head in self.adjacency[tail]:
+ weight = self.adjacency[head][tail]
+ string += f"{head} -> {tail} == {weight}\n"
+ return string.rstrip("\n")
+
+ def get_edges(self):
+ """
+ Returna all edges in the graph
+ """
+ output = []
+ for tail in self.adjacency:
+ for head in self.adjacency[tail]:
+ output.append((tail, head, self.adjacency[head][tail]))
+ return output
+
+ def get_vertices(self):
+ """
+ Returns all vertices in the graph
+ """
+ return self.adjacency.keys()
+
+ @staticmethod
+ def build(vertices=None, edges=None):
+ """
+ Builds a graph from the given set of vertices and edges
+
+ """
+ g = Graph()
+ if vertices is None:
+ vertices = []
+ if edges is None:
+ edge = []
+ for vertex in vertices:
+ g.add_vertex(vertex)
+ for edge in edges:
+ g.add_edge(*edge)
+ return g
+
+ class UnionFind:
+ """
+ Disjoint set Union and Find for Boruvka's algorithm
+ """
+
+ def __init__(self):
+ self.parent = {}
+ self.rank = {}
+
+ def __len__(self):
+ return len(self.parent)
+
+ def make_set(self, item):
+ if item in self.parent:
+ return self.find(item)
+
+ self.parent[item] = item
+ self.rank[item] = 0
+ return item
+
+ def find(self, item):
+ if item not in self.parent:
+ return self.make_set(item)
+ if item != self.parent[item]:
+ self.parent[item] = self.find(self.parent[item])
+ return self.parent[item]
+
+ def union(self, item1, item2):
+ root1 = self.find(item1)
+ root2 = self.find(item2)
+
+ if root1 == root2:
+ return root1
+
+ if self.rank[root1] > self.rank[root2]:
+ self.parent[root2] = root1
+ return root1
+
+ if self.rank[root1] < self.rank[root2]:
+ self.parent[root1] = root2
+ return root2
+
+ if self.rank[root1] == self.rank[root2]:
+ self.rank[root1] += 1
+ self.parent[root2] = root1
+ return root1
+ return None
+
+ @staticmethod
+ def boruvka_mst(graph):
+ """
+ Implementation of Boruvka's algorithm
+ >>> g = Graph()
+ >>> g = Graph.build([0, 1, 2, 3], [[0, 1, 1], [0, 2, 1],[2, 3, 1]])
+ >>> g.distinct_weight()
+ >>> bg = Graph.boruvka_mst(g)
+ >>> print(bg)
+ 1 -> 0 == 1
+ 2 -> 0 == 2
+ 0 -> 1 == 1
+ 0 -> 2 == 2
+ 3 -> 2 == 3
+ 2 -> 3 == 3
+ """
+ num_components = graph.num_vertices
+
+ union_find = Graph.UnionFind()
+ mst_edges = []
+ while num_components > 1:
+ cheap_edge = {}
+ for vertex in graph.get_vertices():
+ cheap_edge[vertex] = -1
+
+ edges = graph.get_edges()
+ for edge in edges:
+ head, tail, weight = edge
+ edges.remove((tail, head, weight))
+ for edge in edges:
+ head, tail, weight = edge
+ set1 = union_find.find(head)
+ set2 = union_find.find(tail)
+ if set1 != set2:
+ if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
+ cheap_edge[set1] = [head, tail, weight]
+
+ if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
+ cheap_edge[set2] = [head, tail, weight]
+ for head_tail_weight in cheap_edge.values():
+ if head_tail_weight != -1:
+ head, tail, weight = head_tail_weight
+ if union_find.find(head) != union_find.find(tail):
+ union_find.union(head, tail)
+ mst_edges.append(head_tail_weight)
+ num_components = num_components - 1
+ mst = Graph.build(edges=mst_edges)
+ return mst
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/minimum_spanning_tree_boruvka.py |
Add docstrings that explain inputs and outputs |
from __future__ import annotations
from collections import deque
from queue import Queue
from timeit import timeit
G = {
"A": ["B", "C"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B"],
"E": ["B", "F"],
"F": ["C", "E"],
}
def breadth_first_search(graph: dict, start: str) -> list[str]:
explored = {start}
result = [start]
queue: Queue = Queue()
queue.put(start)
while not queue.empty():
v = queue.get()
for w in graph[v]:
if w not in explored:
explored.add(w)
result.append(w)
queue.put(w)
return result
def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]:
visited = {start}
result = [start]
queue = deque([start])
while queue:
v = queue.popleft()
for child in graph[v]:
if child not in visited:
visited.add(child)
result.append(child)
queue.append(child)
return result
def benchmark_function(name: str) -> None:
setup = f"from __main__ import G, {name}"
number = 10000
res = timeit(f"{name}(G, 'A')", setup=setup, number=number)
print(f"{name:<35} finished {number} runs in {res:.5f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark_function("breadth_first_search")
benchmark_function("breadth_first_search_with_deque")
# breadth_first_search finished 10000 runs in 0.20999 seconds
# breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds | --- +++ @@ -1,3 +1,17 @@+"""
+https://en.wikipedia.org/wiki/Breadth-first_search
+pseudo-code:
+breadth_first_search(graph G, start vertex s):
+// all nodes initially unexplored
+mark s as explored
+let Q = queue data structure, initialized with s
+while Q is non-empty:
+ remove the first node of Q, call it v
+ for each edge(v, w): // for w in graph[v]
+ if w unexplored:
+ mark w as explored
+ add w to Q (at the end)
+"""
from __future__ import annotations
@@ -16,6 +30,12 @@
def breadth_first_search(graph: dict, start: str) -> list[str]:
+ """
+ Implementation of breadth first search using queue.Queue.
+
+ >>> ''.join(breadth_first_search(G, 'A'))
+ 'ABCDEF'
+ """
explored = {start}
result = [start]
queue: Queue = Queue()
@@ -31,6 +51,12 @@
def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]:
+ """
+ Implementation of breadth first search using collection.queue.
+
+ >>> ''.join(breadth_first_search_with_deque(G, 'A'))
+ 'ABCDEF'
+ """
visited = {start}
result = [start]
queue = deque([start])
@@ -59,4 +85,4 @@ benchmark_function("breadth_first_search")
benchmark_function("breadth_first_search_with_deque")
# breadth_first_search finished 10000 runs in 0.20999 seconds
- # breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds+ # breadth_first_search_with_deque finished 10000 runs in 0.01421 seconds
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/breadth_first_search_2.py |
Generate docstrings for each module |
test_graph_1 = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1], 4: [5, 6], 5: [4, 6], 6: [4, 5]}
test_graph_2 = {0: [1, 2, 3], 1: [0, 3], 2: [0], 3: [0, 1], 4: [], 5: []}
def dfs(graph: dict, vert: int, visited: list) -> list:
visited[vert] = True
connected_verts = []
for neighbour in graph[vert]:
if not visited[neighbour]:
connected_verts += dfs(graph, neighbour, visited)
return [vert, *connected_verts]
def connected_components(graph: dict) -> list:
graph_size = len(graph)
visited = graph_size * [False]
components_list = []
for i in range(graph_size):
if not visited[i]:
i_connected = dfs(graph, i, visited)
components_list.append(i_connected)
return components_list
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,9 @@+"""
+https://en.wikipedia.org/wiki/Component_(graph_theory)
+
+Finding connected components in graph
+
+"""
test_graph_1 = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1], 4: [5, 6], 5: [4, 6], 6: [4, 5]}
@@ -5,6 +11,14 @@
def dfs(graph: dict, vert: int, visited: list) -> list:
+ """
+ Use depth first search to find all vertices
+ being in the same component as initial vertex
+ >>> dfs(test_graph_1, 0, 5 * [False])
+ [0, 1, 3, 2]
+ >>> dfs(test_graph_2, 0, 6 * [False])
+ [0, 1, 3, 2]
+ """
visited[vert] = True
connected_verts = []
@@ -17,6 +31,14 @@
def connected_components(graph: dict) -> list:
+ """
+ This function takes graph as a parameter
+ and then returns the list of connected components
+ >>> connected_components(test_graph_1)
+ [[0, 1, 3, 2], [4, 5, 6]]
+ >>> connected_components(test_graph_2)
+ [[0, 1, 3, 2], [4], [5]]
+ """
graph_size = len(graph)
visited = graph_size * [False]
@@ -33,4 +55,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/connected_components.py |
Add documentation for all methods |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(
graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
visited[vert] = True
order = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(graph, neighbour, visited)
order.append(vert)
return order
def find_components(
reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
visited[vert] = True
component = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(reversed_graph, neighbour, visited)
return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
visited = len(graph) * [False]
reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(vert)
order = []
for i, was_visited in enumerate(visited):
if not was_visited:
order += topology_sort(graph, i, visited)
components_list = []
visited = len(graph) * [False]
for i in range(len(graph)):
vert = order[len(graph) - i - 1]
if not visited[vert]:
component = find_components(reversed_graph, vert, visited)
components_list.append(component)
return components_list | --- +++ @@ -1,3 +1,9 @@+"""
+https://en.wikipedia.org/wiki/Strongly_connected_component
+
+Finding strongly connected components in directed graph
+
+"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
@@ -7,6 +13,14 @@ def topology_sort(
graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
+ """
+ Use depth first search to sort graph
+ At this time graph is the same as input
+ >>> topology_sort(test_graph_1, 0, 5 * [False])
+ [1, 2, 4, 3, 0]
+ >>> topology_sort(test_graph_2, 0, 6 * [False])
+ [2, 1, 5, 4, 3, 0]
+ """
visited[vert] = True
order = []
@@ -23,6 +37,14 @@ def find_components(
reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]
) -> list[int]:
+ """
+ Use depth first search to find strongly connected
+ vertices. Now graph is reversed
+ >>> find_components({0: [1], 1: [2], 2: [0]}, 0, 5 * [False])
+ [0, 1, 2]
+ >>> find_components({0: [2], 1: [0], 2: [0, 1]}, 0, 6 * [False])
+ [0, 2, 1]
+ """
visited[vert] = True
component = [vert]
@@ -35,6 +57,14 @@
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
+ """
+ This function takes graph as a parameter
+ and then returns the list of strongly connected components
+ >>> strongly_connected_components(test_graph_1)
+ [[0, 1, 2], [3], [4]]
+ >>> strongly_connected_components(test_graph_2)
+ [[0, 2, 1], [3, 5, 4]]
+ """
visited = len(graph) * [False]
reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
@@ -57,4 +87,4 @@ component = find_components(reversed_graph, vert, visited)
components_list.append(component)
- return components_list+ return components_list
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/strongly_connected_components.py |
Document helper functions with docstrings | from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
n = len(g)
stack: deque[int] = deque()
on_stack = [False for _ in range(n)]
index_of = [-1 for _ in range(n)]
lowlink_of = index_of[:]
def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
index_of[v] = index # the number when this node is seen
lowlink_of[v] = index # lowest rank node reachable from here
index += 1
stack.append(v)
on_stack[v] = True
for w in g[v]:
if index_of[w] == -1:
index = strong_connect(w, index, components)
lowlink_of[v] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowlink_of[v] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
component = []
w = stack.pop()
on_stack[w] = False
component.append(w)
while w != v:
w = stack.pop()
on_stack[w] = False
component.append(w)
components.append(component)
return index
components: list[list[int]] = []
for v in range(n):
if index_of[v] == -1:
strong_connect(v, 0, components)
return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
g: list[list[int]] = [[] for _ in range(n)]
for u, v in edges:
g[u].append(v)
return g
if __name__ == "__main__":
# Test
n_vertices = 7
source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
edges = list(zip(source, target))
g = create_graph(n_vertices, edges)
assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]] | --- +++ @@ -2,6 +2,39 @@
def tarjan(g: list[list[int]]) -> list[list[int]]:
+ """
+ Tarjan's algo for finding strongly connected components in a directed graph
+
+ Uses two main attributes of each node to track reachability, the index of that node
+ within a component(index), and the lowest index reachable from that node(lowlink).
+
+ We then perform a dfs of the each component making sure to update these parameters
+ for each node and saving the nodes we visit on the way.
+
+ If ever we find that the lowest reachable node from a current node is equal to the
+ index of the current node then it must be the root of a strongly connected
+ component and so we save it and it's equireachable vertices as a strongly
+ connected component.
+
+ Complexity: strong_connect() is called at most once for each node and has a
+ complexity of O(|E|) as it is DFS.
+ Therefore this has complexity O(|V| + |E|) for a graph G = (V, E)
+
+ >>> tarjan([[2, 3, 4], [2, 3, 4], [0, 1, 3], [0, 1, 2], [1]])
+ [[4, 3, 1, 2, 0]]
+ >>> tarjan([[], [], [], []])
+ [[0], [1], [2], [3]]
+ >>> a = [0, 1, 2, 3, 4, 5, 4]
+ >>> b = [1, 0, 3, 2, 5, 4, 0]
+ >>> n = 7
+ >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) == sorted(
+ ... tarjan(create_graph(n, list(zip(a[::-1], b[::-1])))))
+ True
+ >>> a = [0, 1, 2, 3, 4, 5, 6]
+ >>> b = [0, 1, 2, 3, 4, 5, 6]
+ >>> sorted(tarjan(create_graph(n, list(zip(a, b)))))
+ [[0], [1], [2], [3], [4], [5], [6]]
+ """
n = len(g)
stack: deque[int] = deque()
@@ -48,6 +81,14 @@
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
+ """
+ >>> n = 7
+ >>> source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
+ >>> target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
+ >>> edges = list(zip(source, target))
+ >>> create_graph(n, edges)
+ [[1, 3], [2], [0], [1, 4], [5, 6], [], [5]]
+ """
g: list[list[int]] = [[] for _ in range(n)]
for u, v in edges:
g[u].append(v)
@@ -62,4 +103,4 @@ edges = list(zip(source, target))
g = create_graph(n_vertices, edges)
- assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]]+ assert tarjan(g) == [[5], [6], [4], [3, 2, 1, 0]]
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/tarjans_scc.py |
Generate docstrings for each module |
def check_cycle(graph: dict) -> bool:
# Keep track of visited nodes
visited: set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
rec_stk: set[int] = set()
return any(
node not in visited and depth_first_search(graph, node, visited, rec_stk)
for node in graph
)
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
# Mark current node as visited and add to recursion stack
visited.add(vertex)
rec_stk.add(vertex)
for node in graph[vertex]:
if node not in visited:
if depth_first_search(graph, node, visited, rec_stk):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(vertex)
return False
if __name__ == "__main__":
from doctest import testmod
testmod() | --- +++ @@ -1,6 +1,16 @@+"""
+Program to check if a cycle is present in a given graph
+"""
def check_cycle(graph: dict) -> bool:
+ """
+ Returns True if graph is cyclic else False
+ >>> check_cycle(graph={0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]})
+ False
+ >>> check_cycle(graph={0:[1, 2], 1:[2], 2:[0, 3], 3:[3]})
+ True
+ """
# Keep track of visited nodes
visited: set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
@@ -12,6 +22,14 @@
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
+ """
+ Recur for all neighbours.
+ If any neighbour is visited and in rec_stk then graph is cyclic.
+ >>> graph = {0:[], 1:[0, 3], 2:[0, 4], 3:[5], 4:[5], 5:[]}
+ >>> vertex, visited, rec_stk = 0, set(), set()
+ >>> depth_first_search(graph, vertex, visited, rec_stk)
+ False
+ """
# Mark current node as visited and add to recursion stack
visited.add(vertex)
rec_stk.add(vertex)
@@ -31,4 +49,4 @@ if __name__ == "__main__":
from doctest import testmod
- testmod()+ testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/check_cycle.py |
Improve documentation using docstrings |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
total_value = int(value)
# Initialize Result
answer = []
# Traverse through all denomination
for denomination in reversed(denominations):
# Find denominations
while int(total_value) >= int(denomination):
total_value -= int(denomination)
answer.append(denomination) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
denominations = []
value = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
n = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
value = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
value = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f"Following is minimal change for {value}: ")
answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ") | --- +++ @@ -1,6 +1,60 @@+"""
+Test cases:
+Do you want to enter your denominations ? (Y/N) :N
+Enter the change you want to make in Indian Currency: 987
+Following is minimal change for 987 :
+500 100 100 100 100 50 20 10 5 2
+
+Do you want to enter your denominations ? (Y/N) :Y
+Enter number of denomination:10
+1
+5
+10
+20
+50
+100
+200
+500
+1000
+2000
+Enter the change you want to make: 18745
+Following is minimal change for 18745 :
+2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5
+
+Do you want to enter your denominations ? (Y/N) :N
+Enter the change you want to make: 0
+The total value cannot be zero or negative.
+Do you want to enter your denominations ? (Y/N) :N
+Enter the change you want to make: -98
+The total value cannot be zero or negative.
+
+Do you want to enter your denominations ? (Y/N) :Y
+Enter number of denomination:5
+1
+5
+100
+500
+1000
+Enter the change you want to make: 456
+Following is minimal change for 456 :
+100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1
+"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
+ """
+ Find the minimum change from the given denominations and value
+ >>> find_minimum_change([1, 5, 10, 20, 50, 100, 200, 500, 1000,2000], 18745)
+ [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 500, 200, 20, 20, 5]
+ >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
+ [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
+ >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 0)
+ []
+ >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], -98)
+ []
+ >>> find_minimum_change([1, 5, 100, 500, 1000], 456)
+ [100, 100, 100, 100, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1]
+ """
total_value = int(value)
# Initialize Result
@@ -43,4 +97,4 @@ answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
- print(answer[i], end=" ")+ print(answer[i], end=" ")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/minimum_coin_change.py |
Add docstrings to make code maintainable |
from dataclasses import dataclass
@dataclass
class GasStation:
gas_quantity: int
cost: int
def get_gas_stations(
gas_quantities: list[int], costs: list[int]
) -> tuple[GasStation, ...]:
return tuple(
GasStation(quantity, cost) for quantity, cost in zip(gas_quantities, costs)
)
def can_complete_journey(gas_stations: tuple[GasStation, ...]) -> int:
total_gas = sum(gas_station.gas_quantity for gas_station in gas_stations)
total_cost = sum(gas_station.cost for gas_station in gas_stations)
if total_gas < total_cost:
return -1
start = 0
net = 0
for i, gas_station in enumerate(gas_stations):
net += gas_station.gas_quantity - gas_station.cost
if net < 0:
start = i + 1
net = 0
return start
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,28 @@+"""
+Task:
+There are n gas stations along a circular route, where the amount of gas
+at the ith station is gas_quantities[i].
+
+You have a car with an unlimited gas tank and it costs costs[i] of gas
+to travel from the ith station to its next (i + 1)th station.
+You begin the journey with an empty tank at one of the gas stations.
+
+Given two integer arrays gas_quantities and costs, return the starting
+gas station's index if you can travel around the circuit once
+in the clockwise direction otherwise, return -1.
+If there exists a solution, it is guaranteed to be unique
+
+Reference: https://leetcode.com/problems/gas-station/description
+
+Implementation notes:
+First, check whether the total gas is enough to complete the journey. If not, return -1.
+However, if there is enough gas, it is guaranteed that there is a valid
+starting index to reach the end of the journey.
+Greedily calculate the net gain (gas_quantity - cost) at each station.
+If the net gain ever goes below 0 while iterating through the stations,
+start checking from the next station.
+
+"""
from dataclasses import dataclass
@@ -11,12 +36,47 @@ def get_gas_stations(
gas_quantities: list[int], costs: list[int]
) -> tuple[GasStation, ...]:
+ """
+ This function returns a tuple of gas stations.
+
+ Args:
+ gas_quantities: Amount of gas available at each station
+ costs: The cost of gas required to move from one station to the next
+
+ Returns:
+ A tuple of gas stations
+
+ >>> gas_stations = get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2])
+ >>> len(gas_stations)
+ 5
+ >>> gas_stations[0]
+ GasStation(gas_quantity=1, cost=3)
+ >>> gas_stations[-1]
+ GasStation(gas_quantity=5, cost=2)
+ """
return tuple(
GasStation(quantity, cost) for quantity, cost in zip(gas_quantities, costs)
)
def can_complete_journey(gas_stations: tuple[GasStation, ...]) -> int:
+ """
+ This function returns the index from which to start the journey
+ in order to reach the end.
+
+ Args:
+ gas_quantities [list]: Amount of gas available at each station
+ cost [list]: The cost of gas required to move from one station to the next
+
+ Returns:
+ start [int]: start index needed to complete the journey
+
+ Examples:
+ >>> can_complete_journey(get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]))
+ 3
+ >>> can_complete_journey(get_gas_stations([2, 3, 4], [3, 4, 3]))
+ -1
+ """
total_gas = sum(gas_station.gas_quantity for gas_station in gas_stations)
total_cost = sum(gas_station.cost for gas_station in gas_stations)
if total_gas < total_cost:
@@ -35,4 +95,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/gas_station.py |
Generate docstrings for exported functions |
def optimal_merge_pattern(files: list) -> float:
optimal_merge_cost = 0
while len(files) > 1:
temp = 0
# Consider two files with minimum cost to be merged
for _ in range(2):
min_index = files.index(min(files))
temp += files[min_index]
files.pop(min_index)
files.append(temp)
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,6 +1,42 @@+"""
+This is a pure Python implementation of the greedy-merge-sort algorithm
+reference: https://www.geeksforgeeks.org/optimal-file-merge-patterns/
+
+For doctests run following command:
+python3 -m doctest -v greedy_merge_sort.py
+
+Objective
+Merge a set of sorted files of different length into a single sorted file.
+We need to find an optimal solution, where the resultant file
+will be generated in minimum time.
+
+Approach
+If the number of sorted files are given, there are many ways
+to merge them into a single sorted file.
+This merge can be performed pair wise.
+To merge a m-record file and a n-record file requires possibly m+n record moves
+the optimal choice being,
+merge the two smallest files together at each step (greedy approach).
+"""
def optimal_merge_pattern(files: list) -> float:
+ """Function to merge all the files with optimum cost
+
+ Args:
+ files [list]: A list of sizes of different files to be merged
+
+ Returns:
+ optimal_merge_cost [int]: Optimal cost to merge all those files
+
+ Examples:
+ >>> optimal_merge_pattern([2, 3, 4])
+ 14
+ >>> optimal_merge_pattern([5, 10, 20, 30, 30])
+ 205
+ >>> optimal_merge_pattern([8, 8, 8, 8, 8])
+ 96
+ """
optimal_merge_cost = 0
while len(files) > 1:
temp = 0
@@ -17,4 +53,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/optimal_merge_pattern.py |
Write docstrings describing each step |
from heapq import heappop, heappush
from sys import maxsize
def smallest_range(nums: list[list[int]]) -> list[int]:
min_heap: list[tuple[int, int, int]] = []
current_max = -maxsize - 1
for i, items in enumerate(nums):
heappush(min_heap, (items[0], i, 0))
current_max = max(current_max, items[0])
# Initialize smallest_range with large integer values
smallest_range = [-maxsize - 1, maxsize]
while min_heap:
current_min, list_index, element_index = heappop(min_heap)
if current_max - current_min < smallest_range[1] - smallest_range[0]:
smallest_range = [current_min, current_max]
if element_index == len(nums[list_index]) - 1:
break
next_element = nums[list_index][element_index + 1]
heappush(min_heap, (next_element, list_index, element_index + 1))
current_max = max(current_max, next_element)
return smallest_range
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])}") # Output: [1, 1] | --- +++ @@ -1,9 +1,43 @@+"""
+smallest_range function takes a list of sorted integer lists and finds the smallest
+range that includes at least one number from each list, using a min heap for efficiency.
+"""
from heapq import heappop, heappush
from sys import maxsize
def smallest_range(nums: list[list[int]]) -> list[int]:
+ """
+ Find the smallest range from each list in nums.
+
+ Uses min heap for efficiency. The range includes at least one number from each list.
+
+ Args:
+ `nums`: List of k sorted integer lists.
+
+ Returns:
+ list: Smallest range as a two-element list.
+
+ Examples:
+
+ >>> smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]])
+ [20, 24]
+ >>> smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
+ [1, 1]
+ >>> smallest_range(((1, 2, 3), (1, 2, 3), (1, 2, 3)))
+ [1, 1]
+ >>> smallest_range(((-3, -2, -1), (0, 0, 0), (1, 2, 3)))
+ [-1, 1]
+ >>> smallest_range([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ [3, 7]
+ >>> smallest_range([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
+ [0, 0]
+ >>> smallest_range([[], [], []])
+ Traceback (most recent call last):
+ ...
+ IndexError: list index out of range
+ """
min_heap: list[tuple[int, int, int]] = []
current_max = -maxsize - 1
@@ -35,4 +69,4 @@ from doctest import testmod
testmod()
- print(f"{smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])}") # Output: [1, 1]+ print(f"{smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])}") # Output: [1, 1]
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/smallest_range.py |
Add verbose docstrings with examples |
def djb2(s: str) -> int:
hash_value = 5381
for x in s:
hash_value = ((hash_value << 5) + hash_value) + ord(x)
return hash_value & 0xFFFFFFFF | --- +++ @@ -1,7 +1,35 @@+"""
+This algorithm (k=33) was first reported by Dan Bernstein many years ago in comp.lang.c
+Another version of this algorithm (now favored by Bernstein) uses xor:
+ hash(i) = hash(i - 1) * 33 ^ str[i];
+
+ First Magic constant 33:
+ It has never been adequately explained.
+ It's magic because it works better than many other constants, prime or not.
+
+ Second Magic Constant 5381:
+
+ 1. odd number
+ 2. prime number
+ 3. deficient number
+ 4. 001/010/100/000/101 b
+
+ source: http://www.cse.yorku.ca/~oz/hash.html
+"""
def djb2(s: str) -> int:
+ """
+ Implementation of djb2 hash algorithm that
+ is popular because of it's magic constants.
+
+ >>> djb2('Algorithms')
+ 3782405311
+
+ >>> djb2('scramble bits')
+ 1609059040
+ """
hash_value = 5381
for x in s:
hash_value = ((hash_value << 5) + hash_value) + ord(x)
- return hash_value & 0xFFFFFFFF+ return hash_value & 0xFFFFFFFF
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/djb2.py |
Fully document this Python code with docstrings |
MOD_ADLER = 65521
def adler32(plain_text: str) -> int:
a = 1
b = 0
for plain_chr in plain_text:
a = (a + ord(plain_chr)) % MOD_ADLER
b = (b + a) % MOD_ADLER
return (b << 16) | a | --- +++ @@ -1,11 +1,30 @@+"""
+Adler-32 is a checksum algorithm which was invented by Mark Adler in 1995.
+Compared to a cyclic redundancy check of the same length, it trades reliability for
+speed (preferring the latter).
+Adler-32 is more reliable than Fletcher-16, and slightly less reliable than
+Fletcher-32.[2]
+
+source: https://en.wikipedia.org/wiki/Adler-32
+"""
MOD_ADLER = 65521
def adler32(plain_text: str) -> int:
+ """
+ Function implements adler-32 hash.
+ Iterates and evaluates a new value for each character
+
+ >>> adler32('Algorithms')
+ 363791387
+
+ >>> adler32('go adler em all')
+ 708642122
+ """
a = 1
b = 0
for plain_chr in plain_text:
a = (a + ord(plain_chr)) % MOD_ADLER
b = (b + a) % MOD_ADLER
- return (b << 16) | a+ return (b << 16) | a
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/adler32.py |
Write Python docstrings for this snippet |
def fletcher16(text: str) -> int:
data = bytes(text, "ascii")
sum1 = 0
sum2 = 0
for character in data:
sum1 = (sum1 + character) % 255
sum2 = (sum1 + sum2) % 255
return (sum2 << 8) | sum1
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,16 +1,36 @@-
-
-def fletcher16(text: str) -> int:
- data = bytes(text, "ascii")
- sum1 = 0
- sum2 = 0
- for character in data:
- sum1 = (sum1 + character) % 255
- sum2 = (sum1 + sum2) % 255
- return (sum2 << 8) | sum1
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()+"""
+The Fletcher checksum is an algorithm for computing a position-dependent
+checksum devised by John G. Fletcher (1934-2012) at Lawrence Livermore Labs
+in the late 1970s.[1] The objective of the Fletcher checksum was to
+provide error-detection properties approaching those of a cyclic
+redundancy check but with the lower computational effort associated
+with summation techniques.
+
+Source: https://en.wikipedia.org/wiki/Fletcher%27s_checksum
+"""
+
+
+def fletcher16(text: str) -> int:
+ """
+ Loop through every character in the data and add to two sums.
+
+ >>> fletcher16('hello world')
+ 6752
+ >>> fletcher16('onethousandfourhundredthirtyfour')
+ 28347
+ >>> fletcher16('The quick brown fox jumps over the lazy dog.')
+ 5655
+ """
+ data = bytes(text, "ascii")
+ sum1 = 0
+ sum2 = 0
+ for character in data:
+ sum1 = (sum1 + character) % 255
+ sum2 = (sum1 + sum2) % 255
+ return (sum2 << 8) | sum1
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/fletcher16.py |
Add detailed documentation for each class | # Author: João Gustavo A. Amorim & Gabriel Kunz
# Author email: joaogustavoamorim@gmail.com and gabriel-kunz@uergs.edu.br
# Coding date: apr 2019
# Black: True
# Imports
import numpy as np
# Functions of binary conversion--------------------------------------
def text_to_bits(text, encoding="utf-8", errors="surrogatepass"):
bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
return bits.zfill(8 * ((len(bits) + 7) // 8))
def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"):
n = int(bits, 2)
return n.to_bytes((n.bit_length() + 7) // 8, "big").decode(encoding, errors) or "\0"
# Functions of hamming code-------------------------------------------
def emitter_converter(size_par, data):
if size_par + len(data) <= 2**size_par - (len(data) - 1):
raise ValueError("size of parity don't match with size of data")
data_out = []
parity = []
bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data) + 1)]
# sorted information data for the size of the output data
data_ord = []
# data position template + parity
data_out_gab = []
# parity bit counter
qtd_bp = 0
# counter position of data bits
cont_data = 0
for x in range(1, size_par + len(data) + 1):
# Performs a template of bit positions - who should be given,
# and who should be parity
if qtd_bp < size_par:
if (np.log(x) / np.log(2)).is_integer():
data_out_gab.append("P")
qtd_bp = qtd_bp + 1
else:
data_out_gab.append("D")
else:
data_out_gab.append("D")
# Sorts the data to the new output size
if data_out_gab[-1] == "D":
data_ord.append(data[cont_data])
cont_data += 1
else:
data_ord.append(None)
# Calculates parity
qtd_bp = 0 # parity bit counter
for bp in range(1, size_par + 1):
# Bit counter one for a given parity
cont_bo = 0
# counter to control the loop reading
for cont_loop, x in enumerate(data_ord):
if x is not None:
try:
aux = (bin_pos[cont_loop])[-1 * (bp)]
except IndexError:
aux = "0"
if aux == "1" and x == "1":
cont_bo += 1
parity.append(cont_bo % 2)
qtd_bp += 1
# Mount the message
cont_bp = 0 # parity bit counter
for x in range(size_par + len(data)):
if data_ord[x] is None:
data_out.append(str(parity[cont_bp]))
cont_bp += 1
else:
data_out.append(data_ord[x])
return data_out
def receptor_converter(size_par, data):
# data position template + parity
data_out_gab = []
# Parity bit counter
qtd_bp = 0
# Counter p data bit reading
cont_data = 0
# list of parity received
parity_received = []
data_output = []
for i, item in enumerate(data, 1):
# Performs a template of bit positions - who should be given,
# and who should be parity
if qtd_bp < size_par and (np.log(i) / np.log(2)).is_integer():
data_out_gab.append("P")
qtd_bp = qtd_bp + 1
else:
data_out_gab.append("D")
# Sorts the data to the new output size
if data_out_gab[-1] == "D":
data_output.append(item)
else:
parity_received.append(item)
# -----------calculates the parity with the data
data_out = []
parity = []
bin_pos = [bin(x)[2:] for x in range(1, size_par + len(data_output) + 1)]
# sorted information data for the size of the output data
data_ord = []
# Data position feedback + parity
data_out_gab = []
# Parity bit counter
qtd_bp = 0
# Counter p data bit reading
cont_data = 0
for x in range(1, size_par + len(data_output) + 1):
# Performs a template position of bits - who should be given,
# and who should be parity
if qtd_bp < size_par and (np.log(x) / np.log(2)).is_integer():
data_out_gab.append("P")
qtd_bp = qtd_bp + 1
else:
data_out_gab.append("D")
# Sorts the data to the new output size
if data_out_gab[-1] == "D":
data_ord.append(data_output[cont_data])
cont_data += 1
else:
data_ord.append(None)
# Calculates parity
qtd_bp = 0 # parity bit counter
for bp in range(1, size_par + 1):
# Bit counter one for a certain parity
cont_bo = 0
for cont_loop, x in enumerate(data_ord):
if x is not None:
try:
aux = (bin_pos[cont_loop])[-1 * (bp)]
except IndexError:
aux = "0"
if aux == "1" and x == "1":
cont_bo += 1
parity.append(str(cont_bo % 2))
qtd_bp += 1
# Mount the message
cont_bp = 0 # Parity bit counter
for x in range(size_par + len(data_output)):
if data_ord[x] is None:
data_out.append(str(parity[cont_bp]))
cont_bp += 1
else:
data_out.append(data_ord[x])
ack = parity_received == parity
return data_output, ack
# ---------------------------------------------------------------------
"""
# Example how to use
# number of parity bits
sizePari = 4
# location of the bit that will be forced an error
be = 2
# Message/word to be encoded and decoded with hamming
# text = input("Enter the word to be read: ")
text = "Message01"
# Convert the message to binary
binaryText = text_to_bits(text)
# Prints the binary of the string
print("Text input in binary is '" + binaryText + "'")
# total transmitted bits
totalBits = len(binaryText) + sizePari
print("Size of data is " + str(totalBits))
print("\n --Message exchange--")
print("Data to send ------------> " + binaryText)
dataOut = emitterConverter(sizePari, binaryText)
print("Data converted ----------> " + "".join(dataOut))
dataReceiv, ack = receptorConverter(sizePari, dataOut)
print(
"Data receive ------------> "
+ "".join(dataReceiv)
+ "\t\t -- Data integrity: "
+ str(ack)
)
print("\n --Force error--")
print("Data to send ------------> " + binaryText)
dataOut = emitterConverter(sizePari, binaryText)
print("Data converted ----------> " + "".join(dataOut))
# forces error
dataOut[-be] = "1" * (dataOut[-be] == "0") + "0" * (dataOut[-be] == "1")
print("Data after transmission -> " + "".join(dataOut))
dataReceiv, ack = receptorConverter(sizePari, dataOut)
print(
"Data receive ------------> "
+ "".join(dataReceiv)
+ "\t\t -- Data integrity: "
+ str(ack)
)
""" | --- +++ @@ -3,6 +3,46 @@ # Coding date: apr 2019
# Black: True
+"""
+* This code implement the Hamming code:
+ https://en.wikipedia.org/wiki/Hamming_code - In telecommunication,
+Hamming codes are a family of linear error-correcting codes. Hamming
+codes can detect up to two-bit errors or correct one-bit errors
+without detection of uncorrected errors. By contrast, the simple
+parity code cannot correct errors, and can detect only an odd number
+of bits in error. Hamming codes are perfect codes, that is, they
+achieve the highest possible rate for codes with their block length
+and minimum distance of three.
+
+* the implemented code consists of:
+ * a function responsible for encoding the message (emitterConverter)
+ * return the encoded message
+ * a function responsible for decoding the message (receptorConverter)
+ * return the decoded message and a ack of data integrity
+
+* how to use:
+ to be used you must declare how many parity bits (sizePari)
+ you want to include in the message.
+ it is desired (for test purposes) to select a bit to be set
+ as an error. This serves to check whether the code is working correctly.
+ Lastly, the variable of the message/word that must be desired to be
+ encoded (text).
+
+* how this work:
+ declaration of variables (sizePari, be, text)
+
+ converts the message/word (text) to binary using the
+ text_to_bits function
+ encodes the message using the rules of hamming encoding
+ decodes the message using the rules of hamming encoding
+ print the original message, the encoded message and the
+ decoded message
+
+ forces an error in the coded text variable
+ decodes the message that was forced the error
+ print the original message, the encoded message, the bit changed
+ message and the decoded message
+"""
# Imports
import numpy as np
@@ -10,17 +50,38 @@
# Functions of binary conversion--------------------------------------
def text_to_bits(text, encoding="utf-8", errors="surrogatepass"):
+ """
+ >>> text_to_bits("msg")
+ '011011010111001101100111'
+ """
bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
return bits.zfill(8 * ((len(bits) + 7) // 8))
def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"):
+ """
+ >>> text_from_bits('011011010111001101100111')
+ 'msg'
+ """
n = int(bits, 2)
return n.to_bytes((n.bit_length() + 7) // 8, "big").decode(encoding, errors) or "\0"
# Functions of hamming code-------------------------------------------
def emitter_converter(size_par, data):
+ """
+ :param size_par: how many parity bits the message must have
+ :param data: information bits
+ :return: message to be transmitted by unreliable medium
+ - bits of information merged with parity bits
+
+ >>> emitter_converter(4, "101010111111")
+ ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1']
+ >>> emitter_converter(5, "101010111111")
+ Traceback (most recent call last):
+ ...
+ ValueError: size of parity don't match with size of data
+ """
if size_par + len(data) <= 2**size_par - (len(data) - 1):
raise ValueError("size of parity don't match with size of data")
@@ -87,6 +148,10 @@
def receptor_converter(size_par, data):
+ """
+ >>> receptor_converter(4, "1111010010111111")
+ (['1', '0', '1', '0', '1', '0', '1', '1', '1', '1', '1', '1'], True)
+ """
# data position template + parity
data_out_gab = []
# Parity bit counter
@@ -224,4 +289,4 @@ + "\t\t -- Data integrity: "
+ str(ack)
)
-"""+"""
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/hamming_code.py |
Write docstrings including parameters and return values |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
if len(string_32) != 32:
raise ValueError("Input must be of length 32")
little_endian = b""
for i in [3, 2, 1, 0]:
little_endian += string_32[8 * i : 8 * i + 8]
return little_endian
def reformat_hex(i: int) -> bytes:
if i < 0:
raise ValueError("Input must be non-negative")
hex_rep = format(i, "08x")[-8:]
little_endian_hex = b""
for j in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
return little_endian_hex
def preprocess(message: bytes) -> bytes:
bit_string = b""
for char in message:
bit_string += format(char, "08b").encode("utf-8")
start_len = format(len(bit_string), "064b").encode("utf-8")
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(bit_string) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int]]:
if len(bit_string) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512")
for pos in range(0, len(bit_string), 512):
block = bit_string[pos : pos + 512]
block_words = []
for i in range(0, 512, 32):
block_words.append(int(to_little_endian(block[i : i + 32]), 2))
yield block_words
def not_32(i: int) -> int:
if i < 0:
raise ValueError("Input must be non-negative")
i_str = format(i, "032b")
new_str = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
if i < 0:
raise ValueError("Input must be non-negative")
if shift < 0:
raise ValueError("Shift must be non-negative")
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
# Convert to bit string, add padding and append message length
bit_string = preprocess(message)
added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
# Starting states
a0 = 0x67452301
b0 = 0xEFCDAB89
c0 = 0x98BADCFE
d0 = 0x10325476
shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(bit_string):
a = a0
b = b0
c = c0
d = d0
# Hash current chunk
for i in range(64):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
f = d ^ (b & (c ^ d))
g = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
f = c ^ (d & (b ^ c))
g = (5 * i + 1) % 16
elif i <= 47:
f = b ^ c ^ d
g = (3 * i + 5) % 16
else:
f = c ^ (b | not_32(d))
g = (7 * i) % 16
f = (f + a + added_consts[i] + block_words[g]) % 2**32
a = d
d = c
c = b
b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
# Add hashed chunk to running total
a0 = sum_32(a0, a)
b0 = sum_32(b0, b)
c0 = sum_32(c0, c)
d0 = sum_32(d0, d)
digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,9 +1,39 @@+"""
+The MD5 algorithm is a hash function that's commonly used as a checksum to
+detect data corruption. The algorithm works by processing a given message in
+blocks of 512 bits, padding the message as needed. It uses the blocks to operate
+a 128-bit state and performs a total of 64 such operations. Note that all values
+are little-endian, so inputs are converted as needed.
+
+Although MD5 was used as a cryptographic hash function in the past, it's since
+been cracked, so it shouldn't be used for security purposes.
+
+For more info, see https://en.wikipedia.org/wiki/MD5
+"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
+ """
+ Converts the given string to little-endian in groups of 8 chars.
+
+ Arguments:
+ string_32 {[string]} -- [32-char string]
+
+ Raises:
+ ValueError -- [input is not 32 char]
+
+ Returns:
+ 32-char little-endian string
+ >>> to_little_endian(b'1234567890abcdfghijklmnopqrstuvw')
+ b'pqrstuvwhijklmno90abcdfg12345678'
+ >>> to_little_endian(b'1234567890')
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be of length 32
+ """
if len(string_32) != 32:
raise ValueError("Input must be of length 32")
@@ -14,6 +44,39 @@
def reformat_hex(i: int) -> bytes:
+ """
+ Converts the given non-negative integer to hex string.
+
+ Example: Suppose the input is the following:
+ i = 1234
+
+ The input is 0x000004d2 in hex, so the little-endian hex string is
+ "d2040000".
+
+ Arguments:
+ i {[int]} -- [integer]
+
+ Raises:
+ ValueError -- [input is negative]
+
+ Returns:
+ 8-char little-endian hex string
+
+ >>> reformat_hex(1234)
+ b'd2040000'
+ >>> reformat_hex(666)
+ b'9a020000'
+ >>> reformat_hex(0)
+ b'00000000'
+ >>> reformat_hex(1234567890)
+ b'd2029649'
+ >>> reformat_hex(1234567890987654321)
+ b'b11c6cb1'
+ >>> reformat_hex(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be non-negative
+ """
if i < 0:
raise ValueError("Input must be non-negative")
@@ -25,6 +88,35 @@
def preprocess(message: bytes) -> bytes:
+ """
+ Preprocesses the message string:
+ - Convert message to bit string
+ - Pad bit string to a multiple of 512 chars:
+ - Append a 1
+ - Append 0's until length = 448 (mod 512)
+ - Append length of original message (64 chars)
+
+ Example: Suppose the input is the following:
+ message = "a"
+
+ The message bit string is "01100001", which is 8 bits long. Thus, the
+ bit string needs 439 bits of padding so that
+ (bit_string + "1" + padding) = 448 (mod 512).
+ The message length is "000010000...0" in 64-bit little-endian binary.
+ The combined bit string is then 512 bits long.
+
+ Arguments:
+ message {[string]} -- [message string]
+
+ Returns:
+ processed bit string padded to a multiple of 512 chars
+
+ >>> preprocess(b"a") == (b"01100001" + b"1" +
+ ... (b"0" * 439) + b"00001000" + (b"0" * 56))
+ True
+ >>> preprocess(b"") == b"1" + (b"0" * 447) + (b"0" * 64)
+ True
+ """
bit_string = b""
for char in message:
bit_string += format(char, "08b").encode("utf-8")
@@ -40,6 +132,50 @@
def get_block_words(bit_string: bytes) -> Generator[list[int]]:
+ """
+ Splits bit string into blocks of 512 chars and yields each block as a list
+ of 32-bit words
+
+ Example: Suppose the input is the following:
+ bit_string =
+ "000000000...0" + # 0x00 (32 bits, padded to the right)
+ "000000010...0" + # 0x01 (32 bits, padded to the right)
+ "000000100...0" + # 0x02 (32 bits, padded to the right)
+ "000000110...0" + # 0x03 (32 bits, padded to the right)
+ ...
+ "000011110...0" # 0x0a (32 bits, padded to the right)
+
+ Then len(bit_string) == 512, so there'll be 1 block. The block is split
+ into 32-bit words, and each word is converted to little endian. The
+ first word is interpreted as 0 in decimal, the second word is
+ interpreted as 1 in decimal, etc.
+
+ Thus, block_words == [[0, 1, 2, 3, ..., 15]].
+
+ Arguments:
+ bit_string {[string]} -- [bit string with multiple of 512 as length]
+
+ Raises:
+ ValueError -- [length of bit string isn't multiple of 512]
+
+ Yields:
+ a list of 16 32-bit words
+
+ >>> test_string = ("".join(format(n << 24, "032b") for n in range(16))
+ ... .encode("utf-8"))
+ >>> list(get_block_words(test_string))
+ [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
+ >>> list(get_block_words(test_string * 4)) == [list(range(16))] * 4
+ True
+ >>> list(get_block_words(b"1" * 512)) == [[4294967295] * 16]
+ True
+ >>> list(get_block_words(b""))
+ []
+ >>> list(get_block_words(b"1111"))
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must have length that's a multiple of 512
+ """
if len(bit_string) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512")
@@ -52,6 +188,33 @@
def not_32(i: int) -> int:
+ """
+ Perform bitwise NOT on given int.
+
+ Arguments:
+ i {[int]} -- [given int]
+
+ Raises:
+ ValueError -- [input is negative]
+
+ Returns:
+ Result of bitwise NOT on i
+
+ >>> not_32(34)
+ 4294967261
+ >>> not_32(1234)
+ 4294966061
+ >>> not_32(4294966061)
+ 1234
+ >>> not_32(0)
+ 4294967295
+ >>> not_32(1)
+ 4294967294
+ >>> not_32(-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be non-negative
+ """
if i < 0:
raise ValueError("Input must be non-negative")
@@ -63,10 +226,67 @@
def sum_32(a: int, b: int) -> int:
+ """
+ Add two numbers as 32-bit ints.
+
+ Arguments:
+ a {[int]} -- [first given int]
+ b {[int]} -- [second given int]
+
+ Returns:
+ (a + b) as an unsigned 32-bit int
+
+ >>> sum_32(1, 1)
+ 2
+ >>> sum_32(2, 3)
+ 5
+ >>> sum_32(0, 0)
+ 0
+ >>> sum_32(-1, -1)
+ 4294967294
+ >>> sum_32(4294967295, 1)
+ 0
+ """
return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
+ """
+ Rotate the bits of a given int left by a given amount.
+
+ Arguments:
+ i {[int]} -- [given int]
+ shift {[int]} -- [shift amount]
+
+ Raises:
+ ValueError -- [either given int or shift is negative]
+
+ Returns:
+ `i` rotated to the left by `shift` bits
+
+ >>> left_rotate_32(1234, 1)
+ 2468
+ >>> left_rotate_32(1111, 4)
+ 17776
+ >>> left_rotate_32(2147483648, 1)
+ 1
+ >>> left_rotate_32(2147483648, 3)
+ 4
+ >>> left_rotate_32(4294967295, 4)
+ 4294967295
+ >>> left_rotate_32(1234, 0)
+ 1234
+ >>> left_rotate_32(0, 0)
+ 0
+ >>> left_rotate_32(-1, 0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Input must be non-negative
+ >>> left_rotate_32(0, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Shift must be non-negative
+ """
if i < 0:
raise ValueError("Input must be non-negative")
if shift < 0:
@@ -75,6 +295,31 @@
def md5_me(message: bytes) -> bytes:
+ """
+ Returns the 32-char MD5 hash of a given message.
+
+ Reference: https://en.wikipedia.org/wiki/MD5#Algorithm
+
+ Arguments:
+ message {[string]} -- [message]
+
+ Returns:
+ 32-char MD5 hash string
+
+ >>> md5_me(b"")
+ b'd41d8cd98f00b204e9800998ecf8427e'
+ >>> md5_me(b"The quick brown fox jumps over the lazy dog")
+ b'9e107d9d372bb6826bd81d3542a419d6'
+ >>> md5_me(b"The quick brown fox jumps over the lazy dog.")
+ b'e4d909c290d0fb1ca068ffaddf22cbd0'
+
+ >>> import hashlib
+ >>> from string import ascii_letters
+ >>> msgs = [b"", ascii_letters.encode("utf-8"), "Üñîçø∂é".encode("utf-8"),
+ ... b"The quick brown fox jumps over the lazy dog."]
+ >>> all(md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8") for msg in msgs)
+ True
+ """
# Convert to bit string, add padding and append message length
bit_string = preprocess(message)
@@ -196,4 +441,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/md5.py |
Generate NumPy-style docstrings |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
def __init__(self, data):
self.data = data
self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def rotate(n, b):
return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def padding(self):
padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
return padded_data
def split_blocks(self):
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
]
# @staticmethod
def expand_block(self, block):
w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
return w
def final_hash(self):
self.padded_data = self.padding()
self.blocks = self.split_blocks()
for block in self.blocks:
expanded_block = self.expand_block(block)
a, b, c, d, e = self.h
for i in range(80):
if 0 <= i < 20:
f = (b & c) | ((~b) & d)
k = 0x5A827999
elif 20 <= i < 40:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif 40 <= i < 60:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
elif 60 <= i < 80:
f = b ^ c ^ d
k = 0xCA62C1D6
a, b, c, d, e = (
self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
a,
self.rotate(b, 30),
c,
d,
)
self.h = (
self.h[0] + a & 0xFFFFFFFF,
self.h[1] + b & 0xFFFFFFFF,
self.h[2] + c & 0xFFFFFFFF,
self.h[3] + d & 0xFFFFFFFF,
self.h[4] + e & 0xFFFFFFFF,
)
return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
msg = b"Test String"
assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest() # noqa: S324
def main():
# unittest.main()
parser = argparse.ArgumentParser(description="Process some strings or files")
parser.add_argument(
"--string",
dest="input_string",
default="Hello World!! Welcome to Cryptography",
help="Hash the string",
)
parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
args = parser.parse_args()
input_string = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb") as f:
hash_input = f.read()
else:
hash_input = bytes(input_string, "utf-8")
print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,30 @@+"""
+Implementation of the SHA1 hash function and gives utilities to find hash of string or
+hash of text from a file. Also contains a Test class to verify that the generated hash
+matches what is returned by the hashlib library
+
+Usage: python sha1.py --string "Hello World!!"
+ python sha1.py --file "hello_world.txt"
+ When run without any arguments, it prints the hash of the string "Hello World!!
+ Welcome to Cryptography"
+
+SHA1 hash or SHA1 sum of a string is a cryptographic function, which means it is easy
+to calculate forwards but extremely difficult to calculate backwards. What this means
+is you can easily calculate the hash of a string, but it is extremely difficult to know
+the original string if you have its hash. This property is useful for communicating
+securely, send encrypted messages and is very useful in payment systems, blockchain and
+cryptocurrency etc.
+
+The algorithm as described in the reference:
+First we start with a message. The message is padded and the length of the message
+is added to the end. It is then split into blocks of 512 bits or 64 bytes. The blocks
+are then processed one at a time. Each block must be expanded and compressed.
+The value after each compression is added to a 160-bit buffer called the current hash
+state. After the last block is processed, the current hash state is returned as
+the final hash.
+
+Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/
+"""
import argparse
import hashlib # hashlib is only used inside the Test class
@@ -5,33 +32,69 @@
class SHA1Hash:
+ """
+ Class to contain the entire pipeline for SHA1 hashing algorithm
+ >>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash()
+ '872af2d8ac3d8695387e7c804bf0e02c18df9e6e'
+ """
def __init__(self, data):
+ """
+ Initiates the variables data and h. h is a list of 5 8-digit hexadecimal
+ numbers corresponding to
+ (1732584193, 4023233417, 2562383102, 271733878, 3285377520)
+ respectively. We will start with this as a message digest. 0x is how you write
+ hexadecimal numbers in Python
+ """
self.data = data
self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def rotate(n, b):
+ """
+ Static method to be used inside other methods. Left rotates n by b.
+ >>> SHA1Hash('').rotate(12,2)
+ 48
+ """
return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def padding(self):
+ """
+ Pads the input message with zeros so that padded_data has 64 bytes or 512 bits
+ """
padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
return padded_data
def split_blocks(self):
+ """
+ Returns a list of bytestrings each of length 64
+ """
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
]
# @staticmethod
def expand_block(self, block):
+ """
+ Takes a bytestring-block of length 64, unpacks it to a list of integers and
+ returns a list of 80 integers after some bit operations
+ """
w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
return w
def final_hash(self):
+ """
+ Calls all the other methods to process the input. Pads the data, then splits
+ into blocks and then does a series of operations for each block (including
+ expansion).
+ For each block, the variable h that was initialized is copied to a,b,c,d,e
+ and these 5 variables a,b,c,d,e undergo several changes. After all the blocks
+ are processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1]
+ and so on. This h becomes our final hash which is returned.
+ """
self.padded_data = self.padding()
self.blocks = self.split_blocks()
for block in self.blocks:
@@ -73,6 +136,11 @@
def main():
+ """
+ Provides option 'string' or 'file' to take input and prints the calculated SHA1
+ hash. unittest.main() has been commented out because we probably don't want to run
+ the test each time.
+ """
# unittest.main()
parser = argparse.ArgumentParser(description="Process some strings or files")
parser.add_argument(
@@ -97,4 +165,4 @@ main()
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/sha1.py |
Write docstrings including parameters and return values |
def sdbm(plain_text: str) -> int:
hash_value = 0
for plain_chr in plain_text:
hash_value = (
ord(plain_chr) + (hash_value << 6) + (hash_value << 16) - hash_value
)
return hash_value | --- +++ @@ -1,9 +1,39 @@+"""
+This algorithm was created for sdbm (a public-domain reimplementation of ndbm)
+database library.
+It was found to do well in scrambling bits, causing better distribution of the keys
+and fewer splits.
+It also happens to be a good general hashing function with good distribution.
+The actual function (pseudo code) is:
+ for i in i..len(str):
+ hash(i) = hash(i - 1) * 65599 + str[i];
+
+What is included below is the faster version used in gawk. [there is even a faster,
+duff-device version]
+The magic constant 65599 was picked out of thin air while experimenting with
+different constants.
+It turns out to be a prime.
+This is one of the algorithms used in berkeley db (see sleepycat) and elsewhere.
+
+source: http://www.cse.yorku.ca/~oz/hash.html
+"""
def sdbm(plain_text: str) -> int:
+ """
+ Function implements sdbm hash, easy to use, great for bits scrambling.
+ iterates over each character in the given string and applies function to each of
+ them.
+
+ >>> sdbm('Algorithms')
+ 1462174910723540325254304520539387479031000036
+
+ >>> sdbm('scramble bits')
+ 730247649148944819640658295400555317318720608290373040936089
+ """
hash_value = 0
for plain_chr in plain_text:
hash_value = (
ord(plain_chr) + (hash_value << 6) + (hash_value << 16) - hash_value
)
- return hash_value+ return hash_value
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/sdbm.py |
Include argument descriptions in docstrings | # To get an insight into Greedy Algorithm through the Knapsack problem
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
if len(profit) != len(weight):
raise ValueError("The length of profit and weight must be same.")
if max_weight <= 0:
raise ValueError("max_weight must greater than zero.")
if any(p < 0 for p in profit):
raise ValueError("Profit can not be negative.")
if any(w < 0 for w in weight):
raise ValueError("Weight can not be negative.")
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
profit_by_weight = [p / w for p, w in zip(profit, weight)]
# Creating a copy of the list and sorting profit/weight in ascending order
sorted_profit_by_weight = sorted(profit_by_weight)
# declaring useful variables
length = len(sorted_profit_by_weight)
limit = 0
gain = 0
i = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
"""
Calculate the index of the biggest_profit_by_weight in profit_by_weight list.
This will give the index of the first encountered element which is same as of
biggest_profit_by_weight. There may be one or more values same as that of
biggest_profit_by_weight but index always encounter the very first element
only. To curb this alter the values in profit_by_weight once they are used
here it is done to -1 because neither profit nor weight can be in negative.
"""
index = profit_by_weight.index(biggest_profit_by_weight)
profit_by_weight[index] = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight) | --- +++ @@ -1,9 +1,35 @@ # To get an insight into Greedy Algorithm through the Knapsack problem
+"""
+A shopkeeper has bags of wheat that each have different weights and different profits.
+eg.
+profit 5 8 7 1 12 3 4
+weight 2 7 1 6 4 2 5
+max_weight 100
+
+Constraints:
+max_weight > 0
+profit[i] >= 0
+weight[i] >= 0
+Calculate the maximum profit that the shopkeeper can make given maxmum weight that can
+be carried.
+"""
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
+ """
+ Function description is as follows-
+ :param profit: Take a list of profits
+ :param weight: Take a list of weight if bags corresponding to the profits
+ :param max_weight: Maximum weight that could be carried
+ :return: Maximum expected gain
+
+ >>> calc_profit([1, 2, 3], [3, 4, 5], 15)
+ 6
+ >>> calc_profit([10, 9 , 8], [3 ,4 , 5], 25)
+ 27
+ """
if len(profit) != len(weight):
raise ValueError("The length of profit and weight must be same.")
if max_weight <= 0:
@@ -69,4 +95,4 @@ max_weight = int(input("Max weight allowed: "))
# Function Call
- calc_profit(profit, weight, max_weight)+ calc_profit(profit, weight, max_weight)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/knapsack/greedy_knapsack.py |
Write docstrings describing each step | # Author: M. Yathurshan
# Black Formatter: True
import argparse
import struct
import unittest
class SHA256:
def __init__(self, data: bytes) -> None:
self.data = data
# Initialize hash values
self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def preprocessing(data: bytes) -> bytes:
padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
big_endian_integer = struct.pack(">Q", (len(data) * 8))
return data + padding + big_endian_integer
def final_hash(self) -> None:
# Convert into blocks of 64 bytes
self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0, len(self.preprocessed_data), 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
words = list(struct.unpack(">16L", block))
# add 48 0-ed integers
words += [0] * 48
a, b, c, d, e, f, g, h = self.hashes
for index in range(64):
if index > 15:
# modify the zero-ed indexes at the end of the array
s0 = (
self.ror(words[index - 15], 7)
^ self.ror(words[index - 15], 18)
^ (words[index - 15] >> 3)
)
s1 = (
self.ror(words[index - 2], 17)
^ self.ror(words[index - 2], 19)
^ (words[index - 2] >> 10)
)
words[index] = (
words[index - 16] + s0 + words[index - 7] + s1
) % 0x100000000
# Compression
s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
temp1 = (
h + s1 + ch + self.round_constants[index] + words[index]
) % 0x100000000
s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
maj = (a & b) ^ (a & c) ^ (b & c)
temp2 = (s0 + maj) % 0x100000000
h, g, f, e, d, c, b, a = (
g,
f,
e,
((d + temp1) % 0x100000000),
c,
b,
a,
((temp1 + temp2) % 0x100000000),
)
mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
self.hashes = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes)
]
self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
def ror(self, value: int, rotations: int) -> int:
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
def test_match_hashes(self) -> None:
import hashlib
msg = bytes("Test String", "utf-8")
assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
def main() -> None:
# unittest.main()
import doctest
doctest.testmod()
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--string",
dest="input_string",
default="Hello World!! Welcome to Cryptography",
help="Hash the string",
)
parser.add_argument(
"-f", "--file", dest="input_file", help="Hash contents of a file"
)
args = parser.parse_args()
input_string = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file, "rb") as f:
hash_input = f.read()
else:
hash_input = bytes(input_string, "utf-8")
print(SHA256(hash_input).hash)
if __name__ == "__main__":
main() | --- +++ @@ -1,216 +1,248 @@-# Author: M. Yathurshan
-# Black Formatter: True
-
-
-import argparse
-import struct
-import unittest
-
-
-class SHA256:
-
- def __init__(self, data: bytes) -> None:
- self.data = data
-
- # Initialize hash values
- self.hashes = [
- 0x6A09E667,
- 0xBB67AE85,
- 0x3C6EF372,
- 0xA54FF53A,
- 0x510E527F,
- 0x9B05688C,
- 0x1F83D9AB,
- 0x5BE0CD19,
- ]
-
- # Initialize round constants
- self.round_constants = [
- 0x428A2F98,
- 0x71374491,
- 0xB5C0FBCF,
- 0xE9B5DBA5,
- 0x3956C25B,
- 0x59F111F1,
- 0x923F82A4,
- 0xAB1C5ED5,
- 0xD807AA98,
- 0x12835B01,
- 0x243185BE,
- 0x550C7DC3,
- 0x72BE5D74,
- 0x80DEB1FE,
- 0x9BDC06A7,
- 0xC19BF174,
- 0xE49B69C1,
- 0xEFBE4786,
- 0x0FC19DC6,
- 0x240CA1CC,
- 0x2DE92C6F,
- 0x4A7484AA,
- 0x5CB0A9DC,
- 0x76F988DA,
- 0x983E5152,
- 0xA831C66D,
- 0xB00327C8,
- 0xBF597FC7,
- 0xC6E00BF3,
- 0xD5A79147,
- 0x06CA6351,
- 0x14292967,
- 0x27B70A85,
- 0x2E1B2138,
- 0x4D2C6DFC,
- 0x53380D13,
- 0x650A7354,
- 0x766A0ABB,
- 0x81C2C92E,
- 0x92722C85,
- 0xA2BFE8A1,
- 0xA81A664B,
- 0xC24B8B70,
- 0xC76C51A3,
- 0xD192E819,
- 0xD6990624,
- 0xF40E3585,
- 0x106AA070,
- 0x19A4C116,
- 0x1E376C08,
- 0x2748774C,
- 0x34B0BCB5,
- 0x391C0CB3,
- 0x4ED8AA4A,
- 0x5B9CCA4F,
- 0x682E6FF3,
- 0x748F82EE,
- 0x78A5636F,
- 0x84C87814,
- 0x8CC70208,
- 0x90BEFFFA,
- 0xA4506CEB,
- 0xBEF9A3F7,
- 0xC67178F2,
- ]
-
- self.preprocessed_data = self.preprocessing(self.data)
- self.final_hash()
-
- @staticmethod
- def preprocessing(data: bytes) -> bytes:
- padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
- big_endian_integer = struct.pack(">Q", (len(data) * 8))
- return data + padding + big_endian_integer
-
- def final_hash(self) -> None:
- # Convert into blocks of 64 bytes
- self.blocks = [
- self.preprocessed_data[x : x + 64]
- for x in range(0, len(self.preprocessed_data), 64)
- ]
-
- for block in self.blocks:
- # Convert the given block into a list of 4 byte integers
- words = list(struct.unpack(">16L", block))
- # add 48 0-ed integers
- words += [0] * 48
-
- a, b, c, d, e, f, g, h = self.hashes
-
- for index in range(64):
- if index > 15:
- # modify the zero-ed indexes at the end of the array
- s0 = (
- self.ror(words[index - 15], 7)
- ^ self.ror(words[index - 15], 18)
- ^ (words[index - 15] >> 3)
- )
- s1 = (
- self.ror(words[index - 2], 17)
- ^ self.ror(words[index - 2], 19)
- ^ (words[index - 2] >> 10)
- )
-
- words[index] = (
- words[index - 16] + s0 + words[index - 7] + s1
- ) % 0x100000000
-
- # Compression
- s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
- ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
- temp1 = (
- h + s1 + ch + self.round_constants[index] + words[index]
- ) % 0x100000000
- s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
- maj = (a & b) ^ (a & c) ^ (b & c)
- temp2 = (s0 + maj) % 0x100000000
-
- h, g, f, e, d, c, b, a = (
- g,
- f,
- e,
- ((d + temp1) % 0x100000000),
- c,
- b,
- a,
- ((temp1 + temp2) % 0x100000000),
- )
-
- mutated_hash_values = [a, b, c, d, e, f, g, h]
-
- # Modify final values
- self.hashes = [
- ((element + mutated_hash_values[index]) % 0x100000000)
- for index, element in enumerate(self.hashes)
- ]
-
- self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
-
- def ror(self, value: int, rotations: int) -> int:
- return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
-
-
-class SHA256HashTest(unittest.TestCase):
-
- def test_match_hashes(self) -> None:
- import hashlib
-
- msg = bytes("Test String", "utf-8")
- assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
-
-
-def main() -> None:
-
- # unittest.main()
-
- import doctest
-
- doctest.testmod()
-
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "-s",
- "--string",
- dest="input_string",
- default="Hello World!! Welcome to Cryptography",
- help="Hash the string",
- )
- parser.add_argument(
- "-f", "--file", dest="input_file", help="Hash contents of a file"
- )
-
- args = parser.parse_args()
-
- input_string = args.input_string
-
- # hash input should be a bytestring
- if args.input_file:
- with open(args.input_file, "rb") as f:
- hash_input = f.read()
- else:
- hash_input = bytes(input_string, "utf-8")
-
- print(SHA256(hash_input).hash)
-
-
-if __name__ == "__main__":
- main()+# Author: M. Yathurshan
+# Black Formatter: True
+
+"""
+Implementation of SHA256 Hash function in a Python class and provides utilities
+to find hash of string or hash of text from a file.
+
+Usage: python sha256.py --string "Hello World!!"
+ python sha256.py --file "hello_world.txt"
+ When run without any arguments,
+ it prints the hash of the string "Hello World!! Welcome to Cryptography"
+
+References:
+https://qvault.io/cryptography/how-sha-2-works-step-by-step-sha-256/
+https://en.wikipedia.org/wiki/SHA-2
+"""
+
+import argparse
+import struct
+import unittest
+
+
+class SHA256:
+ """
+ Class to contain the entire pipeline for SHA1 Hashing Algorithm
+
+ >>> SHA256(b'Python').hash
+ '18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db'
+
+ >>> SHA256(b'hello world').hash
+ 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
+ """
+
+ def __init__(self, data: bytes) -> None:
+ self.data = data
+
+ # Initialize hash values
+ self.hashes = [
+ 0x6A09E667,
+ 0xBB67AE85,
+ 0x3C6EF372,
+ 0xA54FF53A,
+ 0x510E527F,
+ 0x9B05688C,
+ 0x1F83D9AB,
+ 0x5BE0CD19,
+ ]
+
+ # Initialize round constants
+ self.round_constants = [
+ 0x428A2F98,
+ 0x71374491,
+ 0xB5C0FBCF,
+ 0xE9B5DBA5,
+ 0x3956C25B,
+ 0x59F111F1,
+ 0x923F82A4,
+ 0xAB1C5ED5,
+ 0xD807AA98,
+ 0x12835B01,
+ 0x243185BE,
+ 0x550C7DC3,
+ 0x72BE5D74,
+ 0x80DEB1FE,
+ 0x9BDC06A7,
+ 0xC19BF174,
+ 0xE49B69C1,
+ 0xEFBE4786,
+ 0x0FC19DC6,
+ 0x240CA1CC,
+ 0x2DE92C6F,
+ 0x4A7484AA,
+ 0x5CB0A9DC,
+ 0x76F988DA,
+ 0x983E5152,
+ 0xA831C66D,
+ 0xB00327C8,
+ 0xBF597FC7,
+ 0xC6E00BF3,
+ 0xD5A79147,
+ 0x06CA6351,
+ 0x14292967,
+ 0x27B70A85,
+ 0x2E1B2138,
+ 0x4D2C6DFC,
+ 0x53380D13,
+ 0x650A7354,
+ 0x766A0ABB,
+ 0x81C2C92E,
+ 0x92722C85,
+ 0xA2BFE8A1,
+ 0xA81A664B,
+ 0xC24B8B70,
+ 0xC76C51A3,
+ 0xD192E819,
+ 0xD6990624,
+ 0xF40E3585,
+ 0x106AA070,
+ 0x19A4C116,
+ 0x1E376C08,
+ 0x2748774C,
+ 0x34B0BCB5,
+ 0x391C0CB3,
+ 0x4ED8AA4A,
+ 0x5B9CCA4F,
+ 0x682E6FF3,
+ 0x748F82EE,
+ 0x78A5636F,
+ 0x84C87814,
+ 0x8CC70208,
+ 0x90BEFFFA,
+ 0xA4506CEB,
+ 0xBEF9A3F7,
+ 0xC67178F2,
+ ]
+
+ self.preprocessed_data = self.preprocessing(self.data)
+ self.final_hash()
+
+ @staticmethod
+ def preprocessing(data: bytes) -> bytes:
+ padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
+ big_endian_integer = struct.pack(">Q", (len(data) * 8))
+ return data + padding + big_endian_integer
+
+ def final_hash(self) -> None:
+ # Convert into blocks of 64 bytes
+ self.blocks = [
+ self.preprocessed_data[x : x + 64]
+ for x in range(0, len(self.preprocessed_data), 64)
+ ]
+
+ for block in self.blocks:
+ # Convert the given block into a list of 4 byte integers
+ words = list(struct.unpack(">16L", block))
+ # add 48 0-ed integers
+ words += [0] * 48
+
+ a, b, c, d, e, f, g, h = self.hashes
+
+ for index in range(64):
+ if index > 15:
+ # modify the zero-ed indexes at the end of the array
+ s0 = (
+ self.ror(words[index - 15], 7)
+ ^ self.ror(words[index - 15], 18)
+ ^ (words[index - 15] >> 3)
+ )
+ s1 = (
+ self.ror(words[index - 2], 17)
+ ^ self.ror(words[index - 2], 19)
+ ^ (words[index - 2] >> 10)
+ )
+
+ words[index] = (
+ words[index - 16] + s0 + words[index - 7] + s1
+ ) % 0x100000000
+
+ # Compression
+ s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
+ ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
+ temp1 = (
+ h + s1 + ch + self.round_constants[index] + words[index]
+ ) % 0x100000000
+ s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
+ maj = (a & b) ^ (a & c) ^ (b & c)
+ temp2 = (s0 + maj) % 0x100000000
+
+ h, g, f, e, d, c, b, a = (
+ g,
+ f,
+ e,
+ ((d + temp1) % 0x100000000),
+ c,
+ b,
+ a,
+ ((temp1 + temp2) % 0x100000000),
+ )
+
+ mutated_hash_values = [a, b, c, d, e, f, g, h]
+
+ # Modify final values
+ self.hashes = [
+ ((element + mutated_hash_values[index]) % 0x100000000)
+ for index, element in enumerate(self.hashes)
+ ]
+
+ self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
+
+ def ror(self, value: int, rotations: int) -> int:
+ """
+ Right rotate a given unsigned number by a certain amount of rotations
+ """
+ return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
+
+
+class SHA256HashTest(unittest.TestCase):
+ """
+ Test class for the SHA256 class. Inherits the TestCase class from unittest
+ """
+
+ def test_match_hashes(self) -> None:
+ import hashlib
+
+ msg = bytes("Test String", "utf-8")
+ assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()
+
+
+def main() -> None:
+ """
+ Provides option 'string' or 'file' to take input
+ and prints the calculated SHA-256 hash
+ """
+
+ # unittest.main()
+
+ import doctest
+
+ doctest.testmod()
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "-s",
+ "--string",
+ dest="input_string",
+ default="Hello World!! Welcome to Cryptography",
+ help="Hash the string",
+ )
+ parser.add_argument(
+ "-f", "--file", dest="input_file", help="Hash contents of a file"
+ )
+
+ args = parser.parse_args()
+
+ input_string = args.input_string
+
+ # hash input should be a bytestring
+ if args.input_file:
+ with open(args.input_file, "rb") as f:
+ hash_input = f.read()
+ else:
+ hash_input = bytes(input_string, "utf-8")
+
+ print(SHA256(hash_input).hash)
+
+
+if __name__ == "__main__":
+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/hashes/sha256.py |
Create docstrings for each class method | # To get an insight into naive recursive way to solve the Knapsack problem
def knapsack(
weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
if index == number_of_items:
return 0
ans1 = 0
ans2 = 0
ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
if weights[index] <= max_weight:
ans2 = values[index] + knapsack(
weights, values, number_of_items, max_weight - weights[index], index + 1
)
return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,11 +1,38 @@ # To get an insight into naive recursive way to solve the Knapsack problem
+"""
+A shopkeeper has bags of wheat that each have different weights and different profits.
+eg.
+no_of_items 4
+profit 5 4 8 6
+weight 1 2 4 5
+max_weight 5
+Constraints:
+max_weight > 0
+profit[i] >= 0
+weight[i] >= 0
+Calculate the maximum profit that the shopkeeper can make given maxmum weight that can
+be carried.
+"""
def knapsack(
weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
+ """
+ Function description is as follows-
+ :param weights: Take a list of weights
+ :param values: Take a list of profits corresponding to the weights
+ :param number_of_items: number of items available to pick from
+ :param max_weight: Maximum weight that could be carried
+ :param index: the element we are looking at
+ :return: Maximum expected gain
+ >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
+ 13
+ >>> knapsack([3 ,4 , 5], [10, 9 , 8], 3, 25, 0)
+ 27
+ """
if index == number_of_items:
return 0
ans1 = 0
@@ -21,4 +48,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/knapsack/recursive_approach_knapsack.py |
Add missing documentation to my Python functions |
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def retroactive_resolution(
coefficients: NDArray[float64], vector: NDArray[float64]
) -> NDArray[float64]:
rows, _columns = np.shape(coefficients)
x: NDArray[float64] = np.zeros((rows, 1), dtype=float)
for row in reversed(range(rows)):
total = np.dot(coefficients[row, row + 1 :], x[row + 1 :])
x[row, 0] = (vector[row][0] - total[0]) / coefficients[row, row]
return x
def gaussian_elimination(
coefficients: NDArray[float64], vector: NDArray[float64]
) -> NDArray[float64]:
# coefficients must to be a square matrix so we need to check first
rows, columns = np.shape(coefficients)
if rows != columns:
return np.array((), dtype=float)
# augmented matrix
augmented_mat: NDArray[float64] = np.concatenate((coefficients, vector), axis=1)
augmented_mat = augmented_mat.astype("float64")
# scale the matrix leaving it triangular
for row in range(rows - 1):
pivot = augmented_mat[row, row]
for col in range(row + 1, columns):
factor = augmented_mat[col, row] / pivot
augmented_mat[col, :] -= factor * augmented_mat[row, :]
x = retroactive_resolution(
augmented_mat[:, 0:columns], augmented_mat[:, columns : columns + 1]
)
return x
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,7 @@+"""
+| Gaussian elimination method for solving a system of linear equations.
+| Gaussian elimination - https://en.wikipedia.org/wiki/Gaussian_elimination
+"""
import numpy as np
from numpy import float64
@@ -7,6 +11,27 @@ def retroactive_resolution(
coefficients: NDArray[float64], vector: NDArray[float64]
) -> NDArray[float64]:
+ """
+ This function performs a retroactive linear system resolution
+ for triangular matrix
+
+ Examples:
+ 1.
+ * 2x1 + 2x2 - 1x3 = 5
+ * 0x1 - 2x2 - 1x3 = -7
+ * 0x1 + 0x2 + 5x3 = 15
+ 2.
+ * 2x1 + 2x2 = -1
+ * 0x1 - 2x2 = -1
+
+ >>> gaussian_elimination([[2, 2, -1], [0, -2, -1], [0, 0, 5]], [[5], [-7], [15]])
+ array([[2.],
+ [2.],
+ [3.]])
+ >>> gaussian_elimination([[2, 2], [0, -2]], [[-1], [-1]])
+ array([[-1. ],
+ [ 0.5]])
+ """
rows, _columns = np.shape(coefficients)
@@ -21,6 +46,26 @@ def gaussian_elimination(
coefficients: NDArray[float64], vector: NDArray[float64]
) -> NDArray[float64]:
+ """
+ This function performs Gaussian elimination method
+
+ Examples:
+ 1.
+ * 1x1 - 4x2 - 2x3 = -2
+ * 5x1 + 2x2 - 2x3 = -3
+ * 1x1 - 1x2 + 0x3 = 4
+ 2.
+ * 1x1 + 2x2 = 5
+ * 5x1 + 2x2 = 5
+
+ >>> gaussian_elimination([[1, -4, -2], [5, 2, -2], [1, -1, 0]], [[-2], [-3], [4]])
+ array([[ 2.3 ],
+ [-1.7 ],
+ [ 5.55]])
+ >>> gaussian_elimination([[1, 2], [5, 2]], [[5], [5]])
+ array([[0. ],
+ [2.5]])
+ """
# coefficients must to be a square matrix so we need to check first
rows, columns = np.shape(coefficients)
if rows != columns:
@@ -47,4 +92,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/gaussian_elimination.py |
Generate consistent documentation across files |
from __future__ import annotations
from functools import lru_cache
def knapsack(
capacity: int,
weights: list[int],
values: list[int],
counter: int,
allow_repetition=False,
) -> int:
@lru_cache
def knapsack_recur(capacity: int, counter: int) -> int:
# Base Case
if counter == 0 or capacity == 0:
return 0
# If weight of the nth item is more than Knapsack of capacity,
# then this item cannot be included in the optimal solution,
# else return the maximum of two cases:
# (1) nth item included only once (0-1), if allow_repetition is False
# nth item included one or more times (0-N), if allow_repetition is True
# (2) not included
if weights[counter - 1] > capacity:
return knapsack_recur(capacity, counter - 1)
else:
left_capacity = capacity - weights[counter - 1]
new_value_included = values[counter - 1] + knapsack_recur(
left_capacity, counter - 1 if not allow_repetition else counter
)
without_new_value = knapsack_recur(capacity, counter - 1)
return max(new_value_included, without_new_value)
return knapsack_recur(capacity, counter)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,6 @@+"""A recursive implementation of 0-N Knapsack Problem
+https://en.wikipedia.org/wiki/Knapsack_problem
+"""
from __future__ import annotations
@@ -11,6 +14,28 @@ counter: int,
allow_repetition=False,
) -> int:
+ """
+ Returns the maximum value that can be put in a knapsack of a capacity cap,
+ whereby each weight w has a specific value val
+ with option to allow repetitive selection of items
+
+ >>> cap = 50
+ >>> val = [60, 100, 120]
+ >>> w = [10, 20, 30]
+ >>> c = len(val)
+ >>> knapsack(cap, w, val, c)
+ 220
+
+ Given the repetition is NOT allowed,
+ the result is 220 cause the values of 100 and 120 got the weight of 50
+ which is the limit of the capacity.
+ >>> knapsack(cap, w, val, c, True)
+ 300
+
+ Given the repetition is allowed,
+ the result is 300 cause the values of 60*5 (pick 5 times)
+ got the weight of 10*5 which is the limit of the capacity.
+ """
@lru_cache
def knapsack_recur(capacity: int, counter: int) -> int:
@@ -40,4 +65,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/knapsack/knapsack.py |
Create documentation strings for testing functions |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
# Method to find solution of system of linear equations
def jacobi_iteration_method(
coefficient_matrix: NDArray[float64],
constant_matrix: NDArray[float64],
init_val: list[float],
iterations: int,
) -> list[float]:
rows1, cols1 = coefficient_matrix.shape
rows2, cols2 = constant_matrix.shape
if rows1 != cols1:
msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
raise ValueError(msg)
if cols2 != 1:
msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
raise ValueError(msg)
if rows1 != rows2:
msg = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"received {rows1}x{cols1} and {rows2}x{cols2}"
)
raise ValueError(msg)
if len(init_val) != rows1:
msg = (
"Number of initial values must be equal to number of rows in coefficient "
f"matrix but received {len(init_val)} and {rows1}"
)
raise ValueError(msg)
if iterations <= 0:
raise ValueError("Iterations must be at least 1")
table: NDArray[float64] = np.concatenate(
(coefficient_matrix, constant_matrix), axis=1
)
rows, _cols = table.shape
strictly_diagonally_dominant(table)
"""
# Iterates the whole matrix for given number of times
for _ in range(iterations):
new_val = []
for row in range(rows):
temp = 0
for col in range(cols):
if col == row:
denom = table[row][col]
elif col == cols - 1:
val = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
temp = (temp + val) / denom
new_val.append(temp)
init_val = new_val
"""
# denominator - a list of values along the diagonal
denominator = np.diag(coefficient_matrix)
# val_last - values of the last column of the table array
val_last = table[:, -1]
# masks - boolean mask of all strings without diagonal
# elements array coefficient_matrix
masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool)
# no_diagonals - coefficient_matrix array values without diagonal elements
no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1)
# Here we get 'i_col' - these are the column numbers, for each row
# without diagonal elements, except for the last column.
_i_row, i_col = np.where(masks)
ind = i_col.reshape(-1, rows - 1)
#'i_col' is converted to a two-dimensional list 'ind', which will be
# used to make selections from 'init_val' ('arr' array see below).
# Iterates the whole matrix for given number of times
for _ in range(iterations):
arr = np.take(init_val, ind)
sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1)
new_val = (sum_product_rows + val_last) / denominator
init_val = new_val
return new_val.tolist()
# Checks if the given matrix is strictly diagonally dominant
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
rows, cols = table.shape
is_diagonally_dominant = True
for i in range(rows):
total = 0
for j in range(cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,6 @@+"""
+Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method
+"""
from __future__ import annotations
@@ -13,6 +16,69 @@ init_val: list[float],
iterations: int,
) -> list[float]:
+ """
+ Jacobi Iteration Method:
+ An iterative algorithm to determine the solutions of strictly diagonally dominant
+ system of linear equations
+
+ 4x1 + x2 + x3 = 2
+ x1 + 5x2 + 2x3 = -6
+ x1 + 2x2 + 4x3 = -4
+
+ x_init = [0.5, -0.5 , -0.5]
+
+ Examples:
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(coefficient, constant, init_val, iterations)
+ [0.909375, -1.14375, -0.7484375]
+
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(coefficient, constant, init_val, iterations)
+ Traceback (most recent call last):
+ ...
+ ValueError: Coefficient matrix dimensions must be nxn but received 2x3
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(
+ ... coefficient, constant, init_val, iterations
+ ... ) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but
+ received 3x3 and 2x1
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5]
+ >>> iterations = 3
+ >>> jacobi_iteration_method(
+ ... coefficient, constant, init_val, iterations
+ ... ) # doctest: +NORMALIZE_WHITESPACE
+ Traceback (most recent call last):
+ ...
+ ValueError: Number of initial values must be equal to number of rows in coefficient
+ matrix but received 2 and 3
+
+ >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
+ >>> constant = np.array([[2], [-6], [-4]])
+ >>> init_val = [0.5, -0.5, -0.5]
+ >>> iterations = 0
+ >>> jacobi_iteration_method(coefficient, constant, init_val, iterations)
+ Traceback (most recent call last):
+ ...
+ ValueError: Iterations must be at least 1
+ """
rows1, cols1 = coefficient_matrix.shape
rows2, cols2 = constant_matrix.shape
@@ -101,6 +167,17 @@
# Checks if the given matrix is strictly diagonally dominant
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
+ """
+ >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]])
+ >>> strictly_diagonally_dominant(table)
+ True
+
+ >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]])
+ >>> strictly_diagonally_dominant(table)
+ Traceback (most recent call last):
+ ...
+ ValueError: Coefficient matrix is not strictly diagonally dominant
+ """
rows, cols = table.shape
@@ -124,4 +201,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/jacobi_iteration_method.py |
Add docstrings explaining edge cases |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
# Ensure that table is a square array
rows, columns = np.shape(table)
if rows != columns:
msg = (
"'table' has to be of square shaped array but got a "
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(msg)
lower = np.zeros((rows, columns))
upper = np.zeros((rows, columns))
# in 'total', the necessary data is extracted through slices
# and the sum of the products is obtained.
for i in range(columns):
for j in range(i):
total = np.sum(lower[i, :i] * upper[:i, j])
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists")
lower[i][j] = (table[i][j] - total) / upper[j][j]
lower[i][i] = 1
for j in range(i, columns):
total = np.sum(lower[i, :i] * upper[:i, j])
upper[i][j] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,21 @@+"""
+Lower-upper (LU) decomposition factors a matrix as a product of a lower
+triangular matrix and an upper triangular matrix. A square matrix has an LU
+decomposition under the following conditions:
+
+ - If the matrix is invertible, then it has an LU decomposition if and only
+ if all of its leading principal minors are non-zero (see
+ https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of
+ leading principal minors of a matrix).
+ - If the matrix is singular (i.e., not invertible) and it has a rank of k
+ (i.e., it has k linearly independent columns), then it has an LU
+ decomposition if its first k leading principal minors are non-zero.
+
+This algorithm will simply attempt to perform LU decomposition on any square
+matrix and raise an error if no such decomposition exists.
+
+Reference: https://en.wikipedia.org/wiki/LU_decomposition
+"""
from __future__ import annotations
@@ -5,6 +23,63 @@
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+ """
+ Perform LU decomposition on a given matrix and raises an error if the matrix
+ isn't square or if no such decomposition exists
+
+ >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ >>> lower_mat
+ array([[1. , 0. , 0. ],
+ [0. , 1. , 0. ],
+ [2.5, 8. , 1. ]])
+ >>> upper_mat
+ array([[ 2. , -2. , 1. ],
+ [ 0. , 1. , 2. ],
+ [ 0. , 0. , -17.5]])
+
+ >>> matrix = np.array([[4, 3], [6, 3]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ >>> lower_mat
+ array([[1. , 0. ],
+ [1.5, 1. ]])
+ >>> upper_mat
+ array([[ 4. , 3. ],
+ [ 0. , -1.5]])
+
+ >>> # Matrix is not square
+ >>> matrix = np.array([[2, -2, 1], [0, 1, 2]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ Traceback (most recent call last):
+ ...
+ ValueError: 'table' has to be of square shaped array but got a 2x3 array:
+ [[ 2 -2 1]
+ [ 0 1 2]]
+
+ >>> # Matrix is invertible, but its first leading principal minor is 0
+ >>> matrix = np.array([[0, 1], [1, 0]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: No LU decomposition exists
+
+ >>> # Matrix is singular, but its first leading principal minor is 1
+ >>> matrix = np.array([[1, 0], [1, 0]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ >>> lower_mat
+ array([[1., 0.],
+ [1., 1.]])
+ >>> upper_mat
+ array([[1., 0.],
+ [0., 0.]])
+
+ >>> # Matrix is singular, but its first leading principal minor is 0
+ >>> matrix = np.array([[0, 1], [0, 1]])
+ >>> lower_mat, upper_mat = lower_upper_decomposition(matrix)
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: No LU decomposition exists
+ """
# Ensure that table is a square array
rows, columns = np.shape(table)
if rows != columns:
@@ -36,4 +111,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/lu_decomposition.py |
Add docstrings to make code maintainable | #!/usr/bin/python
class Graph:
def __init__(self):
self.vertex = {}
# for printing the Graph vertices
def print_graph(self) -> None:
print(self.vertex)
for i in self.vertex:
print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
# for adding the edge between two vertices
def add_edge(self, from_vertex: int, to_vertex: int) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(to_vertex)
else:
# else make a new vertex
self.vertex[from_vertex] = [to_vertex]
def dfs(self) -> None:
# visited array for storing already visited nodes
visited = [False] * len(self.vertex)
# call the recursive helper function
for i in range(len(self.vertex)):
if not visited[i]:
self.dfs_recursive(i, visited)
def dfs_recursive(self, start_vertex: int, visited: list) -> None:
# mark start vertex as visited
visited[start_vertex] = True
print(start_vertex, end="")
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
print(" ", end="")
self.dfs_recursive(i, visited)
if __name__ == "__main__":
import doctest
doctest.testmod()
g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs() | --- +++ @@ -1,5 +1,6 @@ #!/usr/bin/python
+"""Author: OMKAR PATHAK"""
class Graph:
@@ -8,12 +9,44 @@
# for printing the Graph vertices
def print_graph(self) -> None:
+ """
+ Print the graph vertices.
+
+ Example:
+ >>> g = Graph()
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 2)
+ >>> g.add_edge(1, 2)
+ >>> g.add_edge(2, 0)
+ >>> g.add_edge(2, 3)
+ >>> g.add_edge(3, 3)
+ >>> g.print_graph()
+ {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
+ 0 -> 1 -> 2
+ 1 -> 2
+ 2 -> 0 -> 3
+ 3 -> 3
+ """
print(self.vertex)
for i in self.vertex:
print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))
# for adding the edge between two vertices
def add_edge(self, from_vertex: int, to_vertex: int) -> None:
+ """
+ Add an edge between two vertices.
+
+ :param from_vertex: The source vertex.
+ :param to_vertex: The destination vertex.
+
+ Example:
+ >>> g = Graph()
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 2)
+ >>> g.print_graph()
+ {0: [1, 2]}
+ 0 -> 1 -> 2
+ """
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(to_vertex)
@@ -22,6 +55,21 @@ self.vertex[from_vertex] = [to_vertex]
def dfs(self) -> None:
+ """
+ Perform depth-first search (DFS) traversal on the graph
+ and print the visited vertices.
+
+ Example:
+ >>> g = Graph()
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 2)
+ >>> g.add_edge(1, 2)
+ >>> g.add_edge(2, 0)
+ >>> g.add_edge(2, 3)
+ >>> g.add_edge(3, 3)
+ >>> g.dfs()
+ 0 1 2 3
+ """
# visited array for storing already visited nodes
visited = [False] * len(self.vertex)
@@ -31,6 +79,24 @@ self.dfs_recursive(i, visited)
def dfs_recursive(self, start_vertex: int, visited: list) -> None:
+ """
+ Perform a recursive depth-first search (DFS) traversal on the graph.
+
+ :param start_vertex: The starting vertex for the traversal.
+ :param visited: A list to track visited vertices.
+
+ Example:
+ >>> g = Graph()
+ >>> g.add_edge(0, 1)
+ >>> g.add_edge(0, 2)
+ >>> g.add_edge(1, 2)
+ >>> g.add_edge(2, 0)
+ >>> g.add_edge(2, 3)
+ >>> g.add_edge(3, 3)
+ >>> visited = [False] * len(g.vertex)
+ >>> g.dfs_recursive(0, visited)
+ 0 1 2 3
+ """
# mark start vertex as visited
visited[start_vertex] = True
@@ -58,4 +124,4 @@
g.print_graph()
print("DFS:")
- g.dfs()+ g.dfs()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/depth_first_search_2.py |
Generate docstrings for script automation |
import heapq
def dijkstra(graph, start, end):
heap = [(0, start)] # cost from start node,end node
visited = set()
while heap:
(cost, u) = heapq.heappop(heap)
if u in visited:
continue
visited.add(u)
if u == end:
return cost
for v, c in graph[u]:
if v in visited:
continue
next_item = cost + c
heapq.heappush(heap, (next_item, v))
return -1
G = {
"A": [["B", 2], ["C", 5]],
"B": [["A", 2], ["D", 3], ["E", 1], ["F", 1]],
"C": [["A", 5], ["F", 3]],
"D": [["B", 3]],
"E": [["B", 4], ["F", 3]],
"F": [["C", 3], ["E", 3]],
}
r"""
Layout of G2:
E -- 1 --> B -- 1 --> C -- 1 --> D -- 1 --> F
\ /\
\ ||
----------------- 3 --------------------
"""
G2 = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["F", 3]],
"F": [],
}
r"""
Layout of G3:
E -- 1 --> B -- 1 --> C -- 1 --> D -- 1 --> F
\ /\
\ ||
-------- 2 ---------> G ------- 1 ------
"""
G3 = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
short_distance = dijkstra(G, "E", "C")
print(short_distance) # E -- 3 --> F -- 3 --> C == 6
short_distance = dijkstra(G2, "E", "F")
print(short_distance) # E -- 3 --> F == 3
short_distance = dijkstra(G3, "E", "F")
print(short_distance) # E -- 2 --> G -- 1 --> F == 3
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,8 +1,49 @@+"""
+pseudo-code
+
+DIJKSTRA(graph G, start vertex s, destination vertex d):
+
+//all nodes initially unexplored
+
+1 - let H = min heap data structure, initialized with 0 and s [here 0 indicates
+ the distance from start vertex s]
+2 - while H is non-empty:
+3 - remove the first node and cost of H, call it U and cost
+4 - if U has been previously explored:
+5 - go to the while loop, line 2 //Once a node is explored there is no need
+ to make it again
+6 - mark U as explored
+7 - if U is d:
+8 - return cost // total cost from start to destination vertex
+9 - for each edge(U, V): c=cost of edge(U,V) // for V in graph[U]
+10 - if V explored:
+11 - go to next V in line 9
+12 - total_cost = cost + c
+13 - add (total_cost,V) to H
+
+You can think at cost as a distance where Dijkstra finds the shortest distance
+between vertices s and v in a graph G. The use of a min heap as H guarantees
+that if a vertex has already been explored there will be no other path with
+shortest distance, that happens because heapq.heappop will always return the
+next vertex with the shortest distance, considering that the heap stores not
+only the distance between previous vertex and current vertex but the entire
+distance between each vertex that makes up the path from start vertex to target
+vertex.
+"""
import heapq
def dijkstra(graph, start, end):
+ """Return the cost of the shortest path between vertices start and end.
+
+ >>> dijkstra(G, "E", "C")
+ 6
+ >>> dijkstra(G2, "E", "F")
+ 3
+ >>> dijkstra(G3, "E", "F")
+ 3
+ """
heap = [(0, start)] # cost from start node,end node
visited = set()
@@ -75,4 +116,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/dijkstra.py |
Generate docstrings for script automation |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
explored, stack = set(start), [start]
while stack:
v = stack.pop()
explored.add(v)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(adj)
return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A")) | --- +++ @@ -1,8 +1,22 @@+"""Non recursive implementation of a DFS algorithm."""
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
+ """Depth First Search on Graph
+ :param graph: directed graph in dictionary format
+ :param start: starting vertex as a string
+ :returns: the trace of the search
+ >>> input_G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"],
+ ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"],
+ ... "F": ["C", "E", "G"], "G": ["F"] }
+ >>> output_G = list({'A', 'B', 'C', 'D', 'E', 'F', 'G'})
+ >>> all(x in output_G for x in list(depth_first_search(input_G, "A")))
+ True
+ >>> all(x in output_G for x in list(depth_first_search(input_G, "G")))
+ True
+ """
explored, stack = set(start), [start]
while stack:
@@ -31,4 +45,4 @@ import doctest
doctest.testmod()
- print(depth_first_search(G, "A"))+ print(depth_first_search(G, "A"))
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/depth_first_search.py |
Turn comments into proper docstrings |
from typing import Any
import numpy as np
def _is_matrix_spd(matrix: np.ndarray) -> bool:
# Ensure matrix is square.
assert np.shape(matrix)[0] == np.shape(matrix)[1]
# If matrix not symmetric, exit right away.
if np.allclose(matrix, matrix.T) is False:
return False
# Get eigenvalues and eignevectors for a symmetric matrix.
eigen_values, _ = np.linalg.eigh(matrix)
# Check sign of all eigenvalues.
# np.all returns a value of type np.bool_
return bool(np.all(eigen_values > 0))
def _create_spd_matrix(dimension: int) -> Any:
rng = np.random.default_rng()
random_matrix = rng.normal(size=(dimension, dimension))
spd_matrix = np.dot(random_matrix, random_matrix.T)
assert _is_matrix_spd(spd_matrix)
return spd_matrix
def conjugate_gradient(
spd_matrix: np.ndarray,
load_vector: np.ndarray,
max_iterations: int = 1000,
tol: float = 1e-8,
) -> Any:
# Ensure proper dimensionality.
assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]
assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0]
assert _is_matrix_spd(spd_matrix)
# Initialize solution guess, residual, search direction.
x0 = np.zeros((np.shape(load_vector)[0], 1))
r0 = np.copy(load_vector)
p0 = np.copy(r0)
# Set initial errors in solution guess and residual.
error_residual = 1e9
error_x_solution = 1e9
error = 1e9
# Set iteration counter to threshold number of iterations.
iterations = 0
while error > tol:
# Save this value so we only calculate the matrix-vector product once.
w = np.dot(spd_matrix, p0)
# The main algorithm.
# Update search direction magnitude.
alpha = np.dot(r0.T, r0) / np.dot(p0.T, w)
# Update solution guess.
x = x0 + alpha * p0
# Calculate new residual.
r = r0 - alpha * w
# Calculate new Krylov subspace scale.
beta = np.dot(r.T, r) / np.dot(r0.T, r0)
# Calculate new A conjuage search direction.
p = r + beta * p0
# Calculate errors.
error_residual = np.linalg.norm(r - r0)
error_x_solution = np.linalg.norm(x - x0)
error = np.maximum(error_residual, error_x_solution)
# Update variables.
x0 = np.copy(x)
r0 = np.copy(r)
p0 = np.copy(p)
# Update number of iterations.
iterations += 1
if iterations > max_iterations:
break
return x
def test_conjugate_gradient() -> None:
# Create linear system with SPD matrix and known solution x_true.
dimension = 3
spd_matrix = _create_spd_matrix(dimension)
rng = np.random.default_rng()
x_true = rng.normal(size=(dimension, 1))
b = np.dot(spd_matrix, x_true)
# Numpy solution.
x_numpy = np.linalg.solve(spd_matrix, b)
# Our implementation.
x_conjugate_gradient = conjugate_gradient(spd_matrix, b)
# Ensure both solutions are close to x_true (and therefore one another).
assert np.linalg.norm(x_numpy - x_true) <= 1e-6
assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_conjugate_gradient() | --- +++ @@ -1,3 +1,8 @@+"""
+Resources:
+- https://en.wikipedia.org/wiki/Conjugate_gradient_method
+- https://en.wikipedia.org/wiki/Definite_symmetric_matrix
+"""
from typing import Any
@@ -5,6 +10,26 @@
def _is_matrix_spd(matrix: np.ndarray) -> bool:
+ """
+ Returns True if input matrix is symmetric positive definite.
+ Returns False otherwise.
+
+ For a matrix to be SPD, all eigenvalues must be positive.
+
+ >>> import numpy as np
+ >>> matrix = np.array([
+ ... [4.12401784, -5.01453636, -0.63865857],
+ ... [-5.01453636, 12.33347422, -3.40493586],
+ ... [-0.63865857, -3.40493586, 5.78591885]])
+ >>> _is_matrix_spd(matrix)
+ True
+ >>> matrix = np.array([
+ ... [0.34634879, 1.96165514, 2.18277744],
+ ... [0.74074469, -1.19648894, -1.34223498],
+ ... [-0.7687067 , 0.06018373, -1.16315631]])
+ >>> _is_matrix_spd(matrix)
+ False
+ """
# Ensure matrix is square.
assert np.shape(matrix)[0] == np.shape(matrix)[1]
@@ -21,6 +46,21 @@
def _create_spd_matrix(dimension: int) -> Any:
+ """
+ Returns a symmetric positive definite matrix given a dimension.
+
+ Input:
+ dimension gives the square matrix dimension.
+
+ Output:
+ spd_matrix is an diminesion x dimensions symmetric positive definite (SPD) matrix.
+
+ >>> import numpy as np
+ >>> dimension = 3
+ >>> spd_matrix = _create_spd_matrix(dimension)
+ >>> _is_matrix_spd(spd_matrix)
+ True
+ """
rng = np.random.default_rng()
random_matrix = rng.normal(size=(dimension, dimension))
spd_matrix = np.dot(random_matrix, random_matrix.T)
@@ -34,6 +74,30 @@ max_iterations: int = 1000,
tol: float = 1e-8,
) -> Any:
+ """
+ Returns solution to the linear system np.dot(spd_matrix, x) = b.
+
+ Input:
+ spd_matrix is an NxN Symmetric Positive Definite (SPD) matrix.
+ load_vector is an Nx1 vector.
+
+ Output:
+ x is an Nx1 vector that is the solution vector.
+
+ >>> import numpy as np
+ >>> spd_matrix = np.array([
+ ... [8.73256573, -5.02034289, -2.68709226],
+ ... [-5.02034289, 3.78188322, 0.91980451],
+ ... [-2.68709226, 0.91980451, 1.94746467]])
+ >>> b = np.array([
+ ... [-5.80872761],
+ ... [ 3.23807431],
+ ... [ 1.95381422]])
+ >>> conjugate_gradient(spd_matrix, b)
+ array([[-0.63114139],
+ [-0.01561498],
+ [ 0.13979294]])
+ """
# Ensure proper dimensionality.
assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]
assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0]
@@ -88,6 +152,9 @@
def test_conjugate_gradient() -> None:
+ """
+ >>> test_conjugate_gradient() # self running tests
+ """
# Create linear system with SPD matrix and known solution x_true.
dimension = 3
spd_matrix = _create_spd_matrix(dimension)
@@ -110,4 +177,4 @@ import doctest
doctest.testmod()
- test_conjugate_gradient()+ test_conjugate_gradient()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/src/conjugate_gradient.py |
Create docstrings for each class method |
# pylint: disable=invalid-name
from collections import defaultdict
def dfs(start: int) -> int:
# pylint: disable=redefined-outer-name
ret = 1
visited[start] = True
for v in tree[start]:
if v not in visited:
ret += dfs(v)
if ret % 2 == 0:
cuts.append(start)
return ret
def even_tree():
dfs(1)
if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
visited: dict[int, bool] = {}
cuts: list[int] = []
count = 0
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | --- +++ @@ -1,9 +1,24 @@+"""
+You are given a tree(a simple connected graph with no cycles). The tree has N
+nodes numbered from 1 to N and is rooted at node 1.
+
+Find the maximum number of edges you can remove from the tree to get a forest
+such that each connected component of the forest contains an even number of
+nodes.
+
+Constraints
+2 <= 2 <= 100
+
+Note: The tree input will be such that it can always be decomposed into
+components containing an even number of nodes.
+"""
# pylint: disable=invalid-name
from collections import defaultdict
def dfs(start: int) -> int:
+ """DFS traversal"""
# pylint: disable=redefined-outer-name
ret = 1
visited[start] = True
@@ -16,6 +31,18 @@
def even_tree():
+ """
+ 2 1
+ 3 1
+ 4 3
+ 5 2
+ 6 1
+ 7 2
+ 8 6
+ 9 8
+ 10 8
+ On removing edges (1,3) and (1,6), we can get the desired result 2.
+ """
dfs(1)
@@ -30,4 +57,4 @@ tree[u].append(v)
tree[v].append(u)
even_tree()
- print(len(cuts) - 1)+ print(len(cuts) - 1)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/even_tree.py |
Add structured docstrings to improve clarity | from __future__ import annotations
class Graph:
def __init__(self, vertices: int) -> None:
self.vertices = vertices
self.graph = [[0] * vertices for _ in range(vertices)]
def print_solution(self, distances_from_source: list[int]) -> None:
print("Vertex \t Distance from Source")
for vertex in range(self.vertices):
print(vertex, "\t\t", distances_from_source[vertex])
def minimum_distance(
self, distances_from_source: list[int], visited: list[bool]
) -> int:
# Initialize minimum distance for next node
minimum = 1e7
min_index = 0
# Search not nearest vertex not in the shortest path tree
for vertex in range(self.vertices):
if distances_from_source[vertex] < minimum and visited[vertex] is False:
minimum = distances_from_source[vertex]
min_index = vertex
return min_index
def dijkstra(self, source: int) -> None:
distances = [int(1e7)] * self.vertices # distances from the source
distances[source] = 0
visited = [False] * self.vertices
for _ in range(self.vertices):
u = self.minimum_distance(distances, visited)
visited[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
# the vertex in not in the shortest path tree
for v in range(self.vertices):
if (
self.graph[u][v] > 0
and visited[v] is False
and distances[v] > distances[u] + self.graph[u][v]
):
distances[v] = distances[u] + self.graph[u][v]
self.print_solution(distances)
if __name__ == "__main__":
graph = Graph(9)
graph.graph = [
[0, 4, 0, 0, 0, 0, 0, 8, 0],
[4, 0, 8, 0, 0, 0, 0, 11, 0],
[0, 8, 0, 7, 0, 4, 0, 0, 2],
[0, 0, 7, 0, 9, 14, 0, 0, 0],
[0, 0, 0, 9, 0, 10, 0, 0, 0],
[0, 0, 4, 14, 10, 0, 2, 0, 0],
[0, 0, 0, 0, 0, 2, 0, 1, 6],
[8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0],
]
graph.dijkstra(0) | --- +++ @@ -3,10 +3,23 @@
class Graph:
def __init__(self, vertices: int) -> None:
+ """
+ >>> graph = Graph(2)
+ >>> graph.vertices
+ 2
+ >>> len(graph.graph)
+ 2
+ >>> len(graph.graph[0])
+ 2
+ """
self.vertices = vertices
self.graph = [[0] * vertices for _ in range(vertices)]
def print_solution(self, distances_from_source: list[int]) -> None:
+ """
+ >>> Graph(0).print_solution([]) # doctest: +NORMALIZE_WHITESPACE
+ Vertex Distance from Source
+ """
print("Vertex \t Distance from Source")
for vertex in range(self.vertices):
print(vertex, "\t\t", distances_from_source[vertex])
@@ -14,6 +27,13 @@ def minimum_distance(
self, distances_from_source: list[int], visited: list[bool]
) -> int:
+ """
+ A utility function to find the vertex with minimum distance value, from the set
+ of vertices not yet included in shortest path tree.
+
+ >>> Graph(3).minimum_distance([1, 2, 3], [False, False, True])
+ 0
+ """
# Initialize minimum distance for next node
minimum = 1e7
@@ -27,6 +47,17 @@ return min_index
def dijkstra(self, source: int) -> None:
+ """
+ Function that implements Dijkstra's single source shortest path algorithm for a
+ graph represented using adjacency matrix representation.
+
+ >>> Graph(4).dijkstra(1) # doctest: +NORMALIZE_WHITESPACE
+ Vertex Distance from Source
+ 0 10000000
+ 1 0
+ 2 10000000
+ 3 10000000
+ """
distances = [int(1e7)] * self.vertices # distances from the source
distances[source] = 0
@@ -64,4 +95,4 @@ [8, 11, 0, 0, 0, 0, 1, 0, 7],
[0, 0, 2, 0, 0, 0, 6, 7, 0],
]
- graph.dijkstra(0)+ graph.dijkstra(0)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/dijkstra_alternate.py |
Create structured documentation for my script |
# fmt: off
edge_array = [
['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4',
'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3'],
['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'cd-e2', 'de-e1', 'df-e8',
'ef-e3', 'eg-e2', 'fg-e6'],
['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2',
'eh-e12', 'fg-e6', 'fh-e10', 'gh-e6'],
['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'],
['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8',
'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6']
]
# fmt: on
def get_distinct_edge(edge_array):
distinct_edge = set()
for row in edge_array:
for item in row:
distinct_edge.add(item[0])
return list(distinct_edge)
def get_bitcode(edge_array, distinct_edge):
bitcode = ["0"] * len(edge_array)
for i, row in enumerate(edge_array):
for item in row:
if distinct_edge in item[0]:
bitcode[i] = "1"
break
return "".join(bitcode)
def get_frequency_table(edge_array):
distinct_edge = get_distinct_edge(edge_array)
frequency_table = {}
for item in distinct_edge:
bit = get_bitcode(edge_array, item)
# print('bit',bit)
# bt=''.join(bit)
s = bit.count("1")
frequency_table[item] = [s, bit]
# Store [Distinct edge, WT(Bitcode), Bitcode] in descending order
sorted_frequency_table = [
[k, v[0], v[1]]
for k, v in sorted(frequency_table.items(), key=lambda v: v[1][0], reverse=True)
]
return sorted_frequency_table
def get_nodes(frequency_table):
nodes = {}
for _, item in enumerate(frequency_table):
nodes.setdefault(item[2], []).append(item[0])
return nodes
def get_cluster(nodes):
cluster = {}
for key, value in nodes.items():
cluster.setdefault(key.count("1"), {})[key] = value
return cluster
def get_support(cluster):
return [i * 100 / len(cluster) for i in cluster]
def print_all() -> None:
print("\nNodes\n")
for key, value in nodes.items():
print(key, value)
print("\nSupport\n")
print(support)
print("\n Cluster \n")
for key, value in sorted(cluster.items(), reverse=True):
print(key, value)
print("\n Graph\n")
for key, value in graph.items():
print(key, value)
print("\n Edge List of Frequent subgraphs \n")
for edge_list in freq_subgraph_edge_list:
print(edge_list)
def create_edge(nodes, graph, cluster, c1):
for i in cluster[c1]:
count = 0
c2 = c1 + 1
while c2 < max(cluster.keys()):
for j in cluster[c2]:
"""
creates edge only if the condition satisfies
"""
if int(i, 2) & int(j, 2) == int(i, 2):
if tuple(nodes[i]) in graph:
graph[tuple(nodes[i])].append(nodes[j])
else:
graph[tuple(nodes[i])] = [nodes[j]]
count += 1
if count == 0:
c2 = c2 + 1
else:
break
def construct_graph(cluster, nodes):
x = cluster[max(cluster.keys())]
cluster[max(cluster.keys()) + 1] = "Header"
graph = {}
for i in x:
if (["Header"],) in graph:
graph[(["Header"],)].append(x[i])
else:
graph[(["Header"],)] = [x[i]]
for i in x:
graph[(x[i],)] = [["Header"]]
i = 1
while i < max(cluster) - 1:
create_edge(nodes, graph, cluster, i)
i = i + 1
return graph
def my_dfs(graph, start, end, path=None):
path = (path or []) + [start]
if start == end:
paths.append(path)
for node in graph[start]:
if tuple(node) not in path:
my_dfs(graph, tuple(node), end, path)
def find_freq_subgraph_given_support(s, cluster, graph):
k = int(s / 100 * (len(cluster) - 1))
for i in cluster[k]:
my_dfs(graph, tuple(cluster[k][i]), (["Header"],))
def freq_subgraphs_edge_list(paths):
freq_sub_el = []
for edges in paths:
el = []
for j in range(len(edges) - 1):
temp = list(edges[j])
for e in temp:
edge = (e[0], e[1])
el.append(edge)
freq_sub_el.append(el)
return freq_sub_el
def preprocess(edge_array):
for i in range(len(edge_array)):
for j in range(len(edge_array[i])):
t = edge_array[i][j].split("-")
edge_array[i][j] = t
if __name__ == "__main__":
preprocess(edge_array)
frequency_table = get_frequency_table(edge_array)
nodes = get_nodes(frequency_table)
cluster = get_cluster(nodes)
support = get_support(cluster)
graph = construct_graph(cluster, nodes)
find_freq_subgraph_given_support(60, cluster, graph)
paths: list = []
freq_subgraph_edge_list = freq_subgraphs_edge_list(paths)
print_all() | --- +++ @@ -1,172 +1,233 @@-
-# fmt: off
-edge_array = [
- ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4',
- 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3'],
- ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'cd-e2', 'de-e1', 'df-e8',
- 'ef-e3', 'eg-e2', 'fg-e6'],
- ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2',
- 'eh-e12', 'fg-e6', 'fh-e10', 'gh-e6'],
- ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'],
- ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8',
- 'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6']
-]
-# fmt: on
-
-
-def get_distinct_edge(edge_array):
- distinct_edge = set()
- for row in edge_array:
- for item in row:
- distinct_edge.add(item[0])
- return list(distinct_edge)
-
-
-def get_bitcode(edge_array, distinct_edge):
- bitcode = ["0"] * len(edge_array)
- for i, row in enumerate(edge_array):
- for item in row:
- if distinct_edge in item[0]:
- bitcode[i] = "1"
- break
- return "".join(bitcode)
-
-
-def get_frequency_table(edge_array):
- distinct_edge = get_distinct_edge(edge_array)
- frequency_table = {}
-
- for item in distinct_edge:
- bit = get_bitcode(edge_array, item)
- # print('bit',bit)
- # bt=''.join(bit)
- s = bit.count("1")
- frequency_table[item] = [s, bit]
- # Store [Distinct edge, WT(Bitcode), Bitcode] in descending order
- sorted_frequency_table = [
- [k, v[0], v[1]]
- for k, v in sorted(frequency_table.items(), key=lambda v: v[1][0], reverse=True)
- ]
- return sorted_frequency_table
-
-
-def get_nodes(frequency_table):
- nodes = {}
- for _, item in enumerate(frequency_table):
- nodes.setdefault(item[2], []).append(item[0])
- return nodes
-
-
-def get_cluster(nodes):
- cluster = {}
- for key, value in nodes.items():
- cluster.setdefault(key.count("1"), {})[key] = value
- return cluster
-
-
-def get_support(cluster):
- return [i * 100 / len(cluster) for i in cluster]
-
-
-def print_all() -> None:
- print("\nNodes\n")
- for key, value in nodes.items():
- print(key, value)
- print("\nSupport\n")
- print(support)
- print("\n Cluster \n")
- for key, value in sorted(cluster.items(), reverse=True):
- print(key, value)
- print("\n Graph\n")
- for key, value in graph.items():
- print(key, value)
- print("\n Edge List of Frequent subgraphs \n")
- for edge_list in freq_subgraph_edge_list:
- print(edge_list)
-
-
-def create_edge(nodes, graph, cluster, c1):
- for i in cluster[c1]:
- count = 0
- c2 = c1 + 1
- while c2 < max(cluster.keys()):
- for j in cluster[c2]:
- """
- creates edge only if the condition satisfies
- """
- if int(i, 2) & int(j, 2) == int(i, 2):
- if tuple(nodes[i]) in graph:
- graph[tuple(nodes[i])].append(nodes[j])
- else:
- graph[tuple(nodes[i])] = [nodes[j]]
- count += 1
- if count == 0:
- c2 = c2 + 1
- else:
- break
-
-
-def construct_graph(cluster, nodes):
- x = cluster[max(cluster.keys())]
- cluster[max(cluster.keys()) + 1] = "Header"
- graph = {}
- for i in x:
- if (["Header"],) in graph:
- graph[(["Header"],)].append(x[i])
- else:
- graph[(["Header"],)] = [x[i]]
- for i in x:
- graph[(x[i],)] = [["Header"]]
- i = 1
- while i < max(cluster) - 1:
- create_edge(nodes, graph, cluster, i)
- i = i + 1
- return graph
-
-
-def my_dfs(graph, start, end, path=None):
- path = (path or []) + [start]
- if start == end:
- paths.append(path)
- for node in graph[start]:
- if tuple(node) not in path:
- my_dfs(graph, tuple(node), end, path)
-
-
-def find_freq_subgraph_given_support(s, cluster, graph):
- k = int(s / 100 * (len(cluster) - 1))
- for i in cluster[k]:
- my_dfs(graph, tuple(cluster[k][i]), (["Header"],))
-
-
-def freq_subgraphs_edge_list(paths):
- freq_sub_el = []
- for edges in paths:
- el = []
- for j in range(len(edges) - 1):
- temp = list(edges[j])
- for e in temp:
- edge = (e[0], e[1])
- el.append(edge)
- freq_sub_el.append(el)
- return freq_sub_el
-
-
-def preprocess(edge_array):
- for i in range(len(edge_array)):
- for j in range(len(edge_array[i])):
- t = edge_array[i][j].split("-")
- edge_array[i][j] = t
-
-
-if __name__ == "__main__":
- preprocess(edge_array)
- frequency_table = get_frequency_table(edge_array)
- nodes = get_nodes(frequency_table)
- cluster = get_cluster(nodes)
- support = get_support(cluster)
- graph = construct_graph(cluster, nodes)
- find_freq_subgraph_given_support(60, cluster, graph)
- paths: list = []
- freq_subgraph_edge_list = freq_subgraphs_edge_list(paths)
- print_all()+"""
+FP-GraphMiner - A Fast Frequent Pattern Mining Algorithm for Network Graphs
+
+A novel Frequent Pattern Graph Mining algorithm, FP-GraphMiner, that compactly
+represents a set of network graphs as a Frequent Pattern Graph (or FP-Graph).
+This graph can be used to efficiently mine frequent subgraphs including maximal
+frequent subgraphs and maximum common subgraphs.
+
+URL: https://www.researchgate.net/publication/235255851
+"""
+
+# fmt: off
+edge_array = [
+ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2', 'ce-e4',
+ 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3'],
+ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'cd-e2', 'de-e1', 'df-e8',
+ 'ef-e3', 'eg-e2', 'fg-e6'],
+ ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3', 'eg-e2',
+ 'eh-e12', 'fg-e6', 'fh-e10', 'gh-e6'],
+ ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'],
+ ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1', 'df-e8',
+ 'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6']
+]
+# fmt: on
+
+
+def get_distinct_edge(edge_array):
+ """
+ Return Distinct edges from edge array of multiple graphs
+ >>> sorted(get_distinct_edge(edge_array))
+ ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
+ """
+ distinct_edge = set()
+ for row in edge_array:
+ for item in row:
+ distinct_edge.add(item[0])
+ return list(distinct_edge)
+
+
+def get_bitcode(edge_array, distinct_edge):
+ """
+ Return bitcode of distinct_edge
+ """
+ bitcode = ["0"] * len(edge_array)
+ for i, row in enumerate(edge_array):
+ for item in row:
+ if distinct_edge in item[0]:
+ bitcode[i] = "1"
+ break
+ return "".join(bitcode)
+
+
+def get_frequency_table(edge_array):
+ """
+ Returns Frequency Table
+ """
+ distinct_edge = get_distinct_edge(edge_array)
+ frequency_table = {}
+
+ for item in distinct_edge:
+ bit = get_bitcode(edge_array, item)
+ # print('bit',bit)
+ # bt=''.join(bit)
+ s = bit.count("1")
+ frequency_table[item] = [s, bit]
+ # Store [Distinct edge, WT(Bitcode), Bitcode] in descending order
+ sorted_frequency_table = [
+ [k, v[0], v[1]]
+ for k, v in sorted(frequency_table.items(), key=lambda v: v[1][0], reverse=True)
+ ]
+ return sorted_frequency_table
+
+
+def get_nodes(frequency_table):
+ """
+ Returns nodes
+ format nodes={bitcode:edges that represent the bitcode}
+ >>> get_nodes([['ab', 5, '11111'], ['ac', 5, '11111'], ['df', 5, '11111'],
+ ... ['bd', 5, '11111'], ['bc', 5, '11111']])
+ {'11111': ['ab', 'ac', 'df', 'bd', 'bc']}
+ """
+ nodes = {}
+ for _, item in enumerate(frequency_table):
+ nodes.setdefault(item[2], []).append(item[0])
+ return nodes
+
+
+def get_cluster(nodes):
+ """
+ Returns cluster
+ format cluster:{WT(bitcode):nodes with same WT}
+ """
+ cluster = {}
+ for key, value in nodes.items():
+ cluster.setdefault(key.count("1"), {})[key] = value
+ return cluster
+
+
+def get_support(cluster):
+ """
+ Returns support
+ >>> get_support({5: {'11111': ['ab', 'ac', 'df', 'bd', 'bc']},
+ ... 4: {'11101': ['ef', 'eg', 'de', 'fg'], '11011': ['cd']},
+ ... 3: {'11001': ['ad'], '10101': ['dg']},
+ ... 2: {'10010': ['dh', 'bh'], '11000': ['be'], '10100': ['gh'],
+ ... '10001': ['ce']},
+ ... 1: {'00100': ['fh', 'eh'], '10000': ['hi']}})
+ [100.0, 80.0, 60.0, 40.0, 20.0]
+ """
+ return [i * 100 / len(cluster) for i in cluster]
+
+
+def print_all() -> None:
+ print("\nNodes\n")
+ for key, value in nodes.items():
+ print(key, value)
+ print("\nSupport\n")
+ print(support)
+ print("\n Cluster \n")
+ for key, value in sorted(cluster.items(), reverse=True):
+ print(key, value)
+ print("\n Graph\n")
+ for key, value in graph.items():
+ print(key, value)
+ print("\n Edge List of Frequent subgraphs \n")
+ for edge_list in freq_subgraph_edge_list:
+ print(edge_list)
+
+
+def create_edge(nodes, graph, cluster, c1):
+ """
+ create edge between the nodes
+ """
+ for i in cluster[c1]:
+ count = 0
+ c2 = c1 + 1
+ while c2 < max(cluster.keys()):
+ for j in cluster[c2]:
+ """
+ creates edge only if the condition satisfies
+ """
+ if int(i, 2) & int(j, 2) == int(i, 2):
+ if tuple(nodes[i]) in graph:
+ graph[tuple(nodes[i])].append(nodes[j])
+ else:
+ graph[tuple(nodes[i])] = [nodes[j]]
+ count += 1
+ if count == 0:
+ c2 = c2 + 1
+ else:
+ break
+
+
+def construct_graph(cluster, nodes):
+ x = cluster[max(cluster.keys())]
+ cluster[max(cluster.keys()) + 1] = "Header"
+ graph = {}
+ for i in x:
+ if (["Header"],) in graph:
+ graph[(["Header"],)].append(x[i])
+ else:
+ graph[(["Header"],)] = [x[i]]
+ for i in x:
+ graph[(x[i],)] = [["Header"]]
+ i = 1
+ while i < max(cluster) - 1:
+ create_edge(nodes, graph, cluster, i)
+ i = i + 1
+ return graph
+
+
+def my_dfs(graph, start, end, path=None):
+ """
+ find different DFS walk from given node to Header node
+ """
+ path = (path or []) + [start]
+ if start == end:
+ paths.append(path)
+ for node in graph[start]:
+ if tuple(node) not in path:
+ my_dfs(graph, tuple(node), end, path)
+
+
+def find_freq_subgraph_given_support(s, cluster, graph):
+ """
+ find edges of multiple frequent subgraphs
+ """
+ k = int(s / 100 * (len(cluster) - 1))
+ for i in cluster[k]:
+ my_dfs(graph, tuple(cluster[k][i]), (["Header"],))
+
+
+def freq_subgraphs_edge_list(paths):
+ """
+ returns Edge list for frequent subgraphs
+ """
+ freq_sub_el = []
+ for edges in paths:
+ el = []
+ for j in range(len(edges) - 1):
+ temp = list(edges[j])
+ for e in temp:
+ edge = (e[0], e[1])
+ el.append(edge)
+ freq_sub_el.append(el)
+ return freq_sub_el
+
+
+def preprocess(edge_array):
+ """
+ Preprocess the edge array
+ >>> preprocess([['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12',
+ ... 'cd-e2', 'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3',
+ ... 'eg-e2', 'fg-e6', 'gh-e6', 'hi-e3']])
+
+ """
+ for i in range(len(edge_array)):
+ for j in range(len(edge_array[i])):
+ t = edge_array[i][j].split("-")
+ edge_array[i][j] = t
+
+
+if __name__ == "__main__":
+ preprocess(edge_array)
+ frequency_table = get_frequency_table(edge_array)
+ nodes = get_nodes(frequency_table)
+ cluster = get_cluster(nodes)
+ support = get_support(cluster)
+ graph = construct_graph(cluster, nodes)
+ find_freq_subgraph_given_support(60, cluster, graph)
+ paths: list = []
+ freq_subgraph_edge_list = freq_subgraphs_edge_list(paths)
+ print_all()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/frequent_pattern_graph_miner.py |
Document classes and their methods |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
grid: np.ndarray,
source: tuple[int, int],
destination: tuple[int, int],
allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
rows, cols = grid.shape
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
queue, visited = [(0, source)], set()
matrix = np.full((rows, cols), np.inf)
matrix[source] = 0
predecessors = np.empty((rows, cols), dtype=object)
predecessors[source] = None
while queue:
(dist, (x, y)) = heappop(queue)
if (x, y) in visited:
continue
visited.add((x, y))
if (x, y) == destination:
path = []
while (x, y) != source:
path.append((x, y))
x, y = predecessors[x, y]
path.append(source) # add the source manually
path.reverse()
return float(matrix[destination]), path
for i in range(len(dx)):
nx, ny = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
next_node = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(queue, (dist + 1, (nx, ny)))
matrix[nx, ny] = dist + 1
predecessors[nx, ny] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,10 @@+"""
+This script implements the Dijkstra algorithm on a binary grid.
+The grid consists of 0s and 1s, where 1 represents
+a walkable node and 0 represents an obstacle.
+The algorithm finds the shortest path from a start node to a destination node.
+Diagonal movement can be allowed or disallowed.
+"""
from heapq import heappop, heappush
@@ -10,6 +17,32 @@ destination: tuple[int, int],
allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
+ """
+ Implements Dijkstra's algorithm on a binary grid.
+
+ Args:
+ grid (np.ndarray): A 2D numpy array representing the grid.
+ 1 represents a walkable node and 0 represents an obstacle.
+ source (Tuple[int, int]): A tuple representing the start node.
+ destination (Tuple[int, int]): A tuple representing the
+ destination node.
+ allow_diagonal (bool): A boolean determining whether
+ diagonal movements are allowed.
+
+ Returns:
+ Tuple[Union[float, int], List[Tuple[int, int]]]:
+ The shortest distance from the start node to the destination node
+ and the shortest path as a list of nodes.
+
+ >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), False)
+ (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])
+
+ >>> dijkstra(np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]]), (0, 0), (2, 2), True)
+ (2.0, [(0, 0), (1, 1), (2, 2)])
+
+ >>> dijkstra(np.array([[1, 1, 1], [0, 0, 1], [0, 1, 1]]), (0, 0), (2, 2), False)
+ (4.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)])
+ """
rows, cols = grid.shape
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
@@ -53,4 +86,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/dijkstra_binary_grid.py |
Write reusable docstrings |
def __get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
id_ = 0
n = len(graph) # No of vertices in graph
low = [0] * n
visited = [False] * n
def dfs(at, parent, bridges, id_):
visited[at] = True
low[at] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(to, at, bridges, id_)
low[at] = min(low[at], low[to])
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at))
else:
# This edge is a back edge and cannot be a bridge
low[at] = min(low[at], low[to])
bridges: list[tuple[int, int]] = []
for i in range(n):
if not visited[i]:
dfs(i, -1, bridges, id_)
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,13 @@+"""
+An edge is a bridge if, after removing it count of connected components in graph will
+be increased by one. Bridges represent vulnerabilities in a connected network and are
+useful for designing reliable networks. For example, in a wired computer network, an
+articulation point indicates the critical computers and a bridge indicates the critical
+wires or connections.
+
+For more details, refer this article:
+https://www.geeksforgeeks.org/bridge-in-a-graph/
+"""
def __get_demo_graph(index):
@@ -48,6 +58,19 @@
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
+ """
+ Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]; ai <= bi
+ >>> compute_bridges(__get_demo_graph(0))
+ [(3, 4), (2, 3), (2, 5)]
+ >>> compute_bridges(__get_demo_graph(1))
+ [(6, 7), (0, 6), (1, 9), (3, 4), (2, 4), (2, 5)]
+ >>> compute_bridges(__get_demo_graph(2))
+ [(1, 6), (4, 6), (0, 4)]
+ >>> compute_bridges(__get_demo_graph(3))
+ []
+ >>> compute_bridges({})
+ []
+ """
id_ = 0
n = len(graph) # No of vertices in graph
@@ -80,4 +103,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/finding_bridges.py |
Document all public functions with docstrings |
from __future__ import annotations
from sys import maxsize
from typing import TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
return (position - 1) // 2
def get_child_left_position(position: int) -> int:
return (2 * position) + 1
def get_child_right_position(position: int) -> int:
return (2 * position) + 2
class MinPriorityQueue[T]:
def __init__(self) -> None:
self.heap: list[tuple[T, int]] = []
self.position_map: dict[T, int] = {}
self.elements: int = 0
def __len__(self) -> int:
return self.elements
def __repr__(self) -> str:
return str(self.heap)
def is_empty(self) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def push(self, elem: T, weight: int) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight))
self.position_map[elem] = self.elements
self.elements += 1
self._bubble_up(elem)
def extract_min(self) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0, self.elements - 1)
elem, _ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
bubble_down_elem, _ = self.heap[0]
self._bubble_down(bubble_down_elem)
return elem
def update_key(self, elem: T, weight: int) -> None:
# Update the weight of the given key
position = self.position_map[elem]
self.heap[position] = (elem, weight)
if position > 0:
parent_position = get_parent_position(position)
_, parent_weight = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(elem)
else:
self._bubble_down(elem)
else:
self._bubble_down(elem)
def _bubble_up(self, elem: T) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
curr_pos = self.position_map[elem]
if curr_pos == 0:
return None
parent_position = get_parent_position(curr_pos)
_, weight = self.heap[curr_pos]
_, parent_weight = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(parent_position, curr_pos)
return self._bubble_up(elem)
return None
def _bubble_down(self, elem: T) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
curr_pos = self.position_map[elem]
_, weight = self.heap[curr_pos]
child_left_position = get_child_left_position(curr_pos)
child_right_position = get_child_right_position(curr_pos)
if child_left_position < self.elements and child_right_position < self.elements:
_, child_left_weight = self.heap[child_left_position]
_, child_right_weight = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(child_right_position, curr_pos)
return self._bubble_down(elem)
if child_left_position < self.elements:
_, child_left_weight = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(child_left_position, curr_pos)
return self._bubble_down(elem)
else:
return None
if child_right_position < self.elements:
_, child_right_weight = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(child_right_position, curr_pos)
return self._bubble_down(elem)
return None
def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
# Swap the nodes at the given positions
node1_elem = self.heap[node1_pos][0]
node2_elem = self.heap[node2_pos][0]
self.heap[node1_pos], self.heap[node2_pos] = (
self.heap[node2_pos],
self.heap[node1_pos],
)
self.position_map[node1_elem] = node2_pos
self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted[T]:
def __init__(self) -> None:
self.connections: dict[T, dict[T, int]] = {}
self.nodes: int = 0
def __repr__(self) -> str:
return str(self.connections)
def __len__(self) -> int:
return self.nodes
def add_node(self, node: T) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
self.connections[node] = {}
self.nodes += 1
def add_edge(self, node1: T, node2: T, weight: int) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(node1)
self.add_node(node2)
self.connections[node1][node2] = weight
self.connections[node2][node1] = weight
def prims_algo[T](
graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
# prim's algorithm for minimum spanning tree
dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize)
parent: dict[T, T | None] = dict.fromkeys(graph.connections)
priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(node, weight)
if priority_queue.is_empty():
return dist, parent
# initialization
node = priority_queue.extract_min()
dist[node] = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour, dist[neighbour])
parent[neighbour] = node
# running prim's algorithm
while not priority_queue.is_empty():
node = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour, dist[neighbour])
parent[neighbour] = node
return dist, parent | --- +++ @@ -1,3 +1,11 @@+"""
+Prim's (also known as Jarník's) algorithm is a greedy algorithm that finds a minimum
+spanning tree for a weighted undirected graph. This means it finds a subset of the
+edges that forms a tree that includes every vertex, where the total weight of all the
+edges in the tree is minimized. The algorithm operates by building this tree one vertex
+at a time, from an arbitrary starting vertex, at each step adding the cheapest possible
+connection from the tree to another vertex.
+"""
from __future__ import annotations
@@ -8,18 +16,72 @@
def get_parent_position(position: int) -> int:
+ """
+ heap helper function get the position of the parent of the current node
+
+ >>> get_parent_position(1)
+ 0
+ >>> get_parent_position(2)
+ 0
+ """
return (position - 1) // 2
def get_child_left_position(position: int) -> int:
+ """
+ heap helper function get the position of the left child of the current node
+
+ >>> get_child_left_position(0)
+ 1
+ """
return (2 * position) + 1
def get_child_right_position(position: int) -> int:
+ """
+ heap helper function get the position of the right child of the current node
+
+ >>> get_child_right_position(0)
+ 2
+ """
return (2 * position) + 2
class MinPriorityQueue[T]:
+ """
+ Minimum Priority Queue Class
+
+ Functions:
+ is_empty: function to check if the priority queue is empty
+ push: function to add an element with given priority to the queue
+ extract_min: function to remove and return the element with lowest weight (highest
+ priority)
+ update_key: function to update the weight of the given key
+ _bubble_up: helper function to place a node at the proper position (upward
+ movement)
+ _bubble_down: helper function to place a node at the proper position (downward
+ movement)
+ _swap_nodes: helper function to swap the nodes at the given positions
+
+ >>> queue = MinPriorityQueue()
+
+ >>> queue.push(1, 1000)
+ >>> queue.push(2, 100)
+ >>> queue.push(3, 4000)
+ >>> queue.push(4, 3000)
+
+ >>> queue.extract_min()
+ 2
+
+ >>> queue.update_key(4, 50)
+
+ >>> queue.extract_min()
+ 4
+ >>> queue.extract_min()
+ 1
+ >>> queue.extract_min()
+ 3
+ """
def __init__(self) -> None:
self.heap: list[tuple[T, int]] = []
@@ -123,6 +185,13 @@
class GraphUndirectedWeighted[T]:
+ """
+ Graph Undirected Weighted Class
+
+ Functions:
+ add_node: function to add a node in the graph
+ add_edge: function to add an edge between 2 nodes in the graph
+ """
def __init__(self) -> None:
self.connections: dict[T, dict[T, int]] = {}
@@ -151,6 +220,24 @@ def prims_algo[T](
graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
+ """
+ >>> graph = GraphUndirectedWeighted()
+
+ >>> graph.add_edge("a", "b", 3)
+ >>> graph.add_edge("b", "c", 10)
+ >>> graph.add_edge("c", "d", 5)
+ >>> graph.add_edge("a", "c", 15)
+ >>> graph.add_edge("b", "d", 100)
+
+ >>> dist, parent = prims_algo(graph)
+
+ >>> abs(dist["a"] - dist["b"])
+ 3
+ >>> abs(dist["d"] - dist["b"])
+ 15
+ >>> abs(dist["a"] - dist["c"])
+ 13
+ """
# prim's algorithm for minimum spanning tree
dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize)
parent: dict[T, T | None] = dict.fromkeys(graph.connections)
@@ -179,4 +266,4 @@ dist[neighbour] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(neighbour, dist[neighbour])
parent[neighbour] = node
- return dist, parent+ return dist, parent
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/minimum_spanning_tree_prims2.py |
Write clean docstrings for readability |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
def __init__(self, components: Collection[float] | None = None) -> None:
if components is None:
components = []
self.__components = list(components)
def __len__(self) -> int:
return len(self.__components)
def __str__(self) -> str:
return "(" + ",".join(map(str, self.__components)) + ")"
def __add__(self, other: Vector) -> Vector:
size = len(self)
if size == len(other):
result = [self.__components[i] + other.component(i) for i in range(size)]
return Vector(result)
else:
raise Exception("must have the same size")
def __sub__(self, other: Vector) -> Vector:
size = len(self)
if size == len(other):
result = [self.__components[i] - other.component(i) for i in range(size)]
return Vector(result)
else: # error case
raise Exception("must have the same size")
def __eq__(self, other: object) -> bool:
if not isinstance(other, Vector):
return NotImplemented
if len(self) != len(other):
return False
return all(self.component(i) == other.component(i) for i in range(len(self)))
@overload
def __mul__(self, other: float) -> Vector: ...
@overload
def __mul__(self, other: Vector) -> float: ...
def __mul__(self, other: float | Vector) -> float | Vector:
if isinstance(other, (float, int)):
ans = [c * other for c in self.__components]
return Vector(ans)
elif isinstance(other, Vector) and len(self) == len(other):
size = len(self)
prods = [self.__components[i] * other.component(i) for i in range(size)]
return sum(prods)
else: # error case
raise Exception("invalid operand!")
def copy(self) -> Vector:
return Vector(self.__components)
def component(self, i: int) -> float:
if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("index out of range")
def change_component(self, pos: int, value: float) -> None:
# precondition
assert -len(self.__components) <= pos < len(self.__components)
self.__components[pos] = value
def euclidean_length(self) -> float:
if len(self.__components) == 0:
raise Exception("Vector is empty")
squares = [c**2 for c in self.__components]
return math.sqrt(sum(squares))
def angle(self, other: Vector, deg: bool = False) -> float:
num = self * other
den = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den))
else:
return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
# precondition
assert isinstance(dimension, int)
return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
# precondition
assert isinstance(dimension, int)
assert isinstance(pos, int)
ans = [0] * dimension
ans[pos] = 1
return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
# precondition
assert isinstance(x, Vector)
assert isinstance(y, Vector)
assert isinstance(scalar, (int, float))
return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
random.seed(None)
ans = [random.randint(a, b) for _ in range(n)]
return Vector(ans)
class Matrix:
def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
self.__matrix = matrix
self.__width = w
self.__height = h
def __str__(self) -> str:
ans = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def __add__(self, other: Matrix) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = [
self.__matrix[i][j] + other.component(i, j)
for j in range(self.__width)
]
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__(self, other: Matrix) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = [
self.__matrix[i][j] - other.component(i, j)
for j in range(self.__width)
]
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrices must have the same dimension!")
@overload
def __mul__(self, other: float) -> Matrix: ...
@overload
def __mul__(self, other: Vector) -> Vector: ...
def __mul__(self, other: float | Vector) -> Vector | Matrix:
if isinstance(other, Vector): # matrix-vector
if len(other) == self.__width:
ans = zero_vector(self.__height)
for i in range(self.__height):
prods = [
self.__matrix[i][j] * other.component(j)
for j in range(self.__width)
]
ans.change_component(i, sum(prods))
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!"
)
elif isinstance(other, (int, float)): # matrix-scalar
matrix = [
[self.__matrix[i][j] * other for j in range(self.__width)]
for i in range(self.__height)
]
return Matrix(matrix, self.__width, self.__height)
return None
def height(self) -> int:
return self.__height
def width(self) -> int:
return self.__width
def component(self, x: int, y: int) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds")
def change_component(self, x: int, y: int, value: float) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception("change_component: indices out of bounds")
def minor(self, x: int, y: int) -> float:
if self.__height != self.__width:
raise Exception("Matrix is not square")
minor = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(minor)):
minor[i] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
def cofactor(self, x: int, y: int) -> float:
if self.__height != self.__width:
raise Exception("Matrix is not square")
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(x, y)
else:
raise Exception("Indices out of bounds")
def determinant(self) -> float:
if self.__height != self.__width:
raise Exception("Matrix is not square")
if self.__height < 1:
raise Exception("Matrix has no element")
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
cofactor_prods = [
self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
]
return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
ans: list[list[float]] = [[0] * n for _ in range(n)]
return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
random.seed(None)
matrix: list[list[float]] = [
[random.randint(a, b) for _ in range(width)] for _ in range(height)
]
return Matrix(matrix, width, height) | --- +++ @@ -1,3 +1,23 @@+"""
+Created on Mon Feb 26 14:29:11 2018
+
+@author: Christian Bender
+@license: MIT-license
+
+This module contains some useful classes and functions for dealing
+with linear algebra in python.
+
+Overview:
+
+- class Vector
+- function zero_vector(dimension)
+- function unit_basis_vector(dimension, pos)
+- function axpy(scalar, vector1, vector2)
+- function random_vector(N, a, b)
+- class Matrix
+- function square_zero_matrix(N)
+- function random_matrix(W, H, a, b)
+"""
from __future__ import annotations
@@ -8,19 +28,53 @@
class Vector:
+ """
+ This class represents a vector of arbitrary size.
+ You need to give the vector components.
+
+ Overview of the methods:
+
+ __init__(components: Collection[float] | None): init the vector
+ __len__(): gets the size of the vector (number of components)
+ __str__(): returns a string representation
+ __add__(other: Vector): vector addition
+ __sub__(other: Vector): vector subtraction
+ __mul__(other: float): scalar multiplication
+ __mul__(other: Vector): dot product
+ copy(): copies this vector and returns it
+ component(i): gets the i-th component (0-indexed)
+ change_component(pos: int, value: float): changes specified component
+ euclidean_length(): returns the euclidean length of the vector
+ angle(other: Vector, deg: bool): returns the angle between two vectors
+ """
def __init__(self, components: Collection[float] | None = None) -> None:
+ """
+ input: components or nothing
+ simple constructor for init the vector
+ """
if components is None:
components = []
self.__components = list(components)
def __len__(self) -> int:
+ """
+ returns the size of the vector
+ """
return len(self.__components)
def __str__(self) -> str:
+ """
+ returns a string representation of the vector
+ """
return "(" + ",".join(map(str, self.__components)) + ")"
def __add__(self, other: Vector) -> Vector:
+ """
+ input: other vector
+ assumes: other vector has the same size
+ returns a new vector that represents the sum.
+ """
size = len(self)
if size == len(other):
result = [self.__components[i] + other.component(i) for i in range(size)]
@@ -29,6 +83,11 @@ raise Exception("must have the same size")
def __sub__(self, other: Vector) -> Vector:
+ """
+ input: other vector
+ assumes: other vector has the same size
+ returns a new vector that represents the difference.
+ """
size = len(self)
if size == len(other):
result = [self.__components[i] - other.component(i) for i in range(size)]
@@ -37,6 +96,9 @@ raise Exception("must have the same size")
def __eq__(self, other: object) -> bool:
+ """
+ performs the comparison between two vectors
+ """
if not isinstance(other, Vector):
return NotImplemented
if len(self) != len(other):
@@ -50,6 +112,10 @@ def __mul__(self, other: Vector) -> float: ...
def __mul__(self, other: float | Vector) -> float | Vector:
+ """
+ mul implements the scalar multiplication
+ and the dot-product
+ """
if isinstance(other, (float, int)):
ans = [c * other for c in self.__components]
return Vector(ans)
@@ -61,26 +127,64 @@ raise Exception("invalid operand!")
def copy(self) -> Vector:
+ """
+ copies this vector and returns it.
+ """
return Vector(self.__components)
def component(self, i: int) -> float:
+ """
+ input: index (0-indexed)
+ output: the i-th component of the vector.
+ """
if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
return self.__components[i]
else:
raise Exception("index out of range")
def change_component(self, pos: int, value: float) -> None:
+ """
+ input: an index (pos) and a value
+ changes the specified component (pos) with the
+ 'value'
+ """
# precondition
assert -len(self.__components) <= pos < len(self.__components)
self.__components[pos] = value
def euclidean_length(self) -> float:
+ """
+ returns the euclidean length of the vector
+
+ >>> Vector([2, 3, 4]).euclidean_length()
+ 5.385164807134504
+ >>> Vector([1]).euclidean_length()
+ 1.0
+ >>> Vector([0, -1, -2, -3, 4, 5, 6]).euclidean_length()
+ 9.539392014169456
+ >>> Vector([]).euclidean_length()
+ Traceback (most recent call last):
+ ...
+ Exception: Vector is empty
+ """
if len(self.__components) == 0:
raise Exception("Vector is empty")
squares = [c**2 for c in self.__components]
return math.sqrt(sum(squares))
def angle(self, other: Vector, deg: bool = False) -> float:
+ """
+ find angle between two Vector (self, Vector)
+
+ >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]))
+ 1.4906464636572374
+ >>> Vector([3, 4, -1]).angle(Vector([2, -1, 1]), deg = True)
+ 85.40775111366095
+ >>> Vector([3, 4, -1]).angle(Vector([2, -1]))
+ Traceback (most recent call last):
+ ...
+ Exception: invalid operand!
+ """
num = self * other
den = self.euclidean_length() * other.euclidean_length()
if deg:
@@ -90,12 +194,19 @@
def zero_vector(dimension: int) -> Vector:
+ """
+ returns a zero-vector of size 'dimension'
+ """
# precondition
assert isinstance(dimension, int)
return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
+ """
+ returns a unit basis vector with a One
+ at index 'pos' (indexing at 0)
+ """
# precondition
assert isinstance(dimension, int)
assert isinstance(pos, int)
@@ -105,6 +216,11 @@
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
+ """
+ input: a 'scalar' and two vectors 'x' and 'y'
+ output: a vector
+ computes the axpy operation
+ """
# precondition
assert isinstance(x, Vector)
assert isinstance(y, Vector)
@@ -113,19 +229,51 @@
def random_vector(n: int, a: int, b: int) -> Vector:
+ """
+ input: size (N) of the vector.
+ random range (a,b)
+ output: returns a random vector of size N, with
+ random integer components between 'a' and 'b'.
+ """
random.seed(None)
ans = [random.randint(a, b) for _ in range(n)]
return Vector(ans)
class Matrix:
+ """
+ class: Matrix
+ This class represents an arbitrary matrix.
+
+ Overview of the methods:
+
+ __init__():
+ __str__(): returns a string representation
+ __add__(other: Matrix): matrix addition
+ __sub__(other: Matrix): matrix subtraction
+ __mul__(other: float): scalar multiplication
+ __mul__(other: Vector): vector multiplication
+ height() : returns height
+ width() : returns width
+ component(x: int, y: int): returns specified component
+ change_component(x: int, y: int, value: float): changes specified component
+ minor(x: int, y: int): returns minor along (x, y)
+ cofactor(x: int, y: int): returns cofactor along (x, y)
+ determinant() : returns determinant
+ """
def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
+ """
+ simple constructor for initializing the matrix with components.
+ """
self.__matrix = matrix
self.__width = w
self.__height = h
def __str__(self) -> str:
+ """
+ returns a string representation of this matrix.
+ """
ans = ""
for i in range(self.__height):
ans += "|"
@@ -137,6 +285,9 @@ return ans
def __add__(self, other: Matrix) -> Matrix:
+ """
+ implements matrix addition.
+ """
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
@@ -150,6 +301,9 @@ raise Exception("matrix must have the same dimension!")
def __sub__(self, other: Matrix) -> Matrix:
+ """
+ implements matrix subtraction.
+ """
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
@@ -169,6 +323,10 @@ def __mul__(self, other: Vector) -> Vector: ...
def __mul__(self, other: float | Vector) -> Vector | Matrix:
+ """
+ implements the matrix-vector multiplication.
+ implements the matrix-scalar multiplication
+ """
if isinstance(other, Vector): # matrix-vector
if len(other) == self.__width:
ans = zero_vector(self.__height)
@@ -193,24 +351,39 @@ return None
def height(self) -> int:
+ """
+ getter for the height
+ """
return self.__height
def width(self) -> int:
+ """
+ getter for the width
+ """
return self.__width
def component(self, x: int, y: int) -> float:
+ """
+ returns the specified (x,y) component
+ """
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds")
def change_component(self, x: int, y: int, value: float) -> None:
+ """
+ changes the x-y component of this matrix
+ """
if 0 <= x < self.__height and 0 <= y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception("change_component: indices out of bounds")
def minor(self, x: int, y: int) -> float:
+ """
+ returns the minor along (x, y)
+ """
if self.__height != self.__width:
raise Exception("Matrix is not square")
minor = self.__matrix[:x] + self.__matrix[x + 1 :]
@@ -219,6 +392,9 @@ return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
def cofactor(self, x: int, y: int) -> float:
+ """
+ returns the cofactor (signed minor) along (x, y)
+ """
if self.__height != self.__width:
raise Exception("Matrix is not square")
if 0 <= x < self.__height and 0 <= y < self.__width:
@@ -227,6 +403,9 @@ raise Exception("Indices out of bounds")
def determinant(self) -> float:
+ """
+ returns the determinant of an nxn matrix using Laplace expansion
+ """
if self.__height != self.__width:
raise Exception("Matrix is not square")
if self.__height < 1:
@@ -246,13 +425,20 @@
def square_zero_matrix(n: int) -> Matrix:
+ """
+ returns a square zero-matrix of dimension NxN
+ """
ans: list[list[float]] = [[0] * n for _ in range(n)]
return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
+ """
+ returns a random matrix WxH with integer components
+ between 'a' and 'b'
+ """
random.seed(None)
matrix: list[list[float]] = [
[random.randint(a, b) for _ in range(width)] for _ in range(height)
]
- return Matrix(matrix, width, height)+ return Matrix(matrix, width, height)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/src/lib.py |
Write docstrings for utility functions | # floyd_warshall.py
def _print_dist(dist, v):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
for i in range(v):
for j in range(v):
if dist[i][j] != float("inf"):
print(int(dist[i][j]), end="\t")
else:
print("INF", end="\t")
print()
def floyd_warshall(graph, v):
dist = [[float("inf") for _ in range(v)] for _ in range(v)]
for i in range(v):
for j in range(v):
dist[i][j] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(v):
# looping through rows of graph array
for i in range(v):
# looping through columns of graph array
for j in range(v):
if (
dist[i][k] != float("inf")
and dist[k][j] != float("inf")
and dist[i][k] + dist[k][j] < dist[i][j]
):
dist[i][j] = dist[i][k] + dist[k][j]
_print_dist(dist, v)
return dist, v
if __name__ == "__main__":
v = int(input("Enter number of vertices: "))
e = int(input("Enter number of edges: "))
graph = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
graph[i][i] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
src = int(input("Enter source:"))
dst = int(input("Enter destination:"))
weight = float(input("Enter weight:"))
graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0 | --- +++ @@ -1,4 +1,8 @@ # floyd_warshall.py
+"""
+The problem is to find the shortest distance between all pairs of vertices in a
+weighted directed graph that can have negative edge weights.
+"""
def _print_dist(dist, v):
@@ -13,6 +17,21 @@
def floyd_warshall(graph, v):
+ """
+ :param graph: 2D array calculated from weight[edge[i, j]]
+ :type graph: List[List[float]]
+ :param v: number of vertices
+ :type v: int
+ :return: shortest distance between all vertex pairs
+ distance[u][v] will contain the shortest distance from vertex u to v.
+
+ 1. For all edges from v to n, distance[i][j] = weight(edge(i, j)).
+ 3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] +
+ distance[k][j]) for each possible pair i, j of vertices.
+ 4. The above is repeated for each vertex k in the graph.
+ 5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is
+ updated to the next vertex[i][k].
+ """
dist = [[float("inf") for _ in range(v)] for _ in range(v)]
@@ -80,4 +99,4 @@ # # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
- # INF 1 0+ # INF 1 0
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/graphs_floyd_warshall.py |
Help me comply with documentation standards | import numpy as np
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
class GradientBoostingClassifier:
def __init__(self, n_estimators: int = 100, learning_rate: float = 0.1) -> None:
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.models: list[tuple[DecisionTreeRegressor, float]] = []
def fit(self, features: np.ndarray, target: np.ndarray) -> None:
for _ in range(self.n_estimators):
# Calculate the pseudo-residuals
residuals = -self.gradient(target, self.predict(features))
# Fit a weak learner (e.g., decision tree) to the residuals
model = DecisionTreeRegressor(max_depth=1)
model.fit(features, residuals)
# Update the model by adding the weak learner with a learning rate
self.models.append((model, self.learning_rate))
def predict(self, features: np.ndarray) -> np.ndarray:
# Initialize predictions with zeros
predictions = np.zeros(features.shape[0])
for model, learning_rate in self.models:
predictions += learning_rate * model.predict(features)
return np.sign(predictions) # Convert to binary predictions (-1 or 1)
def gradient(self, target: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
return -target / (1 + np.exp(target * y_pred))
if __name__ == "__main__":
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}") | --- +++ @@ -7,11 +7,43 @@
class GradientBoostingClassifier:
def __init__(self, n_estimators: int = 100, learning_rate: float = 0.1) -> None:
+ """
+ Initialize a GradientBoostingClassifier.
+
+ Parameters:
+ - n_estimators (int): The number of weak learners to train.
+ - learning_rate (float): The learning rate for updating the model.
+
+ Attributes:
+ - n_estimators (int): The number of weak learners.
+ - learning_rate (float): The learning rate.
+ - models (list): A list to store the trained weak learners.
+ """
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.models: list[tuple[DecisionTreeRegressor, float]] = []
def fit(self, features: np.ndarray, target: np.ndarray) -> None:
+ """
+ Fit the GradientBoostingClassifier to the training data.
+
+ Parameters:
+ - features (np.ndarray): The training features.
+ - target (np.ndarray): The target values.
+
+ Returns:
+ None
+
+ >>> import numpy as np
+ >>> from sklearn.datasets import load_iris
+ >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
+ >>> iris = load_iris()
+ >>> X, y = iris.data, iris.target
+ >>> clf.fit(X, y)
+ >>> # Check if the model is trained
+ >>> len(clf.models) == 100
+ True
+ """
for _ in range(self.n_estimators):
# Calculate the pseudo-residuals
residuals = -self.gradient(target, self.predict(features))
@@ -22,6 +54,26 @@ self.models.append((model, self.learning_rate))
def predict(self, features: np.ndarray) -> np.ndarray:
+ """
+ Make predictions on input data.
+
+ Parameters:
+ - features (np.ndarray): The input data for making predictions.
+
+ Returns:
+ - np.ndarray: An array of binary predictions (-1 or 1).
+
+ >>> import numpy as np
+ >>> from sklearn.datasets import load_iris
+ >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
+ >>> iris = load_iris()
+ >>> X, y = iris.data, iris.target
+ >>> clf.fit(X, y)
+ >>> y_pred = clf.predict(X)
+ >>> # Check if the predictions have the correct shape
+ >>> y_pred.shape == y.shape
+ True
+ """
# Initialize predictions with zeros
predictions = np.zeros(features.shape[0])
for model, learning_rate in self.models:
@@ -29,6 +81,25 @@ return np.sign(predictions) # Convert to binary predictions (-1 or 1)
def gradient(self, target: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
+ """
+ Calculate the negative gradient (pseudo-residuals) for logistic loss.
+
+ Parameters:
+ - target (np.ndarray): The target values.
+ - y_pred (np.ndarray): The predicted values.
+
+ Returns:
+ - np.ndarray: An array of pseudo-residuals.
+
+ >>> import numpy as np
+ >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
+ >>> target = np.array([0, 1, 0, 1])
+ >>> y_pred = np.array([0.2, 0.8, 0.3, 0.7])
+ >>> residuals = clf.gradient(target, y_pred)
+ >>> # Check if residuals have the correct shape
+ >>> residuals.shape == target.shape
+ True
+ """
return -target / (1 + np.exp(target * y_pred))
@@ -44,4 +115,4 @@
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
- print(f"Accuracy: {accuracy:.2f}")+ print(f"Accuracy: {accuracy:.2f}")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/gradient_boosting_classifier.py |
Document this script properly | #!/usr/bin/python
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[67]:
# sigmoid function or logistic function is used as a hypothesis function in
# classification problems
def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
return 1 / (1 + np.exp(-z))
def cost_function(h: np.ndarray, y: np.ndarray) -> float:
return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())
def log_likelihood(x, y, weights):
scores = np.dot(x, weights)
return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, X is the feature matrix,y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
theta = np.zeros(x.shape[1])
for iterations in range(max_iterations):
z = np.dot(x, theta)
h = sigmoid_function(z)
gradient = np.dot(x.T, h - y) / y.size
theta = theta - alpha * gradient # updating the weights
z = np.dot(x, theta)
h = sigmoid_function(z)
j = cost_function(h, y)
if iterations % 100 == 0:
print(f"loss: {j} \t") # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
import doctest
doctest.testmod()
iris = datasets.load_iris()
x = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
theta = logistic_reg(alpha, x, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
def predict_prob(x):
return sigmoid_function(
np.dot(x, theta)
) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show() | --- +++ @@ -8,6 +8,12 @@
# importing all the required libraries
+"""
+Implementing logistic regression for classification problem
+Helpful resources:
+Coursera ML course
+https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac
+"""
import numpy as np
from matplotlib import pyplot as plt
@@ -23,10 +29,77 @@
def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
+ """
+ Also known as Logistic Function.
+
+ 1
+ f(x) = -------
+ 1 + e⁻ˣ
+
+ The sigmoid function approaches a value of 1 as its input 'x' becomes
+ increasing positive. Opposite for negative values.
+
+ Reference: https://en.wikipedia.org/wiki/Sigmoid_function
+
+ @param z: input to the function
+ @returns: returns value in the range 0 to 1
+
+ Examples:
+ >>> float(sigmoid_function(4))
+ 0.9820137900379085
+ >>> sigmoid_function(np.array([-3, 3]))
+ array([0.04742587, 0.95257413])
+ >>> sigmoid_function(np.array([-3, 3, 1]))
+ array([0.04742587, 0.95257413, 0.73105858])
+ >>> sigmoid_function(np.array([-0.01, -2, -1.9]))
+ array([0.49750002, 0.11920292, 0.13010847])
+ >>> sigmoid_function(np.array([-1.3, 5.3, 12]))
+ array([0.21416502, 0.9950332 , 0.99999386])
+ >>> sigmoid_function(np.array([0.01, 0.02, 4.1]))
+ array([0.50249998, 0.50499983, 0.9836975 ])
+ >>> sigmoid_function(np.array([0.8]))
+ array([0.68997448])
+ """
return 1 / (1 + np.exp(-z))
def cost_function(h: np.ndarray, y: np.ndarray) -> float:
+ """
+ Cost function quantifies the error between predicted and expected values.
+ The cost function used in Logistic Regression is called Log Loss
+ or Cross Entropy Function.
+
+ J(θ) = (1/m) * Σ [ -y * log(hθ(x)) - (1 - y) * log(1 - hθ(x)) ]
+
+ Where:
+ - J(θ) is the cost that we want to minimize during training
+ - m is the number of training examples
+ - Σ represents the summation over all training examples
+ - y is the actual binary label (0 or 1) for a given example
+ - hθ(x) is the predicted probability that x belongs to the positive class
+
+ @param h: the output of sigmoid function. It is the estimated probability
+ that the input example 'x' belongs to the positive class
+
+ @param y: the actual binary label associated with input example 'x'
+
+ Examples:
+ >>> estimations = sigmoid_function(np.array([0.3, -4.3, 8.1]))
+ >>> cost_function(h=estimations,y=np.array([1, 0, 1]))
+ 0.18937868932131605
+ >>> estimations = sigmoid_function(np.array([4, 3, 1]))
+ >>> cost_function(h=estimations,y=np.array([1, 0, 0]))
+ 1.459999655669926
+ >>> estimations = sigmoid_function(np.array([4, -3, -1]))
+ >>> cost_function(h=estimations,y=np.array([1,0,0]))
+ 0.1266663223365915
+ >>> estimations = sigmoid_function(0)
+ >>> cost_function(h=estimations,y=np.array([1]))
+ 0.6931471805599453
+
+ References:
+ - https://en.wikipedia.org/wiki/Logistic_regression
+ """
return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())
@@ -83,4 +156,4 @@ plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
- plt.show()+ plt.show()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/logistic_regression.py |
Auto-generate documentation strings for this file | import numpy as np
def power_iteration(
input_matrix: np.ndarray,
vector: np.ndarray,
error_tol: float = 1e-12,
max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
# Ensure matrix is square.
assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
# Ensure proper dimensionality.
assert np.shape(input_matrix)[0] == np.shape(vector)[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
is_complex = np.iscomplexobj(input_matrix)
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(input_matrix, input_matrix.conj().T)
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
convergence = False
lambda_previous = 0
iterations = 0
error = 1e12
while not convergence:
# Multiple matrix by the vector.
w = np.dot(input_matrix, vector)
# Normalize the resulting output vector.
vector = w / np.linalg.norm(w)
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
vector_h = vector.conj().T if is_complex else vector.T
lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
# Check convergence.
error = np.abs(lambda_ - lambda_previous) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
convergence = True
lambda_previous = lambda_
if is_complex:
lambda_ = np.real(lambda_)
return float(lambda_), vector
def test_power_iteration() -> None:
real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
real_vector = np.array([41, 4, 20])
complex_input_matrix = real_input_matrix.astype(np.complex128)
imag_matrix = np.triu(1j * complex_input_matrix, 1)
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
complex_vector = np.array([41, 4, 20]).astype(np.complex128)
for problem_type in ["real", "complex"]:
if problem_type == "real":
input_matrix = real_input_matrix
vector = real_vector
elif problem_type == "complex":
input_matrix = complex_input_matrix
vector = complex_vector
# Our implementation.
eigen_value, eigen_vector = power_iteration(input_matrix, vector)
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
# Last eigenvalue is the maximum one.
eigen_value_max = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
eigen_vector_max = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration() | --- +++ @@ -7,6 +7,35 @@ error_tol: float = 1e-12,
max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
+ """
+ Power Iteration.
+ Find the largest eigenvalue and corresponding eigenvector
+ of matrix input_matrix given a random vector in the same space.
+ Will work so long as vector has component of largest eigenvector.
+ input_matrix must be either real or Hermitian.
+
+ Input
+ input_matrix: input matrix whose largest eigenvalue we will find.
+ Numpy array. np.shape(input_matrix) == (N,N).
+ vector: random initial vector in same space as matrix.
+ Numpy array. np.shape(vector) == (N,) or (N,1)
+
+ Output
+ largest_eigenvalue: largest eigenvalue of the matrix input_matrix.
+ Float. Scalar.
+ largest_eigenvector: eigenvector corresponding to largest_eigenvalue.
+ Numpy array. np.shape(largest_eigenvector) == (N,) or (N,1).
+
+ >>> import numpy as np
+ >>> input_matrix = np.array([
+ ... [41, 4, 20],
+ ... [ 4, 26, 30],
+ ... [20, 30, 50]
+ ... ])
+ >>> vector = np.array([41,4,20])
+ >>> power_iteration(input_matrix,vector)
+ (79.66086378788381, array([0.44472726, 0.46209842, 0.76725662]))
+ """
# Ensure matrix is square.
assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
@@ -53,6 +82,9 @@
def test_power_iteration() -> None:
+ """
+ >>> test_power_iteration() # self running tests
+ """
real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
real_vector = np.array([41, 4, 20])
complex_input_matrix = real_input_matrix.astype(np.complex128)
@@ -93,4 +125,4 @@ import doctest
doctest.testmod()
- test_power_iteration()+ test_power_iteration()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/src/power_iteration.py |
Fully document this Python code with docstrings |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
v_star = v.conjugate().T
v_star_dot = v_star.dot(a)
assert isinstance(v_star_dot, np.ndarray)
return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
assert is_hermitian(a), f"{a} is not hermitian."
print(rayleigh_quotient(a, v))
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
assert is_hermitian(a), f"{a} is not hermitian."
assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | --- +++ @@ -1,3 +1,6 @@+"""
+https://en.wikipedia.org/wiki/Rayleigh_quotient
+"""
from typing import Any
@@ -5,10 +8,43 @@
def is_hermitian(matrix: np.ndarray) -> bool:
+ """
+ Checks if a matrix is Hermitian.
+ >>> import numpy as np
+ >>> A = np.array([
+ ... [2, 2+1j, 4],
+ ... [2-1j, 3, 1j],
+ ... [4, -1j, 1]])
+ >>> is_hermitian(A)
+ True
+ >>> A = np.array([
+ ... [2, 2+1j, 4+1j],
+ ... [2-1j, 3, 1j],
+ ... [4, -1j, 1]])
+ >>> is_hermitian(A)
+ False
+ """
return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
+ """
+ Returns the Rayleigh quotient of a Hermitian matrix A and
+ vector v.
+ >>> import numpy as np
+ >>> A = np.array([
+ ... [1, 2, 4],
+ ... [2, 3, -1],
+ ... [4, -1, 1]
+ ... ])
+ >>> v = np.array([
+ ... [1],
+ ... [2],
+ ... [3]
+ ... ])
+ >>> rayleigh_quotient(A, v)
+ array([[3.]])
+ """
v_star = v.conjugate().T
v_star_dot = v_star.dot(a)
assert isinstance(v_star_dot, np.ndarray)
@@ -30,4 +66,4 @@ import doctest
doctest.testmod()
- tests()+ tests()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/src/rayleigh_quotient.py |
Add missing documentation to my Python functions |
from math import cos, sin
def scaling(scaling_factor: float) -> list[list[float]]:
scaling_factor = float(scaling_factor)
return [[scaling_factor * int(x == y) for x in range(2)] for y in range(2)]
def rotation(angle: float) -> list[list[float]]:
c, s = cos(angle), sin(angle)
return [[c, -s], [s, c]]
def projection(angle: float) -> list[list[float]]:
c, s = cos(angle), sin(angle)
cs = c * s
return [[c * c, cs], [cs, s * s]]
def reflection(angle: float) -> list[list[float]]:
c, s = cos(angle), sin(angle)
cs = c * s
return [[2 * c - 1, 2 * cs], [2 * cs, 2 * s - 1]]
print(f" {scaling(5) = }")
print(f" {rotation(45) = }")
print(f"{projection(45) = }")
print(f"{reflection(45) = }") | --- +++ @@ -1,24 +1,58 @@+"""
+2D Transformations are regularly used in Linear Algebra.
+
+I have added the codes for reflection, projection, scaling and rotation 2D matrices.
+
+.. code-block:: python
+
+ scaling(5) = [[5.0, 0.0], [0.0, 5.0]]
+ rotation(45) = [[0.5253219888177297, -0.8509035245341184],
+ [0.8509035245341184, 0.5253219888177297]]
+ projection(45) = [[0.27596319193541496, 0.446998331800279],
+ [0.446998331800279, 0.7240368080645851]]
+ reflection(45) = [[0.05064397763545947, 0.893996663600558],
+ [0.893996663600558, 0.7018070490682369]]
+"""
from math import cos, sin
def scaling(scaling_factor: float) -> list[list[float]]:
+ """
+ >>> scaling(5)
+ [[5.0, 0.0], [0.0, 5.0]]
+ """
scaling_factor = float(scaling_factor)
return [[scaling_factor * int(x == y) for x in range(2)] for y in range(2)]
def rotation(angle: float) -> list[list[float]]:
+ """
+ >>> rotation(45) # doctest: +NORMALIZE_WHITESPACE
+ [[0.5253219888177297, -0.8509035245341184],
+ [0.8509035245341184, 0.5253219888177297]]
+ """
c, s = cos(angle), sin(angle)
return [[c, -s], [s, c]]
def projection(angle: float) -> list[list[float]]:
+ """
+ >>> projection(45) # doctest: +NORMALIZE_WHITESPACE
+ [[0.27596319193541496, 0.446998331800279],
+ [0.446998331800279, 0.7240368080645851]]
+ """
c, s = cos(angle), sin(angle)
cs = c * s
return [[c * c, cs], [cs, s * s]]
def reflection(angle: float) -> list[list[float]]:
+ """
+ >>> reflection(45) # doctest: +NORMALIZE_WHITESPACE
+ [[0.05064397763545947, 0.893996663600558],
+ [0.893996663600558, 0.7018070490682369]]
+ """
c, s = cos(angle), sin(angle)
cs = c * s
return [[2 * c - 1, 2 * cs], [2 * cs, 2 * s - 1]]
@@ -27,4 +61,4 @@ print(f" {scaling(5) = }")
print(f" {rotation(45) = }")
print(f"{projection(45) = }")
-print(f"{reflection(45) = }")+print(f"{reflection(45) = }")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_algebra/src/transformations_2d.py |
Insert docstrings into my code | import numpy as np
""" Here I implemented the scoring functions.
MAE, MSE, RMSE, RMSLE are included.
Those are used for calculating differences between
predicted values and actual values.
Metrics are slightly differentiated. Sometimes squared, rooted,
even log is used.
Using log and roots can be perceived as tools for penalizing big
errors. However, using appropriate metrics depends on the situations,
and types of data
"""
# Mean Absolute Error
def mae(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
difference = abs(predict - actual)
score = difference.mean()
return score
# Mean Squared Error
def mse(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
square_diff = np.square(difference)
score = square_diff.mean()
return score
# Root Mean Squared Error
def rmse(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
square_diff = np.square(difference)
mean_square_diff = square_diff.mean()
score = np.sqrt(mean_square_diff)
return score
# Root Mean Square Logarithmic Error
def rmsle(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
log_predict = np.log(predict + 1)
log_actual = np.log(actual + 1)
difference = log_predict - log_actual
square_diff = np.square(difference)
mean_square_diff = square_diff.mean()
score = np.sqrt(mean_square_diff)
return score
# Mean Bias Deviation
def mbd(predict, actual):
predict = np.array(predict)
actual = np.array(actual)
difference = predict - actual
numerator = np.sum(difference) / len(predict)
denumerator = np.sum(actual) / len(predict)
# print(numerator, denumerator)
score = float(numerator) / denumerator * 100
return score
def manual_accuracy(predict, actual):
return np.mean(np.array(actual) == np.array(predict)) | --- +++ @@ -17,6 +17,16 @@
# Mean Absolute Error
def mae(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [1,2,3];predict = [1,4,3]
+ >>> float(np.around(mae(predict,actual),decimals = 2))
+ 0.67
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> float(mae(predict,actual))
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -28,6 +38,16 @@
# Mean Squared Error
def mse(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [1,2,3];predict = [1,4,3]
+ >>> float(np.around(mse(predict,actual),decimals = 2))
+ 1.33
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> float(mse(predict,actual))
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -40,6 +60,16 @@
# Root Mean Squared Error
def rmse(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> actual = [1,2,3];predict = [1,4,3]
+ >>> float(np.around(rmse(predict,actual),decimals = 2))
+ 1.15
+
+ >>> actual = [1,1,1];predict = [1,1,1]
+ >>> float(rmse(predict,actual))
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -52,6 +82,14 @@
# Root Mean Square Logarithmic Error
def rmsle(predict, actual):
+ """
+ Examples(rounded for precision):
+ >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2))
+ 0.75
+
+ >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1]))
+ 0.0
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -69,6 +107,22 @@
# Mean Bias Deviation
def mbd(predict, actual):
+ """
+ This value is Negative, if the model underpredicts,
+ positive, if it overpredicts.
+
+ Example(rounded for precision):
+
+ Here the model overpredicts
+ >>> actual = [1,2,3];predict = [2,3,4]
+ >>> float(np.around(mbd(predict,actual),decimals = 2))
+ 50.0
+
+ Here the model underpredicts
+ >>> actual = [1,2,3];predict = [0,1,1]
+ >>> float(np.around(mbd(predict,actual),decimals = 2))
+ -66.67
+ """
predict = np.array(predict)
actual = np.array(actual)
@@ -82,4 +136,4 @@
def manual_accuracy(predict, actual):
- return np.mean(np.array(actual) == np.array(predict))+ return np.mean(np.array(actual) == np.array(predict))
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/scoring_functions.py |
Write beginner-friendly docstrings |
import logging
import numpy as np
import scipy.fftpack as fft
from scipy.signal import get_window
logging.basicConfig(filename=f"{__file__}.log", level=logging.INFO)
def mfcc(
audio: np.ndarray,
sample_rate: int,
ftt_size: int = 1024,
hop_length: int = 20,
mel_filter_num: int = 10,
dct_filter_num: int = 40,
) -> np.ndarray:
logging.info(f"Sample rate: {sample_rate}Hz")
logging.info(f"Audio duration: {len(audio) / sample_rate}s")
logging.info(f"Audio min: {np.min(audio)}")
logging.info(f"Audio max: {np.max(audio)}")
# normalize audio
audio_normalized = normalize(audio)
logging.info(f"Normalized audio min: {np.min(audio_normalized)}")
logging.info(f"Normalized audio max: {np.max(audio_normalized)}")
# frame audio into
audio_framed = audio_frames(
audio_normalized, sample_rate, ftt_size=ftt_size, hop_length=hop_length
)
logging.info(f"Framed audio shape: {audio_framed.shape}")
logging.info(f"First frame: {audio_framed[0]}")
# convert to frequency domain
# For simplicity we will choose the Hanning window.
window = get_window("hann", ftt_size, fftbins=True)
audio_windowed = audio_framed * window
logging.info(f"Windowed audio shape: {audio_windowed.shape}")
logging.info(f"First frame: {audio_windowed[0]}")
audio_fft = calculate_fft(audio_windowed, ftt_size)
logging.info(f"fft audio shape: {audio_fft.shape}")
logging.info(f"First frame: {audio_fft[0]}")
audio_power = calculate_signal_power(audio_fft)
logging.info(f"power audio shape: {audio_power.shape}")
logging.info(f"First frame: {audio_power[0]}")
filters = mel_spaced_filterbank(sample_rate, mel_filter_num, ftt_size)
logging.info(f"filters shape: {filters.shape}")
audio_filtered = np.dot(filters, np.transpose(audio_power))
audio_log = 10.0 * np.log10(audio_filtered)
logging.info(f"audio_log shape: {audio_log.shape}")
dct_filters = discrete_cosine_transform(dct_filter_num, mel_filter_num)
cepstral_coefficents = np.dot(dct_filters, audio_log)
logging.info(f"cepstral_coefficents shape: {cepstral_coefficents.shape}")
return cepstral_coefficents
def normalize(audio: np.ndarray) -> np.ndarray:
# Divide the entire audio signal by the maximum absolute value
return audio / np.max(np.abs(audio))
def audio_frames(
audio: np.ndarray,
sample_rate: int,
hop_length: int = 20,
ftt_size: int = 1024,
) -> np.ndarray:
hop_size = np.round(sample_rate * hop_length / 1000).astype(int)
# Pad the audio signal to handle edge cases
audio = np.pad(audio, int(ftt_size / 2), mode="reflect")
# Calculate the number of frames
frame_count = int((len(audio) - ftt_size) / hop_size) + 1
# Initialize an array to store the frames
frames = np.zeros((frame_count, ftt_size))
# Split the audio signal into frames
for n in range(frame_count):
frames[n] = audio[n * hop_size : n * hop_size + ftt_size]
return frames
def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarray:
# Transpose the audio data to have time in rows and channels in columns
audio_transposed = np.transpose(audio_windowed)
# Initialize an array to store the FFT results
audio_fft = np.empty(
(int(1 + ftt_size // 2), audio_transposed.shape[1]),
dtype=np.complex64,
order="F",
)
# Compute FFT for each channel
for n in range(audio_fft.shape[1]):
audio_fft[:, n] = fft.fft(audio_transposed[:, n], axis=0)[: audio_fft.shape[0]]
# Transpose the FFT results back to the original shape
return np.transpose(audio_fft)
def calculate_signal_power(audio_fft: np.ndarray) -> np.ndarray:
# Calculate the power by squaring the absolute values of the FFT coefficients
return np.square(np.abs(audio_fft))
def freq_to_mel(freq: float) -> float:
# Use the formula to convert frequency to the mel scale
return 2595.0 * np.log10(1.0 + freq / 700.0)
def mel_to_freq(mels: float) -> float:
# Use the formula to convert mel scale to frequency
return 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
def mel_spaced_filterbank(
sample_rate: int, mel_filter_num: int = 10, ftt_size: int = 1024
) -> np.ndarray:
freq_min = 0
freq_high = sample_rate // 2
logging.info(f"Minimum frequency: {freq_min}")
logging.info(f"Maximum frequency: {freq_high}")
# Calculate filter points and mel frequencies
filter_points, mel_freqs = get_filter_points(
sample_rate,
freq_min,
freq_high,
mel_filter_num,
ftt_size,
)
filters = get_filters(filter_points, ftt_size)
# normalize filters
# taken from the librosa library
enorm = 2.0 / (mel_freqs[2 : mel_filter_num + 2] - mel_freqs[:mel_filter_num])
return filters * enorm[:, np.newaxis]
def get_filters(filter_points: np.ndarray, ftt_size: int) -> np.ndarray:
num_filters = len(filter_points) - 2
filters = np.zeros((num_filters, int(ftt_size / 2) + 1))
for n in range(num_filters):
start = filter_points[n]
mid = filter_points[n + 1]
end = filter_points[n + 2]
# Linearly increase values from 0 to 1
filters[n, start:mid] = np.linspace(0, 1, mid - start)
# Linearly decrease values from 1 to 0
filters[n, mid:end] = np.linspace(1, 0, end - mid)
return filters
def get_filter_points(
sample_rate: int,
freq_min: int,
freq_high: int,
mel_filter_num: int = 10,
ftt_size: int = 1024,
) -> tuple[np.ndarray, np.ndarray]:
# Convert minimum and maximum frequencies to mel scale
fmin_mel = freq_to_mel(freq_min)
fmax_mel = freq_to_mel(freq_high)
logging.info(f"MEL min: {fmin_mel}")
logging.info(f"MEL max: {fmax_mel}")
# Generate equally spaced mel frequencies
mels = np.linspace(fmin_mel, fmax_mel, num=mel_filter_num + 2)
# Convert mel frequencies back to Hertz
freqs = mel_to_freq(mels)
# Calculate filter points as integer values
filter_points = np.floor((ftt_size + 1) / sample_rate * freqs).astype(int)
return filter_points, freqs
def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarray:
basis = np.empty((dct_filter_num, filter_num))
basis[0, :] = 1.0 / np.sqrt(filter_num)
samples = np.arange(1, 2 * filter_num, 2) * np.pi / (2.0 * filter_num)
for i in range(1, dct_filter_num):
basis[i, :] = np.cos(i * samples) * np.sqrt(2.0 / filter_num)
return basis
def example(wav_file_path: str = "./path-to-file/sample.wav") -> np.ndarray:
from scipy.io import wavfile
# Load the audio from the WAV file
sample_rate, audio = wavfile.read(wav_file_path)
# Calculate MFCCs
return mfcc(audio, sample_rate)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,61 @@+"""
+Mel Frequency Cepstral Coefficients (MFCC) Calculation
+
+MFCC is an algorithm widely used in audio and speech processing to represent the
+short-term power spectrum of a sound signal in a more compact and
+discriminative way. It is particularly popular in speech and audio processing
+tasks such as speech recognition and speaker identification.
+
+How Mel Frequency Cepstral Coefficients are Calculated:
+1. Preprocessing:
+ - Load an audio signal and normalize it to ensure that the values fall
+ within a specific range (e.g., between -1 and 1).
+ - Frame the audio signal into overlapping, fixed-length segments, typically
+ using a technique like windowing to reduce spectral leakage.
+
+2. Fourier Transform:
+ - Apply a Fast Fourier Transform (FFT) to each audio frame to convert it
+ from the time domain to the frequency domain. This results in a
+ representation of the audio frame as a sequence of frequency components.
+
+3. Power Spectrum:
+ - Calculate the power spectrum by taking the squared magnitude of each
+ frequency component obtained from the FFT. This step measures the energy
+ distribution across different frequency bands.
+
+4. Mel Filterbank:
+ - Apply a set of triangular filterbanks spaced in the Mel frequency scale
+ to the power spectrum. These filters mimic the human auditory system's
+ frequency response. Each filterbank sums the power spectrum values within
+ its band.
+
+5. Logarithmic Compression:
+ - Take the logarithm (typically base 10) of the filterbank values to
+ compress the dynamic range. This step mimics the logarithmic response of
+ the human ear to sound intensity.
+
+6. Discrete Cosine Transform (DCT):
+ - Apply the Discrete Cosine Transform to the log filterbank energies to
+ obtain the MFCC coefficients. This transformation helps decorrelate the
+ filterbank energies and captures the most important features of the audio
+ signal.
+
+7. Feature Extraction:
+ - Select a subset of the DCT coefficients to form the feature vector.
+ Often, the first few coefficients (e.g., 12-13) are used for most
+ applications.
+
+References:
+- Mel-Frequency Cepstral Coefficients (MFCCs):
+ https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+- Speech and Language Processing by Daniel Jurafsky & James H. Martin:
+ https://web.stanford.edu/~jurafsky/slp3/
+- Mel Frequency Cepstral Coefficient (MFCC) tutorial
+ http://practicalcryptography.com/miscellaneous/machine-learning
+ /guide-mel-frequency-cepstral-coefficients-mfccs/
+
+Author: Amir Lavasani
+"""
import logging
@@ -16,6 +74,32 @@ mel_filter_num: int = 10,
dct_filter_num: int = 40,
) -> np.ndarray:
+ """
+ Calculate Mel Frequency Cepstral Coefficients (MFCCs) from an audio signal.
+
+ Args:
+ audio: The input audio signal.
+ sample_rate: The sample rate of the audio signal (in Hz).
+ ftt_size: The size of the FFT window (default is 1024).
+ hop_length: The hop length for frame creation (default is 20ms).
+ mel_filter_num: The number of Mel filters (default is 10).
+ dct_filter_num: The number of DCT filters (default is 40).
+
+ Returns:
+ A matrix of MFCCs for the input audio.
+
+ Raises:
+ ValueError: If the input audio is empty.
+
+ Example:
+ >>> sample_rate = 44100 # Sample rate of 44.1 kHz
+ >>> duration = 2.0 # Duration of 1 second
+ >>> t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)
+ >>> audio = 0.5 * np.sin(2 * np.pi * 440.0 * t) # Generate a 440 Hz sine wave
+ >>> mfccs = mfcc(audio, sample_rate)
+ >>> mfccs.shape
+ (40, 101)
+ """
logging.info(f"Sample rate: {sample_rate}Hz")
logging.info(f"Audio duration: {len(audio) / sample_rate}s")
logging.info(f"Audio min: {np.min(audio)}")
@@ -66,6 +150,23 @@
def normalize(audio: np.ndarray) -> np.ndarray:
+ """
+ Normalize an audio signal by scaling it to have values between -1 and 1.
+
+ Args:
+ audio: The input audio signal.
+
+ Returns:
+ The normalized audio signal.
+
+ Examples:
+ >>> audio = np.array([1, 2, 3, 4, 5])
+ >>> normalized_audio = normalize(audio)
+ >>> float(np.max(normalized_audio))
+ 1.0
+ >>> float(np.min(normalized_audio))
+ 0.2
+ """
# Divide the entire audio signal by the maximum absolute value
return audio / np.max(np.abs(audio))
@@ -76,6 +177,25 @@ hop_length: int = 20,
ftt_size: int = 1024,
) -> np.ndarray:
+ """
+ Split an audio signal into overlapping frames.
+
+ Args:
+ audio: The input audio signal.
+ sample_rate: The sample rate of the audio signal.
+ hop_length: The length of the hopping (default is 20ms).
+ ftt_size: The size of the FFT window (default is 1024).
+
+ Returns:
+ An array of overlapping frames.
+
+ Examples:
+ >>> audio = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]*1000)
+ >>> sample_rate = 8000
+ >>> frames = audio_frames(audio, sample_rate, hop_length=10, ftt_size=512)
+ >>> frames.shape
+ (126, 512)
+ """
hop_size = np.round(sample_rate * hop_length / 1000).astype(int)
@@ -96,6 +216,23 @@
def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarray:
+ """
+ Calculate the Fast Fourier Transform (FFT) of windowed audio data.
+
+ Args:
+ audio_windowed: The windowed audio signal.
+ ftt_size: The size of the FFT (default is 1024).
+
+ Returns:
+ The FFT of the audio data.
+
+ Examples:
+ >>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
+ >>> audio_fft = calculate_fft(audio_windowed, ftt_size=4)
+ >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j,
+ ... -1.5-0.8660254j])))
+ True
+ """
# Transpose the audio data to have time in rows and channels in columns
audio_transposed = np.transpose(audio_windowed)
@@ -115,16 +252,57 @@
def calculate_signal_power(audio_fft: np.ndarray) -> np.ndarray:
+ """
+ Calculate the power of the audio signal from its FFT.
+
+ Args:
+ audio_fft: The FFT of the audio signal.
+
+ Returns:
+ The power of the audio signal.
+
+ Examples:
+ >>> audio_fft = np.array([1+2j, 2+3j, 3+4j, 4+5j])
+ >>> power = calculate_signal_power(audio_fft)
+ >>> np.allclose(power, np.array([5, 13, 25, 41]))
+ True
+ """
# Calculate the power by squaring the absolute values of the FFT coefficients
return np.square(np.abs(audio_fft))
def freq_to_mel(freq: float) -> float:
+ """
+ Convert a frequency in Hertz to the mel scale.
+
+ Args:
+ freq: The frequency in Hertz.
+
+ Returns:
+ The frequency in mel scale.
+
+ Examples:
+ >>> float(round(freq_to_mel(1000), 2))
+ 999.99
+ """
# Use the formula to convert frequency to the mel scale
return 2595.0 * np.log10(1.0 + freq / 700.0)
def mel_to_freq(mels: float) -> float:
+ """
+ Convert a frequency in the mel scale to Hertz.
+
+ Args:
+ mels: The frequency in mel scale.
+
+ Returns:
+ The frequency in Hertz.
+
+ Examples:
+ >>> round(mel_to_freq(999.99), 2)
+ 1000.01
+ """
# Use the formula to convert mel scale to frequency
return 700.0 * (10.0 ** (mels / 2595.0) - 1.0)
@@ -132,6 +310,21 @@ def mel_spaced_filterbank(
sample_rate: int, mel_filter_num: int = 10, ftt_size: int = 1024
) -> np.ndarray:
+ """
+ Create a Mel-spaced filter bank for audio processing.
+
+ Args:
+ sample_rate: The sample rate of the audio.
+ mel_filter_num: The number of mel filters (default is 10).
+ ftt_size: The size of the FFT (default is 1024).
+
+ Returns:
+ Mel-spaced filter bank.
+
+ Examples:
+ >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10))
+ 0.0004603981
+ """
freq_min = 0
freq_high = sample_rate // 2
@@ -156,6 +349,20 @@
def get_filters(filter_points: np.ndarray, ftt_size: int) -> np.ndarray:
+ """
+ Generate filters for audio processing.
+
+ Args:
+ filter_points: A list of filter points.
+ ftt_size: The size of the FFT.
+
+ Returns:
+ A matrix of filters.
+
+ Examples:
+ >>> get_filters(np.array([0, 20, 51, 95, 161, 256], dtype=int), 512).shape
+ (4, 257)
+ """
num_filters = len(filter_points) - 2
filters = np.zeros((num_filters, int(ftt_size / 2) + 1))
@@ -180,6 +387,27 @@ mel_filter_num: int = 10,
ftt_size: int = 1024,
) -> tuple[np.ndarray, np.ndarray]:
+ """
+ Calculate the filter points and frequencies for mel frequency filters.
+
+ Args:
+ sample_rate: The sample rate of the audio.
+ freq_min: The minimum frequency in Hertz.
+ freq_high: The maximum frequency in Hertz.
+ mel_filter_num: The number of mel filters (default is 10).
+ ftt_size: The size of the FFT (default is 1024).
+
+ Returns:
+ Filter points and corresponding frequencies.
+
+ Examples:
+ >>> filter_points = get_filter_points(8000, 0, 4000, mel_filter_num=4, ftt_size=512)
+ >>> filter_points[0]
+ array([ 0, 20, 51, 95, 161, 256])
+ >>> filter_points[1]
+ array([ 0. , 324.46707094, 799.33254207, 1494.30973963,
+ 2511.42581671, 4000. ])
+ """
# Convert minimum and maximum frequencies to mel scale
fmin_mel = freq_to_mel(freq_min)
fmax_mel = freq_to_mel(freq_high)
@@ -200,6 +428,20 @@
def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarray:
+ """
+ Compute the Discrete Cosine Transform (DCT) basis matrix.
+
+ Args:
+ dct_filter_num: The number of DCT filters to generate.
+ filter_num: The number of the fbank filters.
+
+ Returns:
+ The DCT basis matrix.
+
+ Examples:
+ >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5))
+ 0.44721
+ """
basis = np.empty((dct_filter_num, filter_num))
basis[0, :] = 1.0 / np.sqrt(filter_num)
@@ -212,6 +454,16 @@
def example(wav_file_path: str = "./path-to-file/sample.wav") -> np.ndarray:
+ """
+ Example function to calculate Mel Frequency Cepstral Coefficients
+ (MFCCs) from an audio file.
+
+ Args:
+ wav_file_path: The path to the WAV audio file.
+
+ Returns:
+ np.ndarray: The computed MFCCs for the audio.
+ """
from scipy.io import wavfile
# Load the audio from the WAV file
@@ -224,4 +476,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/mfcc.py |
Add docstrings for better understanding | # https://en.wikipedia.org/wiki/Set_cover_problem
from dataclasses import dataclass
from operator import attrgetter
@dataclass
class Item:
weight: int
value: int
@property
def ratio(self) -> float:
return self.value / self.weight
def fractional_cover(items: list[Item], capacity: int) -> float:
if capacity < 0:
raise ValueError("Capacity cannot be negative")
total_value = 0.0
remaining_capacity = capacity
# Sort the items by their value-to-weight ratio in descending order
for item in sorted(items, key=attrgetter("ratio"), reverse=True):
if remaining_capacity == 0:
break
weight_taken = min(item.weight, remaining_capacity)
total_value += weight_taken * item.ratio
remaining_capacity -= weight_taken
return total_value
if __name__ == "__main__":
import doctest
if result := doctest.testmod().failed:
print(f"{result} test(s) failed")
else:
print("All tests passed") | --- +++ @@ -11,10 +11,70 @@
@property
def ratio(self) -> float:
+ """
+ Return the value-to-weight ratio for the item.
+
+ Returns:
+ float: The value-to-weight ratio for the item.
+
+ Examples:
+ >>> Item(10, 65).ratio
+ 6.5
+
+ >>> Item(20, 100).ratio
+ 5.0
+
+ >>> Item(30, 120).ratio
+ 4.0
+ """
return self.value / self.weight
def fractional_cover(items: list[Item], capacity: int) -> float:
+ """
+ Solve the Fractional Cover Problem.
+
+ Args:
+ items: A list of items, where each item has weight and value attributes.
+ capacity: The maximum weight capacity of the knapsack.
+
+ Returns:
+ The maximum value that can be obtained by selecting fractions of items to cover
+ the knapsack's capacity.
+
+ Raises:
+ ValueError: If capacity is negative.
+
+ Examples:
+ >>> fractional_cover((Item(10, 60), Item(20, 100), Item(30, 120)), capacity=50)
+ 240.0
+
+ >>> fractional_cover([Item(20, 100), Item(30, 120), Item(10, 60)], capacity=25)
+ 135.0
+
+ >>> fractional_cover([Item(10, 60), Item(20, 100), Item(30, 120)], capacity=60)
+ 280.0
+
+ >>> fractional_cover(items=[Item(5, 30), Item(10, 60), Item(15, 90)], capacity=30)
+ 180.0
+
+ >>> fractional_cover(items=[], capacity=50)
+ 0.0
+
+ >>> fractional_cover(items=[Item(10, 60)], capacity=5)
+ 30.0
+
+ >>> fractional_cover(items=[Item(10, 60)], capacity=1)
+ 6.0
+
+ >>> fractional_cover(items=[Item(10, 60)], capacity=0)
+ 0.0
+
+ >>> fractional_cover(items=[Item(10, 60)], capacity=-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: Capacity cannot be negative
+ """
if capacity < 0:
raise ValueError("Capacity cannot be negative")
@@ -39,4 +99,4 @@ if result := doctest.testmod().failed:
print(f"{result} test(s) failed")
else:
- print("All tests passed")+ print("All tests passed")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/greedy_methods/fractional_cover_problem.py |
Add docstrings to improve collaboration |
import doctest
import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def collect_dataset() -> tuple[np.ndarray, np.ndarray]:
data = load_iris()
return np.array(data.data), np.array(data.target)
def apply_pca(data_x: np.ndarray, n_components: int) -> tuple[np.ndarray, np.ndarray]:
# Standardizing the dataset
scaler = StandardScaler()
data_x_scaled = scaler.fit_transform(data_x)
# Applying PCA
pca = PCA(n_components=n_components)
principal_components = pca.fit_transform(data_x_scaled)
return principal_components, pca.explained_variance_ratio_
def main() -> None:
data_x, _data_y = collect_dataset()
# Number of principal components to retain
n_components = 2
# Apply PCA
transformed_data, variance_ratio = apply_pca(data_x, n_components)
print("Transformed Dataset (First 5 rows):")
print(transformed_data[:5])
print("\nExplained Variance Ratio:")
print(variance_ratio)
if __name__ == "__main__":
doctest.testmod()
main() | --- +++ @@ -1,3 +1,13 @@+"""
+Principal Component Analysis (PCA) is a dimensionality reduction technique
+used in machine learning. It transforms high-dimensional data into a lower-dimensional
+representation while retaining as much variance as possible.
+
+This implementation follows best practices, including:
+- Standardizing the dataset.
+- Computing principal components using Singular Value Decomposition (SVD).
+- Returning transformed data and explained variance ratio.
+"""
import doctest
@@ -8,11 +18,38 @@
def collect_dataset() -> tuple[np.ndarray, np.ndarray]:
+ """
+ Collects the dataset (Iris dataset) and returns feature matrix and target values.
+
+ :return: Tuple containing feature matrix (X) and target labels (y)
+
+ Example:
+ >>> X, y = collect_dataset()
+ >>> X.shape
+ (150, 4)
+ >>> y.shape
+ (150,)
+ """
data = load_iris()
return np.array(data.data), np.array(data.target)
def apply_pca(data_x: np.ndarray, n_components: int) -> tuple[np.ndarray, np.ndarray]:
+ """
+ Applies Principal Component Analysis (PCA) to reduce dimensionality.
+
+ :param data_x: Original dataset (features)
+ :param n_components: Number of principal components to retain
+ :return: Tuple containing transformed dataset and explained variance ratio
+
+ Example:
+ >>> X, _ = collect_dataset()
+ >>> transformed_X, variance = apply_pca(X, 2)
+ >>> transformed_X.shape
+ (150, 2)
+ >>> len(variance) == 2
+ True
+ """
# Standardizing the dataset
scaler = StandardScaler()
data_x_scaled = scaler.fit_transform(data_x)
@@ -25,6 +62,9 @@
def main() -> None:
+ """
+ Driver function to execute PCA and display results.
+ """
data_x, _data_y = collect_dataset()
# Number of principal components to retain
@@ -42,4 +82,4 @@
if __name__ == "__main__":
doctest.testmod()
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/principle_component_analysis.py |
Can you add docstrings to this Python file? |
import numpy as np
class DecisionTree:
def __init__(self, depth=5, min_leaf_size=5):
self.depth = depth
self.decision_boundary = 0
self.left = None
self.right = None
self.min_leaf_size = min_leaf_size
self.prediction = None
def mean_squared_error(self, labels, prediction):
if labels.ndim != 1:
print("Error: Input labels must be one dimensional")
return np.mean((labels - prediction) ** 2)
def train(self, x, y):
if x.ndim != 1:
raise ValueError("Input data set must be one-dimensional")
if len(x) != len(y):
raise ValueError("x and y have different lengths")
if y.ndim != 1:
raise ValueError("Data set labels must be one-dimensional")
if len(x) < 2 * self.min_leaf_size:
self.prediction = np.mean(y)
return
if self.depth == 1:
self.prediction = np.mean(y)
return
best_split = 0
min_error = self.mean_squared_error(x, np.mean(y)) * 2
"""
loop over all possible splits for the decision tree. find the best split.
if no split exists that is less than 2 * error for the entire array
then the data set is not split and the average for the entire array is used as
the predictor
"""
for i in range(len(x)):
if len(x[:i]) < self.min_leaf_size: # noqa: SIM114
continue
elif len(x[i:]) < self.min_leaf_size:
continue
else:
error_left = self.mean_squared_error(x[:i], np.mean(y[:i]))
error_right = self.mean_squared_error(x[i:], np.mean(y[i:]))
error = error_left + error_right
if error < min_error:
best_split = i
min_error = error
if best_split != 0:
left_x = x[:best_split]
left_y = y[:best_split]
right_x = x[best_split:]
right_y = y[best_split:]
self.decision_boundary = x[best_split]
self.left = DecisionTree(
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
)
self.right = DecisionTree(
depth=self.depth - 1, min_leaf_size=self.min_leaf_size
)
self.left.train(left_x, left_y)
self.right.train(right_x, right_y)
else:
self.prediction = np.mean(y)
return
def predict(self, x):
if self.prediction is not None:
return self.prediction
elif self.left is not None and self.right is not None:
if x >= self.decision_boundary:
return self.right.predict(x)
else:
return self.left.predict(x)
else:
raise ValueError("Decision tree not yet trained")
class TestDecisionTree:
@staticmethod
def helper_mean_squared_error_test(labels, prediction):
squared_error_sum = float(0)
for label in labels:
squared_error_sum += (label - prediction) ** 2
return float(squared_error_sum / labels.size)
def main():
x = np.arange(-1.0, 1.0, 0.005)
y = np.sin(x)
tree = DecisionTree(depth=10, min_leaf_size=10)
tree.train(x, y)
rng = np.random.default_rng()
test_cases = (rng.random(10) * 2) - 1
predictions = np.array([tree.predict(x) for x in test_cases])
avg_error = np.mean((predictions - test_cases) ** 2)
print("Test values: " + str(test_cases))
print("Predictions: " + str(predictions))
print("Average error: " + str(avg_error))
if __name__ == "__main__":
main()
import doctest
doctest.testmod(name="mean_squared_error", verbose=True) | --- +++ @@ -1,3 +1,8 @@+"""
+Implementation of a basic regression decision tree.
+Input data set: The input data set must be 1-dimensional with continuous labels.
+Output: The decision tree maps a real number input to a real number output.
+"""
import numpy as np
@@ -12,12 +17,69 @@ self.prediction = None
def mean_squared_error(self, labels, prediction):
+ """
+ mean_squared_error:
+ @param labels: a one-dimensional numpy array
+ @param prediction: a floating point value
+ return value: mean_squared_error calculates the error if prediction is used to
+ estimate the labels
+ >>> tester = DecisionTree()
+ >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
+ >>> test_prediction = float(6)
+ >>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
+ ... TestDecisionTree.helper_mean_squared_error_test(test_labels,
+ ... test_prediction)))
+ True
+ >>> test_labels = np.array([1,2,3])
+ >>> test_prediction = float(2)
+ >>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
+ ... TestDecisionTree.helper_mean_squared_error_test(test_labels,
+ ... test_prediction)))
+ True
+ """
if labels.ndim != 1:
print("Error: Input labels must be one dimensional")
return np.mean((labels - prediction) ** 2)
def train(self, x, y):
+ """
+ train:
+ @param x: a one-dimensional numpy array
+ @param y: a one-dimensional numpy array.
+ The contents of y are the labels for the corresponding X values
+
+ train() does not have a return value
+
+ Examples:
+ 1. Try to train when x & y are of same length & 1 dimensions (No errors)
+ >>> dt = DecisionTree()
+ >>> dt.train(np.array([10,20,30,40,50]),np.array([0,0,0,1,1]))
+
+ 2. Try to train when x is 2 dimensions
+ >>> dt = DecisionTree()
+ >>> dt.train(np.array([[1,2,3,4,5],[1,2,3,4,5]]),np.array([0,0,0,1,1]))
+ Traceback (most recent call last):
+ ...
+ ValueError: Input data set must be one-dimensional
+
+ 3. Try to train when x and y are not of the same length
+ >>> dt = DecisionTree()
+ >>> dt.train(np.array([1,2,3,4,5]),np.array([[0,0,0,1,1],[0,0,0,1,1]]))
+ Traceback (most recent call last):
+ ...
+ ValueError: x and y have different lengths
+
+ 4. Try to train when x & y are of the same length but different dimensions
+ >>> dt = DecisionTree()
+ >>> dt.train(np.array([1,2,3,4,5]),np.array([[1],[2],[3],[4],[5]]))
+ Traceback (most recent call last):
+ ...
+ ValueError: Data set labels must be one-dimensional
+
+ This section is to check that the inputs conform to our dimensionality
+ constraints
+ """
if x.ndim != 1:
raise ValueError("Input data set must be one-dimensional")
if len(x) != len(y):
@@ -76,6 +138,12 @@ return
def predict(self, x):
+ """
+ predict:
+ @param x: a floating point value to predict the label of
+ the prediction function works by recursively calling the predict function
+ of the appropriate subtrees based on the tree's decision boundary
+ """
if self.prediction is not None:
return self.prediction
elif self.left is not None and self.right is not None:
@@ -88,9 +156,16 @@
class TestDecisionTree:
+ """Decision Tres test class"""
@staticmethod
def helper_mean_squared_error_test(labels, prediction):
+ """
+ helper_mean_squared_error_test:
+ @param labels: a one dimensional numpy array
+ @param prediction: a floating point value
+ return value: helper_mean_squared_error_test calculates the mean squared error
+ """
squared_error_sum = float(0)
for label in labels:
squared_error_sum += (label - prediction) ** 2
@@ -99,6 +174,12 @@
def main():
+ """
+ In this demonstration we're generating a sample data set from the sin function in
+ numpy. We then train a decision tree on the data set and use the decision tree to
+ predict the label of 10 different test values. Then the mean squared error over
+ this test is displayed.
+ """
x = np.arange(-1.0, 1.0, 0.005)
y = np.sin(x)
@@ -119,4 +200,4 @@ main()
import doctest
- doctest.testmod(name="mean_squared_error", verbose=True)+ doctest.testmod(name="mean_squared_error", verbose=True)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/decision_tree.py |
Generate NumPy-style docstrings |
from __future__ import annotations
from collections import defaultdict
from enum import Enum
from types import TracebackType
from typing import Any
import numpy as np
from typing_extensions import Self # noqa: UP035
class OpType(Enum):
ADD = 0
SUB = 1
MUL = 2
DIV = 3
MATMUL = 4
POWER = 5
NOOP = 6
class Variable:
def __init__(self, value: Any) -> None:
self.value = np.array(value)
# pointers to the operations to which the Variable is input
self.param_to: list[Operation] = []
# pointer to the operation of which the Variable is output of
self.result_of: Operation = Operation(OpType.NOOP)
def __repr__(self) -> str:
return f"Variable({self.value})"
def to_ndarray(self) -> np.ndarray:
return self.value
def __add__(self, other: Variable) -> Variable:
result = Variable(self.value + other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.ADD, params=[self, other], output=result)
return result
def __sub__(self, other: Variable) -> Variable:
result = Variable(self.value - other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.SUB, params=[self, other], output=result)
return result
def __mul__(self, other: Variable) -> Variable:
result = Variable(self.value * other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.MUL, params=[self, other], output=result)
return result
def __truediv__(self, other: Variable) -> Variable:
result = Variable(self.value / other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.DIV, params=[self, other], output=result)
return result
def __matmul__(self, other: Variable) -> Variable:
result = Variable(self.value @ other.value)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(OpType.MATMUL, params=[self, other], output=result)
return result
def __pow__(self, power: int) -> Variable:
result = Variable(self.value**power)
with GradientTracker() as tracker:
# if tracker is enabled, computation graph will be updated
if tracker.enabled:
tracker.append(
OpType.POWER,
params=[self],
output=result,
other_params={"power": power},
)
return result
def add_param_to(self, param_to: Operation) -> None:
self.param_to.append(param_to)
def add_result_of(self, result_of: Operation) -> None:
self.result_of = result_of
class Operation:
def __init__(
self,
op_type: OpType,
other_params: dict | None = None,
) -> None:
self.op_type = op_type
self.other_params = {} if other_params is None else other_params
def add_params(self, params: list[Variable]) -> None:
self.params = params
def add_output(self, output: Variable) -> None:
self.output = output
def __eq__(self, value) -> bool:
return self.op_type == value if isinstance(value, OpType) else False
class GradientTracker:
instance = None
def __new__(cls) -> Self:
if cls.instance is None:
cls.instance = super().__new__(cls)
return cls.instance
def __init__(self) -> None:
self.enabled = False
def __enter__(self) -> Self:
self.enabled = True
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.enabled = False
def append(
self,
op_type: OpType,
params: list[Variable],
output: Variable,
other_params: dict | None = None,
) -> None:
operation = Operation(op_type, other_params=other_params)
param_nodes = []
for param in params:
param.add_param_to(operation)
param_nodes.append(param)
output.add_result_of(operation)
operation.add_params(param_nodes)
operation.add_output(output)
def gradient(self, target: Variable, source: Variable) -> np.ndarray | None:
# partial derivatives with respect to target
partial_deriv = defaultdict(lambda: 0)
partial_deriv[target] = np.ones_like(target.to_ndarray())
# iterating through each operations in the computation graph
operation_queue = [target.result_of]
while len(operation_queue) > 0:
operation = operation_queue.pop()
for param in operation.params:
# as per the chain rule, multiplying partial derivatives
# of variables with respect to the target
dparam_doutput = self.derivative(param, operation)
dparam_dtarget = dparam_doutput * partial_deriv[operation.output]
partial_deriv[param] += dparam_dtarget
if param.result_of and param.result_of != OpType.NOOP:
operation_queue.append(param.result_of)
return partial_deriv.get(source)
def derivative(self, param: Variable, operation: Operation) -> np.ndarray:
params = operation.params
if operation == OpType.ADD:
return np.ones_like(params[0].to_ndarray(), dtype=np.float64)
if operation == OpType.SUB:
if params[0] == param:
return np.ones_like(params[0].to_ndarray(), dtype=np.float64)
return -np.ones_like(params[1].to_ndarray(), dtype=np.float64)
if operation == OpType.MUL:
return (
params[1].to_ndarray().T
if params[0] == param
else params[0].to_ndarray().T
)
if operation == OpType.DIV:
if params[0] == param:
return 1 / params[1].to_ndarray()
return -params[0].to_ndarray() / (params[1].to_ndarray() ** 2)
if operation == OpType.MATMUL:
return (
params[1].to_ndarray().T
if params[0] == param
else params[0].to_ndarray().T
)
if operation == OpType.POWER:
power = operation.other_params["power"]
return power * (params[0].to_ndarray() ** (power - 1))
err_msg = f"invalid operation type: {operation.op_type}"
raise ValueError(err_msg)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,11 @@+"""
+Demonstration of the Automatic Differentiation (Reverse mode).
+
+Reference: https://en.wikipedia.org/wiki/Automatic_differentiation
+
+Author: Poojan Smart
+Email: smrtpoojan@gmail.com
+"""
from __future__ import annotations
@@ -11,6 +19,9 @@
class OpType(Enum):
+ """
+ Class represents list of supported operations on Variable for gradient calculation.
+ """
ADD = 0
SUB = 1
@@ -22,6 +33,20 @@
class Variable:
+ """
+ Class represents n-dimensional object which is used to wrap numpy array on which
+ operations will be performed and the gradient will be calculated.
+
+ Examples:
+ >>> Variable(5.0)
+ Variable(5.0)
+ >>> Variable([5.0, 2.9])
+ Variable([5. 2.9])
+ >>> Variable([5.0, 2.9]) + Variable([1.0, 5.5])
+ Variable([6. 8.4])
+ >>> Variable([[8.0, 10.0]])
+ Variable([[ 8. 10.]])
+ """
def __init__(self, value: Any) -> None:
self.value = np.array(value)
@@ -104,6 +129,11 @@
class Operation:
+ """
+ Class represents operation between single or two Variable objects.
+ Operation objects contains type of operation, pointers to input Variable
+ objects and pointer to resulting Variable from the operation.
+ """
def __init__(
self,
@@ -124,10 +154,51 @@
class GradientTracker:
+ """
+ Class contains methods to compute partial derivatives of Variable
+ based on the computation graph.
+
+ Examples:
+
+ >>> with GradientTracker() as tracker:
+ ... a = Variable([2.0, 5.0])
+ ... b = Variable([1.0, 2.0])
+ ... m = Variable([1.0, 2.0])
+ ... c = a + b
+ ... d = a * b
+ ... e = c / d
+ >>> tracker.gradient(e, a)
+ array([-0.25, -0.04])
+ >>> tracker.gradient(e, b)
+ array([-1. , -0.25])
+ >>> tracker.gradient(e, m) is None
+ True
+
+ >>> with GradientTracker() as tracker:
+ ... a = Variable([[2.0, 5.0]])
+ ... b = Variable([[1.0], [2.0]])
+ ... c = a @ b
+ >>> tracker.gradient(c, a)
+ array([[1., 2.]])
+ >>> tracker.gradient(c, b)
+ array([[2.],
+ [5.]])
+
+ >>> with GradientTracker() as tracker:
+ ... a = Variable([[2.0, 5.0]])
+ ... b = a ** 3
+ >>> tracker.gradient(b, a)
+ array([[12., 75.]])
+ """
instance = None
def __new__(cls) -> Self:
+ """
+ Executes at the creation of class object and returns if
+ object is already created. This class follows singleton
+ design pattern.
+ """
if cls.instance is None:
cls.instance = super().__new__(cls)
return cls.instance
@@ -154,6 +225,15 @@ output: Variable,
other_params: dict | None = None,
) -> None:
+ """
+ Adds Operation object to the related Variable objects for
+ creating computational graph for calculating gradients.
+
+ Args:
+ op_type: Operation type
+ params: Input parameters to the operation
+ output: Output variable of the operation
+ """
operation = Operation(op_type, other_params=other_params)
param_nodes = []
for param in params:
@@ -165,6 +245,18 @@ operation.add_output(output)
def gradient(self, target: Variable, source: Variable) -> np.ndarray | None:
+ """
+ Reverse accumulation of partial derivatives to calculate gradients
+ of target variable with respect to source variable.
+
+ Args:
+ target: target variable for which gradients are calculated.
+ source: source variable with respect to which the gradients are
+ calculated.
+
+ Returns:
+ Gradient of the source variable with respect to the target variable
+ """
# partial derivatives with respect to target
partial_deriv = defaultdict(lambda: 0)
@@ -187,6 +279,17 @@ return partial_deriv.get(source)
def derivative(self, param: Variable, operation: Operation) -> np.ndarray:
+ """
+ Compute the derivative of given operation/function
+
+ Args:
+ param: variable to be differentiated
+ operation: function performed on the input variable
+
+ Returns:
+ Derivative of input variable with respect to the output of
+ the operation
+ """
params = operation.params
if operation == OpType.ADD:
@@ -222,4 +325,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/automatic_differentiation.py |
Add docstrings to make code maintainable |
from typing import Any
import numpy as np
class Tableau:
# Max iteration number to prevent cycling
maxiter = 100
def __init__(
self, tableau: np.ndarray, n_vars: int, n_artificial_vars: int
) -> None:
if tableau.dtype != "float64":
raise TypeError("Tableau must have type float64")
# Check if RHS is negative
if not (tableau[:, -1] >= 0).all():
raise ValueError("RHS must be > 0")
if n_vars < 2 or n_artificial_vars < 0:
raise ValueError(
"number of (artificial) variables must be a natural number"
)
self.tableau = tableau
self.n_rows, n_cols = tableau.shape
# Number of decision variables x1, x2, x3...
self.n_vars, self.n_artificial_vars = n_vars, n_artificial_vars
# 2 if there are >= or == constraints (nonstandard), 1 otherwise (std)
self.n_stages = (self.n_artificial_vars > 0) + 1
# Number of slack variables added to make inequalities into equalities
self.n_slack = n_cols - self.n_vars - self.n_artificial_vars - 1
# Objectives for each stage
self.objectives = ["max"]
# In two stage simplex, first minimise then maximise
if self.n_artificial_vars:
self.objectives.append("min")
self.col_titles = self.generate_col_titles()
# Index of current pivot row and column
self.row_idx = None
self.col_idx = None
# Does objective row only contain (non)-negative values?
self.stop_iter = False
def generate_col_titles(self) -> list[str]:
args = (self.n_vars, self.n_slack)
# decision | slack
string_starts = ["x", "s"]
titles = []
for i in range(2):
for j in range(args[i]):
titles.append(string_starts[i] + str(j + 1))
titles.append("RHS")
return titles
def find_pivot(self) -> tuple[Any, Any]:
objective = self.objectives[-1]
# Find entries of highest magnitude in objective rows
sign = (objective == "min") - (objective == "max")
col_idx = np.argmax(sign * self.tableau[0, :-1])
# Choice is only valid if below 0 for maximise, and above for minimise
if sign * self.tableau[0, col_idx] <= 0:
self.stop_iter = True
return 0, 0
# Pivot row is chosen as having the lowest quotient when elements of
# the pivot column divide the right-hand side
# Slice excluding the objective rows
s = slice(self.n_stages, self.n_rows)
# RHS
dividend = self.tableau[s, -1]
# Elements of pivot column within slice
divisor = self.tableau[s, col_idx]
# Array filled with nans
nans = np.full(self.n_rows - self.n_stages, np.nan)
# If element in pivot column is greater than zero, return
# quotient or nan otherwise
quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0)
# Arg of minimum quotient excluding the nan values. n_stages is added
# to compensate for earlier exclusion of objective columns
row_idx = np.nanargmin(quotients) + self.n_stages
return row_idx, col_idx
def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
# Avoid changes to original tableau
piv_row = self.tableau[row_idx].copy()
piv_val = piv_row[col_idx]
# Entry becomes 1
piv_row *= 1 / piv_val
# Variable in pivot column becomes basic, ie the only non-zero entry
for idx, coeff in enumerate(self.tableau[:, col_idx]):
self.tableau[idx] += -coeff * piv_row
self.tableau[row_idx] = piv_row
return self.tableau
def change_stage(self) -> np.ndarray:
# Objective of original objective row remains
self.objectives.pop()
if not self.objectives:
return self.tableau
# Slice containing ids for artificial columns
s = slice(-self.n_artificial_vars - 1, -1)
# Delete the artificial variable columns
self.tableau = np.delete(self.tableau, s, axis=1)
# Delete the objective row of the first stage
self.tableau = np.delete(self.tableau, 0, axis=0)
self.n_stages = 1
self.n_rows -= 1
self.n_artificial_vars = 0
self.stop_iter = False
return self.tableau
def run_simplex(self) -> dict[Any, Any]:
# Stop simplex algorithm from cycling.
for _ in range(Tableau.maxiter):
# Completion of each stage removes an objective. If both stages
# are complete, then no objectives are left
if not self.objectives:
# Find the values of each variable at optimal solution
return self.interpret_tableau()
row_idx, col_idx = self.find_pivot()
# If there are no more negative values in objective row
if self.stop_iter:
# Delete artificial variable columns and rows. Update attributes
self.tableau = self.change_stage()
else:
self.tableau = self.pivot(row_idx, col_idx)
return {}
def interpret_tableau(self) -> dict[str, float]:
# P = RHS of final tableau
output_dict = {"P": abs(self.tableau[0, -1])}
for i in range(self.n_vars):
# Gives indices of nonzero entries in the ith column
nonzero = np.nonzero(self.tableau[:, i])
n_nonzero = len(nonzero[0])
# First entry in the nonzero indices
nonzero_rowidx = nonzero[0][0]
nonzero_val = self.tableau[nonzero_rowidx, i]
# If there is only one nonzero value in column, which is one
if n_nonzero == 1 and nonzero_val == 1:
rhs_val = self.tableau[nonzero_rowidx, -1]
output_dict[self.col_titles[i]] = rhs_val
return output_dict
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,17 @@+"""
+Python implementation of the simplex algorithm for solving linear programs in
+tabular form with
+- `>=`, `<=`, and `=` constraints and
+- each variable `x1, x2, ...>= 0`.
+
+See https://gist.github.com/imengus/f9619a568f7da5bc74eaf20169a24d98 for how to
+convert linear programs to simplex tableaus, and the steps taken in the simplex
+algorithm.
+
+Resources:
+https://en.wikipedia.org/wiki/Simplex_algorithm
+https://tinyurl.com/simplex4beginners
+"""
from typing import Any
@@ -5,6 +19,23 @@
class Tableau:
+ """Operate on simplex tableaus
+
+ >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4]]), 2, 2)
+ Traceback (most recent call last):
+ ...
+ TypeError: Tableau must have type float64
+
+ >>> Tableau(np.array([[-1,-1,0,0,-1],[1,3,1,0,4],[3,1,0,1,4.]]), 2, 2)
+ Traceback (most recent call last):
+ ...
+ ValueError: RHS must be > 0
+
+ >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]), -2, 2)
+ Traceback (most recent call last):
+ ...
+ ValueError: number of (artificial) variables must be a natural number
+ """
# Max iteration number to prevent cycling
maxiter = 100
@@ -53,6 +84,16 @@ self.stop_iter = False
def generate_col_titles(self) -> list[str]:
+ """Generate column titles for tableau of specific dimensions
+
+ >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]),
+ ... 2, 0).generate_col_titles()
+ ['x1', 'x2', 's1', 's2', 'RHS']
+
+ >>> Tableau(np.array([[-1,-1,0,0,1],[1,3,1,0,4],[3,1,0,1,4.]]),
+ ... 2, 2).generate_col_titles()
+ ['x1', 'x2', 'RHS']
+ """
args = (self.n_vars, self.n_slack)
# decision | slack
@@ -65,6 +106,11 @@ return titles
def find_pivot(self) -> tuple[Any, Any]:
+ """Finds the pivot row and column.
+ >>> tuple(int(x) for x in Tableau(np.array([[-2,1,0,0,0], [3,1,1,0,6],
+ ... [1,2,0,1,7.]]), 2, 0).find_pivot())
+ (1, 0)
+ """
objective = self.objectives[-1]
# Find entries of highest magnitude in objective rows
@@ -101,6 +147,15 @@ return row_idx, col_idx
def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
+ """Pivots on value on the intersection of pivot row and column.
+
+ >>> Tableau(np.array([[-2,-3,0,0,0],[1,3,1,0,4],[3,1,0,1,4.]]),
+ ... 2, 2).pivot(1, 0).tolist()
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [[0.0, 3.0, 2.0, 0.0, 8.0],
+ [1.0, 3.0, 1.0, 0.0, 4.0],
+ [0.0, -8.0, -3.0, 1.0, -8.0]]
+ """
# Avoid changes to original tableau
piv_row = self.tableau[row_idx].copy()
@@ -116,6 +171,21 @@ return self.tableau
def change_stage(self) -> np.ndarray:
+ """Exits first phase of the two-stage method by deleting artificial
+ rows and columns, or completes the algorithm if exiting the standard
+ case.
+
+ >>> Tableau(np.array([
+ ... [3, 3, -1, -1, 0, 0, 4],
+ ... [2, 1, 0, 0, 0, 0, 0.],
+ ... [1, 2, -1, 0, 1, 0, 2],
+ ... [2, 1, 0, -1, 0, 1, 2]
+ ... ]), 2, 2).change_stage().tolist()
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [[2.0, 1.0, 0.0, 0.0, 0.0],
+ [1.0, 2.0, -1.0, 0.0, 2.0],
+ [2.0, 1.0, 0.0, -1.0, 2.0]]
+ """
# Objective of original objective row remains
self.objectives.pop()
@@ -138,6 +208,84 @@ return self.tableau
def run_simplex(self) -> dict[Any, Any]:
+ """Operate on tableau until objective function cannot be
+ improved further.
+
+ # Standard linear program:
+ Max: x1 + x2
+ ST: x1 + 3x2 <= 4
+ 3x1 + x2 <= 4
+ >>> {key: float(value) for key, value in Tableau(np.array([[-1,-1,0,0,0],
+ ... [1,3,1,0,4],[3,1,0,1,4.]]), 2, 0).run_simplex().items()}
+ {'P': 2.0, 'x1': 1.0, 'x2': 1.0}
+
+ # Standard linear program with 3 variables:
+ Max: 3x1 + x2 + 3x3
+ ST: 2x1 + x2 + x3 ≤ 2
+ x1 + 2x2 + 3x3 ≤ 5
+ 2x1 + 2x2 + x3 ≤ 6
+ >>> {key: float(value) for key, value in Tableau(np.array([
+ ... [-3,-1,-3,0,0,0,0],
+ ... [2,1,1,1,0,0,2],
+ ... [1,2,3,0,1,0,5],
+ ... [2,2,1,0,0,1,6.]
+ ... ]),3,0).run_simplex().items()} # doctest: +ELLIPSIS
+ {'P': 5.4, 'x1': 0.199..., 'x3': 1.6}
+
+
+ # Optimal tableau input:
+ >>> {key: float(value) for key, value in Tableau(np.array([
+ ... [0, 0, 0.25, 0.25, 2],
+ ... [0, 1, 0.375, -0.125, 1],
+ ... [1, 0, -0.125, 0.375, 1]
+ ... ]), 2, 0).run_simplex().items()}
+ {'P': 2.0, 'x1': 1.0, 'x2': 1.0}
+
+ # Non-standard: >= constraints
+ Max: 2x1 + 3x2 + x3
+ ST: x1 + x2 + x3 <= 40
+ 2x1 + x2 - x3 >= 10
+ - x2 + x3 >= 10
+ >>> {key: float(value) for key, value in Tableau(np.array([
+ ... [2, 0, 0, 0, -1, -1, 0, 0, 20],
+ ... [-2, -3, -1, 0, 0, 0, 0, 0, 0],
+ ... [1, 1, 1, 1, 0, 0, 0, 0, 40],
+ ... [2, 1, -1, 0, -1, 0, 1, 0, 10],
+ ... [0, -1, 1, 0, 0, -1, 0, 1, 10.]
+ ... ]), 3, 2).run_simplex().items()}
+ {'P': 70.0, 'x1': 10.0, 'x2': 10.0, 'x3': 20.0}
+
+ # Non standard: minimisation and equalities
+ Min: x1 + x2
+ ST: 2x1 + x2 = 12
+ 6x1 + 5x2 = 40
+ >>> {key: float(value) for key, value in Tableau(np.array([
+ ... [8, 6, 0, 0, 52],
+ ... [1, 1, 0, 0, 0],
+ ... [2, 1, 1, 0, 12],
+ ... [6, 5, 0, 1, 40.],
+ ... ]), 2, 2).run_simplex().items()}
+ {'P': 7.0, 'x1': 5.0, 'x2': 2.0}
+
+
+ # Pivot on slack variables
+ Max: 8x1 + 6x2
+ ST: x1 + 3x2 <= 33
+ 4x1 + 2x2 <= 48
+ 2x1 + 4x2 <= 48
+ x1 + x2 >= 10
+ x1 >= 2
+ >>> {key: float(value) for key, value in Tableau(np.array([
+ ... [2, 1, 0, 0, 0, -1, -1, 0, 0, 12.0],
+ ... [-8, -6, 0, 0, 0, 0, 0, 0, 0, 0.0],
+ ... [1, 3, 1, 0, 0, 0, 0, 0, 0, 33.0],
+ ... [4, 2, 0, 1, 0, 0, 0, 0, 0, 60.0],
+ ... [2, 4, 0, 0, 1, 0, 0, 0, 0, 48.0],
+ ... [1, 1, 0, 0, 0, -1, 0, 1, 0, 10.0],
+ ... [1, 0, 0, 0, 0, 0, -1, 0, 1, 2.0]
+ ... ]), 2, 2).run_simplex().items()} # doctest: +ELLIPSIS
+ {'P': 132.0, 'x1': 12.000... 'x2': 5.999...}
+ """
# Stop simplex algorithm from cycling.
for _ in range(Tableau.maxiter):
# Completion of each stage removes an objective. If both stages
@@ -157,6 +305,15 @@ return {}
def interpret_tableau(self) -> dict[str, float]:
+ """Given the final tableau, add the corresponding values of the basic
+ decision variables to the `output_dict`
+ >>> {key: float(value) for key, value in Tableau(np.array([
+ ... [0,0,0.875,0.375,5],
+ ... [0,1,0.375,-0.125,1],
+ ... [1,0,-0.125,0.375,1]
+ ... ]),2, 0).interpret_tableau().items()}
+ {'P': 5.0, 'x1': 1.0, 'x2': 1.0}
+ """
# P = RHS of final tableau
output_dict = {"P": abs(self.tableau[0, -1])}
@@ -179,4 +336,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/linear_programming/simplex.py |
Add docstrings that explain logic |
import numpy as np
class Cell:
def __init__(self):
self.position = (0, 0)
self.parent = None
self.g = 0
self.h = 0
self.f = 0
"""
Overrides equals method because otherwise cell assign will give
wrong results.
"""
def __eq__(self, cell):
return self.position == cell.position
def showcell(self):
print(self.position)
class Gridworld:
def __init__(self, world_size=(5, 5)):
self.w = np.zeros(world_size)
self.world_x_limit = world_size[0]
self.world_y_limit = world_size[1]
def show(self):
print(self.w)
def get_neighbours(self, cell):
neughbour_cord = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
current_x = cell.position[0]
current_y = cell.position[1]
neighbours = []
for n in neughbour_cord:
x = current_x + n[0]
y = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
c = Cell()
c.position = (x, y)
c.parent = cell
neighbours.append(c)
return neighbours
def astar(world, start, goal):
_open = []
_closed = []
_open.append(start)
while _open:
min_f = np.argmin([n.f for n in _open])
current = _open[min_f]
_closed.append(_open.pop(min_f))
if current == goal:
break
for n in world.get_neighbours(current):
for c in _closed:
if c == n:
continue
n.g = current.g + 1
x1, y1 = n.position
x2, y2 = goal.position
n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
n.f = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(n)
path = []
while current.parent is not None:
path.append(current.position)
current = current.parent
path.append(current.position)
return path[::-1]
if __name__ == "__main__":
world = Gridworld()
# Start position and goal
start = Cell()
start.position = (0, 0)
goal = Cell()
goal.position = (4, 4)
print(f"path from {start.position} to {goal.position}")
s = astar(world, start, goal)
# Just for visual reasons.
for i in s:
world.w[i] = 1
print(world.w) | --- +++ @@ -1,8 +1,28 @@+"""
+The A* algorithm combines features of uniform-cost search and pure heuristic search to
+efficiently compute optimal solutions.
+
+The A* algorithm is a best-first search algorithm in which the cost associated with a
+node is f(n) = g(n) + h(n), where g(n) is the cost of the path from the initial state to
+node n and h(n) is the heuristic estimate or the cost or a path from node n to a goal.
+
+The A* algorithm introduces a heuristic into a regular graph-searching algorithm,
+essentially planning ahead at each step so a more optimal decision is made. For this
+reason, A* is known as an algorithm with brains.
+
+https://en.wikipedia.org/wiki/A*_search_algorithm
+"""
import numpy as np
class Cell:
+ """
+ Class cell represents a cell in the world which have the properties:
+ position: represented by tuple of x and y coordinates initially set to (0,0).
+ parent: Contains the parent cell object visited before we arrived at this cell.
+ g, h, f: Parameters used when calling our heuristic function.
+ """
def __init__(self):
self.position = (0, 0)
@@ -24,6 +44,11 @@
class Gridworld:
+ """
+ Gridworld class represents the external world here a grid M*M
+ matrix.
+ world_size: create a numpy array with the given world_size default is 5.
+ """
def __init__(self, world_size=(5, 5)):
self.w = np.zeros(world_size)
@@ -34,6 +59,9 @@ print(self.w)
def get_neighbours(self, cell):
+ """
+ Return the neighbours of cell
+ """
neughbour_cord = [
(-1, -1),
(-1, 0),
@@ -59,6 +87,20 @@
def astar(world, start, goal):
+ """
+ Implementation of a start algorithm.
+ world : Object of the world object.
+ start : Object of the cell as start position.
+ stop : Object of the cell as goal position.
+
+ >>> p = Gridworld()
+ >>> start = Cell()
+ >>> start.position = (0,0)
+ >>> goal = Cell()
+ >>> goal.position = (4,4)
+ >>> astar(p, start, goal)
+ [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
+ """
_open = []
_closed = []
_open.append(start)
@@ -103,4 +145,4 @@ # Just for visual reasons.
for i in s:
world.w[i] = 1
- print(world.w)+ print(world.w)
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/astar.py |
Generate docstrings for this script | #!/usr/bin/env python3
from __future__ import annotations
import random
import unittest
from pprint import pformat
from typing import TypeVar
import pytest
T = TypeVar("T")
class GraphAdjacencyMatrix[T]:
    """
    A robust unweighted graph data structure implemented using an adjacency
    matrix.

    Each vertex (of any hashable type ``T``) is mapped to a row/column index
    via ``vertex_to_index``, and ``adj_matrix[u][v] == 1`` records an edge
    from the vertex at index ``u`` to the vertex at index ``v``. For an
    undirected graph the matrix is kept symmetric by mirroring every edge.
    """
    def __init__(
        self, vertices: list[T], edges: list[list[T]], directed: bool = True
    ) -> None:
        """
        Build a graph from the given vertices and edges.

        Parameters:
        - vertices: (list[T]) The vertices to add. A falsey value (e.g. None)
          is treated as an empty list.
        - edges: (list[list[T]]) The edges to add. Each edge must be a
          2-element list [source, destination]; otherwise a ValueError is
          raised. A falsey value is treated as an empty list.
        - directed: (bool) Indicates if the graph is directed (True, default)
          or undirected (False).

        Raises:
            ValueError: If any edge does not have exactly 2 elements, or if
                add_vertex/add_edge reject a duplicate vertex or edge.
        """
        self.directed = directed
        # Maps each vertex to its row/column index in the adjacency matrix.
        self.vertex_to_index: dict[T, int] = {}
        # Square 0/1 matrix; entry [u][v] == 1 means an edge u -> v exists.
        self.adj_matrix: list[list[int]] = []
        # Falsey checks
        edges = edges or []
        vertices = vertices or []
        for vertex in vertices:
            self.add_vertex(vertex)
        for edge in edges:
            if len(edge) != 2:
                msg = f"Invalid input: {edge} must have length 2."
                raise ValueError(msg)
            self.add_edge(edge[0], edge[1])
    def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
        """
        Create an edge from source_vertex to destination_vertex. If the graph
        is undirected, the reverse edge is created as well.

        Raises:
            ValueError: If either vertex does not exist, or if the edge
                already exists.
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} or "
                f"{destination_vertex} does not exist"
            )
            raise ValueError(msg)
        if self.contains_edge(source_vertex, destination_vertex):
            msg = (
                "Incorrect input: The edge already exists between "
                f"{source_vertex} and {destination_vertex}"
            )
            raise ValueError(msg)
        # Get the indices of the corresponding vertices and set their edge value to 1.
        u: int = self.vertex_to_index[source_vertex]
        v: int = self.vertex_to_index[destination_vertex]
        self.adj_matrix[u][v] = 1
        if not self.directed:
            self.adj_matrix[v][u] = 1
    def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
        """
        Remove the edge between the two vertices. If the graph is undirected,
        the mirrored edge is removed as well.

        Raises:
            ValueError: If either vertex does not exist, or if the edge does
                not exist.
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} or "
                f"{destination_vertex} does not exist"
            )
            raise ValueError(msg)
        if not self.contains_edge(source_vertex, destination_vertex):
            msg = (
                "Incorrect input: The edge does NOT exist between "
                f"{source_vertex} and {destination_vertex}"
            )
            raise ValueError(msg)
        # Get the indices of the corresponding vertices and set their edge value to 0.
        u: int = self.vertex_to_index[source_vertex]
        v: int = self.vertex_to_index[destination_vertex]
        self.adj_matrix[u][v] = 0
        if not self.directed:
            self.adj_matrix[v][u] = 0
    def add_vertex(self, vertex: T) -> None:
        """
        Add a vertex to the graph, growing the adjacency matrix by one row
        and one column (all zeros, i.e. no edges yet).

        Raises:
            ValueError: If the given vertex already exists in the graph.
        """
        if self.contains_vertex(vertex):
            msg = f"Incorrect input: {vertex} already exists in this graph."
            raise ValueError(msg)
        # build column for vertex
        for row in self.adj_matrix:
            row.append(0)
        # build row for vertex and update other data structures
        self.adj_matrix.append([0] * (len(self.adj_matrix) + 1))
        self.vertex_to_index[vertex] = len(self.adj_matrix) - 1
    def remove_vertex(self, vertex: T) -> None:
        """
        Remove the given vertex from the graph, deleting all of its incoming
        and outgoing edges (its row and column in the adjacency matrix), and
        re-compact the indices of the remaining vertices.

        Raises:
            ValueError: If the given vertex does not exist in the graph.
        """
        if not self.contains_vertex(vertex):
            msg = f"Incorrect input: {vertex} does not exist in this graph."
            raise ValueError(msg)
        # first slide up the rows by deleting the row corresponding to
        # the vertex being deleted.
        start_index = self.vertex_to_index[vertex]
        self.adj_matrix.pop(start_index)
        # next, slide the columns to the left by deleting the values in
        # the column corresponding to the vertex being deleted
        for lst in self.adj_matrix:
            lst.pop(start_index)
        # final clean up
        self.vertex_to_index.pop(vertex)
        # decrement indices for vertices shifted by the deleted vertex in the adj matrix
        for inner_vertex in self.vertex_to_index:
            if self.vertex_to_index[inner_vertex] >= start_index:
                self.vertex_to_index[inner_vertex] = (
                    self.vertex_to_index[inner_vertex] - 1
                )
    def contains_vertex(self, vertex: T) -> bool:
        """
        Return True if the graph contains the vertex, False otherwise.
        """
        return vertex in self.vertex_to_index
    def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
        """
        Return True if the graph contains the edge from source_vertex to
        destination_vertex, False otherwise.

        Raises:
            ValueError: If either vertex does not exist in the graph.
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} "
                f"or {destination_vertex} does not exist."
            )
            raise ValueError(msg)
        u = self.vertex_to_index[source_vertex]
        v = self.vertex_to_index[destination_vertex]
        return self.adj_matrix[u][v] == 1
    def clear_graph(self) -> None:
        """
        Clear all vertices and edges, resetting the graph to empty.
        """
        self.vertex_to_index = {}
        self.adj_matrix = []
    def __repr__(self) -> str:
        """
        Return a debug representation showing the adjacency matrix followed
        by the vertex-to-index mapping.
        """
        first = "Adj Matrix:\n" + pformat(self.adj_matrix)
        second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index)
        return first + second
class TestGraphMatrix(unittest.TestCase):
    """
    Unit tests exercising GraphAdjacencyMatrix with randomly generated
    vertices and edges, always checking both an undirected and a directed
    graph built from the same input.
    """
    def __assert_graph_edge_exists_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        edge: list[int],
    ) -> None:
        """
        Assert the edge exists in both directions in the undirected graph
        and from edge[0] to edge[1] in the directed graph.
        """
        assert undirected_graph.contains_edge(edge[0], edge[1])
        assert undirected_graph.contains_edge(edge[1], edge[0])
        assert directed_graph.contains_edge(edge[0], edge[1])
    def __assert_graph_edge_does_not_exist_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        edge: list[int],
    ) -> None:
        """
        Assert the edge is absent in both directions in the undirected graph
        and absent from edge[0] to edge[1] in the directed graph.
        """
        assert not undirected_graph.contains_edge(edge[0], edge[1])
        assert not undirected_graph.contains_edge(edge[1], edge[0])
        assert not directed_graph.contains_edge(edge[0], edge[1])
    def __assert_graph_vertex_exists_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        vertex: int,
    ) -> None:
        """Assert the vertex exists in both graphs."""
        assert undirected_graph.contains_vertex(vertex)
        assert directed_graph.contains_vertex(vertex)
    def __assert_graph_vertex_does_not_exist_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        vertex: int,
    ) -> None:
        """Assert the vertex is absent from both graphs."""
        assert not undirected_graph.contains_vertex(vertex)
        assert not directed_graph.contains_vertex(vertex)
    def __generate_random_edges(
        self, vertices: list[int], edge_pick_count: int
    ) -> list[list[int]]:
        """
        Return all (source, destination) pairs formed from edge_pick_count
        vertices sampled from the first half of `vertices` (sources) and
        edge_pick_count sampled from the second half (destinations), i.e.
        edge_pick_count**2 edges in total.
        """
        assert edge_pick_count <= len(vertices)
        random_source_vertices: list[int] = random.sample(
            vertices[0 : int(len(vertices) / 2)], edge_pick_count
        )
        random_destination_vertices: list[int] = random.sample(
            vertices[int(len(vertices) / 2) :], edge_pick_count
        )
        random_edges: list[list[int]] = []
        for source in random_source_vertices:
            for dest in random_destination_vertices:
                random_edges.append([source, dest])
        return random_edges
    def __generate_graphs(
        self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
    ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]:
        """
        Generate `vertex_count` distinct random vertices in
        [min_val, max_val] plus random edges among them, then build and
        return (undirected_graph, directed_graph, vertices, edges).

        Raises:
            ValueError: If the value range cannot supply `vertex_count`
                distinct vertices.
        """
        if max_val - min_val + 1 < vertex_count:
            raise ValueError(
                "Will result in duplicate vertices. Either increase "
                "range between min_val and max_val or decrease vertex count"
            )
        # generate graph input
        random_vertices: list[int] = random.sample(
            range(min_val, max_val + 1), vertex_count
        )
        random_edges: list[list[int]] = self.__generate_random_edges(
            random_vertices, edge_pick_count
        )
        # build graphs
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=random_edges, directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=random_edges, directed=True
        )
        return undirected_graph, directed_graph, random_vertices, random_edges
    def test_init_check(self) -> None:
        """Constructor stores all given vertices/edges and the directed flag."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        # test graph initialization with vertices and edges
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )
        for edge in random_edges:
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )
        assert not undirected_graph.directed
        assert directed_graph.directed
    def test_contains_vertex(self) -> None:
        """contains_vertex answers True exactly for the vertices added."""
        random_vertices: list[int] = random.sample(range(101), 20)
        # Build graphs WITHOUT edges
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=True
        )
        # Test contains_vertex
        for num in range(101):
            assert (num in random_vertices) == undirected_graph.contains_vertex(num)
            assert (num in random_vertices) == directed_graph.contains_vertex(num)
    def test_add_vertices(self) -> None:
        """add_vertex makes each added vertex visible in both graphs."""
        random_vertices: list[int] = random.sample(range(101), 20)
        # build empty graphs
        undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
            vertices=[], edges=[], directed=False
        )
        directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
            vertices=[], edges=[], directed=True
        )
        # run add_vertex
        for num in random_vertices:
            undirected_graph.add_vertex(num)
        for num in random_vertices:
            directed_graph.add_vertex(num)
        # test add_vertex worked
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )
    def test_remove_vertices(self) -> None:
        """remove_vertex deletes each vertex from both graphs."""
        random_vertices: list[int] = random.sample(range(101), 20)
        # build graphs WITHOUT edges
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=True
        )
        # test remove_vertex worked
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )
            undirected_graph.remove_vertex(num)
            directed_graph.remove_vertex(num)
            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, num
            )
    def test_add_and_remove_vertices_repeatedly(self) -> None:
        """Interleaved add/remove of vertices keeps both graphs consistent."""
        random_vertices1: list[int] = random.sample(range(51), 20)
        random_vertices2: list[int] = random.sample(range(51, 101), 20)
        # build graphs WITHOUT edges
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices1, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices1, edges=[], directed=True
        )
        # test adding and removing vertices
        for i, _ in enumerate(random_vertices1):
            undirected_graph.add_vertex(random_vertices2[i])
            directed_graph.add_vertex(random_vertices2[i])
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, random_vertices2[i]
            )
            undirected_graph.remove_vertex(random_vertices1[i])
            directed_graph.remove_vertex(random_vertices1[i])
            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, random_vertices1[i]
            )
        # remove all vertices
        for i, _ in enumerate(random_vertices1):
            undirected_graph.remove_vertex(random_vertices2[i])
            directed_graph.remove_vertex(random_vertices2[i])
            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, random_vertices2[i]
            )
    def test_contains_edge(self) -> None:
        """contains_edge answers correctly for every possible vertex pair."""
        # generate graphs and graph input
        vertex_count = 20
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(vertex_count, 0, 100, 4)
        # generate all possible edges for testing
        all_possible_edges: list[list[int]] = []
        for i in range(vertex_count - 1):
            for j in range(i + 1, vertex_count):
                all_possible_edges.append([random_vertices[i], random_vertices[j]])
                all_possible_edges.append([random_vertices[j], random_vertices[i]])
        # test contains_edge function
        for edge in all_possible_edges:
            if edge in random_edges:
                self.__assert_graph_edge_exists_check(
                    undirected_graph, directed_graph, edge
                )
            elif [edge[1], edge[0]] in random_edges:
                # since this edge exists for undirected but the reverse may
                # not exist for directed
                self.__assert_graph_edge_exists_check(
                    undirected_graph, directed_graph, [edge[1], edge[0]]
                )
            else:
                self.__assert_graph_edge_does_not_exist_check(
                    undirected_graph, directed_graph, edge
                )
    def test_add_edge(self) -> None:
        """add_edge makes each added edge visible in both graphs."""
        # generate graph input
        random_vertices: list[int] = random.sample(range(101), 15)
        random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
        # build graphs WITHOUT edges
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=True
        )
        # run and test add_edge
        for edge in random_edges:
            undirected_graph.add_edge(edge[0], edge[1])
            directed_graph.add_edge(edge[0], edge[1])
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )
    def test_remove_edge(self) -> None:
        """remove_edge deletes each existing edge from both graphs."""
        # generate graph input and graphs
        (
            undirected_graph,
            directed_graph,
            _random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        # run and test remove_edge
        for edge in random_edges:
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )
            undirected_graph.remove_edge(edge[0], edge[1])
            directed_graph.remove_edge(edge[0], edge[1])
            self.__assert_graph_edge_does_not_exist_check(
                undirected_graph, directed_graph, edge
            )
    def test_add_and_remove_edges_repeatedly(self) -> None:
        """Interleaved add/remove of edges keeps both graphs consistent."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        # make some more edge options!
        more_random_edges: list[list[int]] = []
        while len(more_random_edges) != len(random_edges):
            edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
            for edge in edges:
                if len(more_random_edges) == len(random_edges):
                    break
                elif edge not in more_random_edges and edge not in random_edges:
                    more_random_edges.append(edge)
        for i, _ in enumerate(random_edges):
            undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
            directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, more_random_edges[i]
            )
            undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
            directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])
            self.__assert_graph_edge_does_not_exist_check(
                undirected_graph, directed_graph, random_edges[i]
            )
    def test_add_vertex_exception_check(self) -> None:
        """add_vertex raises ValueError on a duplicate vertex."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            _random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        for vertex in random_vertices:
            with pytest.raises(ValueError):
                undirected_graph.add_vertex(vertex)
            with pytest.raises(ValueError):
                directed_graph.add_vertex(vertex)
    def test_remove_vertex_exception_check(self) -> None:
        """remove_vertex raises ValueError for a vertex not in the graph."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            _random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        for i in range(101):
            if i not in random_vertices:
                with pytest.raises(ValueError):
                    undirected_graph.remove_vertex(i)
                with pytest.raises(ValueError):
                    directed_graph.remove_vertex(i)
    def test_add_edge_exception_check(self) -> None:
        """add_edge raises ValueError when the edge already exists."""
        (
            undirected_graph,
            directed_graph,
            _random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        for edge in random_edges:
            with pytest.raises(ValueError):
                undirected_graph.add_edge(edge[0], edge[1])
            with pytest.raises(ValueError):
                directed_graph.add_edge(edge[0], edge[1])
    def test_remove_edge_exception_check(self) -> None:
        """remove_edge raises ValueError for an edge not in the graph."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        more_random_edges: list[list[int]] = []
        while len(more_random_edges) != len(random_edges):
            edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
            for edge in edges:
                if len(more_random_edges) == len(random_edges):
                    break
                elif edge not in more_random_edges and edge not in random_edges:
                    more_random_edges.append(edge)
        for edge in more_random_edges:
            with pytest.raises(ValueError):
                undirected_graph.remove_edge(edge[0], edge[1])
            with pytest.raises(ValueError):
                directed_graph.remove_edge(edge[0], edge[1])
    def test_contains_edge_exception_check(self) -> None:
        """contains_edge raises ValueError when either vertex is missing."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            _random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)
        for vertex in random_vertices:
            with pytest.raises(ValueError):
                undirected_graph.contains_edge(vertex, 102)
            with pytest.raises(ValueError):
                directed_graph.contains_edge(vertex, 102)
        with pytest.raises(ValueError):
            undirected_graph.contains_edge(103, 102)
        with pytest.raises(ValueError):
            directed_graph.contains_edge(103, 102)
if __name__ == "__main__":
    # Run the unit test suite when this module is executed directly.
    unittest.main()
+"""
+Author: Vikram Nithyanandam
+
+Description:
+The following implementation is a robust unweighted Graph data structure
+implemented using an adjacency matrix. This vertices and edges of this graph can be
+effectively initialized and modified while storing your chosen generic
+value in each vertex.
+
+Adjacency Matrix: https://mathworld.wolfram.com/AdjacencyMatrix.html
+
+Potential Future Ideas:
+- Add a flag to set edge weights on and set edge weights
+- Make edge weights and vertex values customizable to store whatever the client wants
+- Support multigraph functionality if the client wants it
+"""
from __future__ import annotations
@@ -16,6 +32,15 @@ def __init__(
self, vertices: list[T], edges: list[list[T]], directed: bool = True
) -> None:
+ """
+ Parameters:
+ - vertices: (list[T]) The list of vertex names the client wants to
+ pass in. Default is empty.
+ - edges: (list[list[T]]) The list of edges the client wants to
+ pass in. Each edge is a 2-element list. Default is empty.
+ - directed: (bool) Indicates if graph is directed or undirected.
+ Default is True.
+ """
self.directed = directed
self.vertex_to_index: dict[T, int] = {}
self.adj_matrix: list[list[int]] = []
@@ -34,6 +59,11 @@ self.add_edge(edge[0], edge[1])
def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Creates an edge from source vertex to destination vertex. If any
+ given vertex doesn't exist or the edge already exists, a ValueError
+ will be thrown.
+ """
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
@@ -58,6 +88,10 @@ self.adj_matrix[v][u] = 1
def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
+ """
+ Removes the edge between the two vertices. If any given vertex
+ doesn't exist or the edge does not exist, a ValueError will be thrown.
+ """
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
@@ -82,6 +116,10 @@ self.adj_matrix[v][u] = 0
def add_vertex(self, vertex: T) -> None:
+ """
+ Adds a vertex to the graph. If the given vertex already exists,
+ a ValueError will be thrown.
+ """
if self.contains_vertex(vertex):
msg = f"Incorrect input: {vertex} already exists in this graph."
raise ValueError(msg)
@@ -95,6 +133,11 @@ self.vertex_to_index[vertex] = len(self.adj_matrix) - 1
def remove_vertex(self, vertex: T) -> None:
+ """
+ Removes the given vertex from the graph and deletes all incoming and
+ outgoing edges from the given vertex as well. If the given vertex
+ does not exist, a ValueError will be thrown.
+ """
if not self.contains_vertex(vertex):
msg = f"Incorrect input: {vertex} does not exist in this graph."
raise ValueError(msg)
@@ -120,9 +163,17 @@ )
def contains_vertex(self, vertex: T) -> bool:
+ """
+ Returns True if the graph contains the vertex, False otherwise.
+ """
return vertex in self.vertex_to_index
def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
+ """
+ Returns True if the graph contains the edge from the source_vertex to the
+ destination_vertex, False otherwise. If any given vertex doesn't exist, a
+ ValueError will be thrown.
+ """
if not (
self.contains_vertex(source_vertex)
and self.contains_vertex(destination_vertex)
@@ -138,6 +189,9 @@ return self.adj_matrix[u][v] == 1
def clear_graph(self) -> None:
+ """
+ Clears all vertices and edges.
+ """
self.vertex_to_index = {}
self.adj_matrix = []
@@ -552,4 +606,4 @@
if __name__ == "__main__":
- unittest.main()+ unittest.main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/graphs/graph_adjacency_matrix.py |
Write beginner-friendly docstrings |
import doctest
import numpy as np
from numpy import ndarray
from sklearn.datasets import load_iris
def collect_dataset() -> tuple[ndarray, ndarray]:
iris_dataset = load_iris()
return np.array(iris_dataset.data), np.array(iris_dataset.target)
def compute_pairwise_affinities(data_matrix: ndarray, sigma: float = 1.0) -> ndarray:
n_samples = data_matrix.shape[0]
squared_sum = np.sum(np.square(data_matrix), axis=1)
squared_distance = np.add(
np.add(-2 * np.dot(data_matrix, data_matrix.T), squared_sum).T, squared_sum
)
affinity_matrix = np.exp(-squared_distance / (2 * sigma**2))
np.fill_diagonal(affinity_matrix, 0)
affinity_matrix /= np.sum(affinity_matrix)
return (affinity_matrix + affinity_matrix.T) / (2 * n_samples)
def compute_low_dim_affinities(embedding_matrix: ndarray) -> tuple[ndarray, ndarray]:
squared_sum = np.sum(np.square(embedding_matrix), axis=1)
numerator_matrix = 1 / (
1
+ np.add(
np.add(-2 * np.dot(embedding_matrix, embedding_matrix.T), squared_sum).T,
squared_sum,
)
)
np.fill_diagonal(numerator_matrix, 0)
q_matrix = numerator_matrix / np.sum(numerator_matrix)
return q_matrix, numerator_matrix
def apply_tsne(
data_matrix: ndarray,
n_components: int = 2,
learning_rate: float = 200.0,
n_iter: int = 500,
) -> ndarray:
if n_components < 1 or n_iter < 1:
raise ValueError("n_components and n_iter must be >= 1")
n_samples = data_matrix.shape[0]
rng = np.random.default_rng()
embedding = rng.standard_normal((n_samples, n_components)) * 1e-4
high_dim_affinities = compute_pairwise_affinities(data_matrix)
high_dim_affinities = np.maximum(high_dim_affinities, 1e-12)
embedding_increment = np.zeros_like(embedding)
momentum = 0.5
for iteration in range(n_iter):
low_dim_affinities, numerator_matrix = compute_low_dim_affinities(embedding)
low_dim_affinities = np.maximum(low_dim_affinities, 1e-12)
affinity_diff = high_dim_affinities - low_dim_affinities
gradient = 4 * (
np.dot((affinity_diff * numerator_matrix), embedding)
- np.multiply(
np.sum(affinity_diff * numerator_matrix, axis=1)[:, np.newaxis],
embedding,
)
)
embedding_increment = momentum * embedding_increment - learning_rate * gradient
embedding += embedding_increment
if iteration == int(n_iter / 4):
momentum = 0.8
return embedding
def main() -> None:
features, _labels = collect_dataset()
embedding = apply_tsne(features, n_components=2, n_iter=300)
if not isinstance(embedding, np.ndarray):
raise TypeError("t-SNE embedding must be an ndarray")
print("t-SNE embedding (first 5 points):")
print(embedding[:5])
# Optional visualization (Ruff/mypy compliant)
# import matplotlib.pyplot as plt
# plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, cmap="viridis")
# plt.title("t-SNE Visualization of the Iris Dataset")
# plt.xlabel("Dimension 1")
# plt.ylabel("Dimension 2")
# plt.show()
if __name__ == "__main__":
doctest.testmod()
main() | --- +++ @@ -1,3 +1,9 @@+"""
+t-distributed stochastic neighbor embedding (t-SNE)
+
+For more details, see:
+https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding
+"""
import doctest
@@ -7,11 +13,38 @@
def collect_dataset() -> tuple[ndarray, ndarray]:
+ """
+ Load the Iris dataset and return features and labels.
+
+ Returns:
+ tuple[ndarray, ndarray]: Feature matrix and target labels.
+
+ >>> features, targets = collect_dataset()
+ >>> features.shape
+ (150, 4)
+ >>> targets.shape
+ (150,)
+ """
iris_dataset = load_iris()
return np.array(iris_dataset.data), np.array(iris_dataset.target)
def compute_pairwise_affinities(data_matrix: ndarray, sigma: float = 1.0) -> ndarray:
+ """
+ Compute high-dimensional affinities (P matrix) using a Gaussian kernel.
+
+ Args:
+ data_matrix: Input data of shape (n_samples, n_features).
+ sigma: Gaussian kernel bandwidth.
+
+ Returns:
+ ndarray: Symmetrized probability matrix.
+
+ >>> x = np.array([[0.0, 0.0], [1.0, 0.0]])
+ >>> probabilities = compute_pairwise_affinities(x)
+ >>> float(round(probabilities[0, 1], 3))
+ 0.25
+ """
n_samples = data_matrix.shape[0]
squared_sum = np.sum(np.square(data_matrix), axis=1)
squared_distance = np.add(
@@ -26,6 +59,20 @@
def compute_low_dim_affinities(embedding_matrix: ndarray) -> tuple[ndarray, ndarray]:
+ """
+ Compute low-dimensional affinities (Q matrix) using a Student-t distribution.
+
+ Args:
+ embedding_matrix: Low-dimensional embedding of shape (n_samples, n_components).
+
+ Returns:
+ tuple[ndarray, ndarray]: (Q probability matrix, numerator matrix).
+
+ >>> y = np.array([[0.0, 0.0], [1.0, 0.0]])
+ >>> q_matrix, numerators = compute_low_dim_affinities(y)
+ >>> q_matrix.shape
+ (2, 2)
+ """
squared_sum = np.sum(np.square(embedding_matrix), axis=1)
numerator_matrix = 1 / (
1
@@ -46,6 +93,23 @@ learning_rate: float = 200.0,
n_iter: int = 500,
) -> ndarray:
+ """
+ Apply t-SNE for dimensionality reduction.
+
+ Args:
+ data_matrix: Original dataset (features).
+ n_components: Target dimension (2D or 3D).
+ learning_rate: Step size for gradient descent.
+ n_iter: Number of iterations.
+
+ Returns:
+ ndarray: Low-dimensional embedding of the data.
+
+ >>> features, _ = collect_dataset()
+ >>> embedding = apply_tsne(features, n_components=2, n_iter=50)
+ >>> embedding.shape
+ (150, 2)
+ """
if n_components < 1 or n_iter < 1:
raise ValueError("n_components and n_iter must be >= 1")
@@ -83,6 +147,13 @@
def main() -> None:
+ """
+ Run t-SNE on the Iris dataset and display the first 5 embeddings.
+
+ >>> main() # doctest: +ELLIPSIS
+ t-SNE embedding (first 5 points):
+ [[...
+ """
features, _labels = collect_dataset()
embedding = apply_tsne(features, n_components=2, n_iter=300)
@@ -104,4 +175,4 @@
if __name__ == "__main__":
doctest.testmod()
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/t_stochastic_neighbour_embedding.py |
Document functions with detailed explanations | import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
return np.dot(vector, vector)
class SVC:
def __init__(
self,
*,
regularization: float = np.inf,
kernel: str = "linear",
gamma: float = 0.0,
) -> None:
self.regularization = regularization
self.gamma = gamma
if kernel == "linear":
self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma")
if not isinstance(self.gamma, (float, int)):
raise ValueError("gamma must be float or int")
if not self.gamma > 0:
raise ValueError("gamma must be > 0")
self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
msg = f"Unknown kernel: {kernel}"
raise ValueError(msg)
# kernels
def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
return np.dot(vector1, vector2)
def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
def fit(self, observations: list[ndarray], classes: ndarray) -> None:
self.observations = observations
self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(n,) = np.shape(classes)
def to_minimize(candidate: ndarray) -> float:
s = 0
(n,) = np.shape(candidate)
for i in range(n):
for j in range(n):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i], observations[j])
)
return 1 / 2 * s - sum(candidate)
ly_contraint = LinearConstraint(classes, 0, 0)
l_bounds = Bounds(0, self.regularization)
l_star = minimize(
to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
).x
self.optimum = l_star
# calculating mean offset of separation plane to points
s = 0
for i in range(n):
for j in range(n):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i], observations[j]
)
self.offset = s / n
def predict(self, observation: ndarray) -> int:
s = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n], observation)
for n in range(len(self.classes))
)
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -4,10 +4,52 @@
def norm_squared(vector: ndarray) -> float:
+ """
+ Return the squared second norm of vector
+ norm_squared(v) = sum(x * x for x in v)
+
+ Args:
+ vector (ndarray): input vector
+
+ Returns:
+ float: squared second norm of vector
+
+ >>> int(norm_squared([1, 2]))
+ 5
+ >>> int(norm_squared(np.asarray([1, 2])))
+ 5
+ >>> int(norm_squared([0, 0]))
+ 0
+ """
return np.dot(vector, vector)
class SVC:
+ """
+ Support Vector Classifier
+
+ Args:
+ kernel (str): kernel to use. Default: linear
+ Possible choices:
+ - linear
+ regularization: constraint for soft margin (data not linearly separable)
+ Default: unbound
+
+ >>> SVC(kernel="asdf")
+ Traceback (most recent call last):
+ ...
+ ValueError: Unknown kernel: asdf
+
+ >>> SVC(kernel="rbf")
+ Traceback (most recent call last):
+ ...
+ ValueError: rbf kernel requires gamma
+
+ >>> SVC(kernel="rbf", gamma=-1)
+ Traceback (most recent call last):
+ ...
+ ValueError: gamma must be > 0
+ """
def __init__(
self,
@@ -37,12 +79,33 @@
# kernels
def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
+ """Linear kernel (as if no kernel used at all)"""
return np.dot(vector1, vector2)
def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
+ """
+ RBF: Radial Basis Function Kernel
+
+ Note: for more information see:
+ https://en.wikipedia.org/wiki/Radial_basis_function_kernel
+
+ Args:
+ vector1 (ndarray): first vector
+ vector2 (ndarray): second vector)
+
+ Returns:
+ float: exp(-(gamma * norm_squared(vector1 - vector2)))
+ """
return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
def fit(self, observations: list[ndarray], classes: ndarray) -> None:
+ """
+ Fits the SVC with a set of observations.
+
+ Args:
+ observations (list[ndarray]): list of observations
+ classes (ndarray): classification of each observation (in {1, -1})
+ """
self.observations = observations
self.classes = classes
@@ -65,6 +128,15 @@ (n,) = np.shape(classes)
def to_minimize(candidate: ndarray) -> float:
+ """
+ Opposite of the function to maximize
+
+ Args:
+ candidate (ndarray): candidate array to test
+
+ Return:
+ float: Wolfe's Dual result to minimize
+ """
s = 0
(n,) = np.shape(candidate)
for i in range(n):
@@ -96,6 +168,29 @@ self.offset = s / n
def predict(self, observation: ndarray) -> int:
+ """
+ Get the expected class of an observation
+
+ Args:
+ observation (Vector): observation
+
+ Returns:
+ int {1, -1}: expected class
+
+ >>> xs = [
+ ... np.asarray([0, 1]), np.asarray([0, 2]),
+ ... np.asarray([1, 1]), np.asarray([1, 2])
+ ... ]
+ >>> y = np.asarray([1, 1, -1, -1])
+ >>> s = SVC()
+ >>> s.fit(xs, y)
+ >>> s.predict(np.asarray([0, 1]))
+ 1
+ >>> s.predict(np.asarray([1, 1]))
+ -1
+ >>> s.predict(np.asarray([2, 2]))
+ -1
+ """
s = sum(
self.optimum[n]
* self.classes[n]
@@ -108,4 +203,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/support_vector_machines.py |
Add detailed docstrings explaining each function |
from collections import Counter
from itertools import combinations
def load_data() -> list[list[str]]:
return [["milk"], ["milk", "butter"], ["milk", "bread"], ["milk", "bread", "chips"]]
def prune(itemset: list, candidates: list, length: int) -> list:
itemset_counter = Counter(tuple(item) for item in itemset)
pruned = []
for candidate in candidates:
is_subsequence = True
for item in candidate:
item_tuple = tuple(item)
if (
item_tuple not in itemset_counter
or itemset_counter[item_tuple] < length - 1
):
is_subsequence = False
break
if is_subsequence:
pruned.append(candidate)
return pruned
def apriori(data: list[list[str]], min_support: int) -> list[tuple[list[str], int]]:
itemset = [list(transaction) for transaction in data]
frequent_itemsets = []
length = 1
while itemset:
# Count itemset support
counts = [0] * len(itemset)
for transaction in data:
for j, candidate in enumerate(itemset):
if all(item in transaction for item in candidate):
counts[j] += 1
# Prune infrequent itemsets
itemset = [item for i, item in enumerate(itemset) if counts[i] >= min_support]
# Append frequent itemsets (as a list to maintain order)
for i, item in enumerate(itemset):
frequent_itemsets.append((sorted(item), counts[i]))
length += 1
itemset = prune(itemset, list(combinations(itemset, length)), length)
return frequent_itemsets
if __name__ == "__main__":
"""
Apriori algorithm for finding frequent itemsets.
Args:
data: A list of transactions, where each transaction is a list of items.
min_support: The minimum support threshold for frequent itemsets.
Returns:
A list of frequent itemsets along with their support counts.
"""
import doctest
doctest.testmod()
# user-defined threshold or minimum support level
frequent_itemsets = apriori(data=load_data(), min_support=2)
print("\n".join(f"{itemset}: {support}" for itemset, support in frequent_itemsets)) | --- +++ @@ -1,13 +1,50 @@+"""
+Apriori Algorithm is a Association rule mining technique, also known as market basket
+analysis, aims to discover interesting relationships or associations among a set of
+items in a transactional or relational database.
+
+For example, Apriori Algorithm states: "If a customer buys item A and item B, then they
+are likely to buy item C." This rule suggests a relationship between items A, B, and C,
+indicating that customers who purchased A and B are more likely to also purchase item C.
+
+WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm
+Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining
+"""
from collections import Counter
from itertools import combinations
def load_data() -> list[list[str]]:
+ """
+ Returns a sample transaction dataset.
+
+ >>> load_data()
+ [['milk'], ['milk', 'butter'], ['milk', 'bread'], ['milk', 'bread', 'chips']]
+ """
return [["milk"], ["milk", "butter"], ["milk", "bread"], ["milk", "bread", "chips"]]
def prune(itemset: list, candidates: list, length: int) -> list:
+ """
+ Prune candidate itemsets that are not frequent.
+ The goal of pruning is to filter out candidate itemsets that are not frequent. This
+ is done by checking if all the (k-1) subsets of a candidate itemset are present in
+ the frequent itemsets of the previous iteration (valid subsequences of the frequent
+ itemsets from the previous iteration).
+
+ Prunes candidate itemsets that are not frequent.
+
+ >>> itemset = ['X', 'Y', 'Z']
+ >>> candidates = [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]
+ >>> prune(itemset, candidates, 2)
+ [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']]
+
+ >>> itemset = ['1', '2', '3', '4']
+ >>> candidates = ['1', '2', '4']
+ >>> prune(itemset, candidates, 3)
+ []
+ """
itemset_counter = Counter(tuple(item) for item in itemset)
pruned = []
for candidate in candidates:
@@ -26,6 +63,17 @@
def apriori(data: list[list[str]], min_support: int) -> list[tuple[list[str], int]]:
+ """
+ Returns a list of frequent itemsets and their support counts.
+
+ >>> data = [['A', 'B', 'C'], ['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C']]
+ >>> apriori(data, 2)
+ [(['A', 'B'], 1), (['A', 'C'], 2), (['B', 'C'], 2)]
+
+ >>> data = [['1', '2', '3'], ['1', '2'], ['1', '3'], ['1', '4'], ['2', '3']]
+ >>> apriori(data, 3)
+ []
+ """
itemset = [list(transaction) for transaction in data]
frequent_itemsets = []
length = 1
@@ -68,4 +116,4 @@
# user-defined threshold or minimum support level
frequent_itemsets = apriori(data=load_data(), min_support=2)
- print("\n".join(f"{itemset}: {support}" for itemset, support in frequent_itemsets))+ print("\n".join(f"{itemset}: {support}" for itemset, support in frequent_itemsets))
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/apriori_algorithm.py |
Add docstrings for utility scripts | # Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
return input_array.reshape((input_array.size, 1))
def covariance_within_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
data_mean = data.mean(1)
# Centralize the data of class i
centered_data = data - column_reshape(data_mean)
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(centered_data, centered_data.T)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = np.dot(centered_data, centered_data.T)
return covariance_sum / features.shape[1]
def covariance_between_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
general_data_mean = features.mean(1)
covariance_sum = np.nan
for i in range(classes):
data = features[:, labels == i]
device_data = data.shape[1]
data_mean = data.mean(1)
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean),
(column_reshape(data_mean) - column_reshape(general_data_mean)).T,
)
else:
# If covariance_sum is np.nan (i.e. first loop)
covariance_sum = device_data * np.dot(
column_reshape(data_mean) - column_reshape(general_data_mean),
(column_reshape(data_mean) - column_reshape(general_data_mean)).T,
)
return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
# Check if the features have been loaded
if features.any():
data_mean = features.mean(1)
# Center the dataset
centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
_, eigenvectors = np.linalg.eigh(covariance_matrix)
# Take all the columns in the reverse order (-1), and then takes only the first
filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
projected_data = np.dot(filtered_eigenvectors.T, features)
logging.info("Principal Component Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
logging.error("Dataset empty")
raise AssertionError
def linear_discriminant_analysis(
features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
# Check if the dimension desired is less than the number of classes
assert classes > dimensions
# Check if features have been already loaded
if features.any:
_, eigenvectors = eigh(
covariance_between_classes(features, labels, classes),
covariance_within_classes(features, labels, classes),
)
filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
filtered_svd_matrix = svd_matrix[:, 0:dimensions]
projected_data = np.dot(filtered_svd_matrix.T, features)
logging.info("Linear Discriminant Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
logging.error("Dataset empty")
raise AssertionError
def test_linear_discriminant_analysis() -> None:
# Create dummy dataset with 2 classes and 3 features
features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
labels = np.array([0, 0, 0, 1, 1])
classes = 2
dimensions = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(AssertionError) as error_info: # noqa: PT012
projected_data = linear_discriminant_analysis(
features, labels, classes, dimensions
)
if isinstance(projected_data, np.ndarray):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes"
)
assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dimensions = 2
expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
with pytest.raises(AssertionError) as error_info: # noqa: PT012
output = principal_component_analysis(features, dimensions)
if not np.allclose(expected_output, output):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,5 +1,12 @@ # Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub
+"""
+Requirements:
+ - numpy version 1.21
+ - scipy version 1.3.3
+Notes:
+ - Each column of the features matrix corresponds to a class item
+"""
import logging
@@ -11,6 +18,13 @@
def column_reshape(input_array: np.ndarray) -> np.ndarray:
+ """Function to reshape a row Numpy array into a column Numpy array
+ >>> input_array = np.array([1, 2, 3])
+ >>> column_reshape(input_array)
+ array([[1],
+ [2],
+ [3]])
+ """
return input_array.reshape((input_array.size, 1))
@@ -18,6 +32,14 @@ def covariance_within_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
+ """Function to compute the covariance matrix inside each class.
+ >>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ >>> labels = np.array([0, 1, 0])
+ >>> covariance_within_classes(features, labels, 2)
+ array([[0.66666667, 0.66666667, 0.66666667],
+ [0.66666667, 0.66666667, 0.66666667],
+ [0.66666667, 0.66666667, 0.66666667]])
+ """
covariance_sum = np.nan
for i in range(classes):
@@ -38,6 +60,14 @@ def covariance_between_classes(
features: np.ndarray, labels: np.ndarray, classes: int
) -> np.ndarray:
+ """Function to compute the covariance matrix between multiple classes
+ >>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]])
+ >>> labels = np.array([0, 1, 0])
+ >>> covariance_between_classes(features, labels, 2)
+ array([[ 3.55555556, 1.77777778, -2.66666667],
+ [ 1.77777778, 0.88888889, -1.33333333],
+ [-2.66666667, -1.33333333, 2. ]])
+ """
general_data_mean = features.mean(1)
covariance_sum = np.nan
@@ -62,6 +92,16 @@
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
+ """
+ Principal Component Analysis.
+
+ For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis.
+ Parameters:
+ * features: the features extracted from the dataset
+ * dimensions: to filter the projected data for the desired dimension
+
+ >>> test_principal_component_analysis()
+ """
# Check if the features have been loaded
if features.any():
@@ -86,6 +126,18 @@ def linear_discriminant_analysis(
features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
+ """
+ Linear Discriminant Analysis.
+
+ For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis.
+ Parameters:
+ * features: the features extracted from the dataset
+ * labels: the class labels of the features
+ * classes: the number of classes present in the dataset
+ * dimensions: to filter the projected data for the desired dimension
+
+ >>> test_linear_discriminant_analysis()
+ """
# Check if the dimension desired is less than the number of classes
assert classes > dimensions
@@ -143,4 +195,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/dimensionality_reduction.py |
Add missing documentation to my Python functions |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
# variables for calculation
x_min = min(data)
x_max = max(data)
# normalize data
return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
# variables for calculation
mu = mean(data)
sigma = stdev(data)
# standardize data
return [round((x - mu) / (sigma), ndigits) for x in data] | --- +++ @@ -1,8 +1,46 @@+"""
+Normalization.
+
+Wikipedia: https://en.wikipedia.org/wiki/Normalization
+Normalization is the process of converting numerical data to a standard range of values.
+This range is typically between [0, 1] or [-1, 1]. The equation for normalization is
+x_norm = (x - x_min)/(x_max - x_min) where x_norm is the normalized value, x is the
+value, x_min is the minimum value within the column or list of data, and x_max is the
+maximum value within the column or list of data. Normalization is used to speed up the
+training of data and put all of the data on a similar scale. This is useful because
+variance in the range of values of a dataset can heavily impact optimization
+(particularly Gradient Descent).
+
+Standardization Wikipedia: https://en.wikipedia.org/wiki/Standardization
+Standardization is the process of converting numerical data to a normally distributed
+range of values. This range will have a mean of 0 and standard deviation of 1. This is
+also known as z-score normalization. The equation for standardization is
+x_std = (x - mu)/(sigma) where mu is the mean of the column or list of values and sigma
+is the standard deviation of the column or list of values.
+
+Choosing between Normalization & Standardization is more of an art of a science, but it
+is often recommended to run experiments with both to see which performs better.
+Additionally, a few rules of thumb are:
+ 1. gaussian (normal) distributions work better with standardization
+ 2. non-gaussian (non-normal) distributions work better with normalization
+ 3. If a column or list of values has extreme values / outliers, use standardization
+"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
+ """
+ Return a normalized list of values.
+
+ @params: data, a list of values to normalize
+ @returns: a list of normalized values (rounded to ndigits decimal places)
+ @examples:
+ >>> normalization([2, 7, 10, 20, 30, 50])
+ [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
+ >>> normalization([5, 10, 15, 20, 25])
+ [0.0, 0.25, 0.5, 0.75, 1.0]
+ """
# variables for calculation
x_min = min(data)
x_max = max(data)
@@ -11,8 +49,19 @@
def standardization(data: list, ndigits: int = 3) -> list:
+ """
+ Return a standardized list of values.
+
+ @params: data, a list of values to standardize
+ @returns: a list of standardized values (rounded to ndigits decimal places)
+ @examples:
+ >>> standardization([2, 7, 10, 20, 30, 50])
+ [-0.999, -0.719, -0.551, 0.009, 0.57, 1.69]
+ >>> standardization([5, 10, 15, 20, 25])
+ [-1.265, -0.632, 0.0, 0.632, 1.265]
+ """
# variables for calculation
mu = mean(data)
sigma = stdev(data)
# standardize data
- return [round((x - mu) / (sigma), ndigits) for x in data]+ return [round((x - mu) / (sigma), ndigits) for x in data]
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/data_transformations.py |
Add docstrings to my Python code |
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import pairwise_distances
warnings.filterwarnings("ignore")
TAG = "K-MEANS-CLUST/ "
def get_initial_centroids(data, k, seed=None):
# useful for obtaining consistent results
rng = np.random.default_rng(seed)
n = data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = rng.integers(0, n, k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = data[rand_indices, :]
return centroids
def centroid_pairwise_dist(x, centroids):
return pairwise_distances(x, centroids, metric="euclidean")
def assign_clusters(data, centroids):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = centroid_pairwise_dist(data, centroids)
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.argmin(distances_from_centroids, axis=1)
return cluster_assignment
def revise_centroids(data, k, cluster_assignment):
new_centroids = []
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
return new_centroids
def compute_heterogeneity(data, k, centroids, cluster_assignment):
heterogeneity = 0.0
for i in range(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(
member_data_points, [centroids[i]], metric="euclidean"
)
squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
return heterogeneity
def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7, 4))
plt.plot(heterogeneity, linewidth=4)
plt.xlabel("# Iterations")
plt.ylabel("Heterogeneity")
plt.title(f"Heterogeneity of clustering over time, K={k:d}")
plt.rcParams.update({"font.size": 16})
plt.show()
def plot_kmeans(data, centroids, cluster_assignment):
ax = plt.axes(projection="3d")
ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=cluster_assignment, cmap="viridis")
ax.scatter(
centroids[:, 0], centroids[:, 1], centroids[:, 2], c="red", s=100, marker="x"
)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_title("3D K-Means Clustering Visualization")
plt.show()
def kmeans(
data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False
):
centroids = initial_centroids[:]
prev_cluster_assignment = None
for itr in range(maxiter):
if verbose:
print(itr, end="")
# 1. Make cluster assignments using nearest centroids
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data
# points assigned to that cluster.
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop
if (
prev_cluster_assignment is not None
and (prev_cluster_assignment == cluster_assignment).all()
):
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment != cluster_assignment)
if verbose:
print(
f" {num_changed:5d} elements changed their cluster assignment."
)
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
return centroids, cluster_assignment
# Mock test below
if False: # change to true to run this test case.
from sklearn import datasets as ds
dataset = ds.load_iris()
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(dataset["data"], k, seed=0)
centroids, cluster_assignment = kmeans(
dataset["data"],
k,
initial_centroids,
maxiter=400,
record_heterogeneity=heterogeneity,
verbose=True,
)
plot_heterogeneity(heterogeneity, k)
plot_kmeans(dataset["data"], centroids, cluster_assignment)
def report_generator(
predicted: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None
) -> pd.DataFrame:
# Fill missing values with given rules
if fill_missing_report:
predicted = predicted.fillna(value=fill_missing_report)
predicted["dummy"] = 1
numeric_cols = predicted.select_dtypes(np.number).columns
report = (
predicted.groupby(["Cluster"])[ # construct report dataframe
numeric_cols
] # group by cluster number
.agg(
[
("sum", "sum"),
("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))),
("mean_without_zeros", lambda x: x.replace(0, np.nan).mean()),
(
"mean_25-75",
lambda x: np.mean(
np.nan_to_num(
sorted(x)[
round(len(x) * 25 / 100) : round(len(x) * 75 / 100)
]
)
),
),
("mean_with_na", "mean"),
("min", lambda x: x.min()),
("5%", lambda x: x.quantile(0.05)),
("25%", lambda x: x.quantile(0.25)),
("50%", lambda x: x.quantile(0.50)),
("75%", lambda x: x.quantile(0.75)),
("95%", lambda x: x.quantile(0.95)),
("max", lambda x: x.max()),
("count", lambda x: x.count()),
("stdev", lambda x: x.std()),
("mode", lambda x: x.mode()[0]),
("median", lambda x: x.median()),
("# > 0", lambda x: (x > 0).sum()),
]
)
.T.reset_index()
.rename(index=str, columns={"level_0": "Features", "level_1": "Type"})
) # rename columns
# calculate the size of cluster(count of clientID's)
# avoid SettingWithCopyWarning
clustersize = report[
(report["Features"] == "dummy") & (report["Type"] == "count")
].copy()
# rename created predicted cluster to match report column names
clustersize.Type = "ClusterSize"
clustersize.Features = "# of Customers"
# calculating the proportion of cluster
clusterproportion = pd.DataFrame(
clustersize.iloc[:, 2:].to_numpy() / clustersize.iloc[:, 2:].to_numpy().sum()
)
# rename created predicted cluster to match report column names
clusterproportion["Type"] = "% of Customers"
clusterproportion["Features"] = "ClusterProportion"
cols = clusterproportion.columns.tolist()
cols = cols[-2:] + cols[:-2]
clusterproportion = clusterproportion[cols] # rearrange columns to match report
clusterproportion.columns = report.columns
# generating dataframe with count of nan values
a = pd.DataFrame(
abs(
report[report["Type"] == "count"].iloc[:, 2:].to_numpy()
- clustersize.iloc[:, 2:].to_numpy()
)
)
a["Features"] = 0
a["Type"] = "# of nan"
# filling values in order to match report
a.Features = report[report["Type"] == "count"].Features.tolist()
cols = a.columns.tolist()
cols = cols[-2:] + cols[:-2]
a = a[cols] # rearrange columns to match report
a.columns = report.columns # rename columns to match report
# drop count values except for cluster size
report = report.drop(report[report.Type == "count"].index)
# concat report with cluster size and nan values
report = pd.concat([report, a, clustersize, clusterproportion], axis=0)
report["Mark"] = report["Features"].isin(clustering_variables)
cols = report.columns.tolist()
cols = cols[0:2] + cols[-1:] + cols[2:-1]
report = report[cols]
sorter1 = {
"ClusterSize": 9,
"ClusterProportion": 8,
"mean_with_zeros": 7,
"mean_with_na": 6,
"max": 5,
"50%": 4,
"min": 3,
"25%": 2,
"75%": 1,
"# of nan": 0,
"# > 0": -1,
"sum_with_na": -2,
}
report = (
report.assign(
Sorter1=lambda x: x.Type.map(sorter1),
Sorter2=lambda x: list(reversed(range(len(x)))),
)
.sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False)
.drop(["Sorter1", "Sorter2"], axis=1)
)
report.columns.name = ""
report = report.reset_index()
report = report.drop(columns=["index"])
return report
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,51 @@+"""README, Author - Anurag Kumar(mailto:anuragkumarak95@gmail.com)
+Requirements:
+ - sklearn
+ - numpy
+ - matplotlib
+Python:
+ - 3.5
+Inputs:
+ - X , a 2D numpy array of features.
+ - k , number of clusters to create.
+ - initial_centroids , initial centroid values generated by utility function(mentioned
+ in usage).
+ - maxiter , maximum number of iterations to process.
+ - heterogeneity , empty list that will be filled with heterogeneity values if passed
+ to kmeans func.
+Usage:
+ 1. define 'k' value, 'X' features array and 'heterogeneity' empty list
+ 2. create initial_centroids,
+ initial_centroids = get_initial_centroids(
+ X,
+ k,
+ seed=0 # seed value for initial centroid generation,
+ # None for randomness(default=None)
+ )
+ 3. find centroids and clusters using kmeans function.
+ centroids, cluster_assignment = kmeans(
+ X,
+ k,
+ initial_centroids,
+ maxiter=400,
+ record_heterogeneity=heterogeneity,
+ verbose=True # whether to print logs in console or not.(default=False)
+ )
+ 4. Plot the loss function and heterogeneity values for every iteration saved in
+ heterogeneity list.
+ plot_heterogeneity(
+ heterogeneity,
+ k
+ )
+ 5. Plot the labeled 3D data points with centroids.
+ plot_kmeans(
+ X,
+ centroids,
+ cluster_assignment
+ )
+ 6. Transfers Dataframe into excel format it must have feature called
+ 'Clust' with k means clustering numbers in it.
+"""
import warnings
@@ -12,6 +60,7 @@
def get_initial_centroids(data, k, seed=None):
+ """Randomly choose k data points as initial centroids"""
# useful for obtaining consistent results
rng = np.random.default_rng(seed)
n = data.shape[0] # number of data points
@@ -99,6 +148,13 @@ def kmeans(
data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False
):
+ """Runs k-means on given data and initial set of centroids.
+ maxiter: maximum number of iterations to run.(default=500)
+ record_heterogeneity: (optional) a list, to store the history of heterogeneity
+ as function of iterations
+ if None, do not store the history.
+ verbose: if True, print how many data points changed their cluster labels in
+ each iteration"""
centroids = initial_centroids[:]
prev_cluster_assignment = None
@@ -162,6 +218,33 @@ def report_generator(
predicted: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None
) -> pd.DataFrame:
+ """
+ Generate a clustering report given these two arguments:
+ predicted - dataframe with predicted cluster column
+ fill_missing_report - dictionary of rules on how we are going to fill in missing
+ values for final generated report (not included in modelling);
+ >>> predicted = pd.DataFrame()
+ >>> predicted['numbers'] = [1, 2, 3]
+ >>> predicted['col1'] = [0.5, 2.5, 4.5]
+ >>> predicted['col2'] = [100, 200, 300]
+ >>> predicted['col3'] = [10, 20, 30]
+ >>> predicted['Cluster'] = [1, 1, 2]
+ >>> report_generator(predicted, ['col1', 'col2'], 0)
+ Features Type Mark 1 2
+ 0 # of Customers ClusterSize False 2.000000 1.000000
+ 1 % of Customers ClusterProportion False 0.666667 0.333333
+ 2 col1 mean_with_zeros True 1.500000 4.500000
+ 3 col2 mean_with_zeros True 150.000000 300.000000
+ 4 numbers mean_with_zeros False 1.500000 3.000000
+ .. ... ... ... ... ...
+ 99 dummy 5% False 1.000000 1.000000
+ 100 dummy 95% False 1.000000 1.000000
+ 101 dummy stdev False 0.000000 NaN
+ 102 dummy mode False 1.000000 1.000000
+ 103 dummy median False 1.000000 1.000000
+ <BLANKLINE>
+ [104 rows x 5 columns]
+ """
# Fill missing values with given rules
if fill_missing_report:
predicted = predicted.fillna(value=fill_missing_report)
@@ -277,4 +360,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/k_means_clust.py |
Create documentation for each function signature |
import math
def prime_factors(n: int) -> list:
if n <= 0:
raise ValueError("Only positive integers have prime factors")
pf = []
while n % 2 == 0:
pf.append(2)
n = int(n / 2)
for i in range(3, int(math.sqrt(n)) + 1, 2):
while n % i == 0:
pf.append(i)
n = int(n / i)
if n > 2:
pf.append(n)
return pf
def number_of_divisors(n: int) -> int:
if n <= 0:
raise ValueError("Only positive numbers are accepted")
div = 1
temp = 1
while n % 2 == 0:
temp += 1
n = int(n / 2)
div *= temp
for i in range(3, int(math.sqrt(n)) + 1, 2):
temp = 1
while n % i == 0:
temp += 1
n = int(n / i)
div *= temp
if n > 1:
div *= 2
return div
def sum_of_divisors(n: int) -> int:
if n <= 0:
raise ValueError("Only positive numbers are accepted")
s = 1
temp = 1
while n % 2 == 0:
temp += 1
n = int(n / 2)
if temp > 1:
s *= (2**temp - 1) / (2 - 1)
for i in range(3, int(math.sqrt(n)) + 1, 2):
temp = 1
while n % i == 0:
temp += 1
n = int(n / i)
if temp > 1:
s *= (i**temp - 1) / (i - 1)
return int(s)
def euler_phi(n: int) -> int:
if n <= 0:
raise ValueError("Only positive numbers are accepted")
s = n
for x in set(prime_factors(n)):
s *= (x - 1) / x
return int(s)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,73 +1,122 @@-
-import math
-
-
-def prime_factors(n: int) -> list:
- if n <= 0:
- raise ValueError("Only positive integers have prime factors")
- pf = []
- while n % 2 == 0:
- pf.append(2)
- n = int(n / 2)
- for i in range(3, int(math.sqrt(n)) + 1, 2):
- while n % i == 0:
- pf.append(i)
- n = int(n / i)
- if n > 2:
- pf.append(n)
- return pf
-
-
-def number_of_divisors(n: int) -> int:
- if n <= 0:
- raise ValueError("Only positive numbers are accepted")
- div = 1
- temp = 1
- while n % 2 == 0:
- temp += 1
- n = int(n / 2)
- div *= temp
- for i in range(3, int(math.sqrt(n)) + 1, 2):
- temp = 1
- while n % i == 0:
- temp += 1
- n = int(n / i)
- div *= temp
- if n > 1:
- div *= 2
- return div
-
-
-def sum_of_divisors(n: int) -> int:
- if n <= 0:
- raise ValueError("Only positive numbers are accepted")
- s = 1
- temp = 1
- while n % 2 == 0:
- temp += 1
- n = int(n / 2)
- if temp > 1:
- s *= (2**temp - 1) / (2 - 1)
- for i in range(3, int(math.sqrt(n)) + 1, 2):
- temp = 1
- while n % i == 0:
- temp += 1
- n = int(n / i)
- if temp > 1:
- s *= (i**temp - 1) / (i - 1)
- return int(s)
-
-
-def euler_phi(n: int) -> int:
- if n <= 0:
- raise ValueError("Only positive numbers are accepted")
- s = n
- for x in set(prime_factors(n)):
- s *= (x - 1) / x
- return int(s)
-
-
-if __name__ == "__main__":
- import doctest
-
- doctest.testmod()+"""Implementation of Basic Math in Python."""
+
+import math
+
+
+def prime_factors(n: int) -> list:
+ """Find Prime Factors.
+ >>> prime_factors(100)
+ [2, 2, 5, 5]
+ >>> prime_factors(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive integers have prime factors
+ >>> prime_factors(-10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive integers have prime factors
+ """
+ if n <= 0:
+ raise ValueError("Only positive integers have prime factors")
+ pf = []
+ while n % 2 == 0:
+ pf.append(2)
+ n = int(n / 2)
+ for i in range(3, int(math.sqrt(n)) + 1, 2):
+ while n % i == 0:
+ pf.append(i)
+ n = int(n / i)
+ if n > 2:
+ pf.append(n)
+ return pf
+
+
+def number_of_divisors(n: int) -> int:
+ """Calculate Number of Divisors of an Integer.
+ >>> number_of_divisors(100)
+ 9
+ >>> number_of_divisors(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive numbers are accepted
+ >>> number_of_divisors(-10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive numbers are accepted
+ """
+ if n <= 0:
+ raise ValueError("Only positive numbers are accepted")
+ div = 1
+ temp = 1
+ while n % 2 == 0:
+ temp += 1
+ n = int(n / 2)
+ div *= temp
+ for i in range(3, int(math.sqrt(n)) + 1, 2):
+ temp = 1
+ while n % i == 0:
+ temp += 1
+ n = int(n / i)
+ div *= temp
+ if n > 1:
+ div *= 2
+ return div
+
+
+def sum_of_divisors(n: int) -> int:
+ """Calculate Sum of Divisors.
+ >>> sum_of_divisors(100)
+ 217
+ >>> sum_of_divisors(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive numbers are accepted
+ >>> sum_of_divisors(-10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive numbers are accepted
+ """
+ if n <= 0:
+ raise ValueError("Only positive numbers are accepted")
+ s = 1
+ temp = 1
+ while n % 2 == 0:
+ temp += 1
+ n = int(n / 2)
+ if temp > 1:
+ s *= (2**temp - 1) / (2 - 1)
+ for i in range(3, int(math.sqrt(n)) + 1, 2):
+ temp = 1
+ while n % i == 0:
+ temp += 1
+ n = int(n / i)
+ if temp > 1:
+ s *= (i**temp - 1) / (i - 1)
+ return int(s)
+
+
+def euler_phi(n: int) -> int:
+ """Calculate Euler's Phi Function.
+ >>> euler_phi(100)
+ 40
+ >>> euler_phi(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive numbers are accepted
+ >>> euler_phi(-10)
+ Traceback (most recent call last):
+ ...
+ ValueError: Only positive numbers are accepted
+ """
+ if n <= 0:
+ raise ValueError("Only positive numbers are accepted")
+ s = n
+ for x in set(prime_factors(n)):
+ s *= (x - 1) / x
+ return int(s)
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/maths/basic_maths.py |
Add documentation for all methods |
from warnings import simplefilter
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
y = np.array(train_usr)
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
# Suppress the User Warning raised by SARIMAX due to insufficient observations
simplefilter("ignore", UserWarning)
order = (1, 2, 1)
seasonal_order = (1, 1, 1, 7)
model = SARIMAX(
train_user, exog=train_match, order=order, seasonal_order=seasonal_order
)
model_fit = model.fit(disp=False, maxiter=600, method="nm")
result = model_fit.predict(1, len(test_match), exog=[test_match])
return float(result[0])
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
return float(y_pred[0])
def interquartile_range_checker(train_user: list) -> float:
train_user.sort()
q1 = np.percentile(train_user, 25)
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return float(low_lim)
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
safe = 0
not_safe = 0
if not isinstance(actual_result, float):
raise TypeError("Actual result should be float. Value passed is a list")
for i in list_vote:
if i > actual_result:
safe = not_safe + 1
elif abs(abs(i) - abs(actual_result)) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
"""
data column = total user in a day, how much online event held in one day,
what day is that(sunday-saturday)
"""
data_input_df = pd.read_csv("ex_data.csv")
# start normalization
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear regression & sarimax
train_date = total_date[: len(total_date) - 1]
train_user = total_user[: len(total_user) - 1]
train_match = total_match[: len(total_match) - 1]
test_date = total_date[len(total_date) - 1 :]
test_user = total_user[len(total_user) - 1 :]
test_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = [
linear_regression_prediction(
train_date, train_user, train_match, test_date, test_match
),
sarimax_predictor(train_user, train_match, test_match),
support_vector_regressor(x_train, x_test, train_user),
]
# check the safety of today's data
not_str = "" if data_safety_checker(res_vote, test_user[0]) else "not "
print(f"Today's data is {not_str}safe.") | --- +++ @@ -1,3 +1,15 @@+"""
+this is code for forecasting
+but I modified it and used it for safety checker of data
+for ex: you have an online shop and for some reason some data are
+missing (the amount of data that u expected are not supposed to be)
+ then we can use it
+*ps : 1. ofc we can use normal statistic method but in this case
+ the data is quite absurd and only a little^^
+ 2. ofc u can use this and modified it for forecasting purpose
+ for the next 3 months sales or something,
+ u can just adjust it for ur own purpose
+"""
from warnings import simplefilter
@@ -11,6 +23,14 @@ def linear_regression_prediction(
train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
+ """
+ First method: linear regression
+ input : training data (date, total_user, total_event) in list of float
+ output : list of total user prediction in float
+ >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
+ >>> bool(abs(n - 5.0) < 1e-6) # Checking precision because of floating point errors
+ True
+ """
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
y = np.array(train_usr)
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
@@ -18,6 +38,15 @@
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
+ """
+ second method: Sarimax
+ sarimax is a statistic method which using previous input
+ and learn its pattern to predict future data
+ input : training data (total_user, with exog data = total_event) in list of float
+ output : list of total user prediction in float
+ >>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2])
+ 6.6666671111109626
+ """
# Suppress the User Warning raised by SARIMAX due to insufficient observations
simplefilter("ignore", UserWarning)
order = (1, 2, 1)
@@ -31,6 +60,18 @@
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
+ """
+ Third method: Support vector regressor
+ svr is quite the same with svm(support vector machine)
+ it uses the same principles as the SVM for classification,
+ with only a few minor differences and the only different is that
+ it suits better for regression purpose
+ input : training data (date, total_user, total_event) in list of float
+ where x = list of set (date and total event)
+ output : list of total user prediction in float
+ >>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4])
+ 1.634932078116079
+ """
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
@@ -38,6 +79,14 @@
def interquartile_range_checker(train_user: list) -> float:
+ """
+ Optional method: interquatile range
+ input : list of total user in float
+ output : low limit of input in float
+ this method can be used to check whether some data is outlier or not
+ >>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
+ 2.8
+ """
train_user.sort()
q1 = np.percentile(train_user, 25)
q3 = np.percentile(train_user, 75)
@@ -47,6 +96,14 @@
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
+ """
+ Used to review all the votes (list result prediction)
+ and compare it to the actual result.
+ input : list of predictions
+ output : print whether it's safe or not
+ >>> data_safety_checker([2, 3, 4], 5.0)
+ False
+ """
safe = 0
not_safe = 0
@@ -102,4 +159,4 @@
# check the safety of today's data
not_str = "" if data_safety_checker(res_vote, test_user[0]) else "not "
- print(f"Today's data is {not_str}safe.")+ print(f"Today's data is {not_str}safe.")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/forecasting/run.py |
Document all endpoints with docstrings |
from __future__ import annotations
from dataclasses import dataclass, field
@dataclass
class TreeNode:
name: str
count: int
parent: TreeNode | None = None
children: dict[str, TreeNode] = field(default_factory=dict)
node_link: TreeNode | None = None
def __repr__(self) -> str:
return f"TreeNode({self.name!r}, {self.count!r}, {self.parent!r})"
def inc(self, num_occur: int) -> None:
self.count += num_occur
def disp(self, ind: int = 1) -> None:
print(f"{' ' * ind} {self.name} {self.count}")
for child in self.children.values():
child.disp(ind + 1)
def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]:
header_table: dict = {}
for trans in data_set:
for item in trans:
header_table[item] = header_table.get(item, [0, None])
header_table[item][0] += 1
for k in list(header_table):
if header_table[k][0] < min_sup:
del header_table[k]
if not (freq_item_set := set(header_table)):
return TreeNode("Null Set", 1, None), {}
for key, value in header_table.items():
header_table[key] = [value, None]
fp_tree = TreeNode("Null Set", 1, None) # Parent is None for the root node
for tran_set in data_set:
local_d = {
item: header_table[item][0] for item in tran_set if item in freq_item_set
}
if local_d:
sorted_items = sorted(
local_d.items(), key=lambda item_info: item_info[1], reverse=True
)
ordered_items = [item[0] for item in sorted_items]
update_tree(ordered_items, fp_tree, header_table, 1)
return fp_tree, header_table
def update_tree(items: list, in_tree: TreeNode, header_table: dict, count: int) -> None:
if items[0] in in_tree.children:
in_tree.children[items[0]].inc(count)
else:
in_tree.children[items[0]] = TreeNode(items[0], count, in_tree)
if header_table[items[0]][1] is None:
header_table[items[0]][1] = in_tree.children[items[0]]
else:
update_header(header_table[items[0]][1], in_tree.children[items[0]])
if len(items) > 1:
update_tree(items[1:], in_tree.children[items[0]], header_table, count)
def update_header(node_to_test: TreeNode, target_node: TreeNode) -> TreeNode:
while node_to_test.node_link is not None:
node_to_test = node_to_test.node_link
if node_to_test.node_link is None:
node_to_test.node_link = target_node
# Return the updated node
return node_to_test
def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None:
if leaf_node.parent is not None:
prefix_path.append(leaf_node.name)
ascend_tree(leaf_node.parent, prefix_path)
def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: # noqa: ARG001
cond_pats: dict = {}
while tree_node is not None:
prefix_path: list = []
ascend_tree(tree_node, prefix_path)
if len(prefix_path) > 1:
cond_pats[frozenset(prefix_path[1:])] = tree_node.count
tree_node = tree_node.node_link
return cond_pats
def mine_tree(
in_tree: TreeNode, # noqa: ARG001
header_table: dict,
min_sup: int,
pre_fix: set,
freq_item_list: list,
) -> None:
sorted_items = sorted(header_table.items(), key=lambda item_info: item_info[1][0])
big_l = [item[0] for item in sorted_items]
for base_pat in big_l:
new_freq_set = pre_fix.copy()
new_freq_set.add(base_pat)
freq_item_list.append(new_freq_set)
cond_patt_bases = find_prefix_path(base_pat, header_table[base_pat][1])
my_cond_tree, my_head = create_tree(list(cond_patt_bases), min_sup)
if my_head is not None:
# Pass header_table[base_pat][1] as node_to_test to update_header
header_table[base_pat][1] = update_header(
header_table[base_pat][1], my_cond_tree
)
mine_tree(my_cond_tree, my_head, min_sup, new_freq_set, freq_item_list)
if __name__ == "__main__":
from doctest import testmod
testmod()
data_set: list[frozenset] = [
frozenset(["bread", "milk", "cheese"]),
frozenset(["bread", "milk"]),
frozenset(["bread", "diapers"]),
frozenset(["bread", "milk", "diapers"]),
frozenset(["milk", "diapers"]),
frozenset(["milk", "cheese"]),
frozenset(["diapers", "cheese"]),
frozenset(["bread", "milk", "cheese", "diapers"]),
]
print(f"{len(data_set) = }")
fp_tree, header_table = create_tree(data_set, min_sup=3)
print(f"{fp_tree = }")
print(f"{len(header_table) = }")
freq_items: list = []
mine_tree(fp_tree, header_table, 3, set(), freq_items)
print(f"{freq_items = }") | --- +++ @@ -1,3 +1,14 @@+"""
+The Frequent Pattern Growth algorithm (FP-Growth) is a widely used data mining
+technique for discovering frequent itemsets in large transaction databases.
+
+It overcomes some of the limitations of traditional methods such as Apriori by
+efficiently constructing the FP-Tree
+
+WIKI: https://athena.ecs.csus.edu/~mei/associationcw/FpGrowth.html
+
+Examples: https://www.javatpoint.com/fp-growth-algorithm-in-data-mining
+"""
from __future__ import annotations
@@ -6,6 +17,22 @@
@dataclass
class TreeNode:
+ """
+ A node in a Frequent Pattern tree.
+
+ Args:
+ name: The name of this node.
+ num_occur: The number of occurrences of the node.
+ parent_node: The parent node.
+
+ Example:
+ >>> parent = TreeNode("Parent", 1, None)
+ >>> child = TreeNode("Child", 2, parent)
+ >>> child.name
+ 'Child'
+ >>> child.count
+ 2
+ """
name: str
count: int
@@ -26,6 +53,47 @@
def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]:
+ """
+ Create Frequent Pattern tree
+
+ Args:
+ data_set: A list of transactions, where each transaction is a list of items.
+ min_sup: The minimum support threshold.
+ Items with support less than this will be pruned. Default is 1.
+
+ Returns:
+ The root of the FP-Tree.
+ header_table: The header table dictionary with item information.
+
+ Example:
+ >>> data_set = [
+ ... ['A', 'B', 'C'],
+ ... ['A', 'C'],
+ ... ['A', 'B', 'E'],
+ ... ['A', 'B', 'C', 'E'],
+ ... ['B', 'E']
+ ... ]
+ >>> min_sup = 2
+ >>> fp_tree, header_table = create_tree(data_set, min_sup)
+ >>> fp_tree
+ TreeNode('Null Set', 1, None)
+ >>> len(header_table)
+ 4
+ >>> header_table["A"]
+ [[4, None], TreeNode('A', 4, TreeNode('Null Set', 1, None))]
+ >>> header_table["E"][1] # doctest: +NORMALIZE_WHITESPACE
+ TreeNode('E', 1, TreeNode('B', 3, TreeNode('A', 4, TreeNode('Null Set', 1, None))))
+ >>> sorted(header_table)
+ ['A', 'B', 'C', 'E']
+ >>> fp_tree.name
+ 'Null Set'
+ >>> sorted(fp_tree.children)
+ ['A', 'B']
+ >>> fp_tree.children['A'].name
+ 'A'
+ >>> sorted(fp_tree.children['A'].children)
+ ['B', 'C']
+ """
header_table: dict = {}
for trans in data_set:
for item in trans:
@@ -58,6 +126,38 @@
def update_tree(items: list, in_tree: TreeNode, header_table: dict, count: int) -> None:
+ """
+ Update the FP-Tree with a transaction.
+
+ Args:
+ items: List of items in the transaction.
+ in_tree: The current node in the FP-Tree.
+ header_table: The header table dictionary with item information.
+ count: The count of the transaction.
+
+ Example:
+ >>> data_set = [
+ ... ['A', 'B', 'C'],
+ ... ['A', 'C'],
+ ... ['A', 'B', 'E'],
+ ... ['A', 'B', 'C', 'E'],
+ ... ['B', 'E']
+ ... ]
+ >>> min_sup = 2
+ >>> fp_tree, header_table = create_tree(data_set, min_sup)
+ >>> fp_tree
+ TreeNode('Null Set', 1, None)
+ >>> transaction = ['A', 'B', 'E']
+ >>> update_tree(transaction, fp_tree, header_table, 1)
+ >>> fp_tree
+ TreeNode('Null Set', 1, None)
+ >>> fp_tree.children['A'].children['B'].children['E'].children
+ {}
+ >>> fp_tree.children['A'].children['B'].children['E'].count
+ 2
+ >>> header_table['E'][1].name
+ 'E'
+ """
if items[0] in in_tree.children:
in_tree.children[items[0]].inc(count)
else:
@@ -71,6 +171,37 @@
def update_header(node_to_test: TreeNode, target_node: TreeNode) -> TreeNode:
+ """
+ Update the header table with a node link.
+
+ Args:
+ node_to_test: The node to be updated in the header table.
+ target_node: The node to link to.
+
+ Example:
+ >>> data_set = [
+ ... ['A', 'B', 'C'],
+ ... ['A', 'C'],
+ ... ['A', 'B', 'E'],
+ ... ['A', 'B', 'C', 'E'],
+ ... ['B', 'E']
+ ... ]
+ >>> min_sup = 2
+ >>> fp_tree, header_table = create_tree(data_set, min_sup)
+ >>> fp_tree
+ TreeNode('Null Set', 1, None)
+ >>> node1 = TreeNode("A", 3, None)
+ >>> node2 = TreeNode("B", 4, None)
+ >>> node1
+ TreeNode('A', 3, None)
+ >>> node1 = update_header(node1, node2)
+ >>> node1
+ TreeNode('A', 3, None)
+ >>> node1.node_link
+ TreeNode('B', 4, None)
+ >>> node2.node_link is None
+ True
+ """
while node_to_test.node_link is not None:
node_to_test = node_to_test.node_link
if node_to_test.node_link is None:
@@ -80,12 +211,61 @@
def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None:
+ """
+ Ascend the FP-Tree from a leaf node to its root, adding item names to the prefix
+ path.
+
+ Args:
+ leaf_node: The leaf node to start ascending from.
+ prefix_path: A list to store the item as they are ascended.
+
+ Example:
+ >>> data_set = [
+ ... ['A', 'B', 'C'],
+ ... ['A', 'C'],
+ ... ['A', 'B', 'E'],
+ ... ['A', 'B', 'C', 'E'],
+ ... ['B', 'E']
+ ... ]
+ >>> min_sup = 2
+ >>> fp_tree, header_table = create_tree(data_set, min_sup)
+
+ >>> path = []
+ >>> ascend_tree(fp_tree.children['A'], path)
+ >>> path # ascending from a leaf node 'A'
+ ['A']
+ """
if leaf_node.parent is not None:
prefix_path.append(leaf_node.name)
ascend_tree(leaf_node.parent, prefix_path)
def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: # noqa: ARG001
+ """
+ Find the conditional pattern base for a given base pattern.
+
+ Args:
+ base_pat: The base pattern for which to find the conditional pattern base.
+ tree_node: The node in the FP-Tree.
+
+ Example:
+ >>> data_set = [
+ ... ['A', 'B', 'C'],
+ ... ['A', 'C'],
+ ... ['A', 'B', 'E'],
+ ... ['A', 'B', 'C', 'E'],
+ ... ['B', 'E']
+ ... ]
+ >>> min_sup = 2
+ >>> fp_tree, header_table = create_tree(data_set, min_sup)
+ >>> fp_tree
+ TreeNode('Null Set', 1, None)
+ >>> len(header_table)
+ 4
+ >>> base_pattern = frozenset(['A'])
+ >>> sorted(find_prefix_path(base_pattern, fp_tree.children['A']))
+ []
+ """
cond_pats: dict = {}
while tree_node is not None:
prefix_path: list = []
@@ -103,6 +283,34 @@ pre_fix: set,
freq_item_list: list,
) -> None:
+ """
+ Mine the FP-Tree recursively to discover frequent itemsets.
+
+ Args:
+ in_tree: The FP-Tree to mine.
+ header_table: The header table dictionary with item information.
+ min_sup: The minimum support threshold.
+ pre_fix: A set of items as a prefix for the itemsets being mined.
+ freq_item_list: A list to store the frequent itemsets.
+
+ Example:
+ >>> data_set = [
+ ... ['A', 'B', 'C'],
+ ... ['A', 'C'],
+ ... ['A', 'B', 'E'],
+ ... ['A', 'B', 'C', 'E'],
+ ... ['B', 'E']
+ ... ]
+ >>> min_sup = 2
+ >>> fp_tree, header_table = create_tree(data_set, min_sup)
+ >>> fp_tree
+ TreeNode('Null Set', 1, None)
+ >>> frequent_itemsets = []
+ >>> mine_tree(fp_tree, header_table, min_sup, set([]), frequent_itemsets)
+ >>> expe_itm = [{'C'}, {'C', 'A'}, {'E'}, {'A', 'E'}, {'E', 'B'}, {'A'}, {'B'}]
+ >>> all(expected in frequent_itemsets for expected in expe_itm)
+ True
+ """
sorted_items = sorted(header_table.items(), key=lambda item_info: item_info[1][0])
big_l = [item[0] for item in sorted_items]
for base_pat in big_l:
@@ -139,4 +347,4 @@ print(f"{len(header_table) = }")
freq_items: list = []
mine_tree(fp_tree, header_table, 3, set(), freq_items)
- print(f"{freq_items = }")+ print(f"{freq_items = }")
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/frequent_pattern_growth.py |
Annotate my code with docstrings |
from collections import Counter
from heapq import nsmallest
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
class KNN:
def __init__(
self,
train_data: np.ndarray[float],
train_target: np.ndarray[int],
class_labels: list[str],
) -> None:
self.data = zip(train_data, train_target)
self.labels = class_labels
@staticmethod
def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float:
return float(np.linalg.norm(a - b))
def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
# Distances of all points from the point to be classified
distances = (
(self._euclidean_distance(data_point[0], pred_point), data_point[1])
for data_point in self.data
)
# Choosing k points with the shortest distances
votes = (i[1] for i in nsmallest(k, distances))
# Most commonly occurring class is the one into which the point is classified
result = Counter(votes).most_common(1)[0][0]
return self.labels[result]
if __name__ == "__main__":
import doctest
doctest.testmod()
iris = datasets.load_iris()
X = np.array(iris["data"])
y = np.array(iris["target"])
iris_classes = iris["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
iris_point = np.array([4.4, 3.1, 1.3, 1.4])
classifier = KNN(X_train, y_train, iris_classes)
print(classifier.classify(iris_point, k=3)) | --- +++ @@ -1,3 +1,16 @@+"""
+k-Nearest Neighbours (kNN) is a simple non-parametric supervised learning
+algorithm used for classification. Given some labelled training data, a given
+point is classified using its k nearest neighbours according to some distance
+metric. The most commonly occurring label among the neighbours becomes the label
+of the given point. In effect, the label of the given point is decided by a
+majority vote.
+
+This implementation uses the commonly used Euclidean distance metric, but other
+distance metrics can also be used.
+
+Reference: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
+"""
from collections import Counter
from heapq import nsmallest
@@ -14,14 +27,36 @@ train_target: np.ndarray[int],
class_labels: list[str],
) -> None:
+ """
+ Create a kNN classifier using the given training data and class labels
+ """
self.data = zip(train_data, train_target)
self.labels = class_labels
@staticmethod
def _euclidean_distance(a: np.ndarray[float], b: np.ndarray[float]) -> float:
+ """
+ Calculate the Euclidean distance between two points
+ >>> KNN._euclidean_distance(np.array([0, 0]), np.array([3, 4]))
+ 5.0
+ >>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
+ 10.0
+ """
return float(np.linalg.norm(a - b))
def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
+ """
+ Classify a given point using the kNN algorithm
+ >>> train_X = np.array(
+ ... [[0, 0], [1, 0], [0, 1], [0.5, 0.5], [3, 3], [2, 3], [3, 2]]
+ ... )
+ >>> train_y = np.array([0, 0, 0, 0, 1, 1, 1])
+ >>> classes = ['A', 'B']
+ >>> knn = KNN(train_X, train_y, classes)
+ >>> point = np.array([1.2, 1.2])
+ >>> knn.classify(point)
+ 'A'
+ """
# Distances of all points from the point to be classified
distances = (
(self._euclidean_distance(data_point[0], pred_point), data_point[1])
@@ -50,4 +85,4 @@ X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
iris_point = np.array([4.4, 3.1, 1.3, 1.4])
classifier = KNN(X_train, y_train, iris_classes)
- print(classifier.classify(iris_point, k=3))+ print(classifier.classify(iris_point, k=3))
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/k_nearest_neighbours.py |
Create Google-style docstrings for my code |
from collections.abc import Callable
from math import log
from os import name, system
from random import gauss, seed
# Make a training dataset drawn from a gaussian distribution
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
seed(1)
return [gauss(mean, std_dev) for _ in range(instance_count)]
# Make corresponding Y flags to detecting classes
def y_generator(class_count: int, instance_count: list) -> list:
return [k for k in range(class_count) for _ in range(instance_count[k])]
# Calculate the class means
def calculate_mean(instance_count: int, items: list) -> float:
# the sum of all items divided by number of instances
return sum(items) / instance_count
# Calculate the class probabilities
def calculate_probabilities(instance_count: int, total_count: int) -> float:
# number of instances in specific class divided by number of all instances
return instance_count / total_count
# Calculate the variance
def calculate_variance(items: list, means: list, total_count: int) -> float:
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
# for loop iterates over number of elements in inner layer of items
for j in range(len(items[i])):
# appending squared differences to 'squared_diff' list
squared_diff.append((items[i][j] - means[i]) ** 2)
# one divided by (the number of all instances - number of classes) multiplied by
# sum of all squared differences
n_classes = len(means) # Number of classes in dataset
return 1 / (total_count - n_classes) * sum(squared_diff)
# Making predictions
def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
# for loop iterates over number of elements in list
for i in range(len(x_items)):
# for loop iterates over number of inner items of each element
for j in range(len(x_items[i])):
temp = [] # to store all discriminant values of each item as a list
# for loop iterates over number of classes we have in our dataset
for k in range(len(x_items)):
# appending values of discriminants for each class to 'temp' list
temp.append(
x_items[i][j] * (means[k] / variance)
- (means[k] ** 2 / (2 * variance))
+ log(probabilities[k])
)
# appending discriminant values of each item to 'results' list
results.append(temp)
return [result.index(max(result)) for result in results]
# Calculating Accuracy
def accuracy(actual_y: list, predicted_y: list) -> float:
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of correct predictions divided by number
# of all data and multiplied by 100
return (correct / len(actual_y)) * 100
def valid_input[num](
input_type: Callable[[object], num], # Usually float or int
input_msg: str,
err_msg: str,
condition: Callable[[num], bool] = lambda _: True,
default: str | None = None,
) -> num:
while True:
try:
user_input = input_type(input(input_msg).strip() or default)
if condition(user_input):
return user_input
else:
print(f"{user_input}: {err_msg}")
continue
except ValueError:
print(
f"{user_input}: Incorrect input type, expected {input_type.__name__!r}"
)
# Main Function
def main():
while True:
print(" Linear Discriminant Analysis ".center(50, "*"))
print("*" * 50, "\n")
print("First of all we should specify the number of classes that")
print("we want to generate as training dataset")
# Trying to get number of classes
n_classes = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg="Enter the number of classes (Data Groupings): ",
err_msg="Number of classes should be positive!",
)
print("-" * 100)
# Trying to get the value of standard deviation
std_dev = valid_input(
input_type=float,
condition=lambda x: x >= 0,
input_msg=(
"Enter the value of standard deviation"
"(Default value is 1.0 for all classes): "
),
err_msg="Standard deviation should not be negative!",
default="1.0",
)
print("-" * 100)
# Trying to get number of instances in classes and theirs means to generate
# dataset
counts = [] # An empty list to store instance counts of classes in dataset
for i in range(n_classes):
user_count = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg=(f"Enter The number of instances for class_{i + 1}: "),
err_msg="Number of instances should be positive!",
)
counts.append(user_count)
print("-" * 100)
# An empty list to store values of user-entered means of classes
user_means = []
for a in range(n_classes):
user_mean = valid_input(
input_type=float,
input_msg=(f"Enter the value of mean for class_{a + 1}: "),
err_msg="This is an invalid value.",
)
user_means.append(user_mean)
print("-" * 100)
print("Standard deviation: ", std_dev)
# print out the number of instances in classes in separated line
for i, count in enumerate(counts, 1):
print(f"Number of instances in class_{i} is: {count}")
print("-" * 100)
# print out mean values of classes separated line
for i, user_mean in enumerate(user_means, 1):
print(f"Mean of class_{i} is: {user_mean}")
print("-" * 100)
# Generating training dataset drawn from gaussian distribution
x = [
gaussian_distribution(user_means[j], std_dev, counts[j])
for j in range(n_classes)
]
print("Generated Normal Distribution: \n", x)
print("-" * 100)
# Generating Ys to detecting corresponding classes
y = y_generator(n_classes, counts)
print("Generated Corresponding Ys: \n", y)
print("-" * 100)
# Calculating the value of actual mean for each class
actual_means = [calculate_mean(counts[k], x[k]) for k in range(n_classes)]
# for loop iterates over number of elements in 'actual_means' list and print
# out them in separated line
for i, actual_mean in enumerate(actual_means, 1):
print(f"Actual(Real) mean of class_{i} is: {actual_mean}")
print("-" * 100)
# Calculating the value of probabilities for each class
probabilities = [
calculate_probabilities(counts[i], sum(counts)) for i in range(n_classes)
]
# for loop iterates over number of elements in 'probabilities' list and print
# out them in separated line
for i, probability in enumerate(probabilities, 1):
print(f"Probability of class_{i} is: {probability}")
print("-" * 100)
# Calculating the values of variance for each class
variance = calculate_variance(x, actual_means, sum(counts))
print("Variance: ", variance)
print("-" * 100)
# Predicting Y values
# storing predicted Y values in 'pre_indexes' variable
pre_indexes = predict_y_values(x, actual_means, variance, probabilities)
print("-" * 100)
# Calculating Accuracy of the model
print(f"Accuracy: {accuracy(y, pre_indexes)}")
print("-" * 100)
print(" DONE ".center(100, "+"))
if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
print("\n" + "GoodBye!".center(100, "-") + "\n")
break
system("cls" if name == "nt" else "clear") # noqa: S605
if __name__ == "__main__":
main() | --- +++ @@ -1,3 +1,47 @@+"""
+Linear Discriminant Analysis
+
+
+
+Assumptions About Data :
+ 1. The input variables has a gaussian distribution.
+ 2. The variance calculated for each input variables by class grouping is the
+ same.
+ 3. The mix of classes in your training set is representative of the problem.
+
+
+Learning The Model :
+ The LDA model requires the estimation of statistics from the training data :
+ 1. Mean of each input value for each class.
+ 2. Probability of an instance belong to each class.
+ 3. Covariance for the input data for each class
+
+ Calculate the class means :
+ mean(x) = 1/n ( for i = 1 to i = n --> sum(xi))
+
+ Calculate the class probabilities :
+ P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1))
+ P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1))
+
+ Calculate the variance :
+ We can calculate the variance for dataset in two steps :
+ 1. Calculate the squared difference for each input variable from the
+ group mean.
+ 2. Calculate the mean of the squared difference.
+ ------------------------------------------------
+ Squared_Difference = (x - mean(k)) ** 2
+ Variance = (1 / (count(x) - count(classes))) *
+ (for i = 1 to i = n --> sum(Squared_Difference(xi)))
+
+Making Predictions :
+ discriminant(x) = x * (mean / variance) -
+ ((mean ** 2) / (2 * variance)) + Ln(probability)
+ ---------------------------------------------------------------------------
+ After calculating the discriminant value for each class, the class with the
+ largest discriminant value is taken as the prediction.
+
+Author: @EverLookNeverSee
+"""
from collections.abc import Callable
from math import log
@@ -7,30 +51,93 @@
# Make a training dataset drawn from a gaussian distribution
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
+ """
+ Generate gaussian distribution instances based-on given mean and standard deviation
+ :param mean: mean value of class
+ :param std_dev: value of standard deviation entered by usr or default value of it
+ :param instance_count: instance number of class
+ :return: a list containing generated values based-on given mean, std_dev and
+ instance_count
+
+ >>> gaussian_distribution(5.0, 1.0, 20) # doctest: +NORMALIZE_WHITESPACE
+ [6.288184753155463, 6.4494456086997705, 5.066335808938262, 4.235456349028368,
+ 3.9078267848958586, 5.031334516831717, 3.977896829989127, 3.56317055489747,
+ 5.199311976483754, 5.133374604658605, 5.546468300338232, 4.086029056264687,
+ 5.005005283626573, 4.935258239627312, 3.494170998739258, 5.537997178661033,
+ 5.320711100998849, 7.3891120432406865, 5.202969177309964, 4.855297691835079]
+ """
seed(1)
return [gauss(mean, std_dev) for _ in range(instance_count)]
# Make corresponding Y flags to detecting classes
def y_generator(class_count: int, instance_count: list) -> list:
+ """
+ Generate y values for corresponding classes
+ :param class_count: Number of classes(data groupings) in dataset
+ :param instance_count: number of instances in class
+ :return: corresponding values for data groupings in dataset
+
+ >>> y_generator(1, [10])
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> y_generator(2, [5, 10])
+ [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> y_generator(4, [10, 5, 15, 20]) # doctest: +NORMALIZE_WHITESPACE
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
+ """
return [k for k in range(class_count) for _ in range(instance_count[k])]
# Calculate the class means
def calculate_mean(instance_count: int, items: list) -> float:
+ """
+ Calculate given class mean
+ :param instance_count: Number of instances in class
+ :param items: items that related to specific class(data grouping)
+ :return: calculated actual mean of considered class
+
+ >>> items = gaussian_distribution(5.0, 1.0, 20)
+ >>> calculate_mean(len(items), items)
+ 5.011267842911003
+ """
# the sum of all items divided by number of instances
return sum(items) / instance_count
# Calculate the class probabilities
def calculate_probabilities(instance_count: int, total_count: int) -> float:
+ """
+ Calculate the probability that a given instance will belong to which class
+ :param instance_count: number of instances in class
+ :param total_count: the number of all instances
+ :return: value of probability for considered class
+
+ >>> calculate_probabilities(20, 60)
+ 0.3333333333333333
+ >>> calculate_probabilities(30, 100)
+ 0.3
+ """
# number of instances in specific class divided by number of all instances
return instance_count / total_count
# Calculate the variance
def calculate_variance(items: list, means: list, total_count: int) -> float:
+ """
+ Calculate the variance
+ :param items: a list containing all items(gaussian distribution of all classes)
+ :param means: a list containing real mean values of each class
+ :param total_count: the number of all instances
+ :return: calculated variance for considered dataset
+
+ >>> items = gaussian_distribution(5.0, 1.0, 20)
+ >>> means = [5.011267842911003]
+ >>> total_count = 20
+ >>> calculate_variance([items], means, total_count)
+ 0.9618530973487491
+ """
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
@@ -49,6 +156,44 @@ def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
+ """This function predicts new indexes(groups for our data)
+ :param x_items: a list containing all items(gaussian distribution of all classes)
+ :param means: a list containing real mean values of each class
+ :param variance: calculated value of variance by calculate_variance function
+ :param probabilities: a list containing all probabilities of classes
+ :return: a list containing predicted Y values
+
+ >>> x_items = [[6.288184753155463, 6.4494456086997705, 5.066335808938262,
+ ... 4.235456349028368, 3.9078267848958586, 5.031334516831717,
+ ... 3.977896829989127, 3.56317055489747, 5.199311976483754,
+ ... 5.133374604658605, 5.546468300338232, 4.086029056264687,
+ ... 5.005005283626573, 4.935258239627312, 3.494170998739258,
+ ... 5.537997178661033, 5.320711100998849, 7.3891120432406865,
+ ... 5.202969177309964, 4.855297691835079], [11.288184753155463,
+ ... 11.44944560869977, 10.066335808938263, 9.235456349028368,
+ ... 8.907826784895859, 10.031334516831716, 8.977896829989128,
+ ... 8.56317055489747, 10.199311976483754, 10.133374604658606,
+ ... 10.546468300338232, 9.086029056264687, 10.005005283626572,
+ ... 9.935258239627313, 8.494170998739259, 10.537997178661033,
+ ... 10.320711100998848, 12.389112043240686, 10.202969177309964,
+ ... 9.85529769183508], [16.288184753155463, 16.449445608699772,
+ ... 15.066335808938263, 14.235456349028368, 13.907826784895859,
+ ... 15.031334516831716, 13.977896829989128, 13.56317055489747,
+ ... 15.199311976483754, 15.133374604658606, 15.546468300338232,
+ ... 14.086029056264687, 15.005005283626572, 14.935258239627313,
+ ... 13.494170998739259, 15.537997178661033, 15.320711100998848,
+ ... 17.389112043240686, 15.202969177309964, 14.85529769183508]]
+
+ >>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
+ >>> variance = 0.9618530973487494
+ >>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
+ >>> predict_y_values(x_items, means, variance,
+ ... probabilities) # doctest: +NORMALIZE_WHITESPACE
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2]
+
+ """
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
@@ -73,6 +218,28 @@
# Calculating Accuracy
def accuracy(actual_y: list, predicted_y: list) -> float:
+ """
+ Calculate the value of accuracy based-on predictions
+ :param actual_y:a list containing initial Y values generated by 'y_generator'
+ function
+ :param predicted_y: a list containing predicted Y values generated by
+ 'predict_y_values' function
+ :return: percentage of accuracy
+
+ >>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
+ ... 1, 1 ,1 ,1 ,1 ,1 ,1]
+ >>> predicted_y = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,
+ ... 0, 0, 1, 1, 1, 0, 1, 1, 1]
+ >>> accuracy(actual_y, predicted_y)
+ 50.0
+
+ >>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
+ ... 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ >>> predicted_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
+ ... 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
+ >>> accuracy(actual_y, predicted_y)
+ 100.0
+ """
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
@@ -88,6 +255,16 @@ condition: Callable[[num], bool] = lambda _: True,
default: str | None = None,
) -> num:
+ """
+ Ask for user value and validate that it fulfill a condition.
+
+ :input_type: user input expected type of value
+ :input_msg: message to show user in the screen
+ :err_msg: message to show in the screen in case of error
+ :condition: function that represents the condition that user input is valid.
+ :default: Default value in case the user does not type anything
+ :return: user's input
+ """
while True:
try:
user_input = input_type(input(input_msg).strip() or default)
@@ -104,6 +281,7 @@
# Main Function
def main():
+ """This function starts execution phase"""
while True:
print(" Linear Discriminant Analysis ".center(50, "*"))
print("*" * 50, "\n")
@@ -222,4 +400,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/linear_discriminant_analysis.py |
Add missing documentation to my Python functions |
import math
class SelfOrganizingMap:
def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
d0 = 0.0
d1 = 0.0
for i in range(len(sample)):
d0 += math.pow((sample[i] - weights[0][i]), 2)
d1 += math.pow((sample[i] - weights[1][i]), 2)
return 0 if d0 > d1 else 1
return 0
def update(
self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
) -> list[list[int | float]]:
for i in range(len(weights)):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
# Driver code
def main() -> None:
# Training Examples ( m, n )
training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
self_organizing_map = SelfOrganizingMap()
epochs = 3
alpha = 0.5
for _ in range(epochs):
for j in range(len(training_samples)):
# training sample
sample = training_samples[j]
# Compute the winning vector
winner = self_organizing_map.get_winner(weights, sample)
# Update the winning vector
weights = self_organizing_map.update(weights, sample, winner, alpha)
# classify test sample
sample = [0, 0, 0, 1]
winner = self_organizing_map.get_winner(weights, sample)
# results
print(f"Clusters that the test sample belongs to : {winner}")
print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main() | --- +++ @@ -1,9 +1,18 @@+"""
+https://en.wikipedia.org/wiki/Self-organizing_map
+"""
import math
class SelfOrganizingMap:
def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
+ """
+ Compute the winning vector by Euclidean distance
+
+ >>> SelfOrganizingMap().get_winner([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
+ 1
+ """
d0 = 0.0
d1 = 0.0
for i in range(len(sample)):
@@ -15,6 +24,12 @@ def update(
self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
) -> list[list[int | float]]:
+ """
+ Update the winning vector.
+
+ >>> SelfOrganizingMap().update([[1, 2, 3], [4, 5, 6]], [1, 2, 3], 1, 0.1)
+ [[1, 2, 3], [3.7, 4.7, 6]]
+ """
for i in range(len(weights)):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
@@ -55,4 +70,4 @@
# running the main() function
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/self_organizing_map.py |
Write Python docstrings for this snippet | # XGBoost Regressor Example
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
# Split dataset into features and target. Data is features.
return (data["data"], data["target"])
def xgboost(
features: np.ndarray, target: np.ndarray, test_features: np.ndarray
) -> np.ndarray:
xgb = XGBRegressor(
verbosity=0, random_state=42, tree_method="exact", base_score=0.5
)
xgb.fit(features, target)
# Predict target for test data
predictions = xgb.predict(test_features)
predictions = predictions.reshape(len(predictions), 1)
return predictions
def main() -> None:
# Load California house price dataset
california = fetch_california_housing()
data, target = data_handling(california)
x_train, x_test, y_train, y_test = train_test_split(
data, target, test_size=0.25, random_state=1
)
predictions = xgboost(x_train, y_train, x_test)
# Error printing
print(f"Mean Absolute Error: {mean_absolute_error(y_test, predictions)}")
print(f"Mean Square Error: {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main() | --- +++ @@ -8,12 +8,25 @@
def data_handling(data: dict) -> tuple:
# Split dataset into features and target. Data is features.
+ """
+ >>> data_handling((
+ ... {'data':'[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]'
+ ... ,'target':([4.526])}))
+ ('[ 8.3252 41. 6.9841269 1.02380952 322. 2.55555556 37.88 -122.23 ]', [4.526])
+ """
return (data["data"], data["target"])
def xgboost(
features: np.ndarray, target: np.ndarray, test_features: np.ndarray
) -> np.ndarray:
+ """
+ >>> xgboost(np.array([[ 2.3571 , 52. , 6.00813008, 1.06775068,
+ ... 907. , 2.45799458, 40.58 , -124.26]]),np.array([1.114]),
+ ... np.array([[1.97840000e+00, 3.70000000e+01, 4.98858447e+00, 1.03881279e+00,
+ ... 1.14300000e+03, 2.60958904e+00, 3.67800000e+01, -1.19780000e+02]]))
+ array([[1.1139996]], dtype=float32)
+ """
xgb = XGBRegressor(
verbosity=0, random_state=42, tree_method="exact", base_score=0.5
)
@@ -25,6 +38,15 @@
def main() -> None:
+ """
+ The URL for this algorithm
+ https://xgboost.readthedocs.io/en/stable/
+ California house price dataset is used to demonstrate the algorithm.
+
+ Expected error values:
+ Mean Absolute Error: 0.30957163379906033
+ Mean Square Error: 0.22611560196662744
+ """
# Load California house price dataset
california = fetch_california_housing()
data, target = data_handling(california)
@@ -41,4 +63,4 @@ import doctest
doctest.testmod(verbose=True)
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/xgboost_regressor.py |
Write clean docstrings for readability |
# /// script
# requires-python = ">=3.13"
# dependencies = [
# "httpx",
# "numpy",
# ]
# ///
import httpx
import numpy as np
def collect_dataset():
response = httpx.get(
"https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
"master/Week1/ADRvsRating.csv",
timeout=10,
)
lines = response.text.splitlines()
data = []
for item in lines:
item = item.split(",")
data.append(item)
data.pop(0) # This is for removing the labels from the list
dataset = np.matrix(data)
return dataset
def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):
n = len_data
prod = np.dot(theta, data_x.transpose())
prod -= data_y.transpose()
sum_grad = np.dot(prod, data_x)
theta = theta - (alpha / n) * sum_grad
return theta
def sum_of_square_error(data_x, data_y, len_data, theta):
prod = np.dot(theta, data_x.transpose())
prod -= data_y.transpose()
sum_elem = np.sum(np.square(prod))
error = sum_elem / (2 * len_data)
return error
def run_linear_regression(data_x, data_y):
iterations = 100000
alpha = 0.0001550
no_features = data_x.shape[1]
len_data = data_x.shape[0] - 1
theta = np.zeros((1, no_features))
for i in range(iterations):
theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
error = sum_of_square_error(data_x, data_y, len_data, theta)
print(f"At Iteration {i + 1} - Error is {error:.5f}")
return theta
def mean_absolute_error(predicted_y, original_y):
total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
return total / len(original_y)
def main():
data = collect_dataset()
len_data = data.shape[0]
data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)
data_y = data[:, -1].astype(float)
theta = run_linear_regression(data_x, data_y)
len_result = theta.shape[1]
print("Resultant Feature vector : ")
for i in range(len_result):
print(f"{theta[0, i]:.5f}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | --- +++ @@ -1,3 +1,12 @@+"""
+Linear regression is the most basic type of regression commonly used for
+predictive analysis. The idea is pretty simple: we have a dataset and we have
+features associated with it. Features should be chosen very cautiously
+as they determine how much our model will be able to make future predictions.
+We try to set the weight of these features, over many iterations, so that they best
+fit our dataset. In this particular code, I had used a CSGO dataset (ADR vs
+Rating). We try to best fit a line through dataset and estimate the parameters.
+"""
# /// script
# requires-python = ">=3.13"
@@ -12,6 +21,10 @@
def collect_dataset():
+ """Collect dataset of CSGO
+ The dataset contains ADR vs Rating of a Player
+ :return : dataset obtained from the link, as matrix
+ """
response = httpx.get(
"https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
"master/Week1/ADRvsRating.csv",
@@ -28,6 +41,23 @@
def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):
+ """Run steep gradient descent and updates the Feature vector accordingly_
+ :param data_x : contains the dataset
+ :param data_y : contains the output associated with each data-entry
+ :param len_data : length of the data_
+ :param alpha : Learning rate of the model
+ :param theta : Feature vector (weight's for our model)
+ ;param return : Updated Feature's, using
+ curr_features - alpha_ * gradient(w.r.t. feature)
+ >>> import numpy as np
+ >>> data_x = np.array([[1, 2], [3, 4]])
+ >>> data_y = np.array([5, 6])
+ >>> len_data = len(data_x)
+ >>> alpha = 0.01
+ >>> theta = np.array([0.1, 0.2])
+ >>> run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)
+ array([0.196, 0.343])
+ """
n = len_data
prod = np.dot(theta, data_x.transpose())
@@ -38,6 +68,19 @@
def sum_of_square_error(data_x, data_y, len_data, theta):
+ """Return sum of square error for error calculation
+ :param data_x : contains our dataset
+ :param data_y : contains the output (result vector)
+ :param len_data : len of the dataset
+ :param theta : contains the feature vector
+ :return : sum of square error computed from given feature's
+
+ Example:
+ >>> vc_x = np.array([[1.1], [2.1], [3.1]])
+ >>> vc_y = np.array([1.2, 2.2, 3.2])
+ >>> round(sum_of_square_error(vc_x, vc_y, 3, np.array([1])),3)
+ np.float64(0.005)
+ """
prod = np.dot(theta, data_x.transpose())
prod -= data_y.transpose()
sum_elem = np.sum(np.square(prod))
@@ -46,6 +89,11 @@
def run_linear_regression(data_x, data_y):
+ """Implement Linear regression over the dataset
+ :param data_x : contains our dataset
+ :param data_y : contains the output (result vector)
+ :return : feature for line of best fit (Feature vector)
+ """
iterations = 100000
alpha = 0.0001550
@@ -63,11 +111,22 @@
def mean_absolute_error(predicted_y, original_y):
+ """Return sum of square error for error calculation
+ :param predicted_y : contains the output of prediction (result vector)
+ :param original_y : contains values of expected outcome
+ :return : mean absolute error computed from given feature's
+
+ >>> predicted_y = [3, -0.5, 2, 7]
+ >>> original_y = [2.5, 0.0, 2, 8]
+ >>> mean_absolute_error(predicted_y, original_y)
+ 0.5
+ """
total = sum(abs(y - predicted_y[i]) for i, y in enumerate(original_y))
return total / len(original_y)
def main():
+ """Driver function"""
data = collect_dataset()
len_data = data.shape[0]
@@ -85,4 +144,4 @@ import doctest
doctest.testmod()
- main()+ main()
| https://raw.githubusercontent.com/TheAlgorithms/Python/HEAD/machine_learning/linear_regression.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.