add project2
154  project2/iterative_solvers.py  Normal file
@@ -0,0 +1,154 @@
import numpy as np
from typing import Callable, Any, List, Tuple

# Tolerance and maximum iterations
TOL = 1e-10
MAX_ITER = 20000


def stationary_method(
    b: np.ndarray,
    method: str,
    get_diag_inv: Callable[[int, Any], float],
    get_off_diag_sum: Callable[[np.ndarray, int, Any], float],
    mat_vec_prod: Callable[[np.ndarray, Any], np.ndarray],
    omega: float = 1.0,
    **kwargs
) -> Tuple[np.ndarray, int, List[float]]:
    """
    Solves Ax = b with a stationary method: 'jacobi', 'gauss_seidel', or 'sor'.

    The matrix A is supplied implicitly through three callbacks:
    get_diag_inv(i, **kwargs) returns 1 / A[i, i],
    get_off_diag_sum(x, i, **kwargs) returns sum_{j != i} A[i, j] * x[j],
    and mat_vec_prod(x, **kwargs) returns A @ x.
    Returns (solution, iteration count, residual-norm history).
    """
    n = len(b)
    x = np.zeros(n)
    r0 = b.copy()
    r0_norm = np.linalg.norm(r0)
    if r0_norm == 0:
        return x, 0, [0.0]

    residual_history = []

    for k in range(MAX_ITER):
        # 1. Calculate the current residual and its norm
        r = b - mat_vec_prod(x, **kwargs)
        res_norm = np.linalg.norm(r)

        # 2. Append to history
        residual_history.append(res_norm)

        # 3. Check for convergence
        if res_norm / r0_norm < TOL:
            return x, k, residual_history

        # 4. If not converged, perform the next iteration update
        if method == 'jacobi':
            x_old = x.copy()
            for i in range(n):
                off_diag_val = get_off_diag_sum(x_old, i, **kwargs)
                diag_inv_val = get_diag_inv(i, **kwargs)
                x[i] = diag_inv_val * (b[i] - off_diag_val)
        else:  # Gauss-Seidel and SOR
            x_k_iter = x.copy()  # Needed for SOR
            for i in range(n):
                off_diag_val = get_off_diag_sum(x, i, **kwargs)
                diag_inv_val = get_diag_inv(i, **kwargs)
                x_gs = diag_inv_val * (b[i] - off_diag_val)

                if method == 'gauss_seidel':
                    x[i] = x_gs
                elif method == 'sor':
                    x[i] = (1 - omega) * x_k_iter[i] + omega * x_gs

    # If the loop finishes (MAX_ITER reached), calculate and append the final residual
    r_final = b - mat_vec_prod(x, **kwargs)
    residual_history.append(np.linalg.norm(r_final))
    return x, MAX_ITER, residual_history
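

# --- Illustrative callbacks (editor's sketch, not part of the original interface) ---
# stationary_method never sees the matrix A directly; it only calls the three
# callbacks above, forwarding **kwargs. The helpers below are one possible binding
# for a dense matrix passed as a keyword argument named `A` (a name assumed here
# purely for demonstration), e.g.
#     x, iters, hist = stationary_method(b, 'sor', dense_diag_inv,
#                                        dense_off_diag_sum, dense_mat_vec,
#                                        omega=1.5, A=A)
def dense_diag_inv(i: int, A: np.ndarray) -> float:
    """Return 1 / A[i, i] for a dense matrix A (assumes a nonzero diagonal)."""
    return 1.0 / A[i, i]


def dense_off_diag_sum(x: np.ndarray, i: int, A: np.ndarray) -> float:
    """Return the off-diagonal sum: sum over j != i of A[i, j] * x[j]."""
    return A[i, :] @ x - A[i, i] * x[i]


def dense_mat_vec(x: np.ndarray, A: np.ndarray) -> np.ndarray:
    """Return the matrix-vector product A @ x for a dense matrix A."""
    return A @ x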


def steepest_descent(
    mat_vec_prod: Callable[[np.ndarray, Any], np.ndarray],
    b: np.ndarray,
    **kwargs
) -> Tuple[np.ndarray, int, List[float]]:
    """
    Solves Ax = b using Steepest Descent, with corrected residual logging.
    """
    # Work in float so the in-place updates below are safe even for integer-typed b
    x = np.zeros_like(b, dtype=float)
    r = b.astype(float)

    r0_norm = np.linalg.norm(r)
    if r0_norm == 0:
        return x, 0, [0.0]

    residual_history = []

    for k in range(MAX_ITER):
        # 1. The current residual norm comes from the previous step (or the initial residual)
        res_norm = np.linalg.norm(r)

        # 2. Append to history
        residual_history.append(res_norm)

        # 3. Check for convergence BEFORE the update
        if res_norm / r0_norm < TOL:
            return x, k, residual_history

        # 4. If not converged, compute the update
        Ar = mat_vec_prod(r, **kwargs)
        r_dot_Ar = np.dot(r, Ar)
        if r_dot_Ar == 0:  # Should not happen for an SPD matrix if r != 0
            break
        alpha = res_norm**2 / r_dot_Ar

        x += alpha * r
        r -= alpha * Ar

    # If the loop finishes, append the final residual
    residual_history.append(np.linalg.norm(r))
    return x, MAX_ITER, residual_history
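

# Usage sketch (editor's note): with an SPD matrix A bound through **kwargs and the
# illustrative dense_mat_vec helper above, a call could look like
#     x, iters, hist = steepest_descent(dense_mat_vec, b, A=A)
# Each entry of hist is ||b - A x_k||, so hist[-1] / hist[0] is the relative residual
# actually achieved. A runnable demo for all three solvers is sketched at the end of
# the file.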


def conjugate_gradient(
    mat_vec_prod: Callable[[np.ndarray, Any], np.ndarray],
    b: np.ndarray,
    **kwargs
) -> Tuple[np.ndarray, int, List[float]]:
    """
    Solves Ax = b using Conjugate Gradient, with corrected residual logging.
    """
    # Work in float so the in-place updates below are safe even for integer-typed b
    x = np.zeros_like(b, dtype=float)
    r = b.astype(float)
    p = r.copy()

    r0_norm = np.linalg.norm(r)
    if r0_norm == 0:
        return x, 0, [0.0]

    res_norm_old = r0_norm
    residual_history = []

    for k in range(MAX_ITER):
        # 1. The current residual norm is carried over from the previous step
        # 2. Append to history
        residual_history.append(res_norm_old)

        # 3. Check for convergence
        if res_norm_old / r0_norm < TOL:
            return x, k, residual_history

        # 4. If not converged, compute the update
        Ap = mat_vec_prod(p, **kwargs)
        p_dot_Ap = np.dot(p, Ap)
        if p_dot_Ap == 0:
            break
        alpha = res_norm_old**2 / p_dot_Ap

        x += alpha * p
        r -= alpha * Ap

        res_norm_new = np.linalg.norm(r)
        beta = res_norm_new**2 / res_norm_old**2

        p = r + beta * p
        res_norm_old = res_norm_new

    # If the loop finishes, append the final residual
    residual_history.append(res_norm_old)
    return x, MAX_ITER, residual_history
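

# --- Demo (editor's sketch, not part of the original commit) ---
# A small symmetric positive-definite test problem exercising all three solver
# families with the illustrative dense-matrix callbacks defined above. The
# matrix A and right-hand side b are arbitrary demo values.
if __name__ == "__main__":
    A = np.array([[4.0, 1.0, 0.0],
                  [1.0, 3.0, 1.0],
                  [0.0, 1.0, 2.0]])
    b = np.array([1.0, 2.0, 3.0])

    for name in ("jacobi", "gauss_seidel", "sor"):
        x, iters, hist = stationary_method(
            b, name, dense_diag_inv, dense_off_diag_sum, dense_mat_vec,
            omega=1.2, A=A)  # omega is only used by 'sor'
        print(f"{name:>18}: {iters} iterations, final residual {hist[-1]:.3e}")

    x_sd, it_sd, hist_sd = steepest_descent(dense_mat_vec, b, A=A)
    print(f"{'steepest_descent':>18}: {it_sd} iterations, final residual {hist_sd[-1]:.3e}")

    x_cg, it_cg, hist_cg = conjugate_gradient(dense_mat_vec, b, A=A)
    print(f"{'conjugate_gradient':>18}: {it_cg} iterations, final residual {hist_cg[-1]:.3e}")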