add project2

2025-11-10 20:23:08 -05:00
parent bd5715388b
commit dd4675235d
17 changed files with 730 additions and 1 deletion

4
.gitignore vendored

@@ -1,2 +1,4 @@
.vscode
.DS_Store
+__pycache__
+*.pyc

154
project2/iterative_solvers.py Normal file

@@ -0,0 +1,154 @@
import numpy as np
from typing import Callable, Any, List, Tuple

# Tolerance and maximum iterations
TOL = 1e-10
MAX_ITER = 20000


def stationary_method(
    b: np.ndarray,
    method: str,
    get_diag_inv: Callable[[int, Any], float],
    get_off_diag_sum: Callable[[np.ndarray, int, Any], float],
    mat_vec_prod: Callable[[np.ndarray, Any], np.ndarray],
    omega: float = 1.0,
    **kwargs
) -> Tuple[np.ndarray, int, List[float]]:
    """
    Solves Ax=b using stationary methods (Jacobi, Gauss-Seidel, SOR).
    """
    n = len(b)
    x = np.zeros(n)
    r0 = b.copy()
    r0_norm = np.linalg.norm(r0)
    if r0_norm == 0:
        return x, 0, [0.0]
    residual_history = []
    for k in range(MAX_ITER):
        # 1. Calculate current residual and its norm
        r = b - mat_vec_prod(x, **kwargs)
        res_norm = np.linalg.norm(r)
        # 2. Append to history
        residual_history.append(res_norm)
        # 3. Check for convergence
        if res_norm / r0_norm < TOL:
            return x, k, residual_history
        # 4. If not converged, perform the next iteration update
        if method == 'jacobi':
            x_old = x.copy()
            for i in range(n):
                off_diag_val = get_off_diag_sum(x_old, i, **kwargs)
                diag_inv_val = get_diag_inv(i, **kwargs)
                x[i] = diag_inv_val * (b[i] - off_diag_val)
        else:  # Gauss-Seidel and SOR
            x_k_iter = x.copy()  # Needed for SOR
            for i in range(n):
                off_diag_val = get_off_diag_sum(x, i, **kwargs)
                diag_inv_val = get_diag_inv(i, **kwargs)
                x_gs = diag_inv_val * (b[i] - off_diag_val)
                if method == 'gauss_seidel':
                    x[i] = x_gs
                elif method == 'sor':
                    x[i] = (1 - omega) * x_k_iter[i] + omega * x_gs
    # If loop finishes (MAX_ITER reached), calculate and append final residual
    r_final = b - mat_vec_prod(x, **kwargs)
    residual_history.append(np.linalg.norm(r_final))
    return x, MAX_ITER, residual_history


def steepest_descent(
    mat_vec_prod: Callable[[np.ndarray, Any], np.ndarray],
    b: np.ndarray,
    **kwargs
) -> Tuple[np.ndarray, int, List[float]]:
    """
    Solves Ax=b using Steepest Descent, with corrected residual logging.
    """
    x = np.zeros_like(b)
    r = b.copy()
    r0_norm = np.linalg.norm(r)
    if r0_norm == 0:
        return x, 0, [0.0]
    residual_history = []
    for k in range(MAX_ITER):
        # 1. Current residual norm is from previous step (or initial)
        res_norm = np.linalg.norm(r)
        # 2. Append to history
        residual_history.append(res_norm)
        # 3. Check for convergence BEFORE the update
        if res_norm / r0_norm < TOL:
            return x, k, residual_history
        # 4. If not converged, compute update
        Ar = mat_vec_prod(r, **kwargs)
        r_dot_Ar = np.dot(r, Ar)
        if r_dot_Ar == 0:  # Should not happen for SPD A if r != 0
            break
        alpha = res_norm**2 / r_dot_Ar
        x += alpha * r
        r -= alpha * Ar
    # If loop finishes, append final residual
    residual_history.append(np.linalg.norm(r))
    return x, MAX_ITER, residual_history


def conjugate_gradient(
    mat_vec_prod: Callable[[np.ndarray, Any], np.ndarray],
    b: np.ndarray,
    **kwargs
) -> Tuple[np.ndarray, int, List[float]]:
    """
    Solves Ax=b using Conjugate Gradient, with corrected residual logging.
    """
    x = np.zeros_like(b)
    r = b.copy()
    p = r.copy()
    r0_norm = np.linalg.norm(r)
    if r0_norm == 0:
        return x, 0, [0.0]
    res_norm_old = r0_norm
    residual_history = []
    for k in range(MAX_ITER):
        # 1. Current residual norm is from previous step
        # 2. Append to history
        residual_history.append(res_norm_old)
        # 3. Check for convergence
        if res_norm_old / r0_norm < TOL:
            return x, k, residual_history
        # 4. If not converged, compute update
        Ap = mat_vec_prod(p, **kwargs)
        p_dot_Ap = np.dot(p, Ap)
        if p_dot_Ap == 0:
            break
        alpha = res_norm_old**2 / p_dot_Ap
        x += alpha * p
        r -= alpha * Ap
        res_norm_new = np.linalg.norm(r)
        beta = res_norm_new**2 / res_norm_old**2
        p = r + beta * p
        res_norm_old = res_norm_new
    # If loop finishes, append final residual
    residual_history.append(res_norm_old)
    return x, MAX_ITER, residual_history
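

# --- Illustrative usage (a minimal sketch, not part of the assignment code) ---
# Sanity-checks conjugate_gradient on a tiny dense SPD system; `demo_mat_vec`
# is a hypothetical helper that simply closes over an explicit 2x2 matrix,
# standing in for the matrix-free products used by the real problems.
if __name__ == "__main__":
    A_demo = np.array([[4.0, 1.0], [1.0, 3.0]])
    b_demo = np.array([1.0, 2.0])

    def demo_mat_vec(v: np.ndarray) -> np.ndarray:
        return A_demo @ v

    x_demo, iters_demo, _ = conjugate_gradient(demo_mat_vec, b_demo)
    # For a 2x2 SPD system, CG converges in at most 2 iterations.
    print(f"CG demo: x = {x_demo}, iterations = {iters_demo}, "
          f"residual = {np.linalg.norm(b_demo - demo_mat_vec(x_demo)):.2e}")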

187
project2/main.tex Normal file

@@ -0,0 +1,187 @@
\documentclass{article}
% Language setting
\usepackage[english]{babel}
% Set page size and margins
\usepackage[letterpaper,top=2cm,bottom=2cm,left=3cm,right=3cm,marginparwidth=1.75cm]{geometry}
% Useful packages
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{amsfonts}
\usepackage{float} % For better table and figure placement
\usepackage[colorlinks=true, allcolors=blue]{hyperref}
\usepackage{array} % For better table column definitions
\title{MATH 6601 - Project 2 Report}
\author{Zhe Yuan (yuan.1435)}
\date{Nov 2025}
\begin{document}
\maketitle
\section*{Common Iterative Methods for Both Problems}
This project implements five iterative methods to solve large, sparse, symmetric positive-definite (SPD) linear systems of the form $Ax=b$. The implemented methods are:
\begin{itemize}
\item Jacobi method
\item Gauss-Seidel method
\item Successive Over-Relaxation (SOR) method
\item Steepest Descent method
\item Conjugate Gradient method
\end{itemize}
All five methods are implemented in Python. The core logic for each solver is contained in the `iterative\_solvers.py' file. The specific problems are set up and solved in `problem1.py' and `problem2.py', respectively.
The initial guess was $x^{(0)}=0$, and the stopping condition was $\|r^{(k)}\|_2 / \|r^{(0)}\|_2 < 10^{-10}$. In all cases the matrix $A$ was never formed explicitly; the solvers rely only on matrix-vector products and stencil evaluations.
\section{Problem 1: Ordinary Differential Equations}
This problem solves the boundary value problem $-u'' = t$ on $(0,1)$ with $u(0)=u(1)=0$. To solve the problem numerically, the interval $(0,1)$ is discretized into $n+1$ subintervals of width $h=1/(n+1)$. The second derivative $u''(t_i)$ is approximated using a central difference scheme:
\[ u''(t_i) = -t_i \approx \frac{u(t_{i-1}) - 2u(t_i) + u(t_{i+1})}{h^2} \quad \implies \quad -u_{i-1} + 2u_i - u_{i+1} = h^2 t_i \]
for the internal grid points $i=1, \dots, n$. Combined with the boundary conditions $u_0=0$ and $u_{n+1}=0$, this forms a linear system $Au=b$, where $u = [u_1, \dots, u_n]^T$. The matrix $A \in \mathbb{R}^{n \times n}$ is a tridiagonal matrix, and the vector $b \in \mathbb{R}^n$ is given by:
\[
A = \begin{pmatrix}
2 & -1 & & & \\
-1 & 2 & -1 & & \\
& \ddots & \ddots & \ddots & \\
& & -1 & 2 & -1 \\
& & & -1 & 2
\end{pmatrix}, \quad
b = h^2 \begin{pmatrix} t_1 \\ t_2 \\ \vdots \\ t_n \end{pmatrix}
\]
\subsection*{(a) Exact Solution}
The boundary value problem is given by $-u'' = t$ on the interval $(0, 1)$ with boundary conditions $u(0) = u(1) = 0$.
Integrating twice gives the general solution:
\[ u(t) = -\frac{t^3}{6} + C_1 t + C_2 \]
Applying the boundary conditions $u(0)=0$ and $u(1)=0$, we find the constants $C_2=0$ and $C_1 = 1/6$. Thus, the exact solution is:
\[ u(t) = \frac{t - t^3}{6} \]
The plot of this exact solution is shown in Figure~\ref{fig:p1_exact_solution}.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{problem1_exact_solution.png}
\caption{Plot of the exact solution $u(t) = (t-t^3)/6$.}
\label{fig:p1_exact_solution}
\end{figure}
\subsection*{(b, c) Iteration Solution}
As required in 1(b), the linear system was solved for $n=20$ and $n=40$. For SOR, the optimal relaxation parameter $\omega = \frac{2}{1+\sin(\pi h)}$ was used. The number of iterations required for each method to converge is summarized in Table~\ref{tab:p1_iterations}.
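For instance, $n=20$ gives $h = 1/21$ and $\omega = 2/(1+\sin(\pi/21)) \approx 1.741$, while $n=40$ gives $\omega \approx 1.858$.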
\begin{table}[H]
\centering
\begin{tabular}{|l|r|r|}
\hline
\textbf{Method} & \textbf{Iterations (n=20)} & \textbf{Iterations (n=40)} \\
\hline
Jacobi & 2031 & 7758 \\
Gauss-Seidel & 1018 & 3882 \\
SOR & 94 & 183 \\
Steepest Descent & 2044 & 7842\\
Conjugate Gradient & 20 & 40 \\
\hline
\end{tabular}
\caption{Number of iterations for each method for Problem 1.}
\label{tab:p1_iterations}
\end{table}
\noindent \textbf{Observations:}
\begin{enumerate}
\item The Conjugate Gradient method is the most efficient, requiring the fewest iterations. SOR is the second-best with a significant speedup over Gauss-Seidel. Gauss-Seidel converges about twice as fast as Jacobi. The Steepest Descent method performs poorly, rivaling Jacobi in its slow convergence.
\item As $n$ doubles, the number of iterations increases for all methods. The iteration count for Jacobi, Gauss-Seidel, and Steepest Descent increases by a factor of roughly 4, while for SOR and CG, the increase is closer to a factor of 2.
\end{enumerate}
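These growth rates are consistent with the classical theory for this model problem. The eigenvalues of $A$ are $\lambda_k = 4\sin^2(k\pi h/2)$, so the Jacobi iteration matrix has spectral radius $\cos(\pi h) \approx 1 - \pi^2 h^2/2$ and Gauss-Seidel has $\cos^2(\pi h)$, making their iteration counts grow like $O(h^{-2})$; optimal SOR has $\rho = \omega - 1 \approx 1 - 2\pi h$, and CG contracts the error by roughly $(\sqrt{\kappa}-1)/(\sqrt{\kappa}+1)$ per step with $\kappa(A) = O(h^{-2})$, so both need only $O(h^{-1})$ iterations. Halving $h$ therefore quadruples the work for the first group but only doubles it for SOR and CG.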
\subsection*{(d) Convergence History}
The convergence history is shown in Figures~\ref{fig:p1_conv_n20} and \ref{fig:p1_conv_n40}. The stationary methods and SD show linear convergence, while CG shows superlinear convergence.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{problem1_convergence_n20.png}
\caption{Convergence history for Problem 1 with n=20.}
\label{fig:p1_conv_n20}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{problem1_convergence_n40.png}
\caption{Convergence history for Problem 1 with n=40.}
\label{fig:p1_conv_n40}
\end{figure}
\newpage
\section{Problem 2: Partial Differential Equation}
This problem involves solving the PDE $-\nabla^2 u + u = 1$ on the unit square $[0,1]^2$ with zero boundary conditions. (The problem statement writes $-\nabla u + u = 1$, but since $\nabla u$ is a vector and $u$ is a scalar, the intended equation must be $-\nabla^2 u + u = 1$.) To solve the problem numerically, a finite difference approximation on an $n \times n$ grid of interior points leads to the discrete equations:
\[ (4+h^2)u_{i,j} - u_{i-1,j} - u_{i,j-1} - u_{i+1,j} - u_{i,j+1} = h^2, \quad i,j=1,\dots,n \]
To formulate this as a standard matrix system $Ax=b$, we reshape the 2D array of unknowns $u_{i,j}$ into a single 1D vector $x$ of length $N=n^2$, with $u_{i,j}$ mapping to the index $k = (i-1)n + j$ in $x$. Under this ordering, the matrix $A \in \mathbb{R}^{N \times N}$ becomes a large, sparse, block tridiagonal matrix:
\[
A = \begin{pmatrix}
T & -I & & & \\
-I & T & -I & & \\
& \ddots & \ddots & \ddots & \\
& & -I & T & -I \\
& & & -I & T
\end{pmatrix}_{n^2 \times n^2}
\]
where $I$ is the $n \times n$ identity matrix, and $T$ is an $n \times n$ tridiagonal matrix given by:
\[
T = \begin{pmatrix}
4+h^2 & -1 & & & \\
-1 & 4+h^2 & -1 & & \\
& \ddots & \ddots & \ddots & \\
& & -1 & 4+h^2 & -1 \\
& & & -1 & 4+h^2
\end{pmatrix}_{n \times n}
\]
The right-hand side vector $b \in \mathbb{R}^{N}$ is the constant vector with every entry equal to $h^2$. Row $k=(i-1)n+j$ of $A$ reproduces the stencil: the diagonal block $T$ contributes the terms $(4+h^2)u_{i,j} - u_{i,j-1} - u_{i,j+1}$, while the off-diagonal blocks $-I$ contribute $-u_{i-1,j}$ and $-u_{i+1,j}$.
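For instance, with $n=2$ the unknowns are ordered $x = (u_{1,1}, u_{1,2}, u_{2,1}, u_{2,2})^T$, and the equation in row $k=1$ reads $(4+h^2)u_{1,1} - u_{1,2} - u_{2,1} = h^2$, the boundary neighbors $u_{0,1}$ and $u_{1,0}$ having been eliminated by the zero boundary conditions.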
\subsection*{(a) Iterative Solution}
The system was solved for $n=16$ ($N=256$) and $n=32$ ($N=1024$) using the same five iterative methods. The number of iterations to reach convergence is reported in Table~\ref{tab:p2_iterations}.
\begin{table}[H]
\centering
\begin{tabular}{|l|r|r|}
\hline
\textbf{Method} & \textbf{Iterations (n=16)} & \textbf{Iterations (n=32)} \\
\hline
Jacobi & 1268 & 4792 \\
Gauss-Seidel & 635 & 2397 \\
SOR & 71 & 138 \\
Steepest Descent & 1247 & 4807 \\
Conjugate Gradient & 31 & 66 \\
\hline
\end{tabular}
\caption{Number of iterations for each method for Problem 2.}
\label{tab:p2_iterations}
\end{table}
\noindent \textbf{Observations:}
The relative performance of the methods remains consistent with Problem 1: CG is the fastest, followed by SOR, Gauss-Seidel, Jacobi, and Steepest Descent. Doubling $n$ from 16 to 32 quadruples the number of unknowns. Again, the iteration counts for slower methods increase by a factor of approximately 4, while for CG and SOR, the increase is only about a factor of 2. This reinforces the conclusion that CG and SOR scale much better with problem size.
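This behavior matches the eigenvalue analysis from Problem 1: here the eigenvalues of $A$ are $\lambda_{k,l} = 4\sin^2(k\pi h/2) + 4\sin^2(l\pi h/2) + h^2$, so $\kappa(A)$ again grows like $O(h^{-2})$ as the grid is refined; the $+h^2$ shift contributed by the reaction term $u$ improves the conditioning only marginally.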
\subsection*{(b) Convergence History}
The convergence history plots are shown in Figures~\ref{fig:p2_conv_n16} and \ref{fig:p2_conv_n32}.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{problem2_convergence_n16.png}
\caption{Convergence history for Problem 2 with n=16.}
\label{fig:p2_conv_n16}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{problem2_convergence_n32.png}
\caption{Convergence history for Problem 2 with n=32.}
\label{fig:p2_conv_n32}
\end{figure}
\subsection*{(c) Advantages of Iterative Methods}
For large, sparse linear systems arising from PDE discretizations, iterative methods offer significant advantages over direct methods:
\begin{enumerate}
\item Iterative methods typically require storing only a few vectors of length $N$, giving $O(N)$ memory. Direct methods, by contrast, suffer from fill-in: the factors produced by a factorization (e.g., Cholesky or QR) are generally much denser than the sparse matrix $A$ itself.
\item The cost per iteration for these problems is dominated by the sparse matrix-vector product, which is $O(N)$. If a method converges in $K$ iterations, the total cost is $O(KN)$, and for a good method such as CG, $K \ll N$. Direct sparse solvers generally have higher complexity, so for very large $N$, iterative methods can be much faster; a rough estimate follows this list.
\item Implementing an iterative method is often simpler than implementing a direct solver for sparse matrices.
\end{enumerate}
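As a rough illustration of the second point, take Problem 2 with $n=32$ ($N=1024$): each matrix-vector product touches the five nonzeros per row, about $5N$ multiplications, so CG's 66 iterations cost on the order of $66 \cdot 5N \approx 3 \times 10^5$ multiplications, versus $O(N^3) \approx 10^9$ operations for a dense factorization.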
\end{document}

Binary file not shown.

160
project2/problem1.py Normal file

@@ -0,0 +1,160 @@
import numpy as np
import matplotlib.pyplot as plt

from iterative_solvers import *


# (a): Exact Solution
def exact_solution(t: np.ndarray) -> np.ndarray:
    """Computes the exact solution u(t) = t/6 * (1 - t^2)."""
    return t / 6.0 * (1 - t**2)


def plot_exact_solution():
    """Plots the exact solution of the BVP."""
    t_fine = np.linspace(0, 1, 500)
    u_exact_fine = exact_solution(t_fine)
    plt.figure(figsize=(10, 6))
    # Raw strings keep the LaTeX backslashes intact in the labels
    plt.plot(t_fine, u_exact_fine, 'k-',
             label=r'Exact Solution $u(t) = t/6 \cdot (1 - t^2)$')
    plt.title(r"Exact Solution of $-u''=t$ with $u(0)=u(1)=0$")
    plt.xlabel('$t$')
    plt.ylabel('$u(t)$')
    plt.grid(True)
    plt.legend()
    plt.savefig('problem1_exact_solution.png')
    # plt.show()
    print("Exact solution plot saved to 'problem1_exact_solution.png'")


# (b): Helper functions for iterative methods
def mat_vec_prod_p1(x: np.ndarray, n: int) -> np.ndarray:
    """Computes the matrix-vector product Ax for Problem 1."""
    y = np.zeros(n)
    # A is tridiagonal with stencil [-1, 2, -1]:
    # y_i = -x_{i-1} + 2 x_i - x_{i+1}
    for i in range(n):
        term_prev = -x[i-1] if i > 0 else 0
        term_next = -x[i+1] if i < n - 1 else 0
        y[i] = term_prev + 2 * x[i] + term_next
    return y
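

# Illustrative sanity check (a sketch, not used by the assignment code):
# `_check_stencil_p1` is a hypothetical helper comparing the matrix-free
# product above against an explicitly assembled tridiagonal matrix.
def _check_stencil_p1(n: int = 5) -> bool:
    A = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    v = np.random.rand(n)
    return np.allclose(A @ v, mat_vec_prod_p1(v, n))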
def get_diag_inv_p1(i: int, n: int) -> float:
    """Returns 1/A_ii for Problem 1. Diagonal elements are all 2, so the inverse is 1/2."""
    return 1.0 / 2.0


def get_off_diag_sum_p1(x: np.ndarray, i: int, n: int) -> float:
    """Computes sum_{j!=i} A_ij * x_j. For Problem 1, A_ij = -1 for j = i-1, i+1, else 0."""
    sum_val = 0.0
    if i > 0:
        sum_val += -x[i-1]
    if i < n - 1:
        sum_val += -x[i+1]
    return sum_val


def solve_and_report(n: int):
    """
    Solves the linear system for a given n and reports results.
    """
    print(f"\n--- Solving for n = {n} ---")
    h = 1.0 / (n + 1)
    # Setup the right-hand side vector b
    t = np.linspace(h, 1.0 - h, n)
    b = (h**2) * t
    # Calculate SOR parameter omega
    omega = 2.0 / (1.0 + np.sin(np.pi * h))
    # Store results
    iteration_counts = []
    all_residuals = {}
    all_solutions = {}
    # Define methods and run solvers
    methods_to_run = {
        'Jacobi': ('jacobi', {}),
        'Gauss-Seidel': ('gauss_seidel', {}),
        'SOR': ('sor', {'omega': omega}),
        'Steepest Descent': (steepest_descent, {}),
        'Conjugate Gradient': (conjugate_gradient, {})
    }
    print("\nStarting iterative solvers...")
    for name, (method_func, params) in methods_to_run.items():
        print(f"Running {name}...")
        common_args = {'b': b, 'n': n}
        if isinstance(method_func, str):
            solver_args = {'method': method_func, 'get_diag_inv': get_diag_inv_p1,
                           'get_off_diag_sum': get_off_diag_sum_p1, 'mat_vec_prod': mat_vec_prod_p1,
                           **common_args, **params}
            solution, iters, res_hist = stationary_method(**solver_args)
        else:
            solver_args = {'mat_vec_prod': mat_vec_prod_p1, **common_args}
            solution, iters, res_hist = method_func(**solver_args)
        iteration_counts.append((name, iters))
        all_residuals[name] = res_hist
        all_solutions[name] = solution if iters < MAX_ITER else None
    # (c): Report iteration counts
    print("\n--- Iteration Counts ---")
    for name, iters in iteration_counts:
        status = "converged" if iters < MAX_ITER else "did NOT converge"
        print(f"{name:<20}: {iters} iterations ({status}, last residual = {all_residuals[name][-1]:.2e})")
    # (d): Plot residual history
    plt.figure(figsize=(12, 8))
    for name, res_hist in all_residuals.items():
        # Small offset guards against log10(0) for exactly-zero residuals
        res_log = np.log10(np.array(res_hist) + 1e-20)
        plt.plot(res_log, label=name, linestyle='-')
    plt.title(f'Convergence History for n = {n}')
    plt.xlabel('Iteration Step (m)')
    plt.ylabel('log10(||r^(m)||_2)')
    plt.legend()
    plt.grid(True)
    # Set the y-axis limit to make the plot readable
    current_ylim = plt.ylim()
    plt.ylim(bottom=max(current_ylim[0], -13), top=current_ylim[1])
    plt.savefig(f'problem1_convergence_n{n}.png')
    # plt.show()
    print(f"\nConvergence plot saved to 'problem1_convergence_n{n}.png'")
    if all_solutions:
        plt.figure(figsize=(12, 8))
        # Plot exact solution on a fine grid
        t_fine = np.linspace(0, 1, 500)
        u_exact_fine = exact_solution(t_fine)
        plt.plot(t_fine, u_exact_fine, 'k-', label='Exact Solution',
                 linewidth=1, zorder=10)  # Black, drawn on top
        # Define some styles for the different numerical methods
        styles = {
            'Jacobi': {'color': 'red', 'linestyle': '--'},
            'Gauss-Seidel': {'color': 'blue', 'linestyle': '-.'},
            'SOR': {'color': 'green', 'linestyle': ':'},
            'Steepest Descent': {'color': 'purple', 'linestyle': '--'},
            'Conjugate Gradient': {'color': 'orange', 'linestyle': '-.'},
        }
        # Plot each numerical solution
        for name, solution in all_solutions.items():
            if solution is None:  # Skip methods that did not converge
                continue
            # Add boundary points for a complete plot
            t_numerical_with_bounds = np.concatenate(([0], t, [1]))
            u_numerical_with_bounds = np.concatenate(([0], solution, [0]))
            style = styles.get(name, {'color': 'gray', 'linestyle': '-'})
            plt.plot(t_numerical_with_bounds, u_numerical_with_bounds,
                     label=f'Numerical ({name})', **style, linewidth=2)
        plt.title(f'Comparison of Exact and Numerical Solutions for n = {n}')
        plt.xlabel('$t$')
        plt.ylabel('$u(t)$')
        plt.legend()
        plt.grid(True)
        plt.savefig(f'problem1_all_solutions_comparison_n{n}.png')
        # plt.show()
        print(f"All solutions comparison plot saved to 'problem1_all_solutions_comparison_n{n}.png'")


if __name__ == "__main__":
    # Part (a)
    plot_exact_solution()
    # Part (b), (c), (d)
    solve_and_report(n=20)
    solve_and_report(n=40)

Binary file not shown. (image, 68 KiB)

Binary file not shown. (image, 68 KiB)

Binary file not shown. (image, 72 KiB)

Binary file not shown. (image, 72 KiB)

Binary file not shown. (image, 39 KiB)


@@ -0,0 +1,39 @@
Exact solution plot saved to 'problem1_exact_solution.png'
--- Solving for n = 20 ---
Starting iterative solvers...
Running Jacobi...
Running Gauss-Seidel...
Running SOR...
Running Steepest Descent...
Running Conjugate Gradient...
--- Iteration Counts ---
Jacobi : 2031 iterations (converged, last residual = 5.78e-13)
Gauss-Seidel : 1018 iterations (converged, last residual = 5.72e-13)
SOR : 94 iterations (converged, last residual = 4.52e-13)
Steepest Descent : 2044 iterations (converged, last residual = 5.74e-13)
Conjugate Gradient : 20 iterations (converged, last residual = 1.39e-18)
Convergence plot saved to 'problem1_convergence_n20.png'
All solutions comparison plot saved to 'problem1_all_solutions_comparison_n20.png'
--- Solving for n = 40 ---
Starting iterative solvers...
Running Jacobi...
Running Gauss-Seidel...
Running SOR...
Running Steepest Descent...
Running Conjugate Gradient...
--- Iteration Counts ---
Jacobi : 7758 iterations (converged, last residual = 2.15e-13)
Gauss-Seidel : 3882 iterations (converged, last residual = 2.16e-13)
SOR : 183 iterations (converged, last residual = 2.04e-13)
Steepest Descent : 7842 iterations (converged, last residual = 2.16e-13)
Conjugate Gradient : 40 iterations (converged, last residual = 5.40e-19)
Convergence plot saved to 'problem1_convergence_n40.png'
All solutions comparison plot saved to 'problem1_all_solutions_comparison_n40.png'

147
project2/problem2.py Normal file

@@ -0,0 +1,147 @@
import numpy as np
import matplotlib.pyplot as plt

from iterative_solvers import *


def mat_vec_prod_p2(x_flat: np.ndarray, n: int, h_sq: float) -> np.ndarray:
    """
    Computes the matrix-vector product y = Ax for the 2D PDE problem.
    """
    x_2d = x_flat.reshape((n, n))
    # Zero padding encodes the homogeneous Dirichlet boundary conditions
    x_padded = np.zeros((n + 2, n + 2))
    x_padded[1:-1, 1:-1] = x_2d
    y_2d = np.zeros((n, n))
    # Apply the finite difference stencil:
    # (4+h^2) u_{i,j} - u_{i-1,j} - u_{i+1,j} - u_{i,j-1} - u_{i,j+1}
    for i in range(n):
        for j in range(n):
            y_2d[i, j] = (4.0 + h_sq) * x_padded[i + 1, j + 1] \
                - x_padded[i, j + 1] \
                - x_padded[i + 2, j + 1] \
                - x_padded[i + 1, j] \
                - x_padded[i + 1, j + 2]
    return y_2d.flatten()
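

# Illustrative sanity check (a sketch, not used by the assignment code):
# `_check_stencil_p2` is a hypothetical helper that assembles A explicitly as
# A = kron(I, K) + kron(K, I) + h^2 I, with K the 1D [-1, 2, -1] matrix,
# and compares it against the matrix-free product above.
def _check_stencil_p2(n: int = 4) -> bool:
    h_sq = (1.0 / (n + 1)) ** 2
    K = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    A = np.kron(np.eye(n), K) + np.kron(K, np.eye(n)) + h_sq * np.eye(n * n)
    v = np.random.rand(n * n)
    return np.allclose(A @ v, mat_vec_prod_p2(v, n, h_sq))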
def get_diag_inv_p2(i: int, n: int, h_sq: float) -> float:
    """Returns 1/A_ii for Problem 2. Diagonal is constant: 4 + h^2."""
    return 1.0 / (4.0 + h_sq)


def get_off_diag_sum_p2(x_flat: np.ndarray, i_flat: int, n: int, **kwargs) -> float:
    """Computes the off-diagonal sum for a given row `i_flat` in Problem 2."""
    row = i_flat // n
    col = i_flat % n
    sum_val = 0.0
    if row > 0: sum_val += -x_flat[i_flat - n]
    if row < n - 1: sum_val += -x_flat[i_flat + n]
    if col > 0: sum_val += -x_flat[i_flat - 1]
    if col < n - 1: sum_val += -x_flat[i_flat + 1]
    return sum_val


def solve_and_report_p2(n: int):
    """Solves the 2D PDE linear system for a given n and reports results."""
    print(f"\n--- Solving for n = {n} (System size: {n*n} x {n*n}) ---")
    h = 1.0 / (n + 1)
    h_sq = h**2
    N = n * n
    b = np.full(N, h_sq)
    omega = 2.0 / (1.0 + np.sin(np.pi * h))
    iteration_counts = []
    all_residuals = {}
    all_solutions = {}
    methods_to_run = {
        'Jacobi': ('jacobi', {}),
        'Gauss-Seidel': ('gauss_seidel', {}),
        'SOR': ('sor', {'omega': omega}),
        'Steepest Descent': (steepest_descent, {}),
        'Conjugate Gradient': (conjugate_gradient, {})
    }
    print("\nStarting iterative solvers...")
    for name, (method_func, params) in methods_to_run.items():
        print(f"Running {name}...")
        solver_kwargs = {'n': n, 'h_sq': h_sq}
        if isinstance(method_func, str):
            solver_args = {'b': b, 'method': method_func, 'get_diag_inv': get_diag_inv_p2,
                           'get_off_diag_sum': get_off_diag_sum_p2, 'mat_vec_prod': mat_vec_prod_p2,
                           **solver_kwargs, **params}
            solution, iters, res_hist = stationary_method(**solver_args)
        else:
            solver_args = {'b': b, 'mat_vec_prod': mat_vec_prod_p2, **solver_kwargs}
            solution, iters, res_hist = method_func(**solver_args)
        iteration_counts.append((name, iters))
        all_residuals[name] = res_hist
        all_solutions[name] = solution if iters < MAX_ITER else None
    # Report iteration counts
    print("\n--- Iteration Counts ---")
    for name, iters in iteration_counts:
        status = "converged" if iters < MAX_ITER else "did NOT converge"
        print(f"{name:<20}: {iters} iterations ({status})")
    # Plot residual history
    plt.figure(figsize=(12, 8))
    for name, res_hist in all_residuals.items():
        # Small offset guards against log10(0) for exactly-zero residuals
        res_log = np.log10(np.array(res_hist) + 1e-20)
        plt.plot(res_log, label=name, linestyle='-')
    plt.title(f'Convergence History for PDE Problem, n = {n}')
    plt.xlabel('Iteration Step (m)')
    plt.ylabel('log10(||r^(m)||_2)')
    plt.legend()
    plt.grid(True)
    current_ylim = plt.ylim()
    plt.ylim(bottom=max(current_ylim[0], -13), top=current_ylim[1])
    plt.savefig(f'problem2_convergence_n{n}.png')
    print(f"\nConvergence plot saved to 'problem2_convergence_n{n}.png'")
    if all_solutions:
        print("Generating 3D surface plots of the solutions...")
        # Create the X, Y grid for the surface plot, including boundaries
        plot_grid = np.linspace(0, 1, n + 2)
        X, Y = np.meshgrid(plot_grid, plot_grid)
        # Create a figure for the subplots
        fig = plt.figure(figsize=(18, 10))
        fig.suptitle(f'3D Surface Plots of Numerical Solutions for n = {n}', fontsize=16)
        # Loop through the stored solutions and create a subplot for each
        for i, (name, solution_flat) in enumerate(all_solutions.items()):
            if solution_flat is None:  # Skip methods that did not converge
                continue
            # Reshape the 1D solution vector into a 2D grid
            solution_2d = solution_flat.reshape((n, n))
            # Pad the solution with zero boundaries for plotting
            u_padded = np.zeros((n + 2, n + 2))
            u_padded[1:-1, 1:-1] = solution_2d
            # Add a new 3D subplot
            ax = fig.add_subplot(2, 3, i + 1, projection='3d')
            # Plot the surface
            ax.plot_surface(X, Y, u_padded, cmap='viridis', edgecolor='none')
            # Set titles and labels for each subplot
            ax.set_title(name)
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            ax.set_zlabel('u(x,y)')
            ax.view_init(elev=30, azim=-60)  # Set a nice viewing angle
        plt.tight_layout(rect=[0, 0.03, 1, 0.95])  # Adjust layout to make room for suptitle
        plt.savefig(f'problem2_3d_solutions_n{n}.png')
        print(f"3D surface comparison plot saved to 'problem2_3d_solutions_n{n}.png'")


if __name__ == "__main__":
    # Solve Problem 2 for n=16 and n=32
    solve_and_report_p2(n=16)
    solve_and_report_p2(n=32)

Binary file not shown. (image, 416 KiB)

Binary file not shown. (image, 525 KiB)

Binary file not shown. (image, 80 KiB)

Binary file not shown. (image, 70 KiB)


@@ -0,0 +1,40 @@
--- Solving for n = 16 (System size: 256 x 256) ---
Starting iterative solvers...
Running Jacobi...
Running Gauss-Seidel...
Running SOR...
Running Steepest Descent...
Running Conjugate Gradient...
--- Iteration Counts ---
Jacobi : 1268 iterations (converged)
Gauss-Seidel : 635 iterations (converged)
SOR : 71 iterations (converged)
Steepest Descent : 1247 iterations (converged)
Conjugate Gradient : 31 iterations (converged)
Convergence plot saved to 'problem2_convergence_n16.png'
Generating 3D surface plots of the solutions...
3D surface comparison plot saved to 'problem2_3d_solutions_n16.png'
--- Solving for n = 32 (System size: 1024 x 1024) ---
Starting iterative solvers...
Running Jacobi...
Running Gauss-Seidel...
Running SOR...
Running Steepest Descent...
Running Conjugate Gradient...
--- Iteration Counts ---
Jacobi : 4792 iterations (converged)
Gauss-Seidel : 2397 iterations (converged)
SOR : 138 iterations (converged)
Steepest Descent : 4807 iterations (converged)
Conjugate Gradient : 66 iterations (converged)
Convergence plot saved to 'problem2_convergence_n32.png'
Generating 3D surface plots of the solutions...
3D surface comparison plot saved to 'problem2_3d_solutions_n32.png'