diff --git a/ia/analysis/02_series.tex b/ia/analysis/02_series.tex index ccbf6da..5033ae4 100644 --- a/ia/analysis/02_series.tex +++ b/ia/analysis/02_series.tex @@ -21,7 +21,7 @@ \subsection{Definition} s_N & = \sum_{j=1}^N (\lambda a_j + \mu b_j) \\ & = \sum_{j=1}^N \lambda a_j + \sum_{j=1}^N \mu b_j \\ & = \lambda c_N + \mu d_N \\ - \therefore s_N & \to \lambda c + \mu d + \therefore\ s_N & \to \lambda c + \mu d \end{align*} \item For any \(n \geq N\), we have \begin{align*} diff --git a/ia/analysis/07_differentiability.tex b/ia/analysis/07_differentiability.tex index 0a22a9c..f2e318a 100644 --- a/ia/analysis/07_differentiability.tex +++ b/ia/analysis/07_differentiability.tex @@ -242,7 +242,7 @@ \subsection{Inverse function theorem} \begin{align*} \frac{g(y + k) - g(y)}{k} & = \frac{x + h - x}{f(x+h) - y} \\ & = \frac{h}{f(x+h) - f(x)} \\ - \therefore \lim_{k \to 0} \frac{g(y + k) - g(y)}{k} & = \lim_{h \to 0} \frac{h}{f(x+h) - f(x)} \\ + \therefore\ \lim_{k \to 0} \frac{g(y + k) - g(y)}{k} & = \lim_{h \to 0} \frac{h}{f(x+h) - f(x)} \\ & = \frac{1}{f'(x)} \end{align*} as required. diff --git a/ia/analysis/09_power_series.tex b/ia/analysis/09_power_series.tex index d80fc26..1e1838f 100644 --- a/ia/analysis/09_power_series.tex +++ b/ia/analysis/09_power_series.tex @@ -205,7 +205,7 @@ \subsection{Infinite differentiability} \begin{align*} (z + h)^n - z^n - nhz^{n-1} & = \left( \sum_{r=0}^n \binom{n}{r} z^{n-r} h^r \right) - z^n - nhz^{n-1} \\ & = \sum_{r=2}^n \binom{n}{r} z^{n-r} h^r \\ - \therefore \abs{(z + h)^n - z^n - nhz^{n-1}} & = \abs{\sum_{r=2}^n \binom{n}{r} z^{n-r} h^r} \\ + \therefore\ \abs{(z + h)^n - z^n - nhz^{n-1}} & = \abs{\sum_{r=2}^n \binom{n}{r} z^{n-r} h^r} \\ & \leq \sum_{r=2}^n \abs{\binom{n}{r} z^{n-r} h^r} \\ & = \sum_{r=2}^n \binom{n}{r} \abs{z}^{n-r} \abs{h}^r \\ & \leq n(n-1) \underbrace{\left[ \sum_{r=2}^n \binom{n-2}{r-2} \abs{z}^{n-r} \abs{h}^{r-2} \right]}_{(\abs{z} + \abs{h})^{n-2}} \abs{h}^2 \\ diff --git a/ia/de/01_differentiation.tex b/ia/de/01_differentiation.tex index 1bf036a..9a9540d 100644 --- a/ia/de/01_differentiation.tex +++ b/ia/de/01_differentiation.tex @@ -190,7 +190,7 @@ \subsection{Equation of a tangent} \begin{align*} \eval{\frac{\dd{f}}{\dd{x}}}_{x=x_0} & = \frac{f(x_0 + h) - f(x_0)}{h} \\ & = \frac{f(x_0 + h) - f(x_0)}{h} + \frac{o(h)}{h} \\ - \therefore f(x_0 + h) & = f(x_0) + \eval{\frac{\dd{f}}{\dd{x}}}_{x=x_0} h + o(h) + \therefore\ f(x_0 + h) & = f(x_0) + \eval{\frac{\dd{f}}{\dd{x}}}_{x=x_0} h + o(h) \end{align*} If we now take \(x=x_0+h;\,y=f(x);\,y_0=f(x_0)\), we have \[ diff --git a/ia/de/04_linear_ordinary_differential_equations.tex b/ia/de/04_linear_ordinary_differential_equations.tex index 4ad3781..bba0392 100644 --- a/ia/de/04_linear_ordinary_differential_equations.tex +++ b/ia/de/04_linear_ordinary_differential_equations.tex @@ -58,19 +58,19 @@ \subsection{Eigenfunction forcing} \begin{align*} \dot a & = -k_a a \implies a = a_0 e^{-k_a t} \\ \dot b & = k_a a - k_b b \\ - \therefore \dot b + k_b b & = k_a a_0 e^{-k_a t} + \therefore\ \dot b + k_b b & = k_a a_0 e^{-k_a t} \end{align*} So we have a linear first order ODE with an eigenfunction as the forcing function. We can guess that the particular integral is of the form \(b_p = \lambda e^{-k_a t}\). 
\begin{align*} -k_a\lambda e^{-k_a t} + k_b \lambda e^{-k_a t} & = k_a a_0 e^{-k_a t} \\ \lambda(k_b-k_a) & = k_a a_0 \\ - \therefore \lambda & = \frac{k_a}{k_b - k_a} a_0 + \therefore\ \lambda & = \frac{k_a}{k_b - k_a} a_0 \end{align*} We can form the complementary function by solving: \begin{align*} \dot{b_c} + k_b b_c & = 0 \\ - \therefore b_c & = Ae^{-k_b t} + \therefore\ b_c & = Ae^{-k_b t} \end{align*} So combining everything, we have \[ diff --git a/ia/de/06_isoclines_and_solution_curves.tex b/ia/de/06_isoclines_and_solution_curves.tex index 8b4a6eb..cd11a0f 100644 --- a/ia/de/06_isoclines_and_solution_curves.tex +++ b/ia/de/06_isoclines_and_solution_curves.tex @@ -175,14 +175,14 @@ \subsection{Fixed points and perturbation analysis} \item (\(y = 1\)) \begin{align*} \dot \varepsilon & = -2(1)t \varepsilon \\ & = -2t\varepsilon \\ - \therefore \varepsilon & = \varepsilon_0 e^{-t^2} \\ + \therefore\ \varepsilon & = \varepsilon_0 e^{-t^2} \\ \lim_{t \to \infty} \varepsilon_0 e^{-t^2} & = 0 \end{align*} so this point is stable. \item (\(y = -1\)) \begin{align*} \dot \varepsilon & = -2(-1)t \varepsilon \\ & = 2t\varepsilon \\ - \therefore \varepsilon & = \varepsilon_0 e^{t^2} \\ + \therefore\ \varepsilon & = \varepsilon_0 e^{t^2} \\ \lim_{t \to \infty} \varepsilon_0 e^{t^2} & = \pm\infty \end{align*} so this point is unstable. @@ -194,7 +194,7 @@ \subsection{Autonomous differential equations} Therefore, near a fixed point \(y=a\), we have: \begin{align*} y & = a + \varepsilon(t) \\ - \therefore\dot\varepsilon & = \varepsilon \frac{\dd{f}}{\dd{y}}(a) = \varepsilon k + \therefore\ \dot\varepsilon & = \varepsilon \frac{\dd{f}}{\dd{y}}(a) = \varepsilon k \end{align*} where \(k\) is the constant value \(\frac{\dd{f}}{\dd{y}}(a)\). Note that we can use normal derivatives in place of partial derivatives because \(f\) depends only on \(y\). diff --git a/ia/de/07_phase_portraits.tex b/ia/de/07_phase_portraits.tex index 3f351c4..c5178d5 100644 --- a/ia/de/07_phase_portraits.tex +++ b/ia/de/07_phase_portraits.tex @@ -145,7 +145,7 @@ \subsection{Phase portraits} Let \(\alpha y\) denote the birth rate, and \(\beta y\) be the death rate. Then, we can model this using a linear model by: \[ - \frac{\dd{y}}{\dd{t}} = \alpha y - \beta y \quad \therefore y = y_0 e^{(\alpha - \beta) t} + \frac{\dd{y}}{\dd{t}} = \alpha y - \beta y \quad \therefore\ y = y_0 e^{(\alpha - \beta) t} \] If \(\alpha > \beta\) then we have exponential growth; if \(\alpha < \beta\) then we have exponential decay. This is an unrealistic model, so we can use a nonlinear model to increase accuracy. diff --git a/ia/de/09_forced_second_order_odes.tex b/ia/de/09_forced_second_order_odes.tex index afbf5bc..6485f00 100644 --- a/ia/de/09_forced_second_order_odes.tex +++ b/ia/de/09_forced_second_order_odes.tex @@ -262,7 +262,7 @@ \subsection{Resonance in undamped systems} C(-\omega^2 + \omega_0^2) = 1 \] \[ - \therefore y_p = \frac{1}{\omega_0^2 - \omega^2}\sin\omega t + \therefore\ y_p = \frac{1}{\omega_0^2 - \omega^2}\sin\omega t \] As the system is linear in \(y\) and its derivatives, we can freely add some multiple of the complementary function and it will remain a solution.
\[ diff --git a/ia/de/11_discrete_equations_and_the_method_of_frobenius.tex b/ia/de/11_discrete_equations_and_the_method_of_frobenius.tex index 44d60ff..073a5a3 100644 --- a/ia/de/11_discrete_equations_and_the_method_of_frobenius.tex +++ b/ia/de/11_discrete_equations_and_the_method_of_frobenius.tex @@ -168,7 +168,7 @@ \subsection{Fuch's theorem} a_n & = \frac{n-3}{n-1}a_{n-2} \\ a_n & = \frac{n-3}{n-1}\frac{n-5}{n-3}a_{n-4} = \frac{n-5}{n-1}a_{n-4} \\ a_n & = \frac{n-5}{n-1}\frac{n-7}{n-5}a_{n-6} = \frac{n-7}{n-1}a_{n-6} \\ - \therefore a_n & = \frac{-1}{n-1}a_0 + \therefore\ a_n & = \frac{-1}{n-1}a_0 \end{align*} Therefore \[ diff --git a/ia/de/12_multivariate_calculus.tex b/ia/de/12_multivariate_calculus.tex index 3c70025..267e7cc 100644 --- a/ia/de/12_multivariate_calculus.tex +++ b/ia/de/12_multivariate_calculus.tex @@ -199,7 +199,7 @@ \subsection{Contours near stationary points} f = \text{constant (since \(f\) is a contour)} \approx f(\vb x_s) = \frac{1}{2}\delta \vb x \cdot H \cdot \delta \vb x^\transpose \] \begin{equation}\label{contourhessian} - \therefore \lambda_1 \xi^2 + \lambda_2 \eta^2 \approx \text{constant} + \therefore\ \lambda_1 \xi^2 + \lambda_2 \eta^2 \approx \text{constant} \end{equation} Near a minimum or maximum point, \(\lambda_1\) and \(\lambda_2\) have the same sign. \eqref{contourhessian} implies that the contours of \(f\) are elliptical. diff --git a/ia/dr/06_angular_motion_and_orbits.tex b/ia/dr/06_angular_motion_and_orbits.tex index ec2d6d1..e991d4b 100644 --- a/ia/dr/06_angular_motion_and_orbits.tex +++ b/ia/dr/06_angular_motion_and_orbits.tex @@ -68,7 +68,7 @@ \subsection{Polar coordinates in the plane} We can compute expressions for velocity and acceleration in terms of these new coordinates. \begin{align*} \vb r & = r \vb e_r \\ - \therefore \dot{\vb r} & = \dot r \vb e_r + r \frac{\dd}{\dd{t}}\vb e_r \\ + \therefore\ \dot{\vb r} & = \dot r \vb e_r + r \frac{\dd}{\dd{t}}\vb e_r \\ & = \dot r \vb e_r + r \dot\theta \vb e_\theta \end{align*} So \(\dot r\) is the radial component of the velocity, and \(r\dot\theta\) is the angular component of the velocity. diff --git a/ia/dr/16_relativistic_physics.tex b/ia/dr/16_relativistic_physics.tex index b250fec..2a2b603 100644 --- a/ia/dr/16_relativistic_physics.tex +++ b/ia/dr/16_relativistic_physics.tex @@ -23,7 +23,7 @@ \subsection{Proper time} & = \frac{1}{c}\sqrt{c^2\dd{t}^2 - \abs{\dd{\vb x}}^2} \\ & = \frac{1}{c}\sqrt{c^2\dd{t}^2 - \abs{\vb u}^2 \dd{t}^2} \\ & = \qty(1 - \frac{\vb u^2}{c^2})^{\frac{1}{2}}\dd{t} \\ - \therefore \dv{t}{\tau} & = \gamma_{\vb u} + \therefore\ \dv{t}{\tau} & = \gamma_{\vb u} \end{align*} where \(\gamma_{\vb u} = \qty(1 - \frac{\vb u^2}{c^2})^{-\frac{1}{2}}\).
Now, the total time observed by a particle moving along its world line is diff --git a/ia/probability/08_combinations_of_random_variables.tex b/ia/probability/08_combinations_of_random_variables.tex index fa978cb..84ad10b 100644 --- a/ia/probability/08_combinations_of_random_variables.tex +++ b/ia/probability/08_combinations_of_random_variables.tex @@ -156,7 +156,7 @@ \subsection{Properties of conditional expectation} We can see by the standard properties of the expectation that \begin{align*} \expect{X \mid Y} & = \sum_y 1(Y = y) \expect{X \mid Y = y} \\ - \therefore \expect{\expect{X \mid Y}} & = \sum_y \expect{1(Y = y)} \expect{X \mid Y = y} \\ + \therefore\ \expect{\expect{X \mid Y}} & = \sum_y \expect{1(Y = y)} \expect{X \mid Y = y} \\ & = \sum_y \prob{Y = y} \expect{X \mid Y = y} \\ & = \sum_y \prob{Y = y} \frac{\expect{X \cdot 1(Y = y)}}{\prob{Y = y}} \\ & = \sum_y \expect{X \cdot 1(Y = y)} \\ diff --git a/ia/probability/11_branching_processes.tex b/ia/probability/11_branching_processes.tex index e2bcb83..a7ed6ff 100644 --- a/ia/probability/11_branching_processes.tex +++ b/ia/probability/11_branching_processes.tex @@ -30,8 +30,8 @@ \subsection{Expectation of generation size} & = \expect{Y_{1,n} + \cdots + Y_{m,n} \mid X_n = m} \\ & = m \expect{Y_{1,n}} \\ & = m \expect{X_1} \\ - \therefore \expect{X_{n+1} \mid X_n} & = X_n \cdot \expect{X_1} \\ - \therefore \expect{X_{n+1}} & = \expect{X_n \cdot \expect{X_1}} \\ + \therefore\ \expect{X_{n+1} \mid X_n} & = X_n \cdot \expect{X_1} \\ + \therefore\ \expect{X_{n+1}} & = \expect{X_n \cdot \expect{X_1}} \\ & = \expect{X_n} \cdot \expect{X_1} \end{align*} \end{proof} @@ -51,7 +51,7 @@ \subsection{Probability generating functions} \expect{z^{X_{n+1}} \mid X_n = m} & = \expect{z^{Y_{1, n} + \dots + Y_{m,n}} \mid X_n = m} \\ & = \expect{z^{X_1}}^m \\ & = G(z)^m \\ - \therefore \expect{\expect{z^{X_{n+1}} \mid X_n}} & = \expect{G(z)^{X_n}} \\ + \therefore\ \expect{\expect{z^{X_{n+1}} \mid X_n}} & = \expect{G(z)^{X_n}} \\ & = G_n(G(z)) \end{align*} \end{proof} diff --git a/ia/probability/13_multivariate_density_functions.tex b/ia/probability/13_multivariate_density_functions.tex index 24ab7a9..4c51771 100644 --- a/ia/probability/13_multivariate_density_functions.tex +++ b/ia/probability/13_multivariate_density_functions.tex @@ -241,7 +241,7 @@ \subsection{Order statistics of a random sample} \cdot \prob{X_1 \leq x_1, \dots, X_n \leq x_n, X_1 < \dots < X_n} \\ & = n! \int_{-\infty}^{x_1} \int_{u_1}^{x_2} \cdots \int_{u_{n-1}}^{x_n} f(u_1) \cdots f(u_n) \dd{u_1} \cdots \dd{u_n} \\ - \therefore f_{Y_1, \dots, Y_n}(x_1, \dots, x_n) & = n! + \therefore\ f_{Y_1, \dots, Y_n}(x_1, \dots, x_n) & = n! f(x_1) \cdots f(x_n) \end{align*} when \(x_1 < x_2 < \dots < x_n\), and the joint density is zero otherwise. 
diff --git a/ia/probability/17_simulation_of_random_variables.tex b/ia/probability/17_simulation_of_random_variables.tex index 490021c..201fb5e 100644 --- a/ia/probability/17_simulation_of_random_variables.tex +++ b/ia/probability/17_simulation_of_random_variables.tex @@ -84,5 +84,5 @@ \subsection{Rejection sampling} & = \frac{1}{\lambda} \int_B f(x) \dd{x} \\ \abs{A} & = \frac{1}{\lambda} \int_{[0,1]^{d-1}} f(x) \dd{x} \\ & = \frac{1}{\lambda} \\ - \therefore \prob{X \in B} & = \int_B f(x) \dd{x} + \therefore\ \prob{X \in B} & = \int_B f(x) \dd{x} \end{align*} diff --git a/ia/vc/08_maxwell_s_equations.tex b/ia/vc/08_maxwell_s_equations.tex index 37904e2..e94eddb 100644 --- a/ia/vc/08_maxwell_s_equations.tex +++ b/ia/vc/08_maxwell_s_equations.tex @@ -74,7 +74,7 @@ \subsection{Electromagnetic waves} & = \frac{1}{c^2} \pdv[2]{\vb E}{t} \end{align*} \[ - \therefore \laplacian \vb E - \frac{1}{c^2} \pdv[2]{\vb E}{t} = \vb 0 + \therefore\ \laplacian \vb E - \frac{1}{c^2} \pdv[2]{\vb E}{t} = \vb 0 \] which is the wave equation for waves travelling at speed \(c\). Hence, in a vacuum, the electric field propagates at speed \(c\). @@ -87,7 +87,7 @@ \subsection{Electromagnetic waves} & = \frac{1}{c^2} \pdv[2]{\vb B}{t} \end{align*} \[ - \therefore \laplacian \vb B - \frac{1}{c^2} \pdv[2]{\vb B}{t} = \vb 0 + \therefore\ \laplacian \vb B - \frac{1}{c^2} \pdv[2]{\vb B}{t} = \vb 0 \] Hence the magnetic field also propagates at speed \(c\). So in general, we can say that electromagnetic waves always travel at speed \(c\) in a vacuum. diff --git a/ia/vm/01_complex_numbers.tex b/ia/vm/01_complex_numbers.tex index 7b0348b..6d3dd78 100644 --- a/ia/vm/01_complex_numbers.tex +++ b/ia/vm/01_complex_numbers.tex @@ -43,7 +43,7 @@ \subsection{Definition and basic theorems} \begin{align*} \abs{z_2 - z_1} & \geq \abs{z_2} - \abs{z_1} \\ \text{or } \abs{z_2 - z_1} & \geq \abs{z_1} - \abs{z_2} \\ - \therefore \abs{z_2 - z_1} & \geq \abs{\abs{z_2} - \abs{z_1}} \\ + \therefore\ \abs{z_2 - z_1} & \geq \abs{\abs{z_2} - \abs{z_1}} \\ \end{align*} De Moivre's Theorem states that diff --git a/ia/vm/05_vectors_in_real_euclidean_space.tex b/ia/vm/05_vectors_in_real_euclidean_space.tex index 72d90c7..55d1ef3 100644 --- a/ia/vm/05_vectors_in_real_euclidean_space.tex +++ b/ia/vm/05_vectors_in_real_euclidean_space.tex @@ -103,7 +103,7 @@ \subsection{Inner product spaces} Then by the Cauchy--Schwarz inequality, we have \begin{align*} \abs{\langle f, g \rangle} & \leq \norm{f} \cdot \norm{g} \\ - \therefore \abs{\int_0^1 f(x)g(x)\dd{x}} & \leq \sqrt{\int_0^1 f(x)^2 \dd{x}}\sqrt{\int_0^1 g(x)^2 \dd{x}} + \therefore\ \abs{\int_0^1 f(x)g(x)\dd{x}} & \leq \sqrt{\int_0^1 f(x)^2 \dd{x}}\sqrt{\int_0^1 g(x)^2 \dd{x}} \end{align*} \begin{lemma} @@ -114,7 +114,7 @@ \subsection{Inner product spaces} \begin{align*} \left\langle \vb v_j, \sum_i \alpha_i \vb v_i \right\rangle & = 0 \\ \intertext{And because each vector that is not \(\vb v_j\) is orthogonal to it, those terms cancel, leaving} - \therefore \left\langle \vb v_j, \alpha_j \vb v_j \right\rangle & = 0 \\ + \therefore\ \left\langle \vb v_j, \alpha_j \vb v_j \right\rangle & = 0 \\ \alpha_j \left\langle \vb v_j, \vb v_j \right\rangle & = 0 \\ \alpha_j = 0 \end{align*} diff --git a/ia/vm/08_linear_maps.tex b/ia/vm/08_linear_maps.tex index f56056a..390ffc2 100644 --- a/ia/vm/08_linear_maps.tex +++ b/ia/vm/08_linear_maps.tex @@ -337,7 +337,7 @@ \subsection{Matrices} Therefore: \begin{align*} x_i' & = x_i + \lambda b_j x_j a_i = S_{ij}x_j \\ - \therefore S_{ij} & = \delta_{ij} + \lambda a_i 
b_j + \therefore\ S_{ij} & = \delta_{ij} + \lambda a_i b_j \end{align*} For example in \(\mathbb R^2\) with \(\vb a = \begin{pmatrix} 1 \\ 0 @@ -353,7 +353,7 @@ \subsection{Matrices} \begin{align*} \vb x' & = R\vb x = (\cos \theta)\vb x + (1 - \cos \theta)(\nhat \cdot \vb x)\nhat + (\sin \theta)(\nhat \times \vb x) \\ x_i' & = (\cos \theta)x_i + (1 - \cos \theta)n_j x_j n_i - (\sin \theta) \varepsilon_{ijk}x_j n_k = R_{ij} x_j \\ - \therefore R_{ij} & = \delta_{ij}(\cos \theta) - (1 - \cos \theta)n_i n_j - (\sin \theta)\varepsilon_{ijk} n_k + \therefore\ R_{ij} & = \delta_{ij}(\cos \theta) - (1 - \cos \theta)n_i n_j - (\sin \theta)\varepsilon_{ijk} n_k \end{align*} \end{enumerate} \end{example} diff --git a/ia/vm/09_transpose_and_hermitian_conjugate.tex b/ia/vm/09_transpose_and_hermitian_conjugate.tex index 4d00216..61cfeeb 100644 --- a/ia/vm/09_transpose_and_hermitian_conjugate.tex +++ b/ia/vm/09_transpose_and_hermitian_conjugate.tex @@ -160,7 +160,7 @@ \subsection{Orthogonal matrices} \] because \[ - H_{ij} = \delta_{ij} - 2n_i n_j \therefore H = \begin{pmatrix} + H_{ij} = \delta_{ij} - 2n_i n_j \therefore\ H = \begin{pmatrix} 1 - 2 \sin^2 \frac{\theta}{2} & 2\sin\frac{\theta}{2}\cos\frac{\theta}{2} \\ 2\sin\frac{\theta}{2} \cos\frac{\theta}{2} & 1-2\cos^2\frac{\theta}{2} \end{pmatrix} diff --git a/ia/vm/20_symmetries_and_transformation_groups.tex b/ia/vm/20_symmetries_and_transformation_groups.tex index 6bec389..938ddac 100644 --- a/ia/vm/20_symmetries_and_transformation_groups.tex +++ b/ia/vm/20_symmetries_and_transformation_groups.tex @@ -28,7 +28,7 @@ \subsection{2D Minkowski space} \end{pmatrix} \] \[ - \therefore \left( \begin{pmatrix} + \therefore\ \left( \begin{pmatrix} x_0 \\ x_1 \end{pmatrix}, \begin{pmatrix} y_0 \\ y_1 \end{pmatrix} diff --git a/ib/antop/03_metric_spaces.tex b/ib/antop/03_metric_spaces.tex index 7019698..977213b 100644 --- a/ib/antop/03_metric_spaces.tex +++ b/ib/antop/03_metric_spaces.tex @@ -326,10 +326,15 @@ \subsection{Continuity} \end{proposition} \begin{proof} Let \( \varepsilon > 0 \). - We want to find \( \delta > 0 \) such that \( \forall x \in M \), \( d(x,a) < \delta \) implies \( d''(g(f(x)), g(f(a))) < \varepsilon \). - Since \( g \) is continuous at \( f(a) \), there exists \( \eta > 0 \) such that \( \forall y \in M' \), \( d'(y,f(a)) < \eta \implies d''(g(y), g(f(a))) < \varepsilon \). - Now, since \( f \) is continuous at \( a \), for this \( \eta \) there exists \( \delta \) such that for all \( x \in M \), \( d(x,a) < \delta \implies d'(f(x) - f(a)) < \eta \). - Then \( d(x,a) < \delta \implies d''(g(f(x)), g(f(a))) < \varepsilon \) as required. + We want to find \( \delta > 0 \) such that \( \forall x \in M \), + \[ d(x,a) < \delta \implies d''(g(f(x)), g(f(a))) < \varepsilon \] + Since \( g \) is continuous at \( f(a) \), there exists \( \eta > 0 \) such that \( \forall y \in M' \), + \[ d'(y,f(a)) < \eta \implies d''(g(y), g(f(a))) < \varepsilon \] + Now, since \( f \) is continuous at \( a \), for this \( \eta \) there exists \( \delta \) such that for all \( x \in M \), + \[ d(x,a) < \delta \implies d'(f(x), f(a)) < \eta \] + Then + \[ d(x,a) < \delta \implies d''(g(f(x)), g(f(a))) < \varepsilon \] + as required. \end{proof} \begin{example} @@ -362,9 +367,9 @@ \subsection{Isometric, Lipschitz, and uniformly continuous functions} Let \( f \colon M \to M' \) be a function between metric spaces.
Then, \( f \) is \begin{enumerate} - \item \textit{isometric}, if \( \forall x,y \in M, d'(f(x),f(y)) = d(x,y) \) - \item \textit{Lipschitz}, or \( c \)-Lipschitz, if \( \exists c \in \mathbb R^+, \forall x,y \in M, d'(f(x),f(y)) \leq c\cdot d(x,y) \) - \item \textit{uniformly continuous}, if \( \forall \varepsilon > 0, \exists \delta > 0, \forall x,y \in M, d(x,y) < \delta \implies d'(f(x), f(y)) < \varepsilon \) + \item \textit{isometric}, if \[ \forall x,y \in M, d'(f(x),f(y)) = d(x,y) \] + \item \textit{Lipschitz}, or \( c \)-Lipschitz, if \[ \exists c \in \mathbb R^+, \forall x,y \in M, d'(f(x),f(y)) \leq c\cdot d(x,y) \] + \item \textit{uniformly continuous}, if \[ \forall \varepsilon > 0, \exists \delta > 0, \forall x,y \in M, d(x,y) < \delta \implies d'(f(x), f(y)) < \varepsilon \] \end{enumerate} \end{definition} \begin{remark} diff --git a/ib/antop/05_completeness.tex b/ib/antop/05_completeness.tex index 77b3f86..9ff867d 100644 --- a/ib/antop/05_completeness.tex +++ b/ib/antop/05_completeness.tex @@ -38,8 +38,8 @@ \subsection{Cauchy sequences} \end{proof} \begin{remark} Boundedness does not imply the sequence is Cauchy. - For instance, consider \( 0,1,0,1,\dots \) in \( \mathbb R \). - If a sequence is Cauchy, it is not necessary convergent in an arbitrary metric space (not \( \mathbb R, \mathbb C \)). + For instance, consider the sequence \( 0,1,0,1,\dots \) in \( \mathbb R \). + If a sequence is Cauchy, it is not necessarily convergent in an arbitrary metric space (not \( \mathbb R, \mathbb C \)). For instance, consider \( x_n = \frac{1}{n} \) in \( (0, \infty) \). This is certainly not convergent, since the limit cannot be zero. \end{remark} @@ -154,9 +154,11 @@ \subsection{Completeness of subspaces and function spaces} \exists \delta > 0, \forall x \in M, d(x,a) < \delta \implies \abs{f_n(x) - f_n(a)} < \varepsilon \] Hence, \( \forall x \in M \), if \( d(x,a) < \delta \) we have - \[ - \abs{f(x) - f(a)} \leq \abs{f(x) - f_n(x)} + \abs{f_n(x) - f_n(a)} + \abs{f_n(a) - f(a)} \leq 2 D(f_n,f) + \abs{f_n(x) - f_n(a)} < 3\varepsilon - \] + \begin{align*} + \abs{f(x) - f(a)} &\leq \abs{f(x) - f_n(x)} + \abs{f_n(x) - f_n(a)} + \abs{f_n(a) - f(a)} \\ + &\leq 2 D(f_n,f) + \abs{f_n(x) - f_n(a)} \\ + &< 3\varepsilon + \end{align*} \end{proof} \begin{corollary} Consider \( C[a,b] \), the space of continuous functions on \( [a,b] \). @@ -235,7 +237,8 @@ \subsection{Completeness of subspaces and function spaces} Let \( a \in M \), \( \varepsilon > 0 \). Then, since \( f_k \to f \) in \( \ell_\infty(M,N) \), we can fix \( k \in \mathbb N \) such that \( D(f_k,f) < \varepsilon \). Since \( f_k \) is continuous, \( \exists \delta > 0, \forall x \in M, d(x,a) < \delta \implies e(f_k(x), f_k(a)) < \varepsilon \). - \[ - \forall x \in M, f(x,a) < \delta \implies e(f(x),f(a)) \leq e(f(x), f_k(x)) + e(f_k(x), f_k(a)) + e(f_k(a), f(a)) \leq 3\varepsilon - \] + \begin{align*} + \forall x \in M, d(x,a) < \delta \implies e(f(x),f(a)) &\leq e(f(x), f_k(x)) + e(f_k(x), f_k(a)) + e(f_k(a), f(a)) \\ + &\leq 3\varepsilon + \end{align*} \end{proof} diff --git a/ib/antop/07_topology.tex b/ib/antop/07_topology.tex index 0612960..f836869 100644 --- a/ib/antop/07_topology.tex +++ b/ib/antop/07_topology.tex @@ -448,9 +448,12 @@ \subsection{Quotients} Let \( x \in X, t \in X/R \). Then \( x \in t \) if and only if \( t = q(x) \).
For \( V \subset X/R \), - \[ - q^{-1}(V) = \qty{x \in X \colon q(x) \in V} = \qty{x \in X \colon \exists t \in V, t = q(x)} = \qty{x \in X \colon \exists t \in V, x \in t} = \bigcup_{t \in V} t - \] + \begin{align*} + q^{-1}(V) &= \qty{x \in X \colon q(x) \in V} \\ + &= \qty{x \in X \colon \exists t \in V, t = q(x)} \\ + &= \qty{x \in X \colon \exists t \in V, x \in t} \\ + &= \bigcup_{t \in V} t + \end{align*} \end{remark} \begin{example} Consider \( \mathbb R \), an abelian group under addition, and the subgroup \( \mathbb Z \). diff --git a/ib/antop/08_connectedness.tex b/ib/antop/08_connectedness.tex index d301321..b2a5686 100644 --- a/ib/antop/08_connectedness.tex +++ b/ib/antop/08_connectedness.tex @@ -160,13 +160,16 @@ \subsection{Consequences of definition} Any quotient of a connected topological space is connected. \end{corollary} \begin{example} - Let \( Y = \qty{\qty(x, \sin\frac{1}{x}) \colon x > 0} \subset \mathbb R^2 \). + Let + \[ Y = \qty{\qty(x, \sin\frac{1}{x}) \colon x > 0} \subset \mathbb R^2 \] This space is connected; the function \( f \colon (0, \infty) \to \mathbb R^2 \) defined by \( f(x) = \qty(x,\sin \frac{1}{x}) \) is continuous. So we have that \( Y = \Im f \) is connected. Hence, \( \overline Y \) is connected. - We claim that \( Z \equiv Y \cup \qty{(0,y) \colon y \in [-1,1]} = \overline Y \). + We claim that + \[ Z \equiv Y \cup \qty{(0,y) \colon y \in [-1,1]} = \overline Y \] Indeed, given \( y \in [-1,1] \), for all \( n \in \mathbb N \) we have that \( (0, \frac{1}{n}) \) is mapped to \( (n,\infty) \) by \( x \to \frac{1}{x} \), so by the intermediate value theorem there exists \( x_n \in \qty(0, \frac{1}{n}) \) such that \( \sin \frac{1}{x_n} = y \). - Hence, \( \qty(x_n, \sin \frac{1}{x_n}) = (x_n, y) \to (0,y) \in \overline Y \). + Hence, + \[ \qty(x_n, \sin \frac{1}{x_n}) = (x_n, y) \to (0,y) \in \overline Y \] So \( Y \subset Z \subset \overline Y \). If we can show \( Z \) is closed, \( Z = \overline Y \) since \( \overline Y \) is the smallest closed superset of \( Y \). Suppose \( (x_n, y_n) \in Z \) for all \( n \in \mathbb N \), and \( (x_n, y_n) \to (x,y) \) in \( \mathbb R^2 \). @@ -174,7 +177,8 @@ \subsection{Consequences of definition} If \( x = 0 \), we have \( (x,y) \in Z \). If \( x \neq 0 \), then \( x_n \to x \) implies \( x_n \neq 0 \) for all sufficiently large \( n \). Hence \( y_n = \sin \frac{1}{x_n} \) for all sufficiently large \( n \). - Hence \( (x_n, y_n) \to \qty(x, \sin \frac{1}{x}) \in Z \). + Thus + \[ (x_n, y_n) \to \qty(x, \sin \frac{1}{x}) \in Z \] \end{example} \begin{lemma} Let \( X \) be a topological space and \( \mathcal A \) be a family of connected subsets of \( X \). diff --git a/ib/antop/11_partial_derivatives.tex b/ib/antop/11_partial_derivatives.tex index 619800a..5db0985 100644 --- a/ib/antop/11_partial_derivatives.tex +++ b/ib/antop/11_partial_derivatives.tex @@ -150,9 +150,13 @@ \subsection{Mean value inequality} \] By the mean value theorem, there exists \( \theta \in (0,1) \) such that \( \phi(1) - \phi(0) = \phi'(\theta) \). 
Then, by the Cauchy--Schwarz inequality, - \[ - \norm{f(b) - f(a)}^2 = \phi'(\theta) = \inner{f'(a+\theta u)(u), v} \leq \norm{f'(a+\theta u)(u)} \cdot \norm{v} \leq \norm{f'(a+\theta u)} \cdot \norm{u} \cdot \norm{v} \leq M \norm{b-a} \cdot \norm{v} - \] + \begin{align*} + \norm{f(b) - f(a)}^2 &= \phi'(\theta) \\ + &= \inner{f'(a+\theta u)(u), v} \\ + &\leq \norm{f'(a+\theta u)(u)} \cdot \norm{v} \\ + &\leq \norm{f'(a+\theta u)} \cdot \norm{u} \cdot \norm{v} \\ + &\leq M \norm{b-a} \cdot \norm{v} + \end{align*} Hence, \[ \norm{f(b) - f(a)} \leq M \norm{b-a} diff --git a/ib/ca/02_integration.tex b/ib/ca/02_integration.tex index f153a0f..77493e8 100644 --- a/ib/ca/02_integration.tex +++ b/ib/ca/02_integration.tex @@ -202,7 +202,7 @@ \subsection{Fundamental theorem of calculus} \abs{\frac{F(w+h) - F(w)}{h} - f(w)} & = \frac{1}{\abs{h}} \abs{\int_{\delta_h} (f(z) - f(w)) \dd{z}} \\ & \leq \frac{1}{\abs{h}} \mathrm{length}(\delta_h) \sup_{z \in \Im \delta_h} \abs{f(z) - f(w)} \\ & = \sup_{z \in \Im \delta_h} \abs{f(z) - f(w)} \\ - \therefore \lim_{h \to 0} \abs{\frac{F(w+h) - F(w)}{h} - f(w)} & = \lim_{h \to 0} \sup_{z \in \Im \delta_h} \abs{f(z) - f(w)} = 0 + \therefore\ \lim_{h \to 0} \abs{\frac{F(w+h) - F(w)}{h} - f(w)} & = \lim_{h \to 0} \sup_{z \in \Im \delta_h} \abs{f(z) - f(w)} = 0 \end{align*} Thus, \( F \) is differentiable at \( w \) with \( F'(w) = f(w) \). \end{proof} @@ -288,7 +288,7 @@ \subsection{Star-shaped domains} & \leq \varepsilon \qty(\sup_{z \in \partial T_n} \abs{z - z_0}) \mathrm{length}(\partial T_n) \\ & \leq \varepsilon \cdot \mathrm{length}(\partial T_n)^2 \\ & = \frac{\varepsilon}{4^n} \mathrm{length}(\partial T_0)^2 \\ - \therefore \abs{\eta(T_0)} & \leq \varepsilon \cdot \mathrm{length}(\partial T_0)^2 + \therefore\ \abs{\eta(T_0)} & \leq \varepsilon \cdot \mathrm{length}(\partial T_0)^2 \end{align*} \( \varepsilon \) was arbitrary, hence \( \eta(T_0) \) must be zero. \end{proof} diff --git a/ib/linalg/01_vector_spaces_and_linear_dependence.tex b/ib/linalg/01_vector_spaces_and_linear_dependence.tex index 8b57857..843a6cb 100644 --- a/ib/linalg/01_vector_spaces_and_linear_dependence.tex +++ b/ib/linalg/01_vector_spaces_and_linear_dependence.tex @@ -258,9 +258,7 @@ \subsection{Bases} v = \sum_{i=1}^n \lambda_i v_i, \forall i, \lambda_i \in F \] \end{lemma} -\begin{remark} - In the above definition, we call \( (\lambda_1, \dots, \lambda_n) \) the \textit{coordinates} of \( v \) in the basis \( (v_1, \dots, v_n) \). -\end{remark} +In the above definition, we call \( (\lambda_1, \dots, \lambda_n) \) the \textit{coordinates} of \( v \) in the basis \( (v_1, \dots, v_n) \). \begin{proof} Suppose \( (v_1, \dots, v_n) \) is a basis of \( V \). Then \( \forall v \in V \) there exists \( \lambda_1, \dots, \lambda_n \in F \) such that @@ -350,7 +348,8 @@ \subsection{Dimensionality of sums} \end{proposition} \begin{proof} Consider a basis \( (v_1, \dots, v_n) \) of the intersection. - Extend this basis to a basis \( (v_1, \dots, v_n, u_1, \dots, u_m) \) of \( U \) and \( (v_1, \dots, v_n, w_1, \dots, w_k) \) of \( W \). + Extend this basis to a basis + \[ (v_1, \dots, v_n, u_1, \dots, u_m) \text{ of } U;\quad (v_1, \dots, v_n, w_1, \dots, w_k) \text{ of } W \] Then, we will show that \( (v_1, \dots, v_n, u_1, \dots, u_m, w_1, \dots, w_k) \) is a basis of \( \dim_F (U + W) \), which will conclude the proof. Indeed, since any component of \( U + W \) can be decomposed as a sum of some element of \( U \) and some element of \( W \), we can add their decompositions together. 
Now we must show that this new basis is free. @@ -371,7 +370,8 @@ \subsection{Dimensionality of sums} \end{proposition} \begin{proof} Let \( (u_1, \dots, u_\ell) \) be a basis of \( U \). - We extend this basis to a basis of \( V \): \( (u_1, \dots, u_\ell, w_{\ell + 1}, \dots, w_n) \). + We extend this basis to a basis of \( V \), giving + \[ (u_1, \dots, u_\ell, w_{\ell + 1}, \dots, w_n) \] We claim that \( (w_{\ell + 1} + U, \dots, w_n + U) \) is a basis of the vector space \( V / U \). % exercise. \end{proof} diff --git a/ib/linalg/02_linear_maps.tex b/ib/linalg/02_linear_maps.tex index 74cedb6..ded6809 100644 --- a/ib/linalg/02_linear_maps.tex +++ b/ib/linalg/02_linear_maps.tex @@ -429,8 +429,8 @@ \subsection{Change of basis} & = Q [\alpha]_{B',C'} [v]_{B'} \\ [\alpha(v)]_C & = [\alpha]_{B,C} [v]_B \\ & = AP[v]_{B'} \\ - \therefore \forall v,\ QA[v]_{B'} & = AP[v]_{B'} \\ - \therefore QA & = AP + \therefore\ \forall v,\ QA[v]_{B'} & = AP[v]_{B'} \\ + \therefore\ QA & = AP \end{align*} as required. \end{proof} diff --git a/ib/linalg/03_dual_spaces.tex b/ib/linalg/03_dual_spaces.tex index 51ded42..65608a7 100644 --- a/ib/linalg/03_dual_spaces.tex +++ b/ib/linalg/03_dual_spaces.tex @@ -40,7 +40,7 @@ \subsection{Dual spaces} For all \( i \), \begin{align*} \sum_{j=1}^n \lambda_j \varepsilon_j & = 0 \\ - \therefore \qty( \sum_{j=1}^n \lambda_j \varepsilon_j ) e_i & = 0 \\ + \therefore\ \qty( \sum_{j=1}^n \lambda_j \varepsilon_j ) e_i & = 0 \\ \sum_{j=1}^n \lambda_j \varepsilon_j(e_i) & = 0 \\ \lambda_i & = 0 \end{align*} diff --git a/ib/markov/02_elementary_properties.tex b/ib/markov/02_elementary_properties.tex index 19654b1..a25f400 100644 --- a/ib/markov/02_elementary_properties.tex +++ b/ib/markov/02_elementary_properties.tex @@ -415,9 +415,11 @@ \subsection{Strong Markov property} h_2 = \psub{2}{T_0 < \infty, T_1 < \infty} = \psub{2}{T_0 < \infty \mid T_1 < \infty} \psub{2}{T_1 < \infty} \] Note that - \[ - \psub{2}{T_0 < \infty \mid T_1 < \infty} = \psub{2}{T_1 + \widetilde T_0 < \infty \mid T_1 < \infty} = \psub{2}{\widetilde T_0 < \infty \mid T_1 < \infty} = \psub{1}{T_0 < \infty} - \] + \begin{align*} + \psub{2}{T_0 < \infty \mid T_1 < \infty} &= \psub{2}{T_1 + \widetilde T_0 < \infty \mid T_1 < \infty} \\ + &= \psub{2}{\widetilde T_0 < \infty \mid T_1 < \infty} \\ + &= \psub{1}{T_0 < \infty} + \end{align*} But \( \psub{2}{T_1 < \infty} = \psub{1}{T_0 < \infty} \), so \[ h_2 = \psub{2}{T_1 < \infty} \psub{1}{T_0 < \infty} diff --git a/ib/methods/02_sturm_liouville_theory.tex b/ib/methods/02_sturm_liouville_theory.tex index 1aba43b..18c1b25 100644 --- a/ib/methods/02_sturm_liouville_theory.tex +++ b/ib/methods/02_sturm_liouville_theory.tex @@ -141,7 +141,7 @@ \subsection{Real eigenvalues} We can in fact show that (for a second-order equation) it is always possible to take linear combinations of eigenfunctions such that the result is real, for example in the exponential form of the Fourier series. Hence, we can assume that \( y_n \) is real. We can further prove that the regular Sturm--Liouville problem must have simple (non-degenerate) eigenvalues \( \lambda_n \), by considering two possible eigenfunctions \( u, v \) for the same \( \lambda \), and using the expression for self-adjointness. - We find \( u \mathcal L v - (\mathcal L u) v = [-p(uv' - u'v)]' \) which contains the Wronskian. + We find \( u \mathcal L v - (\mathcal L u) v = [-p(uv' - u'v)]' \) which contains the Wro\'nskian. We can integrate and impose homogeneous boundary conditions to get the required result.
% exercise \end{proof} diff --git a/ib/methods/04_green_s_functions.tex b/ib/methods/04_green_s_functions.tex index 3fab445..c8f8f36 100644 --- a/ib/methods/04_green_s_functions.tex +++ b/ib/methods/04_green_s_functions.tex @@ -179,8 +179,8 @@ \subsection{Motivation for Green's functions} \begin{align*} 0 & = T (\sin \theta_1 + \sin \theta_2) - \delta m g \\ & = T\qty(\frac{-y_i}{\xi_i} + \frac{-y_i}{1-\xi_i}) - \delta m g \\ - \therefore -T\qty(y_i(1-\xi_i) + y_i \xi_i) & = \delta m g \xi_i(1-\xi_i) \\ - \therefore y_i(\xi_i) & = \frac{-\delta m g}{T} \xi_i (1-\xi_i) + \therefore\ -T\qty(y_i(1-\xi_i) + y_i \xi_i) & = \delta m g \xi_i(1-\xi_i) \\ + \therefore\ y_i(\xi_i) & = \frac{-\delta m g}{T} \xi_i (1-\xi_i) \end{align*} So the solution is \[ @@ -339,7 +339,7 @@ \subsection{Solving boundary value problems} -D\qty[\sinh((1-\xi) + \xi)] & = \sinh \xi \\ -D\sinh 1 & = \sinh \xi \\ D & = \frac{\sinh \xi}{\sinh 1} \\ - \therefore C & = \frac{-\sinh(1-\xi)}{\sinh 1} + \therefore\ C & = \frac{-\sinh(1-\xi)}{\sinh 1} \end{align*} Therefore, \[ diff --git a/ib/quantum/02_wavefunctions.tex b/ib/quantum/02_wavefunctions.tex index 698a3e8..7e6f633 100644 --- a/ib/quantum/02_wavefunctions.tex +++ b/ib/quantum/02_wavefunctions.tex @@ -92,7 +92,7 @@ \subsection{Inner product} \begin{align*} \int_{\mathbb R^3} \abs{\psi}^2 \dd{V} & \leq N_1; \\ \int_{\mathbb R^3} \abs{\phi}^2 \dd{V} & \leq N_2; \\ - \therefore \int_{\mathbb R^3} \abs{\psi \phi} \dd{V} & \leq \sqrt{\int_{\mathbb R^3} \abs{\psi}^2 \dd{V} \cdot \int_{\mathbb R^3} \abs{\phi}^2 \dd{V}} < \infty \\ + \therefore\ \int_{\mathbb R^3} \abs{\psi \phi} \dd{V} & \leq \sqrt{\int_{\mathbb R^3} \abs{\psi}^2 \dd{V} \cdot \int_{\mathbb R^3} \abs{\phi}^2 \dd{V}} < \infty \\ \end{align*} \subsection{Normalisation} @@ -175,7 +175,7 @@ \subsection{Normalisation and time evolution} \begin{align*} \pdv{\psi}{t} & = \frac{i \hbar}{2m} \laplacian \psi^2 + \frac{i}{k} U \psi; \\ \pdv{\psi^\star}{t} & = - \frac{i \hbar}{2m} \laplacian \psi^2 - \frac{i}{k} U \psi^\star \\ - \therefore \pdv{\abs{\psi}^2}{t} & = \div[\frac{i\hbar}{2m}\qty(\psi^\star \grad{\psi} - \psi \grad{\psi^\star})] + \therefore\ \pdv{\abs{\psi}^2}{t} & = \div[\frac{i\hbar}{2m}\qty(\psi^\star \grad{\psi} - \psi \grad{\psi^\star})] \end{align*} Finally, \[ diff --git a/ib/quantum/04_one_dimensional_solutions_to_the_schr_odinger_equation.tex b/ib/quantum/04_one_dimensional_solutions_to_the_schr_odinger_equation.tex index 4eba3a6..0912e36 100644 --- a/ib/quantum/04_one_dimensional_solutions_to_the_schr_odinger_equation.tex +++ b/ib/quantum/04_one_dimensional_solutions_to_the_schr_odinger_equation.tex @@ -213,9 +213,10 @@ \subsection{Gaussian wavepacket} \] produces a solution called the Gaussian wavepacket. Substituting into the above, -\[ - \psi_{\text{GP}}(x,t) = \int_0^\infty \exp[-\frac{\sigma}{2}(k-k_0)^2] \psi_k(x,t) \dd{k} = \int_0^\infty \exp[F(k)] \dd{k};\quad F(k) = -\frac{\sigma}{2}(k-k_0)^2 + ikx - i \frac{\hbar k^2}{2m} t -\] +\begin{align*} + \psi_{\text{GP}}(x,t) &= \int_0^\infty \exp[-\frac{\sigma}{2}(k-k_0)^2] \psi_k(x,t) \dd{k} = \int_0^\infty \exp[F(k)] \dd{k} \\ + F(k) &= -\frac{\sigma}{2}(k-k_0)^2 + ikx - i \frac{\hbar k^2}{2m} t +\end{align*} We can rewrite this as \[ F(k) = -\frac{1}{2}\qty(\sigma + \frac{i \hbar t}{m}) k^2 + (k_0 \sigma + ix)k - \frac{\sigma}{2} k_0^2 @@ -313,7 +314,7 @@ \subsection{Scattering states} \] By definition, \( R + T = 1 \). \end{definition} -In practice, working with Gaussian packets is mathematically challenging (but not impossible). 
+In practice, working with Gaussian packets is mathematically challenging, although not impossible. The beam interpretation, by allowing us to use non-normalisable stationary state wavefunctions, greatly simplifies the computation. \subsection{Scattering off potential step} diff --git a/ib/quantum/05_operators_and_measurements.tex b/ib/quantum/05_operators_and_measurements.tex index 51ff43f..10a9474 100644 --- a/ib/quantum/05_operators_and_measurements.tex +++ b/ib/quantum/05_operators_and_measurements.tex @@ -158,7 +158,7 @@ \subsection{Commutators} \begin{align*} \hat x \hat p \psi & = x \qty(-i\hbar \pdv{x}) \psi(x) = -i\hbar x \pdv{\psi}{x} \\ \hat p \hat x \psi & = \qty(-i\hbar \pdv{x}) x \psi(x) = -i \hbar \psi - i \hbar x \pdv{\psi}{x} \\ - \therefore \qty[\hat x, \hat p] \psi & = i \hbar \psi + \therefore\ \qty[\hat x, \hat p] \psi & = i \hbar \psi \end{align*} Hence, \[ @@ -340,7 +340,7 @@ \subsection{Generalised uncertainty theorem} \qty(\Delta_\psi \hat A')^2 \qty(\Delta_\psi \hat B')^2 & \geq \abs{\inner{\psi, \frac{1}{2} \qty[\hat A', \hat B'] \psi} + \inner{\psi, \frac{1}{2} \qty{\hat A', \hat B'} \psi}}^2 \\ & = \frac{1}{4} \abs{\inner{\psi, \qty[\hat A', \hat B'] \psi}}^2 + \frac{1}{4} \abs{\inner{\psi, \qty{\hat A', \hat B'}\psi}}^2 \\ & \geq \frac{1}{4} \abs{\inner{\psi, \qty{\hat A', \hat B'} \psi}}^2 \\ - \therefore \qty(\Delta_\psi \hat A')^2 \qty(\Delta_\psi \hat B')^2 & \geq \frac{1}{4} \abs{\inner{\psi, \qty{\hat A, \hat B} \psi}}^2 + \therefore\ \qty(\Delta_\psi \hat A')^2 \qty(\Delta_\psi \hat B')^2 & \geq \frac{1}{4} \abs{\inner{\psi, \qty{\hat A, \hat B} \psi}}^2 \end{align*} \end{proof} diff --git a/ib/quantum/07_solution_to_hydrogen_atom.tex b/ib/quantum/07_solution_to_hydrogen_atom.tex index 20986ae..021b34b 100644 --- a/ib/quantum/07_solution_to_hydrogen_atom.tex +++ b/ib/quantum/07_solution_to_hydrogen_atom.tex @@ -133,7 +133,8 @@ \subsection{Angular momentum} Each component \( \hat L_i \) is a Hermitian operator. 
Note, \begin{align*} - \qty[\hat L_1, \hat L_2] \psi(x_1, x_2, x_3) & = -\hbar^2\qty[\qty(x_2 \pdv{x_3} - x_3 \pdv{x_2})\qty(x_3 \pdv{x_1} - x_1 \pdv{x_3}) - \qty(x_3 \pdv{x_1} - x_1 \pdv{x_3})\qty(x_2 \pdv{x_3} - x_3 \pdv{x_2})] \psi \\ + \qty[\hat L_1, \hat L_2] \psi(x_1, x_2, x_3) & = -\hbar^2\Bigg[\qty(x_2 \pdv{x_3} - x_3 \pdv{x_2})\qty(x_3 \pdv{x_1} - x_1 \pdv{x_3}) \\ + & - \qty(x_3 \pdv{x_1} - x_1 \pdv{x_3})\qty(x_2 \pdv{x_3} - x_3 \pdv{x_2})\Bigg] \psi \\ & = -\hbar^2 \qty[ x_2 \pdv{x_1} - x_1 \pdv{x_2} ] \psi \\ & = -i \hbar \hat L_3 \psi \end{align*} @@ -253,9 +254,10 @@ \subsection{Full solution to hydrogen atom} \hat L^2 (R(r) Y_{\ell,m}(\theta,\phi)) = R(r) \hbar^2 \ell (\ell+1)Y_{\ell,m}(\theta, \phi) \] Substituting into the TISE, we find -\[ - -\frac{\hbar^2}{2m_e} \qty( \pdv[2]{R}{r} + \frac{2}{r} \pdv{R}{r} ) Y_{\ell,m}(\theta, \phi) + \frac{\hbar^2}{2 m_e r^2} \ell(\ell+1) R(r)Y_{\ell,m}(\theta, \phi) - \frac{e^2}{4 \pi \varepsilon_0 r} R(r)Y_{\ell,m}(\theta,\phi) = E R(r)Y_{\ell,m}(\theta,\phi) -\] +\begin{align*} + & -\frac{\hbar^2}{2m_e} \qty( \pdv[2]{R}{r} + \frac{2}{r} \pdv{R}{r} ) Y_{\ell,m}(\theta, \phi) + \frac{\hbar^2}{2 m_e r^2} \ell(\ell+1) R(r)Y_{\ell,m}(\theta, \phi) - \frac{e^2}{4 \pi \varepsilon_0 r} R(r)Y_{\ell,m}(\theta,\phi) \\ + & = E R(r)Y_{\ell,m}(\theta,\phi) +\end{align*} Cancelling the spherical harmonic, \[ -\frac{\hbar^2}{2m_e} \qty( \pdv[2]{R}{r} + \frac{2}{r} \pdv{R}{r} ) + \underbrace{\qty(\frac{\hbar^2}{2 m_e r^2} \ell(\ell+1) - \frac{e^2}{4 \pi \varepsilon_0 r})}_{U_{\mathrm{eff}} = \text{ effective potential}} R(r) = E R(r) @@ -293,9 +295,10 @@ \subsection{Full solution to hydrogen atom} g(r) = r^\ell \sum_{n=0}^\infty a_n r^n \] We can evaluate the recurrence relation between the coefficients as before to find -\[ - \sum_{n=0}^\infty \qty[(n+\ell)(n+\ell - 1)a_n + 2(n+1)a_n - \ell(\ell+1)a_n - 2 \nu(n+\ell - 1) a_{n-1} + (\beta - 2\nu) a_{n-1}] r^{\ell + n - 2} = 0 -\] +\begin{align*} + \sum_{n=0}^\infty [&(n+\ell)(n+\ell - 1)a_n + 2(n+1)a_n - \ell(\ell+1)a_n \\ + &- 2 \nu(n+\ell - 1) a_{n-1} + (\beta - 2\nu) a_{n-1}] r^{\ell + n - 2} = 0 +\end{align*} which gives \[ a_n = \frac{2\nu(n+\ell) - \beta}{n(n+2\ell - 1)} diff --git a/ib/vp/05_noether_s_theorem.tex b/ib/vp/05_noether_s_theorem.tex index 5b72516..691c180 100644 --- a/ib/vp/05_noether_s_theorem.tex +++ b/ib/vp/05_noether_s_theorem.tex @@ -26,7 +26,7 @@ \subsection{Statement and proof} & = \eval{\pdv{f}{y_i} \dv{Y_i}{s}}_{s=0} + \eval{\pdv{f}{y_i'}\pdv{Y_i'}{s}}_{s=0} \\ & = \eval{\qty[\dv{x} \qty(\pdv{f}{y_i'})\dv{Y_i}{s} + \pdv{f}{y_i'}\dv{x}\qty(\dv{Y_i}{s})]}_{s=0} \\ & = \dv{x}\eval{\qty[\pdv{f}{y_i'}\pdv{Y_i}{s}]}_{s=0} \\ - \therefore \text{constant} & = \pdv{f}{y_i'}\pdv{Y_i}{s} + \therefore\ \text{constant} & = \pdv{f}{y_i'}\pdv{Y_i}{s} \end{align*} \end{proof} @@ -40,7 +40,7 @@ \subsection{Conservation of momentum} \begin{align*} Y = y + s & \implies Y' = y' \\ Z = z + s & \implies Z = z' \\ - \therefore V(Y-Z) = V(y-z) & \implies \dv{s} f = 0 + \therefore\ V(Y-Z) = V(y-z) & \implies \dv{s} f = 0 \end{align*} Then from Noether's theorem, \[