\documentclass[12pt]{article}
\usepackage{latexsym}
\usepackage{amssymb,amsmath}
\usepackage[pdftex]{graphicx}
\usepackage{listings}
\usepackage{courier}
\usepackage{color}
\usepackage[usenames,dvipsnames]{xcolor}
\usepackage{enumerate}
\usepackage{endnotes}
\usepackage{extpfeil}
\usepackage{stackrel}
\usepackage{bbm}
\usepackage{tikz}
\usepackage[margin=2cm]{geometry}
\usepackage{hyperref}
\hypersetup{colorlinks=true,urlcolor=MidnightBlue,citecolor=PineGreen,linkcolor=BrickRed}
\lstset{
basicstyle=\small\ttfamily,
keywordstyle=\color{blue},
language=python,
xleftmargin=16pt,
}
\newtheorem{thm}{Theorem}[section]
\newtheorem{ithm}{Theorem}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{defi}[thm]{Definition}
\newtheorem{example}[thm]{Example}
\newtheorem{exercise}[thm]{Exercise}
\newtheorem{rem}[thm]{Remark}
\def\B{{\mathbb B}}
\def\C{{\mathbb C}}
\def\D{{\mathbb D}}
\def\Fp{{\mathbb F}_p}
\def\F{{\mathbb F}}
\def\H{{\mathbb H}}
\def\M{{\mathbb M}}
\def\N{{\mathbb N}}
\def\O{{\mathcal O}}
\def\0{{\mathbb 0}}
\def\P{{{\mathbb P}}}
\def\Q{{\mathbb Q}}
\def\R{{\mathbb R}}
\def\T{{\mathbb T}}
\def\Z{{\mathbb Z}}
\newcommand{\sol}{_{a^p,b^p,c^p}}
\newcommand{\bound}{\partial}
\newcommand{\la}[1]{\mathfrak{#1}}
\newcommand{\im}{\text{Im} \hspace{0.1em} }
\newcommand{\ann}{\text{Ann} \hspace{0.1em} }
\newcommand{\rank}{\text{rank} \hspace{0.1em} }
\newcommand{\coker}[1]{\text{coker}\hspace{0.1em}{#1}}
\newcommand{\sgn}{\text{sgn}}
\newcommand{\lcm}{\text{lcm}}
\newcommand{\re}{\text{Re} \hspace{0.1em} }
\newcommand{\ext}[1]{\text{Ext}(#1)}
\newcommand{\Hom}[1]{\text{Hom}(#1)}
\newcommand{\End}[1]{\text{End}(#1)}
\newcommand{\bs}{\setminus}
\newcommand{\rpp}[1]{\mathbb{R}\text{P}^{#1}}
\newcommand{\cpp}[1]{\mathbb{C}\text{P}^{#1}}
\newcommand{\tr}{\text{tr}\hspace{0.1em} }
\newcommand{\inner}[1]{\langle {#1}\rangle}
\newcommand{\tensor}{\otimes}
\newcommand{\Cl}{\text{Cl}}
\renewcommand{\sp}[1]{\text{Sp}_{#1}}
\newcommand{\gl}[1]{\text{GL}_{#1}}
\newcommand{\pgl}[1]{\text{PGL}_{#1}}
\renewcommand{\sl}[1]{\text{SL}_{#1}}
\newcommand{\so}[1]{\text{SO}_{#1}}
\newcommand{\SO}{\text{SO}}
\newcommand{\pso}[1]{\text{PSO}_{#1}}
\renewcommand{\o}[1]{\text{O}_{#1}}
\newcommand{\psp}[1]{\text{PSp}_{#1}}
\newcommand{\Span}{\rm Span}
\newcommand{\kron}[2]{\bigl(\frac{#1}{#2}\bigr)}
\newcommand{\leg}[2]{\Biggl(\frac{#1}{#2}\Biggr)}
\DeclareSymbolFont{bbold}{U}{bbold}{m}{n}
\DeclareSymbolFontAlphabet{\mathbbold}{bbold}
\begin{document}
\begin{center}
{\bf {\large{Some Linear Algebra Problems}}}\\
\smallskip
{ \bf {\large{SOLUTIONS}}} \\
Isabel Vogt\\
Last Edited: \today \\
\end{center}
Most of these problems were written for my students in Math 23a/b at Harvard in 2011/2012 and 2012/2013.
\begin{enumerate}
\item Consider a parallelogram spanned by vectors $\vec{{v}}$ and $\vec{{w}}$.
\begin{center}
\begin{tikzpicture}[scale=.7]
\draw[->](0,0)--(1,2)node[left]{$\vec{v}$};
\draw[->](0,0)--(3,0.5)node[below]{$\vec{w}$};
\draw[->] (1,2)--(4,2.5);
\draw[->](3,0.5)--(4,2.5);
\end{tikzpicture}
\end{center}
\begin{enumerate}
\item Prove the ``parallelogram law,'' which says that the sum of the squares of the lengths of the diagonals of the parallelogram is equal to $2(|\vec{v}|^2+|\vec{w}|^2)$.\\
--------
\textbf{Solution}:
We label the diagonals as:
\begin{center}
\begin{tikzpicture}
\draw[->](0,0)--(1,2)node[left]{$\vec{v}$};
\draw[->](0,0)--(3,0.5)node[below]{$\vec{w}$};
\draw[->] (1,2)--(4,2.5);
\draw[->](3,0.5)--(4,2.5);
\draw[->] (0,0)--(4,2.5);
\draw(3,1.5)node{$\vec{v}+\vec{w}$};
\draw[->](3,0.5)--(1,2);
\draw(1.2,1.4)node{$\vec{v}-\vec{w}$};
\end{tikzpicture}
\end{center}
Then, expanding the dot products,
\[|\vec{v}+\vec{w}|^2+|\vec{v}-\vec{w}|^2 = (\vec{v}+\vec{w})\cdot (\vec{v}+\vec{w})+(\vec{v}-\vec{w}) \cdot (\vec{v}-\vec{w})=2|\vec{v}|^2+2|\vec{w}|^2+2(\vec{v} \cdot \vec{w})-2(\vec{v} \cdot \vec{w})=2(|\vec{v}|^2+|\vec{w}|^2).\]
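As a quick numerical sanity check (a sketch assuming NumPy is available; the test vectors are arbitrary choices):
\begin{lstlisting}
import numpy as np

v = np.array([1.0, 2.0])   # arbitrary test vectors
w = np.array([3.0, 0.5])

lhs = np.linalg.norm(v + w)**2 + np.linalg.norm(v - w)**2
rhs = 2 * (np.linalg.norm(v)**2 + np.linalg.norm(w)**2)
assert np.isclose(lhs, rhs)  # the parallelogram law checks out
\end{lstlisting}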
\item Use vectors to prove that the diagonals of a rhombus are perpendicular.
--------
\textbf{Solution}:
The diagonals are $\vec{v}+\vec{w}$ and $\vec{v}-\vec{w}$, so let's use the dot product to test for orthogonality:
$$(\vec{v}+\vec{w}) \cdot (\vec{v}-\vec{w}) = |\vec{v}|^2 -|\vec{w}|^2 = 0$$
The final equality holds because in a rhombus $\vec{v}$ and $\vec{w}$ have the same length, so $|\vec{v}|^2=|\vec{w}|^2$.\\
\end{enumerate}
\item {\bf T/F}:
\begin{enumerate}
\item For all $\vec{v} \in \mathbb{R}^3$, the set of vectors $\vec{u} \in \mathbb{R}^3$ such that $\vec{u} \times \vec{v}=\vec{0}$ forms a subspace of $\mathbb{R}^3$.
--------
\textbf{Solution}:
{\bf TRUE}\\
We know that for a given nonzero $\vec{v} \in \mathbb{R}^3$, the set of $\vec{u}$ such that $\vec{u} \times \vec{v}=\vec{0}$ is exactly the set of scalar multiples $\alpha\vec{v}$ with $\alpha \in \mathbb{R}$. Furthermore $\mathcal{V}=\{\alpha\vec{v}\,|\,\alpha \in \mathbb{R}\}$ is a subspace of $\mathbb{R}^3$: it contains $\vec{0}$, it is closed under addition since $\alpha_1\vec{v}+\alpha_2\vec{v}=(\alpha_1+\alpha_2)\vec{v}$ with $(\alpha_1+\alpha_2) \in \mathbb{R}$, and it is closed under scalar multiplication since $c(\alpha\vec{v})=(c\alpha)\vec{v}$. (In the edge case $\vec{v}=\vec{0}$, every $\vec{u}$ satisfies the equation, and the resulting set is all of $\mathbb{R}^3$, which is also a subspace.)
\item The set of all invertible $n \times n$ real matrices forms a subspace of $\mathbb{R}^{n^2}$.
--------
\textbf{Solution}:
{\bf FALSE}\\
Consider $[I_n] \in \mathcal{M}$ (the invertible real $n \times n$ matrices). If $\mathcal{M}$ were a subspace of $\mathbb{R}^{n^2}$ then $0 \cdot [I_n] = [0]$ would have to lie in $\mathcal{M}$. But $[0]$ is not an invertible matrix. Thus $\mathcal{M}$ is not closed under scalar multiplication and hence not a subspace.
\end{enumerate}
\item A linear transformation $T:\mathbb{R}^3 \to \mathbb{R}^3$ sends the first standard basis vector $\vec{e}_1$ to the vector $\vec{a}_1$. Similarly $T(\vec{e}_2)=\vec{a}_2$ and $T(\vec{e}_3)=\vec{a}_3$. Furthermore $\vec{a}_1+\vec{a}_2+\vec{a}_3=\vec{0}$. Does there exist a unique $S=T^{-1}$ such that $S\circ T=T\circ S=\mathbb{I}$? Provide a proof.
--------
\textbf{Solution}:
We can write $T$ as a $3 \times 3$ matrix in the form $[\vec{a}_1 \ \vec{a}_2 \ \vec{a}_3]$. We know that $T$ cannot be invertible if $\det{T}=0$. To examine this: $\det{T}=\vec{a}_1 \cdot (\vec{a}_2 \times \vec{a}_3)$. But we can write $\vec{a}_1=-\vec{a}_2-\vec{a}_3$ by the relation above. So $$\det{T}=(-\vec{a}_2-\vec{a}_3) \cdot (\vec{a}_2 \times \vec{a}_3)$$
$$=-\vec{a}_2 \cdot (\vec{a}_2 \times \vec{a}_3) -\vec{a}_3 \cdot (\vec{a}_2 \times \vec{a}_3)$$
$$=-(\vec{a}_2 \times \vec{a}_2) \cdot \vec{a}_3 -\vec{a}_2 \cdot (\vec{a}_3 \times \vec{a}_3)=0,$$
since the cross product of any vector with itself vanishes.
Thus $T$ is not invertible and no such $S$ exists.
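If you would rather let a computer push the symbols around, here is the same computation as a sketch in SymPy (the component symbols are arbitrary; the determinant vanishes identically, whatever $\vec{a}_2$ and $\vec{a}_3$ are):
\begin{lstlisting}
import sympy as sp

a2 = sp.Matrix(sp.symbols('x1 x2 x3'))  # generic column vectors
a3 = sp.Matrix(sp.symbols('y1 y2 y3'))
a1 = -a2 - a3                           # the given relation

T = sp.Matrix.hstack(a1, a2, a3)
assert sp.expand(T.det()) == 0          # T is singular identically
\end{lstlisting}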
\item {\bf T/F}: The linear transformation $R_{\theta}:\R^2 \to \R^2$ given by rotation through angle $\theta$ about the origin has at least one real eigenvalue.
------------------------\\
{\bf Solution: FALSE}
For a general $\theta$ this is false. We can think about this either geometrically or algebraically. Geometrically, we know that $R_{\theta}$ rotates the entire plane, so only the origin is fixed (unless $\theta=\pi$ or $0$). Thus no line through the origin is brought into itself; the only subspace trivially preserved is the entire plane, which cannot be decomposed into a direct sum of two preserved 1-dimensional subspaces. Thus $R_{\theta}$ does not have a real eigenvalue. Algebraically, we can compute the determinant of $\lambda I-R_{\theta}$ as
$$\lambda I-R_{\theta} = \begin{bmatrix} \lambda-\cos{\theta} & -\sin{\theta} \\ \sin{\theta} & \lambda - \cos{\theta} \end{bmatrix}$$
So $p(\lambda)=\det(\lambda I-R_{\theta})=\lambda^2-2\lambda\cos\theta+\cos^2\theta+\sin^2\theta=\lambda^2-2\lambda\cos\theta+1$. To see if this has real roots, we take the discriminant $\Delta(p)=4\cos^2\theta-4$. As $|\cos\theta|\leq 1$, $\Delta(p)<0$ unless $\theta=0,\pi$, in which case $\Delta(p)=0$ and every vector in $\R^2$ is an eigenvector, with eigenvalue $1$ (for $\theta=0$) or $-1$ (for $\theta=\pi$). Thus, in general, no real roots exist.
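A numerical illustration (a sketch assuming NumPy; the angle is an arbitrary generic choice):
\begin{lstlisting}
import numpy as np

theta = 0.7   # a generic angle, not 0 or pi
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])

eigvals = np.linalg.eigvals(R)
print(eigvals)   # cos(theta) +/- i sin(theta): a complex conjugate pair
\end{lstlisting}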
\item {\bf T/F}: If $T: \mathbb{R}^n \rightarrow \mathbb{R}^n$ has $n$ linearly independent eigenvectors, then $T$ is invertible.
------------------------\\
{\bf Solution: FALSE}
Consider $T$ the zero matrix $[0]_n$. The $n$ standard basis vectors form an eigenbasis for $T$ with eigenvalue uniformly $0$, however $[0]_n$ is not invertible.
\item {\bf T/F}: Given an orthonormal basis $\{\vec{v}_1,...,\vec{v}_n\}$ for a vector space $V$, if $\vec{w} \in V$ is given in terms of the standard basis, then $\vec{w}=c_1\vec{v}_1+...+c_n\vec{v}_n$ where $c_i=\vec{w} \cdot \vec{v}_i$.
------------------------\\
{\bf Solution: TRUE}
Well, let's test it. We know that as $\{\vec{v}_1,...,\vec{v}_n\}$ forms a basis for $V$ we can write $\vec{w}=c_1\vec{v}_1+...+c_n\vec{v}_n$ for some scalars $c_1,...,c_n$. Now $$\vec{w} \cdot \vec{v}_i=c_1\vec{v}_1\cdot \vec{v}_i+...+c_n\vec{v}_n\cdot \vec{v}_i$$
As the set $\{\vec{v}_1,..,\vec{v}_n\}$ is orthonormal, $\vec{v}_i \cdot \vec{v}_j=\delta_{ij}$, where $\delta_{ij}$ is the Kronecker delta: $\delta_{ij}=0$ for $i \neq j$ and $\delta_{ij}=1$ for $i=j$. Thus in the above sum all the dot products are $0$ except $c_i\vec{v}_i \cdot \vec{v}_i=c_i$. So $c_i=\vec{w} \cdot \vec{v}_i$.
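To see the formula in action, here is a small check (a sketch assuming NumPy; the orthonormal basis is a rotated copy of the standard basis of $\R^2$):
\begin{lstlisting}
import numpy as np

t = 0.3   # rotate the standard basis by an arbitrary angle
v1 = np.array([np.cos(t), np.sin(t)])
v2 = np.array([-np.sin(t), np.cos(t)])

w = np.array([2.0, -1.0])
c1, c2 = w @ v1, w @ v2               # coefficients via dot products
assert np.allclose(w, c1*v1 + c2*v2)  # w is recovered exactly
\end{lstlisting}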
\item {\bf T/F}: If $A: \mathbb{R}^n \rightarrow \mathbb{R}^n$ and $B: \mathbb{R}^n \rightarrow \mathbb{R}^n$ are two linear transformations such that $A=C^{-1} \circ B \circ C$ for some invertible $C$, then $\dim \ker(A) = \dim \ker(B)$.
------------------------\\
{\bf Solution: TRUE}
The short answer is true, as $C$ is just a change of basis, and thus $A$ is isomorphic to $B$. Isomorphic transformations have the same nullity. The long answer is to think of $\ker B$ as a subspace of $\R^n$. Then by Hubbard theorem 2.4.19, the dimension of $\ker B$ is well-defined, i.e. it is the same in the basis for $A$. The very very short (intuitive) answer is that if this were false the world would be pretty messed up (change your basis and change the dimension of the kernel?! Blasphemy!).
\item {\bf T/F}: Each of the following forms a vector space over $\mathbb{R}$ (evaluate them separately):
\begin{enumerate}
\item $D[\pi,2\pi]$, the space of discontinuous real-valued functions $$d:[\pi, 2\pi] \rightarrow \mathbb{R}$$ under the composition law $(d_1+d_2)(x)=d_1(x)+d_2(x)$ and scaling law $(r d_1)(x)=r \cdot d_1(x) \ \forall r \in \mathbb{R}$
------------------------\\
{\bf Solution: FALSE}
For $D[\pi, 2\pi]$ to be a vector space, we would need $$a\vec{v} \in D[\pi, 2\pi] \quad \forall\, \vec{v} \in D[\pi, 2\pi],\ a \in \mathbb{R}$$
By that logic $0\cdot\vec{v} \in D[\pi, 2\pi]$, but the zero function is continuous, and so not in $D[\pi, 2\pi]$. Thus $D[\pi, 2\pi]$ is not a vector space.
\item The space of invertible linear transformations $T:\R^n \to \R^n$ under the composition law $(T_1+T_2)(\vec{x})=T_1(\vec{x})+T_2(\vec{x})$ and the obvious scaling law by elements of $\R$
------------------------\\
{\bf Solution: FALSE}
Again, the zero transformation would have to be in this space, but it is not invertible.
\item The set of symmetric matrices $A \in \text{Mat}(3 \times 3)$ with $\text{trace}(A)=0$.
------------------------\\
{\bf Solution: TRUE}
Checking scalar multiplication and vector addition, let's calculate
$$\text{tr}(cA_1+A_2)=c\text{tr}(A_1)+\text{tr}(A_2)\stackrel{\checkmark}{=}0$$
$$(cA_1+A_2)^{T}=cA_1^{T}+A_2^{T}\stackrel{\checkmark}{=}cA_1+A_2$$
So it is once again a symmetric traceless matrix.
\item Functions from a set $S=\{1,2,...,n\}$ to $\R$ under the obvious composition and scaling rules.
------------------------\\
{\bf Solution: TRUE}
This is quite obviously isomorphic to $\R^n$. We can send the first element $1$ of the set to anything in $\R$, the same for the second element, and so on up to the $n$th element. The vector of values $(f(1),...,f(n)) \in \R^n$ completely determines the function $f$, and addition and scaling of functions correspond exactly to addition and scaling in $\R^n$.
\item The set of real polynomials of degree less than or equal to 3 with a root at $+3$.
------------------------\\
{\bf Solution: TRUE}
Let's see, the $0$ polynomial satisfies this because if $p(x)=0$, then surely $p(3)=0$. Furthermore, it is closed under addition and scalar multiplication: if $r(x)$ and $q(x) \in V$, then $cr(3)+q(3)=0$ and thus $cr(x)+q(x)$ is also in $V$. So we're good! All that we are doing is putting one further constraint on our system, namely that evaluation at $+3$ gives $0$. This is isomorphic to a 3-dimensional subspace of $\R^4$ through the origin. As a side note, a basis could be $x-3$, $x^2-9$, $x^3-27$.
\end{enumerate}
\item Does row reduction preserve the kernel and image of a linear transformation $A:\R^n \to \R^m$? If yes, why? If no, what does it preserve?
------------------------\\
{\bf Solution:}
For the kernel, this first statement is actually true: if $A$ is the original transformation and $\tilde{A}$ the row reduced version, then I claim that $A$ and $\tilde{A}$ have the same kernel. This is a very cool result! Why is it true? Well, we have a theorem that tells us that the solutions $\vec{x}$ to $A \vec{x}=\vec{b}$ are unchanged by row reduction. That is, $\tilde{A}\vec{x}=\vec{b}'$, where $\vec{b}'$ is the vector after the row operations that reduced $A$. Now, vectors in the kernel are just solutions to $A\vec{v}=\vec{0}$. So these vectors $\vec{v}$ are also solutions to $\tilde{A}\vec{v}=\vec{0}'$. But what is $\vec{0}'$? Row operations only involve taking sums of elements \emph{within} a single column, multiplying by a scalar, or switching elements \emph{within} a column. In any case, none of these actions will change a column of zeros into anything else, so $\vec{0}'=\vec{0}$. And thus $\tilde{A}\vec{v}=\vec{0}$ for the same $\vec{v}$. Thus $A$ and $\tilde{A}$ have the same kernel. (To be perfectly rigorous, you should show that the kernels are subsets of each other: if $\vec{w}$ is in the kernel of $A$, then row reducing shows $\vec{w} \in \ker(\tilde{A})$, thus $\ker(A) \subset \ker(\tilde{A})$. How would you show the other direction?)
Now, this is \emph{not} true of the image, as that would mean every image was spanned by some set of $k$ standard basis vectors... no! What is preserved is the \emph{rank}, i.e. the number of independent vectors in the image, the dimension of the image. This must be preserved: row reduction preserves the shape of $A$ and, by the much stronger claim above, the kernel itself, hence the dimension of the kernel. Thus by rank--nullity, it must preserve the rank. \\
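Here is a check of both claims on a small example (a sketch assuming SymPy; \texttt{rref} returns the row reduced form, and the example matrix is an arbitrary rank-$1$ choice so that the images visibly differ):
\begin{lstlisting}
import sympy as sp

A = sp.Matrix([[1, 2, 3],
               [2, 4, 6]])
R, _ = A.rref()                     # row reduced form of A

for v in A.nullspace():             # every kernel vector of A ...
    assert R * v == sp.zeros(2, 1)  # ... is killed by R too
assert A.rank() == R.rank()         # the rank is preserved

print(A.columnspace())              # spanned by (1, 2)
print(R.columnspace())              # spanned by (1, 0): a different image
\end{lstlisting}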
\item Given that a linear transformation $T$ acting on a real vector space $V$ has at least one eigenvalue, what is the codomain of $T$? If $T$ has only one eigenvalue what can you say? If $T$ has an eigenbasis without distinct eigenvalues what can you say about the geometry of $T$? Say $T:\R^3 \to \R^3$, $|T| \neq 0$, but trace$(T)=0$. If $T$ does not have distinct eigenvalues but has an eigenbasis, what can you say about the eigenvalues of $T$?
------------------------\\
{\bf Solution:}
We know that for $T$ to have an eigenvalue (and thus an eigenvector) we need $T:V \rightarrow V$, so the codomain must also be $V$. If $T$ has only $1$ eigenvalue (but you don't know whether it has an eigenbasis) then you can't say that much, except that it preserves some 1-dim'l subspace (i.e. it takes a line into itself, if we think of $\R^n$) through the operation of scaling. However, if we know that it has an eigenbasis, then we know that if it has only one eigenvalue, it preserves all of $V$ by scaling. Thus $T=\lambda I$. If $T$ has an eigenbasis and $2$ repeated eigenvalues, it preserves a 2-dim'l space through scaling, etc. For $\R^3$ this looks like preserving a line and a plane if it has $2$ distinct e-values, preserving three lines if it has $3$ distinct e-values, and preserving all of $\R^3$ (i.e. acting as scaling) if it has only $1$ distinct e-value.
Now, for this last part, we need the theorem that trace$(T)$ is the sum of the eigenvalues of $T$. So if $T:\R^3 \to \R^3$ and tr$(T)=0$, but $|T| \neq 0$, then if $T$ has degeneracy in the eigenvalues, it must be that exactly $2$ are the same: if all $3$ were the same, the trace condition would force them all to be $0$, making $T$ the zero transformation, which has determinant $0$, contradicting $|T| \neq 0$. So from our trace condition, we know that the eigenvalues must be $\{\lambda, \lambda, -2\lambda\}$.
\item Let $V$ be an $n$-dimensional complex vector space. Consider two linear transformations $A: V \to V$ and $B:V \to V$. Prove that if $A$ and $B$ do not commute (i.e. if $A\circ B \neq B\circ A$) then there cannot exist a basis $\{v_i\}$ for $V$ which is simultaneously an eigenbasis for both $A$ and $B$. (\emph{Hint: proceed by contradiction.})
------------------------\\
{\bf Solution:}
Assume that there exists a basis $\{\vec{v}_1,...,\vec{v}_n\}$ for $V$ which is an eigenbasis of both $A$ and $B$, with $A\vec{v}_i=a_i\vec{v}_i$ and $B\vec{v}_i=b_i\vec{v}_i$. Then any $\vec{w} \in V$ can be written as a linear combination of these basis vectors as $$\vec{w}=c_1\vec{v}_1+...+c_n\vec{v}_n$$ Now consider the linear function $AB-BA$. We act on an arbitrary vector $\vec{w}$ as $$(AB-BA)\vec{w}= AB(c_1\vec{v}_1+...+c_n\vec{v}_n)-BA(c_1\vec{v}_1+...+c_n\vec{v}_n)$$
$$=A(b_1c_1\vec{v}_1+...+b_nc_n\vec{v}_n)-B(a_1c_1\vec{v}_1+...+a_nc_n\vec{v}_n)$$
$$=(a_1b_1c_1\vec{v}_1+...+a_nb_nc_n\vec{v}_n)-(b_1a_1c_1\vec{v}_1+...+b_na_nc_n\vec{v}_n)=0$$
As scalar multiplication is commutative, these two sums are equal and thus the entire action of $(AB-BA)$ on $\vec{w}$ is $0$. However, $\vec{w}$ was an arbitrary vector in $V$, so this must be true for all vectors in $V$. So $(AB-BA)$ is the zero transformation, i.e. $AB-BA=0 \Rightarrow AB=BA$. But this contradicts our assumption that they don't commute. Thus a simultaneous eigenbasis cannot exist.
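The contrapositive is easy to watch numerically: two matrices built from the same eigenbasis commute (a sketch assuming NumPy; the basis and eigenvalues are arbitrary choices):
\begin{lstlisting}
import numpy as np

P = np.array([[1.0, 1.0],
              [0.0, 1.0]])      # columns form a shared eigenbasis
Pinv = np.linalg.inv(P)
A = P @ np.diag([2.0, 3.0]) @ Pinv
B = P @ np.diag([5.0, -1.0]) @ Pinv

assert np.allclose(A @ B, B @ A)  # a shared eigenbasis forces AB = BA
\end{lstlisting}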
\item This problem will lead you through a proof and application of the Buckingham $\pi$ Theorem, one of the most fundamental results in dimensional analysis. Given a system of $n$ physical variables $u_i$ (say the gravitational constant $g$, mass of an object $m$, the length of a string $l$, etc.) in $k$ independent physical dimensions $v_i$ (for example $T$ time, $M$ mass, $L$ length, etc.),
\begin{enumerate}
\item Show that the space of fundamental and derived ``units'' forms a vector space over $\Q$. (\emph{Hint: if $L$, $T$, $M$ are fundamental units, then $M*L*T^{-2}$ is a derived unit, as is $(M*L*T^{-2})^{3/2}$. How might we represent these derived units in terms of fundamental units? What might we call the ``fundamental units''? Note that multiplying a derived/fundamental unit by a scalar is considered an equivalent unit.})
------------------------\\
{\bf Solution:}
This is a vector space as follows: basis vectors are the fundamental units of the system $U_1,...,U_n$. All derived units are then products of fundamental units as $U_1^{a_1}U_2^{a_2}...U_n^{a_n}$ where $a_i \in \Q$. This is closed under ``scalar multiplication'' (which is just multiplying the exponents by an element of $\Q$) and ``vector addition'' (which is addition in the exponents) as $\Q$ is closed under both operations. Similarly the distributive law follows from that of $\Q$, so it is an honest-to-goodness vector space. Note that ``$0$'' in this vector space is the unit $U_1^0...U_n^0=1$, i.e. the dimensionless quantity. \\
\item Now consider a matrix $M$ which we will call the \emph{dimension matrix} for obvious reasons. Each column of $M$ tells how to form the $n$ variables $u_i$ out of the $k$ physical dimensions $v_i$, i.e. the $(s,t)^{th}$ entry of $M$ is the power of the unit $v_s$ in the constant $u_t$.
\begin{enumerate}
\item What are the dimensions of $M$?
------------------------\\
{\bf Solution:}
$M$ represents the $n$ variables $u_i$ in terms of the $k$ physical dimensions; it is thus $k \times n$. If the $i^{\text{th}}$ variable has units $v_1^{c_1^i}v_2^{c_2^i}...v_k^{c_k^i}$ then the matrix $M$ is represented as:
$$M= \begin{bmatrix} c_1^1 & c_1^2 & c_1^3 & \cdots & c_1^n \\ c_2^1 & c_2^2 & c_2^3 & \cdots & c_2^n \\ \vdots &&&& \vdots \\ c_k^1 & c_k^2 & c_k^3 & \cdots & c_k^n \end{bmatrix}$$
\item Describe in words what the result of a matrix multiplication $M \begin{bmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{bmatrix}$ is.
------------------------\\
{\bf Solution:}
If we form some new derived physical variable $u_1^{a_1}u_2^{a_2}...u_n^{a_n}$, we represent this in terms of the vector $ \begin{bmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{bmatrix}$. Then the matrix multiplication $M \begin{bmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{bmatrix}$ gives a new $k \times 1$ vector which represents the units of this derived physical variable in terms of the $k$ physical dimensions, i.e. the first entry will be the power of $v_1$ in the derived variable $u_1^{a_1}u_2^{a_2}...u_n^{a_n}$. \\
\item Apply a theorem to find a formula for the number of independent \emph{dimensionless} parameters $\pi_i$ (combinations of the original $n$ physical variables) in terms of $n$ and some characteristic of the matrix $M$. What are the bounds on the maximum and minimum number of independent dimensionless parameters?
------------------------\\
{\bf Solution:}
A dimensionless parameter is a combination of physical variables which does not depend on any fundamental (or derived, for that matter) units, i.e. it is a vector $\begin{bmatrix} a_1 \\ a_2 \\ \vdots \\ a_n \end{bmatrix}$ which is in the kernel of the matrix $M$. We want to know how many independent such dimensionless parameters we can find. That is, we want to know the \emph{nullity} of the transformation $M$. \\
To do this, we use the rank--nullity theorem. This tells us that for the matrix $M$, whose domain is $n$-dimensional, the nullity of $M$ is $n-$ rank$(M)$. In particular, the rank of $M$ is at most $k$ and at least $1$ (assuming our physical variables aren't nonsense, like all being dimensionless). So there are at least $n-k$ independent dimensionless parameters, and at most $n-1$. \\
As a ``historical'' note, the Buckingham $\pi$ theorem is normally quoted as the second-to-last result: for a physical system of $n$ physical variables in $k$ physical dimensions, there are (at least) $n-k$ dimensionless parameters. Note that even the ``at least'' is occasionally dropped, but remember to parse this as ``at least''! \\
\end{enumerate}
\item A standard application of dimensional analysis is to determine a relation for the period of a pendulum. The obvious list of physical quantities here are
\begin{tabular}{c c c}
Description &Variable & Units \\
\hline
length of string & $l$ & $L$ \\
arc of displacement & $s$ & $L$ \\
gravitational constant & $g$ & $L*T^{-2}$ \\
mass at end of string & $m$ & $M$ \\
period of swing & $\tau$ & $T$ \\
\end{tabular} \\
where $L$ is length, $M$ is mass, and $T$ is time. Use the methods developed in part $(b)$ and in class to find an expression for the period $\tau$ of the pendulum in terms of a \emph{dimensionless constant} times some relation in the above physical variables.
------------------------\\
{\bf Solution:}
We form the matrix described above as $M$ by decomposing the various physical variables in terms of the fundamental units. Using the implicit ordering in the table above:
$$M= \begin{bmatrix} 1 & 1 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & 0 & -2 & 0 &1 \end{bmatrix}$$
To find vectors in the kernel, we will find a basis for the kernel using the methods we derived in class:
$$M= \begin{bmatrix} 1 & 1 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & 0 & -2 & 0 &1 \end{bmatrix} \xrightarrow{\text{row reduce}} \begin{bmatrix} 1 & 1 & 0 & 0 & 1/2 \\ 0 & 0 & 1 & 0 & -1/2 \\ 0 & 0 & 0 & 1 &0 \end{bmatrix}$$
Without having to be fancy we see that (in this ordering) the physical variables $s$ and $\tau$ are non-pivotal. To find a basis for the kernel, we can note that the row reduction (and inspection, really) tells us that $$\begin{bmatrix} 1/2 \\ 0 \\ -1/2 \\ 0 \\ -1\end{bmatrix}, \begin{bmatrix} 1 \\ -1 \\ 0 \\ 0 \\ 0 \end{bmatrix}$$ are in the kernel. As they are independent, we have found a basis!
Looking a little more closely, we see that the first vector means $\frac{l^{1/2}}{g^{1/2}\tau}$ is dimensionless. Let's call this constant $\frac{1}{c}$. Then
$$\frac{1}{c}=\frac{l^{1/2}}{g^{1/2}\tau} \Rightarrow \tau=c\sqrt{\frac{l}{g}}$$
So we have found an expression for $\tau$ in terms of a dimensionless constant times a combination of our physical variables.
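The kernel computation is easy to reproduce (a sketch assuming SymPy; note that \texttt{nullspace} may return scalar multiples of the basis vectors above, which span the same kernel):
\begin{lstlisting}
import sympy as sp

# dimension matrix: rows = (L, M, T), columns = (l, s, g, m, tau)
M = sp.Matrix([[1, 1,  1, 0, 0],
               [0, 0,  0, 1, 0],
               [0, 0, -2, 0, 1]])

for v in M.nullspace():   # exponent vectors of dimensionless products
    print(v.T)
# (-1, 1, 0, 0, 0) is s/l; (-1/2, 0, 1/2, 0, 1) is tau*sqrt(g/l)
\end{lstlisting}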
\end{enumerate}
\item Suppose $A: \mathbb{R}^3 \rightarrow \mathbb{R}^3$ is a linear transformation such that for three linearly independent $\vec{v}_1, \vec{v}_2, \vec{v}_3$ we have
$$A\vec{v}_1=\lambda\vec{v}_1, A\vec{v}_2=\lambda\vec{v}_2, A\vec{v}_3=\lambda\vec{v}_3$$
for some $\lambda \in \mathbb{R}$.
\begin{enumerate}
\item In the \emph{eigenbasis}, what does $A$ look like? Justify your answer.
------------------------\\
{\bf Solution:}
In terms of the eigenbasis, we know that $A$ acting on each of the eigenvectors extracts the corresponding column of the matrix representation. So, quite obviously,
$$A=\begin{bmatrix} \lambda & 0 & 0 \\ 0 & \lambda & 0 \\ 0 & 0 & \lambda \end{bmatrix}$$
\item In the \emph{standard} basis, what does $A$ look like? Justify your answer.
------------------------\\
{\bf Solution:}
Now, we can do this in several different ways: the most algorithmic is to think about changing our basis. First we change from the standard to the eigenbasis. This is achieved by the matrix $P^{-1}= [\vec{v}_1 \ \vec{v}_2 \ \vec{v}_3 ]^{-1}$. Then we transform in the eigenbasis, and then we need to change the basis back to the standard basis which is achieved by $P= [\vec{v}_1 \ \vec{v}_2 \ \vec{v}_3 ]$. So the entire transformation is given by $A_{e_i}=PA_{v_i}P^{-1}$. But we know that $A_{v_i}=\lambda I$. So we manipulate as:
$$A_{e_i}=PA_{v_i}P^{-1}=\lambda PIP^{-1} = \lambda I$$
So again, in terms of the standard basis, $A$ looks like:
$$A=\begin{bmatrix} \lambda & 0 & 0 \\ 0 & \lambda & 0 \\ 0 & 0 & \lambda \end{bmatrix}$$
The more intuitive reason is that if there is an eigenbasis with only one eigenvalue, then a 3-dimensional subspace of $\R^3$ is preserved and scaled by $\lambda$. The only 3-dimensional subspace of $\R^3$ is $\R^3$ itself, so every vector in $\R^3$ is scaled by $\lambda$, in particular the standard basis vectors.
\item Give a condition on $\lambda$ that determines when $A$ is an isomorphism.
------------------------\\
{\bf Solution:}
$A$ is already linear, so it is an isomorphism if and only if it is invertible, which happens if and only if $\lambda \neq 0$.
\end{enumerate}
\item Let $T$ be the linear transformation given by
$$T:\R^4 \to \R^3, T\begin{bmatrix} x \\ y \\ z \\w \end{bmatrix} = \begin{bmatrix} 3x+y \\ 2z \\ y+w \end{bmatrix}$$
Furthermore, let $B_1$ be an alternative basis for $\R^4$ given by
$$B_1 =\left\{\begin{bmatrix} 1 \\ 1 \\ 2\\1 \end{bmatrix}, \begin{bmatrix} 0 \\ 1 \\ 0\\3 \end{bmatrix}, \begin{bmatrix} 1 \\ 1 \\ 0\\0 \end{bmatrix}, \begin{bmatrix} 2 \\ 2 \\ 2\\5 \end{bmatrix} \right\}$$
And let $B_2$ be an alternative basis for $\R^3$ given by
$$B_2=\left\{ \begin{bmatrix} 3 \\ 0 \\ 0 \end{bmatrix}, \begin{bmatrix} 1 \\ 1 \\ 2 \end{bmatrix}, \begin{bmatrix} 4 \\ 0 \\ 2 \end{bmatrix} \right\}$$
\begin{enumerate}
\item Find the matrix representation for $T$ with respect to the \emph{standard basis}. Is this unique? Find a basis for the image and kernel of $T$.
------------------------\\
{\bf Solution:}
To find the matrix representation for $T$ with respect to the standard basis vectors we observe how $T$ acts on each of the standard basis vectors. The matrix is:
$$T = \begin{bmatrix} 3 & 1 & 0 & 0 \\ 0 & 0 & 2 & 0 \\ 0 & 1 & 0 & 1 \end{bmatrix}$$
We must determine which variables are pivotal and which are nonpivotal. To do this, we row reduce:
$$T = \begin{bmatrix} 3 & 1 & 0 & 0 \\ 0 & 0 & 2 & 0 \\ 0 & 1 & 0 & 1 \end{bmatrix} \xrightarrow{\text{row reduce}} \begin{bmatrix} 1 & 0 & 0 & -1/3 \\ 0 & 1 & 0 & 1 \\ 0 & 0 & 1 & 0 \end{bmatrix}$$
So we see that $x$, $y$, and $z$ are pivotal, whereas $w$ is nonpivotal. So a basis for the image is given by the columns corresponding to action by $\vec{e}_1$, $\vec{e}_2$, $\vec{e}_3$. Namely:
$$\left\{ \begin{bmatrix} 3 \\ 0 \\ 0 \end{bmatrix}, \begin{bmatrix} 1 \\ 0 \\ 1 \end{bmatrix}, \begin{bmatrix} 0 \\ 2 \\ 0 \end{bmatrix} \right\}$$
(Since these three are independent, the image is in fact all of $\R^3$.)
Row reduction also tells us that $T(\vec{e}_4)=T(-(1/3)\vec{e}_1+\vec{e}_2)$. (I got this from the fourth column of the row reduced matrix.)
Thus $T(-(1/3)\vec{e}_1+\vec{e}_2-\vec{e}_4)=0$, meaning that $-(1/3)\vec{e}_1+\vec{e}_2-\vec{e}_4$ is a basis for the kernel. Let's check:
$$\begin{bmatrix} 3 & 1 & 0 & 0 \\ 0 & 0 & 2 & 0 \\ 0 & 1 & 0 & 1 \end{bmatrix} \begin{bmatrix} -1/3 \\ 1 \\ 0 \\ -1 \end{bmatrix} \stackrel{\checkmark}{=} \begin{bmatrix} 0 \\ 0 \\ 0 \\ 0 \end{bmatrix}$$
\item Determine the matrix representation for $T$ with respect to $B_1$ in the domain and $B_2$ in the codomain in two ways:
\begin{enumerate}
\item Determine how $T$ acts on the basis vectors $B_1$ in terms of $B_2$
------------------------\\
{\bf Solution:}
Let's call the ordered vectors in $B_1$ $\vec{v}_1, \vec{v}_2, \vec{v}_3, \vec{v}_4$, and the vectors in $B_2$ $\vec{w}_1,\vec{w}_2,\vec{w}_3$. Now we know that $T(\vec{v}_1)$ gives us the vector $\begin{bmatrix} 4 \\ 4 \\ 2 \end{bmatrix}$. Similarly:
$$T(\vec{v}_2)=\begin{bmatrix} 1 \\ 0 \\ 4 \end{bmatrix}, T(\vec{v}_3)=\begin{bmatrix} 4 \\ 0 \\ 1 \end{bmatrix}, T(\vec{v}_4)=\begin{bmatrix} 8 \\ 4 \\ 7 \end{bmatrix}$$
Now we want to write these in terms of the basis $B_2$. We will do this by row reduction in tandem:
$$\begin{bmatrix} 3 & 1 & 4 & 4 & 1 & 4 & 8 \\ 0 & 1 & 0 & 4 & 0 & 0 & 4 \\0 & 2 & 2 &2 & 4 & 1 & 7 \end{bmatrix} \xrightarrow{\text{row reduce}} \begin{bmatrix} 1 & 0 & 0 & 4 & -7/3 & 2/3 & 2 \\ 0 & 1 & 0 & 4 & 0 & 0 & 4 \\0 & 0 & 1 & -3 & 2 & 1/2 & -1/2 \end{bmatrix}$$
The second half of this matrix gives us the vectors $T(\vec{v}_i)$ in terms of the $\{\vec{w}_i\}$, i.e. this is our matrix!
$$\tilde{T}=\begin{bmatrix} 4 & -7/3 & 2/3 & 2 \\ 4 & 0 & 0 & 4 \\ -3 & 2 & 1/2 & -1/2 \end{bmatrix}$$ \\
\item Write down an expression in terms of change of basis matrices and evaluate it.
------------------------\\
{\bf Solution:}
We want to start in the basis for $\R^4$ given by $B_1$, then change to the standard basis. Then we transform in the standard basis. Finally, we need to change back from the standard basis for $\R^3$ to the basis $B_2$. Let's call the first transformation $P$, and the second $Q^{-1}$. We can easily write down
$$P = \begin{bmatrix} 1 & 0 & 1 & 2 \\ 1 & 1 & 1 & 2 \\ 2 & 0 & 0 & 2 \\ 1 & 3 & 0 & 5 \end{bmatrix}, Q=\begin{bmatrix} 3 & 1 & 4 \\ 0 & 1 & 0 \\ 0 & 2 & 2 \end{bmatrix}$$
So we invert and find:
$$Q^{-1}= \begin{bmatrix} 1/3 & 1 & -2/3 \\ 0 & 1 & 0 \\ 0 & -1 & 1/2 \end{bmatrix}$$
So all together:
$$\tilde{T}=Q^{-1}TP=\begin{bmatrix} 1/3 & 1 & -2/3 \\ 0 & 1 & 0 \\ 0 & -1 & 1/2 \end{bmatrix} \begin{bmatrix} 3 & 1 & 0 & 0 \\ 0 & 0 & 2 & 0 \\ 0 & 1 & 0 & 1 \end{bmatrix} \begin{bmatrix} 1 & 0 & 1 & 2 \\ 1 & 1 & 1 & 2 \\ 2 & 0 & 0 & 2 \\ 1 & 3 & 0 & 5 \end{bmatrix}$$
Sure enough, we find again that:
$$\tilde{T} \stackrel{\checkmark}{=} \begin{bmatrix} 4 & -7/3 & 2/3 & 2 \\ 4 & 0 & 0 & 4 \\ -3 & 2 & 1/2 & -1/2 \end{bmatrix}$$
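The same arithmetic in code (a sketch assuming NumPy; \texttt{np.linalg.inv} plays the role of $Q^{-1}$):
\begin{lstlisting}
import numpy as np

T = np.array([[3, 1, 0, 0],
              [0, 0, 2, 0],
              [0, 1, 0, 1]], dtype=float)
P = np.array([[1, 0, 1, 2],
              [1, 1, 1, 2],
              [2, 0, 0, 2],
              [1, 3, 0, 5]], dtype=float)
Q = np.array([[3, 1, 4],
              [0, 1, 0],
              [0, 2, 2]], dtype=float)

T_tilde = np.linalg.inv(Q) @ T @ P
print(T_tilde)   # matches the matrix found in part (i)
\end{lstlisting}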
\end{enumerate}
\item Does $T$ admit an \emph{eigenbasis}?
------------------------\\
{\bf Solution:}
Nope, its domain and codomain are not the same space.
\end{enumerate}
\item Let $V$ be a finite dimensional vector space. Prove that any ordered spanning set of vectors can be reduced to a basis by removing vectors from the set, and any ordered linearly independent set of vectors can be expanded to a basis by adding vectors in $V$.
------------------------\\
{\bf Solution:}
\begin{thm}
Every set of vectors $\vec{v}_1,...,\vec{v}_m$ that spans $V$ can be reduced to a basis for $V$ by removing a subset of the vectors.
\end{thm}
\emph{Proof}.
We begin with a set $\tilde{B}=(\vec{v}_1,...,\vec{v}_m)$. Now we carry out an algorithm to pare down this spanning set to a basis: \\
{\bf Step 1}: If $\vec{v}_1=0$ then remove $\vec{v}_1$, if not leave $\tilde{B}$ unchanged \\
{\bf Step 2}: If $\vec{v}_2 \in \Span{\vec{v}_1}$ then remove $\vec{v}_2$, if not leave $\tilde{B}$ unchanged \\
{\bf Step $i$:} If $\vec{v}_i \in \Span{\vec{v}_1,...,\vec{v}_{i-1}}$ then remove $\vec{v}_i$, if not leave $\tilde{B}$ unchanged \\
If we continue the algorithm through $m$ steps, $\Span{\tilde{B}} = \Span{\vec{v}_1,...,\vec{v}_m}$ by construction. Furthermore no vector is in the span of the previous vectors, thus the set is linearly independent. So $\tilde{B}$ is a basis for $V$.
\begin{thm}
Every linearly independent set of vectors $\vec{v}_1,...,\vec{v}_n$ in a finite dimensional vector space $V$ can be extended to a basis for $V$ by adding a set of vectors.
\end{thm}
\emph{Proof}.
By definition of $V$ being finite dimensional, there exists a spanning set of a finite number of vectors $\vec{w}_1,...,\vec{w}_m$ for $V$. We will construct a basis for $V$ using the following algorithm. First, let $\tilde{B}=(\vec{v}_1,\vec{v}_2,...,\vec{v}_n)$:\\
{\bf Step 1}: If $\vec{w}_1 \not\in \Span{\vec{v}_1,...,\vec{v}_n}$, then add $\vec{w}_1$ to $\tilde{B}$; if not leave $\tilde{B}$ unchanged \\
{\bf Step 2}: If $\vec{w}_2 \notin \Span{\vec{v}_1,...,\vec{v}_n,\vec{w}_1}$, then add $\vec{w}_2$ to $\tilde{B}$; if not leave $\tilde{B}$ unchanged \\
{\bf Step $i$}: If $\vec{w}_i \notin \Span{\vec{v}_1,...,\vec{v}_n,\vec{w}_1...\vec{w}_{i-1}}$, then add $\vec{w}_i$ to $\tilde{B}$; if not leave $\tilde{B}$ unchanged \\
After $m$ steps of the algorithm we have a set which is still linearly independent, as no vector is in the span of the previous vectors. Furthermore it is a spanning set, as its span contains $\Span{\vec{w}_1,...,\vec{w}_m}$, which by definition spans $V$. Thus $\tilde{B}$ is a basis for $V$.
\item {\bf T/F}: If $A$ is a $3 \times 3$ matrix with an eigenbasis but only 2 distinct eigenvalues, then for any $\vec{w} \in \mathbb{R}^3$, $\{ \vec{w}, A\vec{w}, A^2\vec{w}\}$ are linearly dependent. \\
--------------------\\
{\bf Solution: TRUE}
Call the basis of eigenvectors $\{ \vec{v}_1, \vec{v}_2, \vec{v}_3\}$ \\
Without loss of generality let $A\vec{v}_1 = \lambda_1\vec{v}_1, A\vec{v}_2 = \lambda_1\vec{v}_2, A\vec{v}_3 = \lambda_2\vec{v}_3$ \\
Any $\vec{w}$ can be represented as a linear combo of these basis vectors:
$$\vec{w} = a\vec{v}_1 + b\vec{v}_2 + c\vec{v}_3$$
Then:
$$A\vec{w} = aA\vec{v}_1 + bA\vec{v}_2 + cA\vec{v}_3$$
$$ = a\lambda_1\vec{v}_1 + b\lambda_1\vec{v}_2 + c\lambda_2\vec{v}_3$$
And:
$$A^2\vec{w} = A(a\lambda_1\vec{v}_1 + b\lambda_1\vec{v}_2 + c\lambda_2\vec{v}_3)$$
$$ = a\lambda_1A\vec{v}_1 + b\lambda_1A\vec{v}_2 + c\lambda_2A\vec{v}_3$$
$$ = a\lambda_1^2\vec{v}_1 + b\lambda_1^2\vec{v}_2 + c\lambda_2^2\vec{v}_3$$
To check independence we put these into a matrix (in the eigenbasis) and see if we could row reduce to the identity:
$$\begin{bmatrix} a & a\lambda_1 & a\lambda_1^2 \\ b & b\lambda_1 & b\lambda_1^2 \\ c & c\lambda_2 & c\lambda_2^2 \end{bmatrix}$$
Clearly, $b(\text{row }1) - a(\text{row }2)$ yields a row of 0s. Thus this matrix cannot row reduce to the identity and $\vec{w}, A\vec{w}, A^2\vec{w}$ must be dependent. \\
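A quick numerical spot-check (a sketch assuming NumPy; the eigenbasis, eigenvalues, and $\vec{w}$ are arbitrary choices):
\begin{lstlisting}
import numpy as np

P = np.array([[1.0, 0.0, 1.0],
              [0.0, 1.0, 1.0],
              [0.0, 0.0, 1.0]])   # columns: an eigenbasis
# only 2 distinct eigenvalues: 2 (repeated) and 5
A = P @ np.diag([2.0, 2.0, 5.0]) @ np.linalg.inv(P)

w = np.array([1.0, -2.0, 3.0])
M = np.column_stack([w, A @ w, A @ A @ w])
print(np.linalg.matrix_rank(M))   # 2 < 3: the vectors are dependent
\end{lstlisting}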
\item {\bf T/F}: The operation $\times$ (the cross product) is a linear operator from $(\R^3,\R^3) \to \R^3$.
--------------------\\
{\bf Solution: FALSE}
The operator is \emph{multi}linear. That means it is linear in each component if you fix the other components. To see why the statement as given (a single linear transformation on pairs) fails, consider:
$$(\vec{v},\vec{w}) \mapsto \vec{v} \times \vec{w}$$
$$c(\vec{v},\vec{w}) = (c\vec{v},c\vec{w}) \mapsto c^2(\vec{v} \times \vec{w}) \neq c(\vec{v} \times \vec{w}) $$
\item {\bf Approximations}
\begin{enumerate}
\item True or False: for every $n \times n$ real matrix $A$, if $A$ has an eigenvalue, then there is an explicit formula for this eigenvalue in terms of the coefficients of $A$ (i.e. something along the lines of the quadratic formula)
--------------------\\
{\bf Solution: FALSE}
For $5 \times 5$ matrices and above, the characteristic polynomial has degree at least $5$, and by the Abel--Ruffini theorem there is no general closed-form solution in radicals for the roots of such polynomials, as there is for quadratics. \\
\item As you may have discovered, it is something of a pain to determine the eigenvalues of an $n \times n$ matrix of dimension greater than 3 using the formulas we derived in class; as $n$ becomes even larger, you can imagine that this becomes unwieldy even for a computer. As such there are many eigenvalue-computing algorithms that \emph{approximate} the eigenvalues of a matrix. \\
Assume that $A$ is diagonalizable (i.e. an eigenbasis exists) with $\lambda_1$ the \emph{unique} eigenvalue of greatest magnitude. For a random (you can assume ``nice'', but see part D) vector $b_0$, we define the recursive relation
$$b_{k+1}=\frac{Ab_k}{|Ab_k|}$$
\begin{enumerate}
\item First, find an expression for $b_{k+1}$ in terms of the original $b_0$. \\
--------------------\\
{\bf Solution: }
$$b_{k+1}=\frac{A^{k+1}b_0}{|A^{k+1}b_0|}$$
\item As $k \to \infty$, what can you hypothesize about $b_{k+1}$? Use what you know about $A$ to prove your claim. In the process, if you haven't approximated $\lambda_1$ and an associated unit eigenvector, you might want to reconsider. \\
--------------------\\
{\bf Solution: }
We hypothesize that $b_{k+1}$ will be a unit eigenvector with eigenvalue $\lambda_1$. We can prove this as follows:
First write $b_0$ in the eigenbasis. Thus
$$b_0=c_1\vec{v}_1+c_2\vec{v}_2+...+c_n\vec{v}_n$$
We order the eigenvectors so that $|\lambda_1|>|\lambda_2|\geq |\lambda_3| \geq \cdots$. Then
$$A^{k+1}b_0=c_1\lambda_1^{k+1}\vec{v}_1+c_2\lambda_2^{k+1}\vec{v}_2+...+c_n\lambda_n^{k+1}\vec{v}_n$$
$$A^{k+1}b_0=c_1\lambda_1^{k+1}\left(\vec{v}_1+\frac{c_2}{c_1}\left(\frac{\lambda_2}{\lambda_1}\right)^{k+1}\vec{v}_2+...+\frac{c_n}{c_1}\left(\frac{\lambda_n}{\lambda_1}\right)^{k+1}\vec{v}_n\right)$$
So this converges to a multiple of $\vec{v}_1$ (up to sign if $\lambda_1<0$), as all of the eigenvalue ratios are less than $1$ in magnitude. After normalization this is a unit vector.
We get $\lambda_1$ by the ratio of components of $b_k$ and $Ab_k$. \\
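Here is the algorithm as a short program (a sketch assuming NumPy; the stopping rule is a fixed iteration count for simplicity, and the Rayleigh quotient stands in for the component-ratio estimate of $\lambda_1$):
\begin{lstlisting}
import numpy as np

def power_iteration(A, b0, iters=100):
    # assumes a unique largest-magnitude eigenvalue and that
    # b0 has a nonzero component along its eigenvector
    b = b0 / np.linalg.norm(b0)
    for _ in range(iters):
        b = A @ b
        b = b / np.linalg.norm(b)
    lam = b @ (A @ b)   # Rayleigh quotient approximates lambda_1
    return lam, b

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
lam, v = power_iteration(A, np.array([1.0, 0.0]))
print(lam)   # ~3.618, the dominant eigenvalue of A
\end{lstlisting}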
\item \emph{Bonus}: What is the (approximate) rate of convergence of the above algorithm? How does this compare to other algorithms we have seen in this class? (Note: your answer will depend on some characteristic of $A$).
--------------------\\
{\bf Solution: }
This converges geometrically with ratio $\left|\frac{\lambda_2}{\lambda_1}\right|$. This is fast if $|\lambda_2| \ll |\lambda_1|$ and slow if they are close in magnitude.
\item Where could the above algorithm go wrong (think about $b_0$). \\
--------------------\\
{\bf Solution: }
If $b_0$ did not have a component along $\vec{v}_1$. Luckily this happens with measure $0$ in the space of vectors and thus probability $0$ (and in floating-point arithmetic, rounding error typically reintroduces a small component along $\vec{v}_1$ anyway).
\end{enumerate}
\end{enumerate}
\item {\bf The Trace}
\begin{enumerate}
\item Working from the definition of matrix multiplication, prove that $\tr(AB)=\tr(BA)$. \\
--------------------\\
{\bf Solution: }
Call $C=AB$, then
$$\tr(C)=\sum_i\left(\sum_k a_{i,k}b_{k,i}\right)$$
Call $D=BA$, then
$$\tr(D)=\sum_i\left(\sum_k b_{i,k}a_{k,i}\right)$$
These two are the same as the elements of $\R$ commute and so they can both be written as
$$\sum_{i,k}a_{i,k}b_{k,i}$$
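This identity is easy to spot-check numerically (a sketch assuming NumPy; the matrices are random):
\begin{lstlisting}
import numpy as np

rng = np.random.default_rng(0)
A = rng.random((3, 3))
B = rng.random((3, 3))
assert np.isclose(np.trace(A @ B), np.trace(B @ A))
\end{lstlisting}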
\item Use this to prove that the trace of a matrix $A$ is preserved under change of basis. This is very good because it says that the trace is an \emph{algebraic invariant} of the operator $A$ that is independent of coordinate system. \\
--------------------\\
{\bf Solution: }
Let $A'=P^{-1}AP$ (so $A'$ is related to $A$ through a change of basis). Then
$$\tr(A')=\tr\left( (P^{-1}A)P\right)=\tr\left( P(P^{-1}A)\right)=\tr(A)$$
This means trace is an algebraic invariant. It joins the ranks of the likes of the rank, the nullity, the determinant, etc. \\
\item Use the above analysis to conclude that if $A$ has an eigenbasis with eigenvalues $\{\lambda_i\}$, then $$\tr(A)=\sum_i \lambda_i$$
--------------------\\
{\bf Solution: }
In the eigenbasis $A$ is diagonal with the $\lambda_i$ on the diagonal, so $\tr(A)=\sum_i \lambda_i$ there; by part (b) the trace is the same in any basis.
\end{enumerate}
\item {\bf Symmetric Matrices}
\begin{enumerate}
\item Let $A$ be a symmetric matrix which admits an eigenbasis with distinct eigenvalues. Prove that for eigenvectors $v_i$ and $v_j$, $$v_i \cdot v_j =0 \text{ for } i \neq j$$
This says that the eigenvectors are pairwise orthogonal.
--------------------\\
{\bf Solution: }
Consider eigenvectors $\vec{v}_i$ and $\vec{v}_j$ with eigenvalues $\lambda_i$ and $\lambda_j$:
$$\lambda_i\vec{v}_i \cdot \vec{v}_j=A\vec{v}_i \cdot \vec{v}_j=(A\vec{v}_i)^T \vec{v}_j = \vec{v}_i^TA^T\vec{v}_j=\vec{v}_i^TA\vec{v}_j=\vec{v}_i \cdot A\vec{v}_j=\vec{v}_i\cdot \lambda_j\vec{v}_j$$
Since the eigenvalues are distinct, $\lambda_i \neq \lambda_j$ for $i \neq j$, and so $\vec{v}_i \cdot \vec{v}_j=0$.
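A numerical companion (a sketch assuming NumPy; \texttt{np.linalg.eigh} is the symmetric eigensolver, and it returns orthonormal eigenvectors even when eigenvalues repeat):
\begin{lstlisting}
import numpy as np

S = np.array([[2.0, 1.0, 0.0],
              [1.0, 3.0, 1.0],
              [0.0, 1.0, 2.0]])   # a symmetric matrix
eigvals, V = np.linalg.eigh(S)   # columns of V are eigenvectors
print(np.round(V.T @ V, 10))     # the identity: pairwise orthonormal
\end{lstlisting}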
\end{enumerate}
\end{enumerate}
\end{document}