|
| 1 | +\documentclass{ximera} |
| 2 | +\input{../preamble.tex} |
| 3 | + |
| 4 | +\title{Coordinate Mappings} \license{CC BY-NC-SA 4.0} |
| 5 | + |
| 6 | +\begin{document} |
| 7 | +\begin{abstract} |
| 8 | +\end{abstract} |
| 9 | +\maketitle |
| 10 | + |
| 11 | +\begin{onlineOnly} |
| 12 | +\section*{Coordinate Mappings} |
| 13 | +\end{onlineOnly} |
| 14 | + |
| 15 | +Recall that a transformation $T:\mathbb{R}^n\rightarrow \mathbb{R}^m$ is called a \dfn{linear transformation} if the following are true for all vectors ${\bf u}$ and ${\bf v}$ in $\mathbb{R}^n$, and scalars $k$. |
| 16 | +\begin{equation*} |
| 17 | +T(k{\bf u})= kT({\bf u}) |
| 18 | +\end{equation*} |
| 19 | +\begin{equation*} |
| 20 | +T({\bf u}+{\bf v})= T({\bf u})+T({\bf v}) |
| 21 | +\end{equation*} |
| 22 | + |
| 23 | +We generalize this definition as follows. |
| 24 | + |
| 25 | +\begin{definition}\label{def:lintransgeneral} |
| 26 | +Let $V$ and $W$ be vector spaces. A transformation $T:V\rightarrow W$ is called a \dfn{linear transformation} if the following are true for all vectors ${\bf u}$ and ${\bf v}$ in $V$, and scalars $k$. |
| 27 | +\begin{equation*} |
| 28 | +T(k{\bf u})= kT({\bf u}) |
| 29 | +\end{equation*} |
| 30 | +\begin{equation*} |
| 31 | +T({\bf u}+{\bf v})= T({\bf u})+T({\bf v}) |
| 32 | +\end{equation*} |
| 33 | +\end{definition} |
| 34 | + |
| 35 | +\subsection*{Linearity of Coordinate Mappings} |
| 36 | +Transformations that map vectors to their coordinate vectors with respect to some ordered basis will prove to be of great importance. We will start by showing that such transformations are linear. |
| 37 | + |
| 38 | +If $V$ is a vector space, and $\mathcal{B}=\{\vec{v}_1, \ldots ,\vec{v}_n\}$ is an ordered basis for $V$, then any vector $\vec{v}$ of $V$ can be uniquely expressed as $\vec{v}=a_1\vec{v}_1+\ldots +a_n\vec{v}_n$ for some scalars $a_1, \ldots ,a_n$. The vector $[\vec{v}]_{\mathcal{B}}$ in $\RR^n$ given by |
| 39 | +$$[\vec{v}]_{\mathcal{B}}=\begin{bmatrix}a_1\\a_2\\\vdots\\a_n\end{bmatrix}$$ |
| 40 | +is said to be the \dfn{coordinate vector for $\vec{v}$ with respect to the ordered basis $\mathcal{B}$}. (See Definition \ref{def:coordvector}.) |
| 41 | + |
| 42 | +It turns out that the transformation $T:V\rightarrow \RR^n$ defined by $T(\vec{v})=[\vec{v}]_{\mathcal{B}}$ is linear. Before we prove linearity of $T$, consider the following example. |
| 43 | + |
| 44 | +\begin{example}\label{ex:abstvectsplintranscoordvect1} |
| 45 | +Let $\mathcal{B}=\left\{\begin{bmatrix}1&0\\0&0\end{bmatrix}, \begin{bmatrix}0&1\\0&0\end{bmatrix}, \begin{bmatrix}0&0\\1&0\end{bmatrix}, \begin{bmatrix}0&0\\0&1\end{bmatrix}\right\}$ be an ordered basis for $\mathbb{M}_{2,2}$. (You should do a quick mental check that $\mathcal{B}$ is a legitimate basis.) Define $T:\mathbb{M}_{2,2}\rightarrow \RR^4$ by $T(A)=[A]_{\mathcal{B}}$. Find $T\left(\begin{bmatrix}-2&3\\1&-5\end{bmatrix}\right)$. |
| 46 | +\begin{explanation} |
| 47 | +We need to find the coordinate vector for $\begin{bmatrix}-2&3\\1&-5\end{bmatrix}$ with respect to $\mathcal{B}$. |
| 48 | +$$\begin{bmatrix}-2&3\\1&-5\end{bmatrix}=-2\begin{bmatrix}1&0\\0&0\end{bmatrix}+ 3\begin{bmatrix}0&1\\0&0\end{bmatrix}+ \begin{bmatrix}0&0\\1&0\end{bmatrix}+ (-5)\begin{bmatrix}0&0\\0&1\end{bmatrix}$$ |
| 49 | +This gives us: |
| 50 | +$$T\left(\begin{bmatrix}-2&3\\1&-5\end{bmatrix}\right)=\left[\begin{bmatrix}-2&3\\1&-5\end{bmatrix}\right]_{\mathcal{B}}=\begin{bmatrix}-2\\3\\1\\-5\end{bmatrix}$$ |
| 51 | +\end{explanation} |
| 52 | +\end{example} |
| 53 | + |
| 54 | +\begin{theorem}\label{th:coordvectmappinglinear} |
| 55 | +Let $V$ be an $n$-dimensional vector space, and let $\mathcal{B}$ be an ordered basis for $V$. Then $T:V\rightarrow \RR^n$ given by $T(\vec{v})=[\vec{v}]_{\mathcal{B}}$ is a linear transformation. |
| 56 | +\end{theorem} |
| 57 | +\begin{proof} |
| 58 | +First observe that Theorem \ref{th:uniquerep} of \href{https://ximera.osu.edu/linearalgebradzv3/LinearAlgebraInteractiveIntro/VSP-0060/main}{Bases and Dimension of Abstract Vector Spaces} guarantees that there is only one way to represent each element of $V$ as a linear combination of elements of $\mathcal{B}$. Thus each element of $V$ maps to exactly one element of $\RR^n$, as long as the order in which elements of $\mathcal{B}$ appear is taken into account. This proves that $T$ is a function, or a transformation. We will now prove that $T$ is linear. |
| 59 | + |
| 60 | +Let $\vec{v}$ be an element of $V$. We will first show that $T(k\vec{v})=kT(\vec{v})$. Suppose $\mathcal{B}=\{\vec{v}_1, \ldots ,\vec{v}_n\}$, then $\vec{v}$ can be written as a unique linear combination: |
| 61 | +$$\vec{v}=a_1\vec{v}_1+ \ldots +a_n\vec{v}_n$$ |
| 62 | +We have: |
| 63 | +\begin{align*} |
| 64 | + T(k\vec{v})&=T(k(a_1\vec{v}_1+ \ldots +a_n\vec{v}_n))\\ |
| 65 | + &=T((ka_1)\vec{v}_1+ \ldots +(ka_n)\vec{v}_n)\\ |
| 66 | + &=\begin{bmatrix}ka_1\\\vdots\\ka_n\end{bmatrix}=k\begin{bmatrix}a_1\\\vdots\\a_n\end{bmatrix}=kT(\vec{v}) |
| 67 | +\end{align*} |
| 68 | +We leave it to the reader to verify that $T(\vec{v}+\vec{w})=T(\vec{v})+T(\vec{w})$. (See Practice Problem \ref{prob:completeproofoflin}.) |
| 69 | +\end{proof} |
| 70 | + |
| 71 | +\subsection*{Invertibility of Coordinate Mappings} |
| 72 | +Consider a linear transformation $T:\RR^2\rightarrow \RR^2$ that scales all input vectors by a factor of two, and a linear transformation $S:\RR^2\rightarrow \RR^2$ that scales all input vectors by a factor of one half. The composite functions $S\circ T$ and $T\circ S$ are both identity transformations. $S$ and $T$ are clearly inverses of each other. Diagrammatically, we can represent $T$ and $S$ as follows: |
| 73 | + |
| 74 | +\begin{center} |
| 75 | +\begin{tikzpicture} |
| 76 | + \node[] at (0, -1.2) (top) {$T$ doubles each input}; |
| 77 | + \node[] at (-3, -2.5) (left1) {$\vec{v}$}; |
| 78 | + \node[] at (3, -2.5) (right1) {$2\vec{v}$}; |
| 79 | + \node[] at (0, -3.7) (bottom) {$S$ halves each input}; |
| 80 | + \draw [->,line width=0.5pt,-stealth] (left1.north east)to[out=30, in=150](right1.north west); |
| 81 | + \draw [->,line width=0.5pt,-stealth] (right1.south west)to[out=210, in=330](left1.east); |
| 82 | + \end{tikzpicture} |
| 83 | + \end{center} |
| 84 | + |
| 85 | +This gives us a way of thinking about an inverse of $T$ as a transformation that ``undoes'' the action of $T$ by ``reversing'' the mapping arrows. We will now use these intuitive ideas to understand which linear transformations are invertible and which are not. |
| 86 | + |
| 87 | +Given an arbitrary linear transformation $T:V\rightarrow W$, ``reversing the arrows'' |
| 88 | + may not always result in a transformation. Recall that transformations are functions. The figures below show two ways in which our attempt to ``reverse'' $T$ may fail to produce a function. |
| 89 | + |
| 90 | + First, if two distinct vectors $\vec{v}_1$ and $\vec{v}_2$ map to the same vector $\vec{w}$ in $W$, then reversing the arrows gives us a mapping that is clearly not a function. |
| 91 | + |
| 92 | + |
| 93 | +\begin{center} |
| 94 | +\begin{tikzpicture} |
| 95 | +\fill[blue, opacity=0.3] (-5.5,-2) rectangle (2,2); |
| 96 | + \node[] at (-5, 1.2) (topleft) {$\vec{v}_1$}; |
| 97 | + \node[] at (-5, -1.2) (bottomleft) {$\vec{v}_2$}; |
| 98 | + \node[] at (0, 0) (cleft1) {$T(\vec{v}_1)=T(\vec{v}_2)=\vec{w}$}; |
| 99 | + |
| 100 | +% \node[gray] at (-5, 0) (comment) {(Two distinct elements map to $\vec{w}$)}; |
| 101 | + \draw [->,line width=0.5pt,-stealth] (topleft.east) to (cleft1.north west); |
| 102 | + \draw [->,line width=0.5pt,-stealth] (bottomleft.east) to (cleft1.south west); |
| 103 | + \end{tikzpicture} |
| 104 | + \end{center} |
| 105 | + |
| 106 | + \begin{center} |
| 107 | +\begin{tikzpicture} |
| 108 | +\fill[blue, opacity=0.3] (-5.5,-2) rectangle (2,2); |
| 109 | + \node[] at (-5, 1.2) (topleft) {$\vec{v}_1$}; |
| 110 | + \node[] at (-5, -1.2) (bottomleft) {$\vec{v}_2$}; |
| 111 | + \node[] at (0, 0) (cleft1) {$T(\vec{v}_1)=T(\vec{v}_2)=\vec{w}$}; |
| 112 | +% \node[gray] at (-6, 0) (comment) {(Reversing the arrows does not produce a function)}; |
| 113 | + \draw [->,line width=0.5pt,-stealth] (cleft1.north west) to (topleft.east); |
| 114 | + \draw [->,line width=0.5pt,-stealth] (cleft1.south west) to (bottomleft.east); |
| 115 | + % \node[] at (-4, -2.5) (caption) {Figure 1. When two distinct elements map to $\vec{w}$,}; |
| 116 | + % \node[] at (-4, -3) (caption) {reversing the arrows does not produce a function.}; |
| 117 | + \end{tikzpicture} |
| 118 | + \end{center} |
| 119 | + |
| 120 | + Based on these diagrams, it is reasonable to conjecture that for a transformation to be invertible, the transformation must be such that each output is the image of exactly one input. Such transformations are called \dfn{one-to-one}. |
| 121 | + |
| 122 | +\begin{definition}[One-to-One]\label{def:onetoone} A linear transformation $T:V\rightarrow W$ is \dfn{one-to-one} if |
| 123 | +$$T(\vec{v}_1)=T(\vec{v}_2)\quad \text{implies that}\quad \vec{v}_1=\vec{v}_2$$ |
| 124 | +\end{definition} |
| 125 | + |
| 126 | +Second, observe that our definition of an inverse of $T:V\rightarrow W$ requires that the domain of the inverse transformation be $W$. (Definition \ref{def:inverse}, \href{https://ximera.osu.edu/linearalgebradzv3/LinearAlgebraInteractiveIntro/LTR-0030/main}{Composition and Inverses of Linear Transformations}) If there is a vector $\vec{b}$ in $W$ that is not an image of any vector in $V$, then $\vec{b}$ cannot be in the domain of an inverse transformation. |
| 127 | + |
| 128 | +\begin{center} |
| 129 | +\begin{tikzpicture} |
| 130 | +\fill[orange, opacity=0.5] (-2.5,1.6) rectangle (1,3.4); |
| 131 | + \node[] at (0, 3) (topleft) {$\vec{b}$}; |
| 132 | + \node[] at (-2, 2) (bottomleft) {$\vec{v}$}; |
| 133 | + \node[] at (0, 2) (cleft1) {$T(\vec{v})$}; |
| 134 | +% \node[gray] at (2, 3) (comment) {(Nothing maps to $\vec{b}$)}; |
| 135 | +\fill[orange, opacity=0.5] (-2.5,-0.4) rectangle (1,1.4); |
| 136 | + \draw [->,line width=0.5pt,-stealth] (bottomleft.east) to (cleft1.west); |
| 137 | + \node[] at (0, 1) (topright) {$\vec{b}$}; |
| 138 | + \node[] at (-2, 0) (bottomleft) {$\vec{v}$}; |
| 139 | + \node[] at (-2, 1) (topleft) {?}; |
| 140 | + \node[] at (0, 0) (cleft1) {$T(\vec{v})$}; |
| 141 | +% \node[gray] at (3, 1) (comment) {($\vec{b}$ has nothing to map ``back" to)}; |
| 142 | + |
| 143 | + \draw [->,line width=0.5pt,-stealth] (cleft1.west) to (bottomleft.east); |
| 144 | + \draw [->,line width=0.5pt,-stealth] (topright.west) to (topleft.east); |
| 145 | +% \node[] at (0, -1) (caption) {Figure 2. What happens when nothing maps to $\vec{b}$.}; |
| 146 | + \end{tikzpicture} |
| 147 | + \end{center} |
| 148 | + |
| 149 | +The above figure makes a convincing case that for a transformation to be invertible, every element of the codomain must have something mapping to it. Transformations such that every element of the codomain is an image of some element of the domain are called \dfn{onto}. |
| 150 | + |
| 151 | +\begin{definition}[Onto]\label{def:onto} A linear transformation $T:V\rightarrow W$ is \dfn{onto} if for every element $\vec{w}$ of $W$, there exists an element $\vec{v}$ of $V$ such that $T(\vec{v})=\vec{w}$. |
| 152 | +\end{definition} |
| 153 | + |
| 154 | +\begin{theorem}\label{th:isomeansinvert} Let $V$ and $W$ be vector spaces, and let $T:V\rightarrow W$ be a linear transformation. Then $T$ has an inverse if and only if $T$ is one-to-one and onto. |
| 155 | +\end{theorem} |
| 156 | +\begin{proof} |
| 157 | +We will first assume that $T$ is one-to-one and onto, and show that there exists a transformation $S:W\rightarrow V$ such that $S\circ T=\id_V$ and $T\circ S=\id_W$. Because $T$ is onto, for every $\vec{w}$ in $W$, there exists $\vec{v}$ in $V$ such that $T(\vec{v})=\vec{w}$. Moreover, because $T$ is one-to-one, vector $\vec{v}$ is the only vector that maps to $\vec{w}$. To stress this, we will say that for every $\vec{w}$, there exists $\vec{v}_{\vec{w}}$ such that $T(\vec{v}_{\vec{w}})=\vec{w}$. (Since every $\vec{v}$ maps to exactly one $\vec{w}$, this notation makes sense for elements of $V$ as well.) We can now define $S:W\rightarrow V$ by $S(\vec{w})=\vec{v}_{\vec{w}}$. |
| 158 | +Then |
| 159 | +$$(S\circ T)(\vec{v}_{\vec{w}})=S(T(\vec{v}_{\vec{w}}))=S(\vec{w})=\vec{v}_{\vec{w}}$$ |
| 160 | +$$(T\circ S)(\vec{w})=T(S(\vec{w}))=T(\vec{v}_{\vec{w}})=\vec{w}$$ |
| 161 | +We conclude that $S\circ T=\id_V$ and $T\circ S=\id_W$. Therefore $S$ is an inverse of $T$. |
| 162 | + |
| 163 | +We will now assume that $T$ has an inverse $S$ and show that $T$ must be one-to-one and onto. |
| 164 | +Suppose $$T(\vec{v}_1)=T(\vec{v}_2)$$ then $$S(T(\vec{v}_1))=S(T(\vec{v}_2))$$ |
| 165 | +but then, since $S\circ T=\id_V$, |
| 166 | +$$\vec{v}_1=\vec{v}_2$$ |
| 167 | +We conclude that $T$ is one-to-one. |
| 168 | + |
| 169 | +Now suppose that $\vec{w}$ is in $W$. We need to show that some element of $V$ maps to $\vec{w}$. Let $\vec{v}=S(\vec{w})$. Then |
| 170 | +$$T(\vec{v})=T(S(\vec{w}))=(T\circ S)(\vec{w})=\id_W(\vec{w})=\vec{w}$$ |
| 171 | +We conclude that $T$ is onto. |
| 172 | +\end{proof} |
| 173 | + |
| 174 | +\begin{theorem}\label{ex:coordmapiso} |
| 175 | +Let $V$ be an $n$-dimensional vector space, and let $\mathcal{B}$ be an ordered basis for $V$. Then the linear transformation $T:V\rightarrow \RR^n$ given by $T(\vec{v})=[\vec{v}]_{\mathcal{B}}$ is invertible. |
| 176 | +\end{theorem} |
| 177 | +\begin{proof} |
| 178 | +We leave the proof of this result to the reader. |
| 179 | +\end{proof} |
| 180 | +%%%%%%%%%%%%%%%%%%%%%%% |
| 181 | + |
| 182 | + |
| 183 | + |
| 184 | +\begin{example}\label{example:isomorph} |
| 185 | +Recall that the set of all polynomials of degree $2$ or less, together with polynomial addition and scalar multiplication, is a vector space, denoted by $\mathbb{P}^2$. Let $\mathcal{B}=\{1, x, x^2\}$. You should do a quick mental check that $\mathcal{B}$ is a basis of $\mathbb{P}^2$. |
| 186 | + |
| 187 | +Define a transformation $T:\mathbb{P}^2\rightarrow \RR^3$ by |
| 188 | +$T(a+bx+cx^2)=\begin{bmatrix}a\\b\\c\end{bmatrix}$. In other words, $T$ maps each element of $\mathbb{P}^2$ to its coordinate vector with respect to the ordered basis $\mathcal{B}$. |
| 189 | + |
| 190 | +The diagram below illustrates the actions of $T$ and $T^{-1}$ on several elements. |
| 191 | + |
| 192 | + \begin{center} |
| 193 | + \begin{tikzpicture} |
| 194 | + \fill[blue, opacity=0.3] (0,0) rectangle (5,5); |
| 195 | + \fill[orange, opacity=0.5] (6,0) rectangle (11,5); |
| 196 | + |
| 197 | + \node[] at (0.5, 4.5) (p2) {$\mathbb{P}^2$}; |
| 198 | + \node[] at (10.5, 4.5) (r3) {$\RR^3$}; |
| 199 | + |
| 200 | + \node[] at (2.5, 4.2) (p_1) {$1+2x-3x^2$}; |
| 201 | + \node[] at (1.2, 2.8) (p_2) {$-4$}; |
| 202 | + \node[] at (3, 1.8) (p_3) {$x+5$}; |
| 203 | + \node[] at (2, 0.6) (p_4) {$a+bx+cx^2$}; |
| 204 | + |
| 205 | + \node[] at (8.5, 4.2) (v_1) {$\begin{bmatrix}1\\2\\-3\end{bmatrix}$}; |
| 206 | + \node[] at (7.2, 3.2) (v_2) {$\begin{bmatrix}-4\\0\\0\end{bmatrix}$}; |
| 207 | + \node[] at (9, 1.9) (v_3) {$\begin{bmatrix}5\\1\\0\end{bmatrix}$}; |
| 208 | + \node[] at (7.5, 0.8) (v_4) {$\begin{bmatrix}a\\b\\c\end{bmatrix}$}; |
| 209 | + |
| 210 | + \draw [->,line width=0.5pt,-stealth] (3.6, 4.2)to[out=10, in=170](7.9,4.2); |
| 211 | + \draw [->,line width=0.5pt,-stealth] (p_2.east)to[out=10, in=170](v_2.west); |
| 212 | + \draw [->,line width=0.5pt,-stealth] (p_3.east)to[out=10, in=170](v_3.west); |
| 213 | + \draw [->,line width=0.5pt,-stealth] (p_4.east)to[out=10, in=170](v_4.west); |
| 214 | + |
| 215 | + \draw [->,line width=0.5pt,-stealth] (7.9,4.1)to[out=190, in=350](3.6, 4.1); |
| 216 | + \draw [->,line width=0.5pt,-stealth] (6.5,3.1)to[out=190, in=350](1.6,2.7 ); |
| 217 | + \draw [->,line width=0.5pt,-stealth] (8.5,1.8)to[out=190, in=350](3.6,1.7 ); |
| 218 | + \draw [->,line width=0.5pt,-stealth] (7,0.7)to[out=190, in=350](3,0.5 ); |
| 219 | + |
| 220 | + \draw [->,line width=0.5pt,-stealth] (8.5,-0.1)to[out=200, in=340](2.5,-0.1 ); |
| 221 | + \draw [->,line width=0.5pt,-stealth] (2.5,5.1)to[out=20, in=160](8.5, 5.1); |
| 222 | + |
| 223 | + \node[] at (5.5, 6) {$T$}; |
| 224 | + \node[] at (5.5, -1) {$T^{-1}$}; |
| 225 | + \end{tikzpicture} |
| 226 | +\end{center} |
| 227 | + |
| 228 | +\end{example} |
| 229 | + |
| 230 | +\subsection*{Coordinate Isomorphisms} |
| 231 | + |
| 232 | +Invertible linear transformations, such as the coordinate mapping, are useful because they preserve the structure of interactions between elements as we move back and forth between two vector spaces, allowing us to answer questions about one vector space in a different vector space. In particular, any question related to linear combinations can be addressed in this fashion. This includes questions concerning linear independence, span, basis and dimension. Specifically, for coordinate mappings, it is easy to see that the following property holds. |
| 233 | + |
| 234 | +\begin{theorem}\label{th:coordmappingisoprop} |
| 235 | + Let $V$ be an $n$-dimensional vector space, and let $T:V\rightarrow \RR^n$ be the coordinate mapping with respect to some ordered basis $\mathcal{B}$ of $V$. Then |
| 236 | + the set of vectors $\{\vec{x}_1,\vec{x}_2,\dots ,\vec{x}_k\}$ of $V$ is linearly independent if and only if the set $\{T(\vec{x}_1),T(\vec{x}_2),\dots ,T(\vec{x}_k)\}$ is linearly independent in $\RR^n$. |
| 237 | +\end{theorem} |
| 238 | + |
| 239 | +\begin{definition}\label{def:isomorphism} Let $V$ and $W$ be vector spaces. If there exists an invertible linear transformation $T:V\rightarrow W$ we say that $V$ and $W$ are \dfn{isomorphic} and write $V\cong W$. The invertible linear transformation $T$ is called an \dfn{isomorphism}. |
| 240 | +\end{definition} |
| 241 | + |
| 242 | +It is worth pointing out that if $T:V\rightarrow W$ is an isomorphism, then $T^{-1}:W\rightarrow V$, being linear and invertible, is also an isomorphism. |
| 243 | + |
| 244 | +Because coordinate mappings are isomorphisms, we have the following fundamental result. |
| 245 | + |
| 246 | +\begin{theorem}\label{th:ndimisotorn} |
| 247 | +Every $n$-dimensional vector space is isomorphic to $\RR^n$. |
| 248 | +\end{theorem} |
| 249 | + |
| 250 | + |
| 251 | + |
| 252 | +\section*{Practice Problems} |
| 253 | + |
| 254 | +\begin{problem}\label{prob:symmMatLinTrans} |
| 255 | +Recall that the set $V$ of all symmetric $2\times 2$ matrices is a subspace of $\mathbb{M}_{2,2}$. In Example \ref{ex:symmetricmatsubspace} of \href{https://ximera.osu.edu/linearalgebradzv3/LinearAlgebraInteractiveIntro/VSP-0060/main}{Bases and Dimension of Abstract Vector Spaces} we demonstrated that $\mathcal{B} = \left\{ |
| 256 | +\begin{bmatrix} |
| 257 | +1 & 0 \\ |
| 258 | +0 & 0 |
| 259 | +\end{bmatrix}, \begin{bmatrix} |
| 260 | +0 & 0 \\ |
| 261 | +0 & 1 |
| 262 | +\end{bmatrix}, \begin{bmatrix} |
| 263 | +0 & 1 \\ |
| 264 | +1 & 0 |
| 265 | +\end{bmatrix} |
| 266 | +\right\}$ is a basis for $V$. Define $T:V\rightarrow \RR^3$ by $T(A)=[A]_{\mathcal{B}}$. Find $T(I_2)$ and $T\left(\begin{bmatrix}2&-3\\-3&1\end{bmatrix}\right)$. |
| 267 | + |
| 268 | +Answer: |
| 269 | +$$T(I_2)=\begin{bmatrix}\answer{1}\\\answer{1}\\\answer{0}\end{bmatrix}$$ |
| 270 | +$$T\left(\begin{bmatrix}2&-3\\-3&1\end{bmatrix}\right)=\begin{bmatrix}\answer{2}\\\answer{1}\\\answer{-3}\end{bmatrix}$$ |
| 271 | +\end{problem} |
| 272 | + |
| 273 | +\begin{problem}\label{prob:coordvector} |
| 274 | +Let $V$ be a subspace of $\RR^3$ with a basis $\mathcal{B}=\left\{\begin{bmatrix}2\\1\\-1\end{bmatrix}, \begin{bmatrix}0\\3\\2\end{bmatrix}\right\}$. Find the coordinate vector, $[\vec{v}]_{\mathcal{B}}$, for $\vec{v}=\begin{bmatrix}4\\-1\\-4\end{bmatrix}$. |
| 275 | +$$[\vec{v}]_{\mathcal{B}}=\begin{bmatrix}\answer{2}\\\answer{-1}\end{bmatrix}$$ |
| 276 | + |
| 277 | +If the order of the basis elements in $\mathcal{B}$ were switched to form a new basis |
| 278 | +$$\mathcal{B}'=\left\{\begin{bmatrix}0\\3\\2\end{bmatrix}, \begin{bmatrix}2\\1\\-1\end{bmatrix} \right\},$$ |
| 279 | +how would this affect the coordinate vector for $\vec{v}$? |
| 280 | + |
| 281 | +$$[\vec{v}]_{\mathcal{B}'}=\begin{bmatrix}\answer{-1}\\\answer{2}\end{bmatrix}$$ |
| 282 | +\end{problem} |
| 283 | + |
| 284 | + |
| 285 | + |
| 286 | +\begin{problem}\label{prob:polylintranscoordvect} Verify that |
| 287 | +$\mathcal{B}=\{x^{2}, x + 1, 1 - x - x^{2}\}$ is a basis for $\mathbb{P}^2$. Define $T:\mathbb{P}^2\rightarrow \RR^3$ by $T(p(x))=[p(x)]_{\mathcal{B}}$. Find $T(0)$, $T(x+1)$ and $T(x^2-3x+1)$. |
| 288 | + |
| 289 | +Answer: |
| 290 | +$$T(0)=\begin{bmatrix}\answer{0}\\\answer{0}\\\answer{0}\end{bmatrix}$$ |
| 291 | +$$T(x+1)=\begin{bmatrix}\answer{0}\\\answer{1}\\\answer{0}\end{bmatrix}$$ |
| 292 | +$$T(x^2-3x+1)=\begin{bmatrix}\answer{3}\\\answer{-1}\\\answer{2}\end{bmatrix}$$ |
| 293 | +\end{problem} |
| 294 | + |
| 295 | +\begin{problem}\label{prob:lintransandbasis4} Let $V$ and $W$ be vector spaces, and let $\mathcal{B}_V=\{\vec{v}_1, \vec{v}_2, \vec{v}_3, \vec{v}_4\}$ and $\mathcal{B}_W=\{\vec{w}_1,\vec{w}_2, \vec{w}_3\}$ be ordered bases of $V$ and $W$, respectively. Suppose $T:V\rightarrow W$ is a linear transformation such that: $$T(\vec{v}_1)=\vec{w}_2$$ $$T(\vec{v}_2)=2\vec{w}_1-3\vec{w}_2$$ |
| 296 | +$$T(\vec{v}_3)=\vec{w}_2+\vec{w}_3$$ |
| 297 | +$$T(\vec{v}_4)=-\vec{w}_1$$ |
| 298 | +If $\vec{v}=-2\vec{v}_1+3\vec{v}_2-\vec{v}_4$, express $T(\vec{v})$ as a linear combination of vectors of $\mathcal{B}_W$. |
| 299 | +$$T(\vec{v})=\answer{7}\vec{w}_1-\answer{11}\vec{w}_2+\answer{0}\vec{w}_3$$ |
| 300 | +Find $[\vec{v}]_{\mathcal{B}_V}$ and $[T(\vec{v})]_{\mathcal{B}_{W}}$ |
| 301 | +$$[\vec{v}]_{\mathcal{B}_V}=\begin{bmatrix}\answer{-2}\\\answer{3}\\\answer{0}\\\answer{-1}\end{bmatrix},\quad [T(\vec{v})]_{\mathcal{B}_{W}}=\begin{bmatrix}\answer{7}\\\answer{-11}\\\answer{0}\end{bmatrix}$$ |
| 302 | +\end{problem} |
| 303 | + |
| 304 | +\begin{problem}\label{prob:useisoshowlinind} |
| 305 | +Let |
| 306 | +$$\mathcal{S}=\left\{\begin{bmatrix}1&-3\\-2&2\end{bmatrix}, \begin{bmatrix}4&-2\\1&5\end{bmatrix}, \begin{bmatrix}5&5\\8&4\end{bmatrix}\right\}$$ |
| 307 | +Is $\mathcal{S}$ linearly independent in $\mathbb{M}_{2,2}$? \wordChoice{\choice[correct]{Yes}\choice{No}} |
| 308 | +\end{problem} |
| 309 | + |
| 310 | +\begin{problem}\label{prob:basism22iso} |
| 311 | +Let |
| 312 | +$$\mathcal{S}=\left\{\begin{bmatrix}1&2\\3&4\end{bmatrix}, \begin{bmatrix}5&6\\7&8\end{bmatrix}, \begin{bmatrix}9&10\\11&12\end{bmatrix}, \begin{bmatrix}13&14\\15&16\end{bmatrix}\right\}$$ |
| 313 | +Is $\mathcal{S}$ a basis for $\mathbb{M}_{2,2}$? \wordChoice{\choice{Yes}\choice[correct]{No}} |
| 314 | +\end{problem} |
| 315 | + |
| 316 | +\end{document} |