\documentclass{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\newcommand{\R}{{\mathbb R}}
\newcommand{\points}[1]{\phantom{.}\hfill \textbf{(#1~points)}}
\begin{document}
\begin{center}
\begin{huge}
MATH 609-602: Numerical Methods
\end{huge}
\end{center}
\begin{tabular}{ll}
Lecturer: & Prof. Wolfgang Bangerth \\
& Blocker Bldg., Room 507D \\
& (979) 845 6393 \\
& \texttt{bangerth@math.tamu.edu}\\[5pt]
Teaching Assistant: & Seungil Kim \\
& Blocker Bldg., Room 507A \\
& (979) 862 3259 \\
& \texttt{sgkim@math.tamu.edu}
\end{tabular}
\section*{Homework assignment 8 -- due Tuesday 11/1/2005}
\paragraph{Problem 1 (Best polynomial approximation).}
Compute, analytically (i.e. with exact values, not numerical floating point
values), the best polynomial approximation of degree 4 on the
interval $[-1,1]$ to the following functions:
\begin{itemize}
\item[a)] $f(x)=\frac{1}{x}$;
\item[b)] $f(x)=e^x$.
\end{itemize}
Plot your best approximation $p_4(x)$ together with $f(x)$.
\points{5}
\paragraph{Problem 2 (Gram-Schmidt orthogonalization).}
Define the following scalar product between matrices $A,B\in \R^{2\times 2}$:
\begin{align*}
\langle A,B\rangle = \sum_{i=1}^2\sum_{j=1}^2 A_{ij} B_{ij},
\end{align*}
and corresponding norm
\begin{align*}
\|A\|^2 = \langle A,A\rangle = \sum_{i=1}^2\sum_{j=1}^2 A_{ij}^2.
\end{align*}
Starting with matrices
\begin{align*}
A_1 =
\begin{pmatrix}
1 & 1 \\ 1 & 1
\end{pmatrix},
\qquad
A_2 =
\begin{pmatrix}
1 & 1 \\ 1 & 0
\end{pmatrix},
\qquad
A_3 =
\begin{pmatrix}
1 & 1 \\ 0 & 1
\end{pmatrix},
\qquad
A_4 =
\begin{pmatrix}
1 & 0 \\ 1 & 1
\end{pmatrix},
\end{align*}
compute four matrices $B_i,1\le i \le 4$ that are orthonormal to each
other, i.e.\ for which $\langle B_i,B_j\rangle=\delta_{ij}$ holds. Write the
identity matrix $I = \begin{pmatrix}
1 & 0 \\ 0 & 1
\end{pmatrix}$ as $I=\sum_{i=1}^4 \beta_i B_i$ and give the coefficients
$\beta_i$.
\points{4}
\paragraph{Problem 3 (Least-squares approximation and other norms).} In class,
we defined the least-square approximating polynomial $p_n(x)$ as that
polynomial that minimized the error
\begin{align*}
e_2 = \sum_{i=1}^N |p_n(x_i)-y_i|^2,
\end{align*}
where we used the $l_2$ norm of the difference between $p_n(x_i)$ and $y_i$
(i.e. we squared the difference, and summed over it). It was shown that this
then leads to a linear problem for finding the expansion coefficients. On the
other hand, if we had chosen any other exponent, the problem would have been
nonlinear.
Take the same points from last week again:
\begin{center}
\begin{tabular}{c||c|c|c|c|c|c|c|c|c|c}
$x_i$ & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\ \hline
$y_i$ & 1.51 & 2.01 & 2.49 & 2.98 & 3.51 & 4.01 & 4.49 & 5.02 & 5.52 & 5.98
\end{tabular}
\end{center}
Find the polynomials $p^q_1(x)=c_0+c_1x$ that minimize the $l_q$-norms
\begin{align*}
e_q = \sum_{i=1}^N |p_1^q(x_i)-y_i|^q,
\end{align*}
for $q=1, q=2, q=4$. (For $q=2$, this is the solution of Problem 4 of last
week's homework.) In addition, compute $p_1^\infty(x)=c_0+c_1x$ that minimizes
the infinity norm
\begin{align*}
e_\infty = \max_{1\le i \le N} |p_1^\infty(x_i)-y_i|.
\end{align*}
Plot the $p_1^q(x)$ together in one plot in which you also show
the 10 data points.
Repeat these computations for the following data set (the third to last data
point has been changed: some large measurement error has occurred, or someone
made a mistake transferring the device reading to the data sheet; or maybe this
was what the experiment really gave):
\begin{center}
\begin{tabular}{c||c|c|c|c|c|c|c|c|c|c}
$x_i$ & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\ \hline
$y_i$ & 1.51 & 2.01 & 2.49 & 2.98 & 3.51 & 4.01 & 4.49 & 5.82 & 5.52 & 5.98
\end{tabular}
\end{center}
Comment on the suitability of the solutions you've found for approximating the
two data sets.
\textbf{Note:} To compute each of these polynomials, you have to find the
coefficients $c_0,c_1$ that minimize the respective error $e_q$ that can be
expressed as a function of $c_i$ by substituting $p_1^q(x)=c_0+c_1x$. In
general, you will not be able to find these coefficients exactly except for
the case $q=2$. In particular, for $q=1,\infty$ you can't even find them by
looking for points at which $\frac{\partial e_q}{\partial c_0}=\frac{\partial
e_q}{\partial c_1}=0$, since $e_q$ is not differentiable. In this case, feel
free to get approximate values of the coefficients by plotting $e_q$ as a
function of $c_i$ and visually determining values for which it is minimal.
For $q=4$, one ends up with an error function $e_q$ that is quartic in $c_i$,
i.e. nonlinear but differentiable. Determine its minimum either visually, or
by letting your favorite math program find it using a minimum/root finder such
as Newton's method.
\points{7}
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End: