\documentclass{article}
\usepackage{amsmath}
\usepackage{amsfonts}
\newcommand{\R}{{\mathbb R}}
\newcommand{\points}[1]{\phantom{.}\hfill\textbf{(#1~points)}}
\begin{document}
\begin{center}
\begin{huge}
MATH 609-602: Numerical Methods
\end{huge}
\end{center}
\begin{tabular}{ll}
Lecturer: & Prof. Wolfgang Bangerth \\
& Blocker Bldg., Room 507D \\
& (979) 845 6393 \\
& \texttt{bangerth@math.tamu.edu}\\[5pt]
Teaching Assistant: & Seungil Kim \\
& Blocker Bldg., Room 507A \\
& (979) 862 3259 \\
& \texttt{sgkim@math.tamu.edu}
\end{tabular}
\section*{Homework assignment 9 -- due Tuesday 11/8/2005}
\paragraph{Problem 1 (Finite difference approximation of the derivative).}
Take the function defined by
\begin{align*}
f(x) =
\begin{cases}
\frac 12 x^3 + x^2 & \text{for $x<0$,} \\
x^3 & \text{for $x\ge 0$.}
\end{cases}
\end{align*}
Compute a finite difference approximation to $f'(x_0)$ at $x_0=1$
with both the one-sided and the symmetric two-sided formulas. Use step sizes
$h=1,\frac 12, \frac 14, \ldots, \frac 1{64}$. Determine
experimentally the convergence orders you observe as $h\rightarrow 0$.
Repeat these computations for $x_0=0$. What convergence orders do you observe?
Why?
\points{4}
\paragraph{Problem 2 (Derivatives of an implicit function).}
Let $f(x)$ be defined implicitly as follows: for every $x>0$, $f(x)$ is
that value $y$ for which
\begin{align}
\label{eq:1}
ye^y=x.
\end{align}
In other words, every time one wants to
evaluate $f(x)$ for a particular value $x$, one has to solve
equation~\eqref{eq:1} for $y$. This can be done using Newton's method, for example, or
any of the other root-finding algorithms discussed in class, applied to the
function $g(y)=ye^y-x$.
Plot $f(x)$ for $0\le x\le 10$. Compute an accurate approximation to $f'(2)$.
\points{4}
\paragraph{Problem 3 (Integration of an implicit function).} Let $f(x)$ be
defined as in Problem 2. Compute
\begin{align*}
\int_0^{10} f(x) \, dx
\end{align*}
using both the box rule as well as the trapezoidal rule for step sizes
$h=1,\frac 12, \frac 14, \frac 18, \ldots, \frac 1{32}$. Determine the order
of convergence for both methods.
\points{4}
\paragraph{Problem 4 (A proof).}
In last week's homework, you were asked to find the $l_\infty$ best
approximating linear function to 10 data points. Let's simplify the situation
a little bit: assume we had only wanted to find a \textit{constant} best
approximation, i.e.~a function $p_0(x)=c_0$, to all these data points. Then,
this involves finding the coefficient $c_0^\ast$ for which the function
\begin{align*}
e(c_0) = \max_{1\le i\le N} |c_0-y_i|
\end{align*}
takes on its minimum. If one wants to phrase this differently, one could also
say that we are looking for the optimal coefficient $c_0^\ast$ for which
\begin{align*}
e(c_0^\ast) = \min_{c_0} \max_{1\le i\le N} |c_0-y_i|.
\end{align*}
One could wonder if there is indeed only a single such value $c_0$, or if it
may be possible to have a number of \textit{different} values for $c_0$ for
which the corresponding functions $p_0(x)=c_0$ are all best approximations to
the data points.
Prove that the function $e(c_0)$ defined above has only a single minimum and
that consequently there is exactly one, well-defined best $l_\infty$
approximation among the constant functions. (Hint: Try your hand first on the
case where there are only two data points, i.e.\
$e(c_0)=\max\{|c_0-y_1|,|c_0-y_2|\}$, and then generalize to $N$ data points.)
Comment on what happens if you were looking for linear approximations
$p_1(x)=c_0+c_1x$ with corresponding error function
\begin{align*}
e(c_0,c_1) = \max_{1\le i\le N} |c_0+c_1x_i-y_i|.
\end{align*}
Does this two-dimensional function have a single, unique minimum as well?
\points{4}
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End: