\documentclass{amsart} \begin{document} {\noindent\small {\em Electronic Journal of Differential Equations}, Vol.\ 1997(1997), No.~24, pp. 1--20.\newline ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu \newline ftp ejde.math.swt.edu (login: ftp) 147.26.103.110 or 129.120.3.113} \thanks{\copyright 1997 Southwest Texas State University and University of North Texas.} \vspace{1.5cm} \title[\hfilneg EJDE--1997/24\hfil Initial value problems for nonresonant delay equations]{Initial value problems for nonlinear nonresonant delay differential equations with possibly infinite delay} \author{Lance D. Drager} \address{Lance D. Drager \hfil\break Department of Mathematics and Statistics\\ Texas Tech University \hfil\break Lubbock, TX 79409-1042 USA} \email{drager@math.ttu.edu} \author{William Layton} \address{William Layton \hfil\break Department of Mathematics\\ University of Pittsburgh \hfil\break Pittsburgh, PA 15260 USA} \email{wjl+@pitt.edu} \date{} \thanks{Submitted August 14, 1997. Published December 19, 1997.} \thanks{The second author was partially supported by NSF Grant DMS--9400057} \subjclass{Primary 34K05, 34K20, 34K25} \keywords{Delay differential equation, infinite delay, initial value problem, \hfil\break\indent nonresonance, asymptotic stability, exponential asymptotic stability} \begin{abstract} We study initial value problems for scalar, nonlinear, delay differential equations with distributed, possibly infinite, delays. We consider the initial value problem $$\begin{cases} x(t) = \varphi(t), & t \leq 0\\[2\jot] {\displaystyle x'(t) + \int_{0}^{\infty} g(t, s, x(t), x(t-s))\, d \mu(s) = f(t),} & t\geq 0, \end{cases} $$ where $\varphi$ and $f$ are bounded and $\mu$ is a finite Borel measure. Motivated by the nonresonance condition for the linear case and previous work of the authors, we introduce conditions on $g$. Under these conditions, we prove an existence and uniqueness theorem. 
We show that under the same conditions, the solutions are globally asymptotically stable and, if $\mu$ satisfies an exponential decay condition, globally exponentially asymptotically stable. \end{abstract} \maketitle \newtheorem{thm}{Theorem}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{lemma}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \numberwithin{equation}{section} % Notation \newcommand{\gnr}{\textsf{GNR}} \newcommand{\reals}{{\mathbb{R}}} \newcommand{\im}{\operatorname{Im}} \newcommand{\putinvert}[1]{{\lvert #1 \rvert}} \let\abs=\putinvert \newcommand{\putinVert}[1]{{\lVert #1\rVert}} \let \Norm=\putinVert \newcommand{\lset}{\left\{\,} \newcommand{\rset}{\,\right\}} \newcommand{\bcspace}[1]{{BC}^{#1}} \let\bc=\bcspace \newcommand{\bcz}{\bc{0}} \renewcommand{\subset}{\subseteq} \renewcommand{\supset}{\supseteq} \newcommand{\inv}{{\mathstrut -1}} \newcommand{\cspace}[1]{{C^{#1}}} \newcommand{\normi}[1]{\norm{#1}_{1}} \newcommand{\normbox}[1]{\norm{#1}_{\infty}} \newcommand{\nop}[1]{{N_{#1}}} \newcommand{\bee}[1]{{B(#1)}} \newcommand{\scrG}{{\mathcal{G}}} \newcommand{\gspace}{\scrG} \newcommand{\gclass}[1]{\gspace_{#1}} \newcommand{\gcond}[1]{\textsf{G#1}} \newcommand{\bcx}{\bc{}} \newcommand{\bcv}{\bcx_{0}[0,\infty)} \newcommand{\varphibar}{\bar{\varphi}} \newcommand{\zspace}[1]{Z(#1)} \newcommand{\zspacep}[1]{Z_{+}(#1)} \section{Introduction} %%%%%%%%% section 1 In this paper we will study the initial value problem for scalar, nonlinear, delay differential equations with possibly infinite delay. We will consider problems of the form \begin{equation} \label{e:ivp} \begin{cases} x(t) = \varphi(t), & t \leq 0\\[2\jot] {\displaystyle x'(t) + \int_{0}^{\infty} g(t, s, x(t), x(t-s))\, d \mu(s) = f(t),} & t\geq 0. 
\end{cases} \end{equation} We assume that $\varphi$ is bounded and continuous on $(-\infty,0]$, $f$ is bounded and continuous on $[0,\infty)$, and that $\mu$ is a positive, finite Borel measure on $[0,\infty)$. As usual, $x'(0)$ in \eqref{e:ivp} is to be interpreted as a right-hand derivative. In all the cases we consider, $g$ will be continuous. In this paper, we will give conditions on $g$ that will ensure that the initial value problem \eqref{e:ivp} has a unique maximally defined solution, which is defined on the entire real line $\reals$. We will show that the same conditions on $g$ ensure that the solutions of \eqref{e:ivp} are asymptotically stable, i.e., if $x_{1}$ and $x_{2}$ are solutions of \eqref{e:ivp} for different initial conditions $\varphi_{1}$ and $\varphi_{2}$ (but with the same forcing function $f$), then $x_{1}(t)-x_{2}(t)\to 0$ as $t\to \infty$. We will also show that if \eqref{e:ivp} has exponentially fading memory, i.e., $\mu$ decays exponentially, then every solution of \eqref{e:ivp} is exponentially asymptotically stable, i.e., if $x_{1}$ and $x_{2}$ are solutions of \eqref{e:ivp} for different initial conditions, then \begin{equation*} \abs{x_{1}(t)-x_{2}(t)} \leq C e^{-\lambda t}, \qquad t\geq 0, \end{equation*} for some constants $C$ and $\lambda >0$. In the rest of this introduction, we will describe the conditions that we will place on $g$. To motivate these conditions, it will help to recall some previous work of the authors in \cite{DLjde} (for related work of the authors see \cite{DLlib,DLpol,DLfa,DLnr, DLM,L}). In \cite{DLjde}, the authors studied delay differential equations of the form \begin{equation} \label{e:olddde} x'(t) + \int_{-\infty}^{\infty} g(x(t), x(t-s)) \, d \mu(s) = f(t), \qquad t\in \reals\,, \end{equation} under a generalized nonresonance condition Condition~\gnr. Under this condition, it was shown that \eqref{e:olddde} has a unique solution that is defined and bounded on all of $\reals$. 
We want to briefly recall the statement and motivation of Condition~\gnr. A very special case of \eqref{e:olddde} is the linear constant coefficient equation \begin{equation} \label{e:simple} x'(t) + a x(t) + b x(t-\tau) = 0, \end{equation} where $a, b \in\reals$. This equation can be analyzed by classical techniques, \cite{BC,El,ElN}. In particular, \eqref{e:simple} has a nontrivial bounded solution if and only if the characteristic equation $z+a + be^{-\tau z}=0$ has a root on the imaginary axis. Thus, it can be shown that the set $C_{\tau}$ of pairs $(a,b)$ in the $ab$-plane for which \eqref{e:simple} has a nontrivial bounded solution consists of the line $a+b=0$ and the multi-branch parameterized curve \begin{equation*} (a,b)=\frac{1}{\tau}(-\theta \cot(\theta), \theta \csc(\theta)). \end{equation*} It is known that if $(a,b)$ lies to the right of $C_{\tau}$ the zero solution of \eqref{e:simple} is globally asymptotically stable (i.e., all of the roots of the characteristic equation are in the left half plane), see \cite{El,ElN,H}. As $\tau$ varies, $C_{\tau}$ will sweep out the region $R'$ consisting of the two quadrants above and below the lines $a \pm b=0$, with $a+b=0$ included and $a-b=0$ excluded. See Figure~\ref{fig:ctau}, which shows a few branches of $C_{\tau}$ and shows the line $a-b=0$ as dotted. \input epsf \newcommand{\EPSFileScaled}[2][0pt]{% \epsfxsize=#1\relax\epsffile{#2}} \begin{figure} \centerline{\EPSFileScaled[4.5in]{lfid1.eps}} \caption{The set $C_{\tau}$\label{fig:ctau}} \end{figure} Equation \eqref{e:simple} is a special case of \eqref{e:olddde} with $g(x,y) = a x + b y$ ($\mu$ is the Dirac measure at $\tau$). In this case, $\lset (a,b)\rset$ is the image of the gradient of $g$, $\nabla{g}$. In the nonlinear case, the image of $\nabla{g}$ will be more than a single point. To get results for all delays, we want the image of $\nabla{g}$ to avoid $R'$. 
As the {\em first part of Condition~\gnr}, we required the somewhat stronger condition that the image of $\nabla{g}$ be disjoint from \begin{equation*} R = \lset (a,b)\in \reals^{2} \mid \abs{a} \leq \abs{b}\rset, \end{equation*} the closure of $R'$ (in this paper, the image will have to lie to the right of $R$). It is possible that $\im(\nabla{g})$, the image of $\nabla{g}$, comes arbitrarily close to $R$. We need some control on how fast $\im(\nabla{g})$ approaches $R$. This is measured as follows. For $\rho\geq 0$, define \begin{equation*} Q(\rho) = \lset (x,y)\in \reals^{2} \mid \abs{x}\leq \rho, \abs{y}\leq \rho \rset. \end{equation*} and let $G(\rho)=\nabla{g}(Q(\rho))$. Let $\alpha, \beta\colon \reals^{2}\to \reals$ be the linear functionals \begin{align*} \alpha(a,b) & = a-b\\ \beta(a,b) & = a + b. \end{align*} The boundary lines of $R$ are $\alpha=0$ and $\beta=0$. The region to the right of $R$ is described by $\alpha>0$ and $\beta>0$, while the region to the left of $R$ is described by $\alpha<0$ and $\beta<0$. For $\rho\geq 0$ we define \begin{equation} \label{e:astdef} \begin{aligned} \alpha_{\ast} (\rho) &= \inf \lset \alpha(a,b)\mid (a,b) \in G(\rho)\rset \\ \alpha^{\ast}(\rho) & = \sup\lset \alpha(a,b)\mid (a,b)\in G(\rho) \rset\\ \beta_{\ast}(\rho) & = \inf \lset \beta(a,b)\mid (a,b)\in G(\rho)\rset\\ \beta^{\ast}(\rho) &= \sup \lset \beta(a,b) \mid (a,b)\in G(\rho)\rset. \end{aligned} \end{equation} Consider the case where $\im(\nabla{g})$ lies to the right of $R$. In this case, we define \begin{equation} \label{e:rsdef} \begin{aligned} r(\rho) &= \min\lset \alpha_{\ast}(\rho), \beta_{\ast}(\rho)\rset\\ s(\rho) & = \max \lset \alpha^{\ast}(\rho), \beta^{\ast}(\rho)\rset. \end{aligned} \end{equation} See Figure~\ref{fig:abplane} for an illustration. Clearly $r$ is a positive non-increasing function. 
If $\im(\nabla{g})$ comes arbitrarily close to $R$, we will have $r(\rho)\to 0$ as $\rho\to \infty$, and the rate at which $r$ goes to zero is a measure of how fast $\im(\nabla{g})$ approaches $R$. As the {\em second part of Condition~\gnr}, we assume that \begin{equation*} \sup \lset \rho r(\rho) \mid \rho\geq 0\rset = \infty. \end{equation*} Similar definitions can be made in the case where $\im(\nabla{g})$ lies to the left of $R$, but these will not be needed in this paper. We do not need to impose any assumptions on $s(\rho)$, but it will figure in our proofs. We want to extend Condition~\gnr{} to allow $g$ to depend explicitly on $t$ and $s$, as in \eqref{e:ivp}. This is necessary for the techniques we will use in analyzing the initial value problem, as well as desirable for greater generality. For brevity, we will refer to the case where $g$ does not depend explicitly on $t$ and $s$ as the ``time independent case.'' Our extended condition also takes into account another consideration. Since the method of steps does not apply to \eqref{e:ivp}, it is not clear that we have unique continuation of solutions for \eqref{e:ivp}. In order to prove uniqueness, it will be necessary to consider solutions defined on intervals with a finite upper endpoint. These considerations lead us to the following definition of the class of functions $g$ we will consider. \begin{defn} For $0
0$ such that $\Norm{x},\Norm{y}\leq M$. Then, if $(\xi,\eta)
\in Q(\rho)$, the point $(\xi + x(t,s), \eta+y(t,s))$ is
in $Q(\rho+M)$ for all $(t,s)$. Thus, if we let $H(\rho)$ denote
the image of $[0,p)\times [0,\infty)\times Q(\rho)$ under $\nabla{h}$,
we have
\begin{equation}
\label{e:hsubg}
H(\rho) \subset G(\rho+M).
\end{equation}
Thus, $H(\rho)$ is a bounded set whose closure lies to the right of
$R$, so \gcond{4} is satisfied.
If we let $r_{h}$ and $r_{g}$ denote the $r$-functions for $h$ and
$g$ respectively, \eqref{e:hsubg} shows that $r_{g}(\rho+M)\leq
r_{h}(\rho)$. Thus, to show that $\sup \rho r_{h}(\rho)=\infty$,
it will suffice to show that $\sup \rho r_{g}(\rho + M)=\infty$.
To prove this, let $A>0$ be arbitrary. Since $\sup \rho
r_{g}(\rho)=\infty$, we can find some $\sigma$ such that
$\sigma r_{g}(\sigma) \geq A + M r_{g}(0)$. We must have
$\sigma > M$, for otherwise $\sigma r_{g}(\sigma) \leq M r_{g}(0)$,
since $r_{g}$ is non-increasing. Thus, we can write $\sigma = \rho +
M$ for $\rho>0$. We have
\begin{equation*}
(\rho + M) r_{g}(\rho + M) \geq A + M r_{g}(0)
\end{equation*}
which implies that
\begin{equation*}
\rho r_{g}(\rho+M) \geq A + M [ r_{g}(0)-r_{g}(\rho + M)].
\end{equation*}
The right-hand side is greater than or equal to $A$, since $r_{g}$
is non-increasing. Since $A$ was arbitrary, we conclude that
$\sup \rho r_{g}(\rho+M)=\infty$. Thus, $h$ satisfies \gcond{5}.
\end{proof}
We should also observe the following lemma, whose proof is
straightforward.
\begin{lemma}
\label{thm:restrict}
Suppose that $g\in \gclass{p}$ and that $00$, but
another choice will be useful in our stability analysis.
Suppose that $x$ is a solution of the initial value problem
\eqref{e:ivpp}. Define $y = x - \varphibar$. Then $y$ is bounded and
continuous and $y$ is continuously differentiable on $[0,p)$.
Of course, $y(t)=0$ for $t\in (-\infty,0]$. If we substitute
$x = y + \varphibar$ in the initial value problem, for $t\in [0,p)$ we obtain
\begin{equation}
\label{e:eured1}
y'(t) + \varphibar'(t) + \int_{0}^{\infty}
g(t,s, y(t)+\varphibar(t), y(t-s)+\varphibar(t-s))\, d\mu(s)
= f(t)\,.
\end{equation}
Let $m = \mu[0,\infty)$ denote the total mass of $\mu$, a
notation that will be used for the rest of the paper. Then, for
$t\in [0,p)$ we can rewrite \eqref{e:eured1} as
\[
y'(t) + \int_{0}^{\infty} [g(t,s, y(t)+\varphibar(t),
y(t-s)+\varphibar(t-s)) + \varphibar'(t)/m] \, d\mu(s)
= f(t)\,.
\]
Thus, if we define $h$ by
\begin{equation*}
h(t,s, \xi, \eta) = g(t,s, \xi+\varphibar(t), \eta + \varphibar(t-s))
+ \varphibar'(t)/m\,,
\end{equation*}
we see that $y$ is a solution of the initial value problem
\begin{equation}
\label{e:eunewip}
\begin{cases}
y(t) = 0, & t\in (-\infty,0]\\[2\jot]
{\displaystyle
y'(t) + \int_{0}^{\infty} h(t,s, y(t), y(t-s))\, d\mu(s) = f(t),
}
& t\in [0,p).
\end{cases}
\end{equation}
By the $\scrG$-lemma, $h$ is again in $\gclass{p}$.
Conversely, if $y$ is a bounded solution of \eqref{e:eunewip},
then $x=y+\varphibar$ is a bounded solution of \eqref{e:ivpp}.
Thus, to prove Proposition~\ref{thm:eu1}, it will suffice
to prove the following proposition.
\begin{prop}
\label{thm:euprop}
Suppose that $g\in \gclass{p}$ and that $\mu$ is a finite positive
Borel measure on $[0,\infty)$. Then, for every $f\in
\bcx[0,\infty)$, there is a unique function $x\in \bcx(-\infty,p)$
that satisfies the initial value problem
\begin{equation}
\label{e:ivpp2}
\begin{cases}
x(t) = 0, & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d \mu(s) = f(t),
}
& t\in [0,p).
\end{cases}
\end{equation}
\end{prop}
The proof of this proposition will occupy most of the rest of this
section. We begin by introducing the function spaces we will use.
Let $\bc{1}[0,p)$ denote the space of functions $f\in \bcx[0,p)$
such that $f$ is differentiable and $f'\in \bcx[0,p)$.
Let $X_{p}$ be the space of functions $x\in \bcx(-\infty,p)$ such that
$x=0$ on the negative half-axis $(-\infty, 0]$. This is a closed
subspace of $\bcx(-\infty,p)$ and hence a Banach space in the
supremum norm. Finally, let $X_{p}^{1}$ denote the space
of functions $x\in X_{p}$ such that the restriction of $x$
to $[0,p)$ is in $\bc{1}[0,p)$.
The simplest case of the initial value problem \eqref{e:ivpp2}
is the problem
\begin{equation*}
\begin{cases}
x(t) = 0, & t\in (-\infty,0]\\
x'(t) + a x(t) = f(t), & t\in [0, p),
\end{cases}
\end{equation*}
where $a>0$.
This is, of course, easy to solve by elementary means. The results
are summarized in the next lemma.
\begin{lemma}
For $a>0$, let $L_{a}\colon X_{p}^{1}\to \bcx[0,p)$ be the operator
defined by
\begin{equation*}
L_{a}x(t) = x'(t) + a x(t), \qquad t\in [0,p).
\end{equation*}
Then, $L_{a}$ is invertible, with the inverse given by
\begin{equation*}
L_{a}^{\inv} f(t) = \begin{cases}
0, & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
\int_{0}^{t} e^{a(s-t)} f(s)\, ds,
}
& t\in [0,p).
\end{cases}
\end{equation*}
From this formula we get the supremum norm estimate
\begin{equation}
\label{e:laest}
\Norm{L_{a}^{\inv} f} \leq \frac{1}{a} \Norm{f}.
\end{equation}
\end{lemma}
We will now introduce some operators that will be useful in the proof.
If $a>0$ and $x\in X_{p}$, define a function $N_{a}(x)$
on $[0,p)\times [0,\infty)$ by
\begin{equation*}
N_{a}(x)(t,s) = a x(t) - g(t,s, x(t),x(t-s)).
\end{equation*}
By Lemma~\ref{thm:pluginbounded}, this function is bounded,
so we have a nonlinear operator $N_{a}\colon X_{p}\to
\bcx([0,p)\times [0,\infty))$.
We will use the notation $\bee{\rho}$ for the closed ball of
radius $\rho$ centered at the origin in $X_{p}$. If $x\in
\bee{\rho}$, then $(x(t), x(t-s))\in Q(\rho)$ for all $t$ and $s$.
Thus, if $x,y\in \bee{\rho}$ and we apply Lemma~\ref{thm:basicest},
we have
\begin{eqnarray*}
\lefteqn{ \abs{[a x(t) -g(t,s, x(t), x(t-s))]- [ a y(t) - g(t,s, y(t),
y(t-s))]} }
\\
&\leq& K(a,\rho) \max\lset\abs{x(t)-y(t)}, \abs{x(t-s)-y(t-s)}\rset\\
&\leq& K(a,\rho) \Norm{x-y},
\end{eqnarray*}
since both terms in the maximum are bounded by $\Norm{x-y}$. Thus,
we have the estimate
\begin{equation}
\label{e:naest}
\Norm{N_{a}(x)-N_{a}(y)} \leq K(a,\rho) \Norm{x-y},
\qquad x,y\in \bee{\rho}.
\end{equation}
For $a>0$, we define a nonlinear operator $M_{a}\colon X_{p}\to \bcx[0,p)$ by
\begin{equation*}
M_{a}(x)(t) = a x(t) - \int_{0}^{\infty} g(t,s, x(t),x(t-s))\,
d\mu(s), \qquad t\in [0,p).
\end{equation*}
Recalling the notation $m=\mu[0,\infty)$, we can rewrite this as
\begin{align*}
M_{a}(x)(t) &= \int_{0}^{\infty} [(a/m) x(t) - g(t,s, x(t), x(t-s))] \,
d\mu(s) \\
&=\int_{0}^{\infty} N_{a/m}(x)(t,s) \, d\mu(s).
\end{align*}
Hence, by applying \eqref{e:naest} we obtain the estimate
\begin{equation}
\label{e:maest}
\Norm{M_{a}(x)-M_{a}(y)} \leq m K(a/m,\rho)\Norm{x-y},
\qquad x,y\in \bee{\rho}.
\end{equation}
We next show that \eqref{e:ivpp2} can be reduced to a fixed point
problem in $X_{p}$. Suppose that $x\in X_{p}$ is a solution of
\eqref{e:ivpp2}. Then, if $a>0$ is arbitrary, we have, for
$t\geq 0$,
\begin{equation*}
x'(t) + a x(t) = a x(t) - \int_{0}^{\infty} g(t,s, x(t), x(t-s))\,
d\mu(s) + f(t)\,.
\end{equation*}
We may rewrite this as
\begin{equation*}
L_{a} x = M_{a}(x) + f\,.
\end{equation*}
Since $L_{a}$ is invertible, this is equivalent to
\begin{equation*}
x = T_{a}(x)\,,
\end{equation*}
where $T_{a}\colon X_{p}\to X_{p}$ is defined by
\begin{equation*}
T_{a}(x) = L_{a}^{\inv} M_{a}(x) + L_{a}^{\inv}f.
\end{equation*}
Conversely, if $x\in X_{p}$ and $x=T_{a}(x)$, then $x\in
X_{p}^{1}\subset X_{p}$, and we may reverse the steps to conclude
that $x$ is a solution of \eqref{e:ivpp2}. Thus, we have the
following lemma.
\begin{lemma}
\label{thm:cae}
The following conditions are equivalent.
\begin{enumerate}
\item
$x\in X_{p}$ is a solution of \eqref{e:ivpp2}.
\item
$x\in X_{p}$ and $T_{a}(x)=x$ for \emph{all} $a>0$.
\item
$x\in X_{p}$ and $T_{a}(x)=x$ for \emph{some} $a>0$.
\end{enumerate}
\end{lemma}
To show that one of the operators $T_{a}$ has a fixed point, we
will use the Contraction Mapping Lemma.
From \eqref{e:maest} and \eqref{e:laest}, we get the estimate
\begin{equation}
\label{e:taest}
\Norm{T_{a}(x)-T_{a}(y)} \leq \frac{m}{a} K(a/m,\rho) \Norm{x-y},
\qquad x,y\in \bee{\rho}\,.
\end{equation}
We now make a specific choice of $a$. For $\rho\geq 0$, define
\begin{equation*}
a(\rho) = m\frac{r(\rho)+s(\rho)}{2}\,.
\end{equation*}
It is then easily calculated that
\begin{equation*}
K(a(\rho)/m, \rho) = \frac{s(\rho)-r(\rho)}{2}
\end{equation*}
and thus that
\begin{equation}
\label{e:ammk}
\frac{a(\rho)}{m} - K(a(\rho)/m,\rho) = r(\rho) > 0\,.
\end{equation}
In particular, $K(a(\rho)/m, \rho)0$.}
\end{equation}
For $f\in \bcx[0,\infty)$ and $\varphi\in \bcx(-\infty,0]$, consider
the initial value problem
\begin{equation}
\label{e:easivp}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s,x(t),x(t-s))\, d\mu(s) = f(t),
}
& t\in [0,\infty).
\end{cases}
\end{equation}
Then, if $x_{1}$ and $x_{2}$ are two solutions of \eqref{e:easivp}
for different initial conditions $\varphi=\varphi_{1}$ and $\varphi=\varphi_{2}$
respectively, then there are constants $C\geq 0$ and $\lambda >0$
such that
\begin{equation*}
\abs{x_{1}(t)-x_{2}(t)} \leq C e^{-\lambda t}, \qquad t\geq 0.
\end{equation*}
\end{thm}
To prove this theorem, we let $y=x_{2}-x_{1}$ and make the same
reduction we made in Section~\ref{s:as} in going from Theorem~\ref{thm:as} to
Proposition~\ref{thm:asp1}. Thus, to prove Theorem~\ref{thm:eas}, it will
suffice to prove the following proposition.
\begin{prop}
\label{thm:easp2}
Suppose that $g\in\gspace$ and that $\mu$ is a finite Borel measure
on $[0,\infty)$ which satisfies the condition \eqref{e:mucond}.
Suppose, also, that $g(\cdot,\cdot,0,0)=0$.
If $\varphi\in \bcx(-\infty,0]$ and $x$ is the solution of the
initial value problem
\begin{equation}
\label{e:easivp1}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty,0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t),x(t-s))\, d\mu(s) = 0,
}
& t\in [0,\infty),
\end{cases}
\end{equation}
then there are constants $C\geq 0$ and $\lambda >0$ such that
\begin{equation*}
\abs{x(t)} \leq C e^{-\lambda t}, \qquad t\geq 0.
\end{equation*}
\end{prop}
To prove this proposition, we next make the same reduction that
we made in Section~\ref{s:as} in passing from
Proposition~\ref{thm:asp1} to Proposition~\ref{thm:as3}.
Thus, suppose that $x$ is the solution of \eqref{e:easivp1}.
Choose an extension $\varphibar$ of $\varphi$ which is $\cspace{1}$
on $[0,\infty)$ and has support bounded above. Define $y=x-\varphibar$,
so $y=0$ on $(-\infty,0]$. As before, we may write the equation
satisfied by $y$ in the form \eqref{e:redtoz}. In our present
context, we need to show that the function on the right hand
side of \eqref{e:redtoz} is exponentially decreasing. This is
no problem for $\varphibar'$, since the support of $\varphibar$ is
bounded above. Thus, we need the following lemma.
\begin{lemma}
Suppose that $g\in \gspace$ and that $g(\cdot, \cdot, 0, 0)=0$.
Let $\mu$ satisfy Condition \eqref{e:mucond}. Define a function
$f$ by
\begin{equation}
\label{e:rhsf}
f(t) = \int_{0}^{\infty} g(t,s,\varphibar(t), \varphibar(t-s))\, d\mu(s)\,,
\end{equation}
where $\varphibar\in \bcx(-\infty,\infty)$ and the support of $\varphibar$
is bounded above by $b>0$. Then there is a constant $C\geq 0$ such
that
\begin{equation*}
\abs{f(t)} \leq C e^{-\lambda_{0} t},\qquad t\geq 0\,.
\end{equation*}
\end{lemma}
\begin{proof}[Proof of Lemma]
Consider first the $\mu$-measure of the interval $[t,\infty)$.
Let $K$ denote the value of the integral in \eqref{e:mucond}. Then,
for any $t \geq 0$ we have
\begin{align*}
e^{\lambda_{0}t} \mu[t,\infty) &= \int_{t}^{\infty} e^{\lambda_{0}t} \,
d\mu(s)\\
& \leq \int_{t}^{\infty} e^{\lambda_{0}s}\, d\mu(s)\\
& \leq \int_{0}^{\infty} e^{\lambda_{0} s}\, d\mu(s) = K,
\end{align*}
and so
\begin{equation*}
\mu[t,\infty) \leq K e^{-\lambda_{0} t}.
\end{equation*}
Next consider the function $g(t,s,\varphibar(t),\varphibar(t-s))$.
If $t>b$, this function will be zero if $t-s> b$, since
$g(t,s,0,0)\equiv 0$. Thus, the integrand in \eqref{e:rhsf} is
nonzero only for $s\geq t-b$. Then we have
\begin{equation*}
\abs{f(t)} \leq \int_{t-b}^{\infty} \abs{g(t,s,
\varphibar(t),\varphibar(t-s))}
\,d\mu(s).
\end{equation*}
The integrand is bounded by some constant $C$, so we have
\begin{equation*}
\abs{f(t)} \leq C \mu[t-b,\infty) \leq CKe^{\lambda_{0}b} e^{-\lambda_{0} t},
\end{equation*}
for $t>b$. Since $\abs{f}$ is bounded on $[0,b]$, this completes
the proof of the lemma.
\end{proof}
To return to the discussion prior to the lemma,
by applying this lemma, we see that $y=x-\varphibar$ is a solution of an
initial value problem of the form \eqref{e:ivph2}, where $h$
is defined by \eqref{e:redtozfun} and $f$ is exponentially
decreasing. If we show that the solution of this initial value
problem is exponentially decreasing, $x=y+\varphibar$ will be
exponentially decreasing, since the support of $\varphibar$ is bounded
above. Thus, to prove Proposition~\ref{thm:easp2}, it will suffice
to prove the following proposition.
\begin{prop}
\label{thm:easp3}
Suppose that $g\in\gspace$ and $g(\cdot,\cdot,0,0)=0$.
Let $\mu$ be a finite positive Borel
measure on $[0,\infty)$ that satisfies \eqref{e:mucond}. Suppose
that $f\in \bcx[0,\infty)$ and that there is a constant $K$
such that $\abs{f(t)}\leq K e^{-\lambda_{0}t}$. Let $x$ be the
solution of the initial value problem
\begin{equation}
\label{e:easivpf}
\begin{cases}
x(t) = 0, & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t),x(t-s))\, d\mu(s) = f(t),
}
& t\in [0,\infty).
\end{cases}
\end{equation}
Then, for $\lambda>0$ sufficiently small, there is a constant $C$
such that
\begin{equation*}
\abs{x(t)} \leq C e^{-\lambda t}.
\end{equation*}
\end{prop}
To prove this proposition, we first introduce some spaces of
exponentially decreasing functions. For $\lambda>0$, let $\zspace{\lambda}$
denote the space of functions $x\in X$ such that $\abs{x(t)}\leq
C e^{-\lambda t}$ for some constant $C$ and all $t$ (recall that
elements of $X$ are zero on $(-\infty,0]$).
We will be able to show that $\zspace{\lambda}$ is invariant under
the operators $T_{a}$ for sufficiently small $\lambda$. This is
not sufficient to prove the proposition, since $\zspace{\lambda}$
is not closed in $X$. Indeed, if $X_{c}\subset X$ denotes
the set of functions in $X$ with compact support, then
$X_{c}\subset \zspace{\lambda} \subset X_{0}$ for all $\lambda$,
but the closure of $X_{c}$ in $X$ is $X_{0}$. To deal
with this problem, we put a norm on $\zspace{\lambda}$ and
show that an appropriate $T_{a}$ is a contraction in this
norm.
Let $e_{\lambda}(t) = e^{\lambda t}$. A function $x\in X$ is in
$\zspace{\lambda}$ if and only if $e_{\lambda}x$ is bounded.
We define the norm on
$\zspace{\lambda}$ by
\begin{equation*}
\Norm{x}_{\lambda} = \Norm{e_{\lambda} x} = \sup\lset e^{\lambda t} \abs{x(t)}
\mid t\in\reals\rset.
\end{equation*}
Since $e_{\lambda}\geq 1$ where $x\ne 0$, we see that
$\Norm{x}\leq \Norm{x}_{\lambda}$, so the inclusion of $\zspace{\lambda}$
into $X$ is continuous. The mapping $\zspace{\lambda}\to X\colon
x \mapsto e_{\lambda} x$ is a (bijective) isometry, so $\zspace{\lambda}$
is a Banach space. If $\lambda_{1}<\lambda_{2}$, then $e^{\lambda_{1}t}\leq
e^{\lambda_{2}t }$ where $x$ is not zero, so $\Norm{x}_{\lambda_{1}}\leq
\Norm{x}_{\lambda_{2}}$. Thus, $\zspace{\lambda_{2}}\subset
\zspace{\lambda_{1}}$ and the inclusion is continuous.
By the definition of the norm, if $x\in \zspace{\lambda}$, we have
\begin{equation*}
\abs{x(t)} \leq \Norm{x}_{\lambda} e^{-\lambda t}.
\end{equation*}
We make similar definitions for spaces of exponentially decreasing
functions on $[0,\infty)$. Thus, $\zspacep{\lambda}$ will denote
the space of functions $f\in \bcx[0,\infty)$ such that $e_{\lambda}f$
is bounded and we equip $\zspacep{\lambda}$ with the norm
$\Norm{f}_{\lambda}=\Norm{e_{\lambda}f}$.
We next make some estimates for our operators on the spaces of exponentially
decreasing functions.
We first consider the operators $L_{a}^{\inv}$. Suppose that
$f\in\zspacep{\lambda}$, where $\lambda < a$. Then $L_{a}^{\inv}f(t)$
is zero for $t\leq 0$, and for $t\geq 0$, we have
\begin{align*}
\abs{L_{a}^{\inv}f(t)} &\leq \int_{0}^{t} e^{-a s} \abs{f(t-s)}\,
ds \\
& \leq \int_{0}^{t} e^{-a s}\Norm{f}_{\lambda} e^{-\lambda(t-s)}\, ds\\
& = \Norm{f}_{\lambda} e^{-\lambda t} \int_{0}^{t} e^{-as}e^{\lambda s} \,ds.
\end{align*}
The last integral has the value
\begin{equation*}
\frac{1}{a-\lambda} [1 - e^{-(a-\lambda)t}],
\end{equation*}
which is less than $1/(a-\lambda)$, since $a-\lambda>0$. Thus, we have
\begin{equation}
\label{e:easla}
\Norm{L_{a}^{\inv} f}_{\lambda} \leq \frac{1}{a-\lambda} \Norm{f}_{\lambda},
\qquad f\in \zspacep{\lambda},\ \lambda < a.
\end{equation}
We next turn to the operators $N_{a}$. If $x,y \in \zspace{\lambda}\cap
\bee{\rho}$ and $(t,s)\in [0,\infty)\times [0,\infty)$, we may
apply Lemma~\ref{thm:basicest} to conclude
\begin{eqnarray*}
\lefteqn{ \abs{[a x(t) - g(t,s, x(t), x(t-s))] - [a y(t) - g(t,s,y(t),y(t-s))]} }
\\
&\leq& K(a,\rho) \max\lset \abs{x(t)-y(t)},
\abs{x(t-s)-y(t-s)}\rset \\
&\leq& K(a,\rho) \max\lset \Norm{x-y}_{\lambda} e^{-\lambda t},
\Norm{x-y}_{\lambda} e^{-\lambda(t-s)}\rset\\
&=& K(a,\rho) \Norm{x-y}_{\lambda} e^{-\lambda t} e^{\lambda s}.
\end{eqnarray*}
Thus, for $x,y \in \zspace{\lambda} \cap \bee{\rho}$,
\begin{equation}
\label{e:easnaest}
\abs{N_{a}(x)(t,s) - N_{a}(y)(t,s)}
\leq K(a,\rho) \Norm{x-y}_{\lambda}
e^{-\lambda t} e^{\lambda s}\,.
\end{equation}
Now consider the operator $M_{a}\colon X\to \bcx[0,\infty)$.
Suppose that $x,y\in \zspace{\lambda}\cap \bee{\rho}$,
where $\lambda \leq \lambda_{0}$.
Then we have
\begin{align*}
\abs{M_{a}(x)(t)-M_{a}(y)(t)} & \leq
\int_{0}^{\infty} \abs{N_{a/m}(x)(t,s)-N_{a/m}(y)(t,s)}\,d\mu(s)\\
& \leq \int_{0}^{\infty} K(a/m,\rho) \Norm{x-y}_{\lambda} e^{-\lambda t}
e^{\lambda s}\, d\mu(s)\\
& = K(a/m,\rho) \Norm{x-y}_{\lambda} e^{-\lambda t} \int_{0}^{\infty}
e^{\lambda s}\, d\mu(s)\,,
\end{align*}
where the last integral is finite because $\lambda \leq \lambda_{0}$.
In particular, if we set $y=0$ and note that $N_{a}(0)=0$ and
$M_{a}(0)=0$ (because $g(\cdot,\cdot,0,0)=0$), we see that
$M_{a}(x)\in \zspacep{\lambda}$ if $x\in \zspace{\lambda}$ and $\lambda \leq
\lambda_{0}$. We also conclude that for
$x,y \in \zspace{\lambda}\cap \bee{\rho}$ and $\lambda \leq \lambda_{0}$,
\begin{equation}
\label{e:easmaest}
\Norm{M_{a}(x)-M_{a}(y)}_{\lambda} \leq K(a/m,\rho) \Norm{x-y}_{\lambda}
\int_{0}^{\infty} e^{\lambda s}\, d\mu(s)\,.
\end{equation}
Consider the initial value problem \eqref{e:easivpf}, so $f\in
\zspacep{\lambda_{0}}$.
Assume that $\lambda\leq \lambda_{0}$ and $\lambda < a$.
If $x\in\zspace{\lambda}$ then, from the results above,
$M_{a}(x)\in\zspacep{\lambda}$, $f\in \zspacep{\lambda}$ and
$L_{a}^{\inv} M_{a}(x)$ and $L_{a}^{\inv}f$ are in $\zspace{\lambda}$.
Thus, $\zspace{\lambda}$ is invariant under the operator
$T_{a}(x) =L_{a}^{\inv}M_{a}(x)+L_{a}^{\inv}f$. For
$x,y \in \zspace{\lambda}\cap \bee{\rho}$, we have the
estimate
\begin{equation}
\label{e:eastaest}
\Norm{T_{a}(x)-T_{a}(y)}_{\lambda} \leq
\frac{1}{a-\lambda}K(a/m,\rho) \Norm{x-y}_{\lambda}\int_{0}^{\infty}
e^{\lambda s} \, d\mu(s)\,.
\end{equation}
We know that we can find a $\rho>0$ such that
$T_{a(\rho)}(\bee{\rho})\subset \bee{\rho}$. If we fix such a
$\rho$ and assume $\lambda < a(\rho)$ and $\lambda < \lambda_{0}$,
the set $\zspace{\lambda}\cap \bee{\rho}$ is invariant under
$T_{a(\rho)}$. The set $\zspace{\lambda}\cap \bee{\rho}$
is closed in $\zspace{\lambda}$, because the inclusion of
$\zspace{\lambda}$ into $X$ is continuous. For $x,y\in \zspace{\lambda}
\cap \bee{\rho}$, we have
\begin{equation*}
\Norm{T_{a(\rho)}(x)-T_{a(\rho)}(y)} \leq \frac{1}{a(\rho)-\lambda}
K(a(\rho)/m, \rho) \Norm{x-y}_{\lambda} \int_{0}^{\infty} e^{\lambda s}\, d\mu(s).
\end{equation*}
Thus, $T_{a(\rho)}$ is Lipschitz on $\zspace{\lambda}\cap \bee{\rho}$
with Lipschitz constant
\begin{equation*}
\sigma(\lambda) = \frac{1}{a(\rho)-\lambda} K(a(\rho)/m, \rho)
\int_{0}^{\infty} e^{\lambda s}\, d\mu(s).
\end{equation*}
But $\sigma$ is a continuous, nondecreasing, function of $\lambda$
and
\begin{equation*}
\sigma(0) = \frac{1}{a(\rho)}K(a(\rho)/m, \rho) m,
\end{equation*}
which we know is strictly less than one. Thus, $\sigma(\lambda)<1$ for
$\lambda >0$ sufficiently small.
We conclude that if we choose $\lambda$ sufficiently small that
$\lambda < \lambda_{0}$, $\lambda < a(\rho)$ and $\sigma(\lambda)<1$, then
$T_{a(\rho)}$ leaves $\zspace{\lambda}\cap \bee{\rho}$ invariant
and is a contraction on this closed subset of $\zspace{\lambda}$.
Thus, $T_{a(\rho)}$ has a fixed point $x\in \zspace{\lambda}$,
which is precisely the solution of the initial value
problem \eqref{e:easivpf}. This completes the proof
of Proposition~\ref{thm:easp3} and hence the proof
of Theorem~\ref{thm:eas}.
\begin{thebibliography}{99}
\bibitem{BC}
\textsc{R.~Bellman and K.~L.~Cooke},
``Differential-Difference Equations,''
Academic Press, New York, 1963.
\bibitem{CH}
\textsc{S.-N. Chow and J.~K.~Hale}, ``Methods of Bifurcation Theory,''
Springer-Verlag, New York, 1982.
\bibitem{DLlib}
\textsc{L.~D.~Drager and W.~Layton}, On non-linear difference
approximations to non-linear functional differential equations,
\textsl{Libertas Mathematica}, Vol.~III (1983) 45--65.
\bibitem{DLpol}
\textsc{L.~D.~Drager and W.~Layton}, Non-resonance in functional
differential equations with small time lag, \emph{in}
``Functional-Differential Systems and Related Topics~III,''
ed.\mbox{} M.~Kisielewicz, Higher College of Engineering, Zielona,
Gora, Poland, 1983, 65--78.
\bibitem{DLfa}
\textsc{L.~D.~Drager and W.~Layton}, Non-linear delay differential
equations and function algebras, \emph{in} ``Differential
Equations,'' ed.\mbox{} I.~W.~Knowles and R.~T.~Lewis, North Holland,
Amsterdam, 1984.
\bibitem{DLnr}
\textsc{L.~D.~Drager and W.~Layton}, Some results on non-resonant
non-linear delay differential equations,
\emph{in} ``Trends in the Theory and Practice of Non-linear
Analysis,'' ed.\mbox{} V.~Lakshmikantham, North Holland,
Amsterdam, 1985, 131--136.
\bibitem{DLjde}
\textsc{L.~D.~Drager and W.~Layton}, Bounded solutions of delay
differential equations subject to a generalized nonresonance
condition, \textsl{J. Differential Equations},
\textbf{131}(1996), no.~1, 132--169.
\bibitem{DLM}
\textsc{L.~D.~Drager, W.~Layton and R.~M.~Mattheij}, Asymptotics
of numerical methods for non-linear evolution equations,
\emph{in} ``Trends in the Theory and Practice of Non-linear
Analysis,'' ed. V.~Lakshmikantham, North Holland,
Amsterdam, 1985, 137--144.
\bibitem{El}
\textsc{L.~E.~El'sgol'ts},
``Introduction to the Theory of Differential Equations with Deviating
Arguments,'' Holden-Day, San Francisco, 1966.
\bibitem{ElN}
\textsc{L.~E.~El'sgol'ts and S.~B.~Norkin},
``Introduction to the Theory and Application of Differential
Equations with Deviating Arguments,'' Academic Press, New York,
1973.
\bibitem{H}
\textsc{J.~K.~Hale}, ``Theory of Functional Differential Equations,''
Second Edition, Springer-Verlag, New York, 1977.
\bibitem{L}
\textsc{William Layton}, Existence of almost periodic solutions to
delay differential equations with Lipschitz nonlinearities,
\textsl{J. Differential Equations}, \textbf{55}(1984),
no.~2, 151--164.
\end{thebibliography}
\end{document}