\documentclass[reqno]{amsart}
\usepackage{hyperref}
\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations}, Vol. 2012 (2012), No. 86, pp. 1--14.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline
ftp ejde.math.txstate.edu}
\thanks{\copyright 2012 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document}

\title[\hfilneg EJDE-2012/86\hfil Random dynamical systems]
{Random dynamical systems on time scales}

\author[C. Lungan, V. Lupulescu \hfil EJDE-2012/86\hfilneg]
{Cristina Lungan, Vasile Lupulescu} % in alphabetical order

\address{Cristina Lungan \newline
Gheorghe Tatarascu School of Targu Jiu, 23 August 47, Romania}
\email{crisslong@yahoo.com}

\address{Vasile Lupulescu \newline
Constantin Brancusi University of Targu Jiu, Republicii 1, Romania}
\email{lupulescu\_v@yahoo.com}

\thanks{Submitted January 12, 2012. Published May 31, 2012.}
\subjclass[2000]{34N05, 37H10, 26E70}
\keywords{Differential equation; random variable; time scale}

\begin{abstract}
The purpose of this paper is to prove the existence and uniqueness of solutions for random dynamic systems on time scales.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

The theory of dynamic systems on time scales allows us to study both continuous and discrete dynamic systems simultaneously. Since Hilger's initial work \cite{hil-88} there has been significant growth in the theory of dynamic systems on time scales, covering a variety of different qualitative aspects. We refer to the books \cite{boh,boh2} and the papers \cite{agra,agra2,til,til1}. In recent years, some authors have studied stochastic differential equations on time scales \cite{bohs,gro,san}. The main theoretical and practical aspects of probability theory and stochastic differential equations can be found in the books \cite{ch,ol}.

The organization of this paper is as follows. Section 2 presents a few definitions and concepts of time scales; also, the notion of a stochastic process on a time scale is introduced. In Section 3 we prove the existence and uniqueness of solutions for random dynamic systems on time scales.

\subsection*{Preliminaries}

By a \emph{time scale} $\mathbb{T}$ we mean any nonempty closed subset of $\mathbb{R}$. Then $\mathbb{T}$ is a complete metric space with the metric defined by $d(t,s):=|t-s|$ for $t,s\in \mathbb{T}$. Since a time scale $\mathbb{T}$ is not connected in general, we need the concept of jump operators. The \emph{forward jump operator} $\sigma :\mathbb{T}\to \mathbb{T}$ is defined by $\sigma (t):=\inf \{s\in \mathbb{T}:s>t\}$, while the \emph{backward jump operator} $\rho :\mathbb{T}\to \mathbb{T}$ is defined by $\rho (t):=\sup \{s\in \mathbb{T}:s<t\}$. If $\sigma (t)>t$, we say that $t$ is a \emph{right-scattered point}, while if $\rho (t)<t$, we say that $t$ is a \emph{left-scattered point}. If $t<\sup \mathbb{T}$ and $\sigma (t)=t$, then $t$ is called a \emph{right-dense point}, and if $t>\inf \mathbb{T}$ and $\rho (t)=t$, then $t$ is called a \emph{left-dense point}. Points that are right-dense and left-dense at the same time will be called \emph{dense points}.
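For instance, if $\mathbb{T}=\mathbb{R}$, then $\sigma (t)=\rho (t)=t$ and every point is dense, while if $\mathbb{T}=h\mathbb{Z}$ with $h>0$, then
\[
\sigma (t)=t+h,\quad \rho (t)=t-h,
\]
so every point of $h\mathbb{Z}$ is both right-scattered and left-scattered.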
The set $\mathbb{T}^{\kappa }$ is defined to be $\mathbb{T}^{\kappa }=\mathbb{T}\setminus \{m\}$ if $\mathbb{T}$ has a left-scattered maximum $m$, and otherwise $\mathbb{T}^{\kappa }=\mathbb{T}$. Given a time scale interval $[a,b]_{\mathbb{T}}:=\{t\in \mathbb{T}:a\leq t\leq b\}$, the set $[a,b]_{\mathbb{T}}^{\kappa }$ denotes the interval $[a,b]_{\mathbb{T}}$ if $a<\rho (b)=b$ and denotes the interval $[a,b)_{\mathbb{T}}$ if $a<\rho (b)<b$. The \emph{graininess function} $\mu :\mathbb{T}\to [0,\infty )$ is defined by $\mu (t):=\sigma (t)-t$. If $t_0\in \mathbb{T}$ and $\delta >0$, then we define the following neighborhoods of $t_0$: $U_{\mathbb{T}}(t_0,\delta ):=(t_0-\delta ,t_0+\delta )\cap \mathbb{T}$, $U_{\mathbb{T}}^{+}(t_0,\delta ):=[t_0,t_0+\delta )\cap \mathbb{T}$, and $U_{\mathbb{T}}^{-}(t_0,\delta ):=(t_0-\delta ,t_0]\cap \mathbb{T}$.

\begin{definition}[\cite{boh}] \label{def1} \rm
A function $f:\mathbb{T}\to \mathbb{R}$ is called \emph{regulated} if its right-sided limits exist (finite) at all right-dense points in $\mathbb{T}$, and its left-sided limits exist (finite) at all left-dense points in $\mathbb{T}$. A function $f:\mathbb{T}\to \mathbb{R}$ is called \emph{rd-continuous} if it is continuous at all right-dense points in $\mathbb{T}$ and its left-sided limits exist (finite) at all left-dense points in $\mathbb{T}$.
\end{definition}

Obviously, a continuous function is rd-continuous, and an rd-continuous function is regulated (\cite[Theorem 1.60]{boh}).

\begin{definition} \label{def2}\rm
A function $f:[a,b]_{\mathbb{T}}\times \mathbb{R}\to \mathbb{R}$ is called \emph{Hilger continuous} if $f$ is continuous at each point $(t,x)$ where $t$ is right-dense, and the limits
\[
\lim_{(s,y)\to (t^{-},x)} f(s,y)\quad\text{and}\quad \lim_{y\to x} f(t,y)
\]
both exist and are finite at each point $(t,x)$ where $t$ is left-dense.
\end{definition}

\begin{definition}[\cite{boh}] \label{def3} \rm
Let $f:\mathbb{T}\to \mathbb{R}$ and $t\in \mathbb{T}^{\kappa }$. We define $f^{\Delta }(t)\in \mathbb{R}$ (provided it exists) to be the number with the property that for every $\varepsilon >0$ there exists $\delta >0$ such that
\begin{equation}
| f(\sigma (t))-f(s)-f^{\Delta }(t)[\sigma (t)-s]| \leq \varepsilon | \sigma (t)-s| \label{a-delta}
\end{equation}
for all $s\in U_{\mathbb{T}}(t,\delta )$. We call $f^{\Delta }(t)$ the \emph{delta} (or \emph{Hilger}) derivative ($\Delta $-derivative for short) of $f$ at $t$. Moreover, we say that $f$ is delta differentiable ($\Delta $-differentiable for short) on $\mathbb{T}^{\kappa }$ provided $f^{\Delta }(t)$ exists for all $t\in \mathbb{T}^{\kappa }$.
\end{definition}

The following result will be very useful.

\begin{proposition}[{\cite[Theorem 1.16]{boh}}] \label{prop1}
Assume that $f:\mathbb{T}\to \mathbb{R}$ and $t\in \mathbb{T}^{\kappa }$.
\begin{itemize}
\item[(i)] If $f$ is $\Delta$-differentiable at $t$, then $f$ is continuous at $t$.
\item[(ii)] If $f$ is continuous at $t$ and $t$ is right-scattered, then $f$ is $\Delta $-differentiable at $t$ with
\[
f^{\Delta }(t)=\frac{f(\sigma (t))-f(t)}{\sigma (t)-t}.
\]
\item[(iii)] If $f$ is $\Delta $-differentiable at $t$ and $t$ is right-dense, then
\[
f^{\Delta }(t)=\lim_{s\to t} \frac{f(t)-f(s)}{t-s}.
\]
\item[(iv)] If $f$ is $\Delta $-differentiable at $t$, then $f(\sigma (t))=f(t)+\mu (t)f^{\Delta }(t)$.
\end{itemize}
\end{proposition}

It is known \cite{guse1} that for every $\delta >0$ there exists at least one partition $P:a=t_0<t_1<\dots <t_n=b$ of $[a,b)_{\mathbb{T}}$ such that, for each $i\in \{1,2,\dots ,n\}$, either $t_i-t_{i-1}\leq \delta $ or $t_i-t_{i-1}>\delta $ and $\rho (t_i)=t_{i-1}$.
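For instance, the second alternative in this statement is needed for time scales with scattered points: if $\mathbb{T}=[0,1]\cup \{2\}$, $a=0$ and $b=2$, then for $\delta <1$ the last subinterval of any partition $P:0=t_0<t_1<\dots <t_n=2$ has length $t_n-t_{n-1}\geq 1>\delta $; however, since $\rho (2)=1$, the partition with $t_{n-1}=1$ still has the stated property.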
For given $\delta >0$ we denote by $\mathcal{P}([a,b)_{\mathbb{T}},\delta )$ the set of all partitions $P:a=t_0<t_1<\dots <t_n=b$ that possess the property indicated above. Let $f$ be a bounded real-valued function on $[a,b)_{\mathbb{T}}$ and let $P:a=t_0<t_1<\dots <t_n=b$ be a partition of $[a,b)_{\mathbb{T}}$. In each interval $[t_{i-1},t_i)_{\mathbb{T}}$, $i=1,2,\dots ,n$, choose an arbitrary point $\xi_i$ and form the sum
\[
S=\sum_{i=1}^{n}f(\xi_i)(t_i-t_{i-1}),
\]
called a Riemann $\Delta $-sum of $f$ corresponding to the partition $P$.

\begin{definition}[\cite{guse}] \label{def4} \rm
We say that $f$ is \emph{Riemann $\Delta $-integrable} from $a$ to $b$ if there exists a number $I$ with the following property: for each $\varepsilon >0$ there exists $\delta >0$ such that $|S-I|<\varepsilon $ for every Riemann $\Delta $-sum $S$ of $f$ corresponding to a partition $P\in \mathcal{P}([a,b)_{\mathbb{T}},\delta )$, independent of the way in which we choose $\xi_i\in [ t_{i-1},t_i)_{\mathbb{T}}$, $i=1,2,\dots,n$. It is easily seen that such a number $I$ is unique. The number $I$ is the Riemann $\Delta$-integral of $f$ from $a$ to $b$, and we will denote it by $\int_a^{b}f(t)\Delta t$.
\end{definition}

\begin{proposition}[{\cite[Theorem 5.8]{guse}}] \label{prop2}
A bounded function $f:[a,b)_{\mathbb{T}}\to \mathbb{R}$ is Riemann $\Delta $-integrable on $[a,b)_{\mathbb{T}}$ if and only if the set of all right-dense points of $[a,b)_{\mathbb{T}}$ at which $f$ is discontinuous is a set of $\Delta$-measure zero.
\end{proposition}

It is not difficult to see that every regulated function on a compact interval is bounded (see \cite[Theorem 1.65]{boh}). Hence every regulated function $f:[a,b]_{\mathbb{T}}\to \mathbb{R}$ is Riemann $\Delta $-integrable from $a$ to $b$.

\begin{proposition}[{\cite[Theorem 5.8]{hil-90}}] \label{prop3}
Assume that $a,b\in \mathbb{T}$, $aa)<1$, it follows that not all solutions $X(\cdot ,\omega )$ are well defined on some common interval $[0,a)$.
\end{example}

\begin{example} \label{examp2} \rm
Let $(\Omega ,\mathcal{F},P)$ be a complete probability measure space and let $\Omega _0\notin \mathcal{F}$. It is easy to check that, for each $\omega \in \Omega $, the function $X(\cdot,\cdot ):[0,1]_{\mathbb{R}}\times \Omega \to \mathbb{R}$, given by
\[
X(t,\omega )=\begin{cases}
0 &\text{if } \omega \in \Omega _0 \\
t^{3/2} &\text{if }\omega \in \Omega \setminus \Omega _0,
\end{cases}
\]
is a solution of the initial-value problem
\begin{gather*}
X^{\Delta }(t,\omega )=\frac{3}{2}X^{1/3}(t,\omega ), \quad t\in [ 0,\infty)_{\mathbb{R}} \\
X(0,\omega )=0.
\end{gather*}
But $X(\cdot ,\cdot )$ is not a stochastic process. Indeed, we have that
\[
\{\omega \in \Omega ;X(1,\omega )\in [ -\frac{1}{2},\frac{1}{2}]\}=\Omega _0\notin \mathcal{F};
\]
that is, $\omega \mapsto X(1,\omega )$ is not a measurable function.
\end{example}

Using Propositions \ref{prop4} and \ref{prop5} and \cite[Lemma 2.3]{til1}, it is easy to prove the following result.

\begin{lemma} \label{lem3}
A time scale stochastic process $X(\cdot ,\cdot ):[a,b]_{\mathbb{T}}^{\kappa }\times \Omega \to \mathbb{R}$ is the solution of the problem \eqref{ivp} if and only if $X(\cdot ,\cdot )$ is a continuous time scale stochastic process and it satisfies the following random integral equation
\begin{equation}
X(t,\omega )=_PX_0(\omega )+\int_a^{t}f(s,X(s,\omega ),\omega )\Delta s, \quad t\in [ a,b]_{\mathbb{T}}. \label{inteq}
\end{equation}
\end{lemma}

The following result is known as Gronwall's inequality on time scales and will be used in this paper.

\begin{lemma}[{\cite[Lemma 3.1]{til}}] \label{lem4}
Let $X(\cdot ,\cdot ),Y(\cdot,\cdot ):[a,b]_{\mathbb{T}}^{\kappa }\times \Omega \to \mathbb{R}_{+}$ be rd-continuous time scale stochastic processes such that
\[
X(t,\omega )\leq _PY(t,\omega )+\int_a^{t}q(s)X(s,\omega )\Delta s, \quad t\in [ a,b]_{\mathbb{T}},
\]
where $q:[a,b]_{\mathbb{T}}\to \mathbb{R}_{+}$ is rd-continuous and $1+\mu (t)q(t)\neq 0$ for all $t\in [ a,b]_{\mathbb{T}}$. Then we have
\[
X(t,\omega )\leq _PY(t,\omega )+e_{q}(t,a)\int_a^{t}q(s)Y(s,\omega )
\frac{1}{e_{q}(\sigma (s),a)}\Delta s, \quad t\in [ a,b]_{\mathbb{T}}.
\]
\end{lemma}

\begin{theorem} \label{thm1}
Let $f:[a,b]_{\mathbb{T}}^{\kappa}\times \mathbb{R}\times \Omega \to \mathbb{R}$ satisfy {\rm (H1)--(H2)} and assume that there exists an rd-continuous time scale stochastic process $L(\cdot ,\cdot ):[a,b]_{\mathbb{T}}^{\kappa }\times \Omega \to \mathbb{R}$ such that
\begin{equation}
|f(t,x,\omega )-f(t,y,\omega )|\leq L(t,\omega )|x-y| \label{Lip}
\end{equation}
for every $t\in [ a,b]_{\mathbb{T}}^{\kappa }$ and every $x,y\in \mathbb{R}$ with $P.1$. Let $X_0:\Omega \to \mathbb{R}$ be a random variable such that
\begin{equation}
|f(t,X_0(\omega ),\omega )|\leq _PM, \quad t\in [ a,b]_{\mathbb{T}}^{\kappa }, \label{M}
\end{equation}
where $M>0$ is a constant. Then problem \eqref{ivp} has a unique solution.
\end{theorem}

\begin{proof}
To prove the theorem we apply the method of successive approximations (see \cite{til}). For this, we define a sequence of functions $X_n(\cdot ,\cdot ):[a,b]_{\mathbb{T}}^{\kappa }\times \Omega \to \mathbb{R}$, $n\in \mathbb{N}$, as follows:
\begin{equation}
\begin{gathered}
X_0(t,\omega )=X_0(\omega ) \\
X_n(t,\omega )=X_0(\omega )+\int_a^{t}f(s,X_{n-1}(s,\omega ),\omega )\Delta s, \quad n\geq 1,
\end{gathered}\label{succ}
\end{equation}
for every $t\in [ a,b]_{\mathbb{T}}^{\kappa }$ and every $\omega \in \Omega $.

First, using \eqref{M} and Lemma \ref{lem1}, we observe that
\begin{align*}
|X_1(t,\omega )-X_0(t,\omega )|
&\leq \big|\int_a^{t}f(s,X_0(\omega ),\omega )\Delta s\big|
\leq \int_a^{t}| f(s,X_0(\omega ),\omega )| \Delta s \\
&\leq \int_a^{t}| f(s,X_0(\omega ),\omega )| ds\leq _PM(t-a) \\
&\leq M(b-a),\quad t\in [ a,b]_{\mathbb{T}}.
\end{align*}
We prove by induction that for each integer $n\geq 1$ the following estimate holds:
\begin{equation}
|X_n(t,\omega )-X_{n-1}(t,\omega )|\leq _PM\widetilde{L}(\omega )^{n-1}\frac{(t-a)^n}{n!}\leq M\widetilde{L}(\omega )^{n-1}\frac{(b-a)^n}{n!},\quad t\in [ a,b]_{\mathbb{T}}, \label{ind}
\end{equation}
where $\widetilde{L}(\omega )=\sup_{t\in [a,b]_{\mathbb{T}}} L(t,\omega )$. The case $n=1$ is the estimate obtained above. Suppose that \eqref{ind} holds for $n=k\geq 1$. Then, using \eqref{Lip}, \eqref{M} and Lemma \ref{lem1}, we obtain
\begin{align*}
|X_{k+1}(t,\omega )-X_{k}(t,\omega )|
&\leq \int_a^{t}|f(s,X_{k}(s,\omega ),\omega )-f(s,X_{k-1}(s,\omega ),\omega )|\Delta s \\
&\leq _P\widetilde{L}(\omega )\int_a^{t}|X_{k}(s,\omega )-X_{k-1}(s,\omega )|\Delta s\\
&\leq _P\widetilde{L}(\omega )^{k}\frac{M}{k!} \int_a^{t}(s-a)^{k}\Delta s \\
&\leq \widetilde{L}(\omega )^{k}\frac{M}{k!}\int_a^{t}(s-a)^{k}ds\\
&=M \widetilde{L}(\omega )^{k}\frac{(t-a)^{k+1}}{(k+1)!}\\
&\leq M\widetilde{L}(\omega )^{k} \frac{(b-a)^{k+1}}{(k+1)!},\quad t\in [ a,b]_{\mathbb{T}}.
\end{align*}
Thus, \eqref{ind} is true for $n=k+1$ and so \eqref{ind} holds for all $n\geq 1$.

Further, we show that for every $n\in \mathbb{N}$ the functions $X_n(\cdot ,\omega ):[a,b]_{\mathbb{T}}\to \mathbb{R}$ are continuous with $P.1$. Let $\varepsilon >0$ and $t,s\in [ a,b]_{\mathbb{T}}$ be such that $|t-s|<\varepsilon /M$. We have
\begin{align*}
|X_1(t,\omega )-X_1(s,\omega )|
&=| \int_a^{t}f(\tau,X_0(\omega ),\omega )\Delta \tau -\int_a^{s}f(\tau ,X_0(\omega ),\omega )\Delta \tau | \\
&=| \int_{s}^{t}f(\tau ,X_0(\omega ),\omega )\Delta \tau | \\
&\leq \int_{s}^{t}| f(\tau ,X_0(\omega),\omega )| \Delta \tau \\
&\leq \int_{s}^{t}| f(\tau ,X_0(\omega ),\omega )| d\tau \\
&\leq _PM|t-s|<\varepsilon
\end{align*}
and so $t\mapsto X_1(t,\omega )$ is continuous with $P.1$.
Since for each $n\geq 2$
\begin{align*}
&|X_n(t,\omega )-X_n(s,\omega )|\\
&=| \int_a^{t}f(\tau ,X_{n-1}(\tau ,\omega ),\omega )\Delta \tau -\int_a^{s}f(\tau ,X_{n-1}(\tau ,\omega ),\omega )\Delta \tau | \\
& \leq \int_{s}^{t}| f(\tau ,X_{n-1}(\tau ,\omega ),\omega )| \Delta \tau \\
&\leq \int_{s}^{t}| f(\tau ,X_0(\omega ),\omega )| \Delta \tau +\int_{s}^{t}| f(\tau ,X_{n-1}(\tau ,\omega ),\omega ) -f(\tau ,X_0(\omega ),\omega )| \Delta \tau \\
&\leq \int_{s}^{t}| f(\tau ,X_0(\omega ),\omega )|\Delta \tau \\
&\quad +\sum_{k=1}^{n-1}\int_{s}^{t}| f(\tau ,X_{k}(\tau ,\omega ),\omega )-f(\tau ,X_{k-1}(\tau ,\omega ),\omega )| \Delta \tau ,
\end{align*}
then, by induction, we obtain
\[
|X_n(t,\omega )-X_n(s,\omega )|\leq _PM\Big( 1+\sum_{k=1}^{n-1}\frac{\widetilde{L}(\omega )^{k}(b-a)^{k}}{k!} \Big) |t-s|\to 0
\]
as $s\to t$ with $P.1$. Therefore, for every $n\in \mathbb{N}$ the function $X_n(\cdot ,\omega ):[a,b]_{\mathbb{T}}\to \mathbb{R}$ is continuous with $P.1$. Now, using Lemma \ref{lem2} and \eqref{succ}, we deduce that the functions $X_n(t,\cdot ):\Omega \to \mathbb{R}$ are measurable. Consequently, it follows that for every $n\in \mathbb{N}$ the function $X_n(\cdot ,\cdot ):[a,b]_{\mathbb{T}}\times \Omega \to \mathbb{R}$ is a time scale stochastic process.

Further, we shall show that the sequence $(X_n(\cdot ,\omega ))_{n\in \mathbb{N}}$ is uniformly convergent with $P.1$. Denote
\[
Y_n(t,\omega )=|X_{n+1}(t,\omega )-X_n(t,\omega )|, \quad n\in \mathbb{N}.
\]
Since
\[
Y_n(t,\omega )-Y_n(s,\omega )\leq _P\widetilde{L}(\omega )\int_{s}^{t}| X_n(\tau ,\omega )-X_{n-1}(\tau ,\omega )| \Delta \tau ,
\]
then, reasoning as above, we deduce that the functions $t\mapsto Y_n(t,\omega )$ are continuous with $P.1$. Now, using \eqref{ind}, we obtain
\[
\sup_{t\in [ a,b]_{\mathbb{T}}}| X_n(t,\omega )-X_{m}(t,\omega )| \leq \sum_{k=m}^{n-1}\sup_{t\in [ a,b]_{\mathbb{T}}}Y_{k}(t,\omega )\leq _PM\sum_{k=m}^{n-1}\frac{\widetilde{L}(\omega )^{k}(b-a)^{k+1}}{(k+1)!}
\]
for all $n>m>0$. Since the series $\sum_{n=1}^{\infty }\widetilde{L}(\omega )^{n-1}(b-a)^n/n!$ converges with $P.1$, for each $\varepsilon >0$ there exists $n_0\in \mathbb{N}$ such that
\begin{equation}
\sup_{t\in [ a,b]_{\mathbb{T}}}| X_n(t,\omega )-X_{m}(t,\omega )| \leq _P\varepsilon \quad \text{for all } n,m\geq n_0. \label{a}
\end{equation}
Hence, since $(\mathbb{R},|\cdot |)$ is a complete metric space, it follows that there exists $\Omega _0\subset \Omega $ such that $P(\Omega _0)=1$ and, for every $\omega \in \Omega _0$, the sequence $(X_n(\cdot ,\omega ))_{n\in \mathbb{N}}$ is uniformly convergent on $[a,b]_{\mathbb{T}}$. For $\omega \in \Omega _0$ denote $\widetilde{X}(t,\omega )=\lim_{n\to \infty }X_n(t,\omega )$. Next, we define the function $X(\cdot ,\cdot ):[a,b]_{\mathbb{T}}\times \Omega \to \mathbb{R}$ as follows: $X(\cdot ,\omega )=\widetilde{X}(\cdot ,\omega )$ if $\omega \in \Omega _0$, and $X(\cdot ,\omega )$ as an arbitrary function if $\omega \in \Omega \setminus \Omega _0$. Obviously, $X(\cdot ,\omega )$ is continuous with $P.1$. Since, by Lemma \ref{lem2} and \eqref{succ}, the functions $\omega \mapsto X_n(t,\omega )$ are measurable and $X(t,\omega )=\lim_{n\to \infty }X_n(t,\omega )$ for every $t\in [ a,b]_{\mathbb{T}}$ with $P.1$, we deduce that $\omega \mapsto X(t,\omega )$ is measurable for every $t\in [ a,b]_{\mathbb{T}}$. Therefore, $X(\cdot ,\cdot ):[a,b]_{\mathbb{T}}\times \Omega \to \mathbb{R}$ is a continuous time scale stochastic process.
We show that $X(\cdot ,\cdot )$ satisfies the random integral equation \eqref{inteq}. For each $n\in \mathbb{N}$ we put $G_n(t,\omega )=f(t,X_n(t,\omega ),\omega )$, $t\in [ a,b]_{\mathbb{T}}$, $\omega \in \Omega $. Then $G_n(\cdot ,\cdot )$ is an rd-continuous time scale stochastic process, and we have that
\[
\sup_{t\in [ a,b]_{\mathbb{T}}} | G_n(t,\omega )-G_{m}(t,\omega )| \leq _P\widetilde{L}(\omega ) \sup_{t\in [ a,b]_{\mathbb{T}}} | X_n(t,\omega )-X_{m}(t,\omega )|
\]
for all $n,m\geq n_0$. Using \eqref{a} we infer that the sequence $(G_n(\cdot ,\omega ))_{n\in \mathbb{N}}$ is uniformly convergent with $P.1$. Letting $m\to \infty $, for each $\varepsilon >0$ there exists $n_0\in \mathbb{N}$ such that for every $n\geq n_0$ we have
\[
\sup_{t\in [ a,b]_{\mathbb{T}}} | G_n(t,\omega )-f(t,X(t,\omega ),\omega )| \leq _P\widetilde{L}(\omega ) \sup_{t\in [ a,b]_{\mathbb{T}}} | X_n(t,\omega )-X(t,\omega )| ,
\]
and so $\lim_{n\to \infty } | G_n(t,\omega )-f(t,X(t,\omega ),\omega )| =0$ for all $t\in [ a,b]_{\mathbb{T}}$ with $P.1$. Also, it is easy to see that
\[
\sup_{t\in [ a,b]_{\mathbb{T}}} \Big|\int_a^{t}G_n(s,\omega )\Delta s-\int_a^{t}f(s,X(s,\omega ),\omega )\Delta s\Big| \leq _P \widetilde{L}(\omega )\int_a^{b}| X_n(s,\omega )-X(s,\omega )| \Delta s.
\]
Since $X_n(t,\omega )\to X(t,\omega )$ as $n\to \infty $, uniformly in $t$ with $P.1$, it follows that
\[
\lim_{n\to \infty } \Big|\int_a^{t}G_n(s,\omega )\Delta s-\int_a^{t}f(s,X(s,\omega ),\omega )\Delta s\Big| =0 \quad \forall t\in [ a,b]_{\mathbb{T}}\; \text{with }P.1.
\]
Now, we have
\begin{align*}
&\sup_{t\in [ a,b]_{\mathbb{T}}} \Big| X(t,\omega )-\Big(X_0(\omega )+\int_a^{t}f(s,X(s,\omega ),\omega )\Delta s\Big) \Big| \\
&\leq \sup_{t\in [ a,b]_{\mathbb{T}}} | X(t,\omega )-X_n(t,\omega )| \\
&\quad +\sup_{t\in [ a,b]_{\mathbb{T}}}\Big| X_n(t,\omega )-\Big(X_0(\omega )+\int_a^{t}f(s,X_{n-1}(s,\omega ),\omega )\Delta s\Big) \Big| \\
&\quad +\sup_{t\in [ a,b]_{\mathbb{T}}} \Big| \int_a^{t}f(s,X_{n-1}(s,\omega ),\omega )\Delta s-\int_a^{t}f(s,X(s,\omega ),\omega )\Delta s\Big| ,
\end{align*}
where the second term on the right-hand side is zero by \eqref{succ}. Using the two previous convergence results, we obtain
\[
\Big| X(t,\omega )-\Big(X_0(\omega )+\int_a^{t}f(s,X(s,\omega ),\omega )\Delta s\Big) \Big| =0 \quad \text{for all }t\in [ a,b]_{\mathbb{T}}\ \text{with }P.1;
\]
that is, $X(\cdot ,\cdot )$ satisfies the random integral equation \eqref{inteq}. Then, by Lemma \ref{lem3}, it follows that $X(\cdot ,\cdot )$ is the solution of \eqref{ivp}.

Finally, we show the uniqueness of the solution. For this, we assume that $X(\cdot ,\cdot ),Y(\cdot ,\cdot ):[a,b]_{\mathbb{T}}\times \Omega \to \mathbb{R}$ are two solutions of \eqref{inteq}. Since
\[
|X(t,\omega )-Y(t,\omega )|\leq _P\int_a^{t}\widetilde{L}(\omega )|X(s,\omega )-Y(s,\omega )|\Delta s, \quad t\in [ a,b]_{\mathbb{T}},
\]
from Lemma \ref{lem4} it follows that $|X(t,\omega )-Y(t,\omega )|\leq _P0$, $t\in [ a,b]_{\mathbb{T}}$, and so the proof is complete.
\end{proof}

Let $\mathbb{T}$ be an upper unbounded time scale. Then, under suitable conditions, we can extend the notion of solution of \eqref{ivp} from $[a,b]_{\mathbb{T}}^{\kappa }$ to $[a,\infty )_{\mathbb{T}}:=[a,\infty )\cap \mathbb{T}$, if we define $f$ on $[a,\infty )_{\mathbb{T}}\times \mathbb{R}\times \Omega $ and show that the solution exists on each $[a,b]_{\mathbb{T}}$ where $b\in (a,\infty )_{\mathbb{T}}$, $a<\rho (b)$.
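To illustrate the approximation scheme \eqref{succ} in a simple concrete case (chosen here only for illustration), take $\mathbb{T}=\mathbb{R}$ and $f(t,x,\omega )=\alpha (\omega )x$, where $\alpha ,X_0:\Omega \to \mathbb{R}$ are bounded random variables, so that \eqref{Lip} and \eqref{M} hold with $L(t,\omega )=|\alpha (\omega )|$ and a suitable constant $M$. Then \eqref{succ} gives
\[
X_n(t,\omega )=X_0(\omega )\sum_{k=0}^{n}\frac{\alpha (\omega )^{k}(t-a)^{k}}{k!}
\to X_0(\omega )e^{\alpha (\omega )(t-a)}\quad \text{as }n\to \infty ,
\]
uniformly on $[a,b]$ with $P.1$, which is the solution provided by Theorem \ref{thm1} and agrees with the random exponential used in the next section.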
\begin{theorem} \label{thm2}
Assume that $f:[a,\infty )_{\mathbb{T}}\times \mathbb{R}\times \Omega \to \mathbb{R}$ satisfies the assumptions of Theorem \ref{thm1} on each interval $[a,b]_{\mathbb{T}}$ with $b\in (a,\infty )_{\mathbb{T}}$, $a<\rho (b)$. If there is a constant $M>0$ such that $| f(t,x,\omega )|\leq _PM$ for all $(t,x)\in [ a,b)_{\mathbb{T}}\times \mathbb{R}$, then the problem \eqref{ivp} has a unique solution on $[a,\infty )_{\mathbb{T}}$.
\end{theorem}

\begin{proof}
Let $X(t,\cdot )$ be the solution of \eqref{ivp} which exists on $[a,b)_{\mathbb{T}}$ with $b\in (a,\infty )_{\mathbb{T}}$, $a<\rho (b)$, and suppose that the value of $b$ cannot be increased. First, if $b$ is a left-scattered point, then $\rho (b)\in (a,b)_{\mathbb{T}}$ and the solution $X(t,\cdot )$ exists on $[a,\rho (b)]_{\mathbb{T}}$. But then the solution $X(t,\cdot )$ exists also on $[a,b]_{\mathbb{T}}$, namely by putting
\begin{align*}
X(b,\omega ) &=_P X(\rho (b),\omega )+\mu (\rho (b))X^{\Delta }(\rho (b),\omega ) \\
&=_P X(\rho (b),\omega )+\mu (\rho (b))f(\rho (b),X(\rho (b),\omega ),\omega ).
\end{align*}
If $b$ is a left-dense point, then each of its neighborhoods contains infinitely many points to the left of $b$. Then, for any $t,s\in (a,b)_{\mathbb{T}}$ such that $s<t$, we have
\[
|X(t,\omega )-X(s,\omega )|\leq _P\int_{s}^{t}| f(\tau ,X(\tau ,\omega ),\omega )| \Delta \tau \leq _PM|t-s|,
\]
so, by the Cauchy criterion, the limit $\lim_{t\to b^{-}}X(t,\omega )$ exists with $P.1$ and the solution $X(t,\cdot )$ can be extended to $[a,b]_{\mathbb{T}}$ by defining $X(b,\omega )$ to be this limit. In both cases, denote $X_{b}(\omega ):=X(b,\omega )$ and consider the initial-value problem
\begin{gather*}
X^{\Delta }(t,\omega )=_Pf(t,X(t,\omega ),\omega ), \quad t\in [ b,c]_{\mathbb{T}}^{\kappa },\; c>\sigma (b), \\
X(b,\omega )=_PX_{b}(\omega ).
\end{gather*}
By Theorem \ref{thm1}, one gets that $X(t,\omega )$ can be continued beyond $b$, contradicting our assumptions. Hence every solution $X(t,\omega )$ of \eqref{ivp} exists on $[a,\infty )_{\mathbb{T}}$ and the proof is complete.
\end{proof}

\section{Random linear systems on time scales}

Let $a:\Omega \to \mathbb{R}$ be a positively regressive random variable; that is, $1+\mu (t)a(\omega )>0$ for all $t\in \mathbb{T}$ and $\omega \in \Omega $. Then, by Lemma \ref{lem2}, the function $(t,\omega )\mapsto e_{a(\omega )}(t,t_0)$ defined by
\[
e_{a(\omega )}(t,t_0)=_P\exp \Big(\int_{t_0}^{t}\frac{\log (1+\mu (\tau )a(\omega ))}{\mu (\tau )}\Delta \tau\Big) , \quad t_0,t\in \mathbb{T},
\]
where the integrand is understood as $a(\omega )$ at the points $\tau $ with $\mu (\tau )=0$, is a continuous time scale stochastic process. For each fixed $\omega \in \Omega $, the sample path $t\mapsto e_{a(\omega )}(t,t_0)$ is the exponential function on time scales (see \cite{boh}). It is easy to check that the stochastic process $(t,\omega )\mapsto e_{a(\omega )}(t,t_0)$ is a solution of the initial value problem (for the deterministic case, see \cite[Theorem 2.33]{boh})
\begin{equation}
\begin{gathered}
X^{\Delta }(t,\omega )=_Pa(\omega )X(t,\omega ), \quad t\in [t_0,b]_{\mathbb{T}}^{\kappa } \\
X(t_0,\omega )=_P1.
\end{gathered} \label{exp}
\end{equation}
If $a:\Omega \to \mathbb{R}$ is bounded with $P.1$ then, by Theorems \ref{thm1} and \ref{thm2}, it follows that \eqref{exp} has a unique solution on $[t_0,\infty )_{\mathbb{T}}$.

Further, consider the following nonhomogeneous initial value problem
\begin{equation}
\begin{gathered}
X^{\Delta }(t,\omega )=_Pa(\omega )X(t,\omega )+h(t,\omega ), \quad t\in [ t_0,b]_{\mathbb{T}}^{\kappa } \\
X(t_0,\omega )=_PX_0(\omega ),
\end{gathered} \label{ivp-l}
\end{equation}
where $a:\Omega \to \mathbb{R}$ is a positively regressive random variable, $X_0:\Omega \to \mathbb{R}$ is a bounded random variable, and $h(\cdot ,\cdot ):[t_0,b]_{\mathbb{T}}^{\kappa }\times \Omega \to \mathbb{R}$ is an rd-continuous time scale stochastic process.
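For concreteness, in the two basic particular cases the random exponential takes a familiar form: if $\mathbb{T}=\mathbb{R}$, then $e_{a(\omega )}(t,t_0)=e^{a(\omega )(t-t_0)}$ and \eqref{exp} is the random ordinary differential equation $X'(t,\omega )=a(\omega )X(t,\omega )$, $X(t_0,\omega )=1$, while if $\mathbb{T}=\mathbb{Z}$, then
\[
e_{a(\omega )}(t,t_0)=(1+a(\omega ))^{t-t_0},
\]
and positive regressivity simply means $a(\omega )>-1$ for all $\omega \in \Omega $.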
\begin{theorem} \label{thm3}
Suppose that $a:\Omega \to \mathbb{R}$ is a positively regressive and bounded random variable, $X_0:\Omega \to \mathbb{R}$ is a bounded random variable, and $h(\cdot ,\cdot ):[t_0,\infty )_{\mathbb{T}}\times \Omega \to \mathbb{R}$ is an rd-continuous time scale stochastic process. If there is a constant $\nu >0$ such that $| h(t,\omega )| \leq _P\nu $ for all $t\in [ t_0,b)_{\mathbb{T}}$ with $b\in (t_0,\infty )_{\mathbb{T}}$, $t_0<\rho (b)$, then the initial-value problem \eqref{ivp-l} has a unique solution on $[t_0,\infty )_{\mathbb{T}}$.
\end{theorem}

\begin{proof}
First, we observe that if we put $f(t,x,\omega ):=a(\omega )x+h(t,\omega )$, then $f$ satisfies the conditions (H1) and (H2). Moreover,
\[
| f(t,x,\omega )-f(t,y,\omega )| \leq _P|a(\omega )| | x-y|
\]
for every $t\in [ t_0,\infty )_{\mathbb{T}}$ and every $x,y\in \mathbb{R}$. Therefore, by Theorem \ref{thm1}, it follows that \eqref{ivp-l} has a unique solution on $[t_0,b]_{\mathbb{T}}^{\kappa }$.

Further, let $X(t,\cdot )$ be the solution of \eqref{ivp-l} which exists on $[t_0,b)_{\mathbb{T}}$ with $b\in (t_0,\infty )_{\mathbb{T}}$, $t_0<\rho (b)$. Also, let $N>0$ and $K>0$ be such that $| a(\omega )| \leq _PN$ and $|X_0(\omega )| \leq _PK$. Then we have
\begin{gather*}
| X(t,\omega )| \leq | X(t_0,\omega )| +\int_{t_0}^{t}| a(\omega )X(s,\omega )| \Delta s+\int_{t_0}^{t}| h(s,\omega )| \Delta s \\
\leq _PK+\nu (t-t_0)+N\int_{t_0}^{t}| X(s,\omega )| \Delta s.
\end{gather*}
Then, by \cite[Corollary 6.8]{boh}, it follows that
\[
| X(t,\omega )| \leq _P\Big(K+\frac{\nu }{N}\Big)e_{N}(t,t_0)-\frac{\nu }{N}\leq \Big(K+\frac{\nu }{N}\Big)e_{N}(b,t_0).
\]
Hence $| f(t,X(t,\omega ),\omega )| \leq _PN| X(t,\omega )| +\nu \leq _PM:=\nu +N\big(K+\frac{\nu }{N}\big)e_{N}(b,t_0)$. Proceeding as in the proof of Theorem \ref{thm2}, it follows that the unique solution of \eqref{ivp-l} exists on $[t_0,\infty )_{\mathbb{T}}$.
\end{proof}

\begin{theorem}[Variation of Constants] \label{thm4}
A continuous time scale stochastic process $X(\cdot ,\cdot ):[t_0,\infty )_{\mathbb{T}}\times \Omega \to \mathbb{R}$ is a solution of the initial-value problem \eqref{ivp-l} if and only if
\[
X(t,\omega )=_Pe_{a(\omega )}(t,t_0)X_0(\omega )+\int_{t_0}^{t}e_{a(\omega )}(t,\sigma (s))h(s,\omega )\Delta s, \quad t\in [ t_0,\infty )_{\mathbb{T}}.
\]
\end{theorem}

\begin{proof}
Multiplying $X^{\Delta }(t,\omega)=_Pa(\omega )X(t,\omega )+h(t,\omega )$ by $e_{a(\omega )}(t_0,\sigma(t))$, we obtain
\[
X^{\Delta }(t,\omega )e_{a(\omega )}(t_0,\sigma (t))-a(\omega )X(t,\omega )e_{a(\omega )}(t_0,\sigma (t))=_Ph(t,\omega )e_{a(\omega )}(t_0,\sigma (t));
\]
that is,
\[
[ X(t,\omega )e_{a(\omega )}(t_0,t)]^{\Delta }=_Ph(t,\omega )e_{a(\omega )}(t_0,\sigma (t)).
\]
Integrating both sides of the last equality from $t_0$ to $t$, it follows that
\[
X(t,\omega )e_{a(\omega )}(t_0,t)-X(t_0,\omega )e_{a(\omega )}(t_0,t_0)=_P\int_{t_0}^{t}e_{a(\omega )}(t_0,\sigma (s))h(s,\omega )\Delta s.
\]
Multiplying the last equality by $e_{a(\omega )}(t,t_0)$, we obtain the stated representation of the solution.
\end{proof}

\begin{corollary} \label{coro1}
Let $X_0:\Omega \to \mathbb{R}$ be a bounded random variable. If the positively regressive random variable $a:\Omega \to \mathbb{R}$ is bounded with $P.1$, then the unique solution of the initial-value problem
\begin{gather*}
X^{\Delta }(t,\omega )=_Pa(\omega )X(t,\omega ), \quad t\in [t_0,\infty )_{\mathbb{T}} \\
X(t_0,\omega )=_PX_0(\omega )
\end{gather*}
is given by
\[
X(t,\omega )=_Pe_{a(\omega )}(t,t_0)X_0(\omega ), \quad t\in [t_0,\infty )_{\mathbb{T}}.
\]
\end{corollary}

\begin{remark} \label{rmk2} \rm
Let $X_0:\Omega \to \mathbb{R}$ be a bounded random variable. If the positively regressive random variable $a:\Omega \to \mathbb{R}$ is bounded with $P.1$, then the unique solution of the initial-value problem
\begin{gather*}
X^{\Delta }(t,\omega )=_P-a(\omega )X^{\sigma }(t,\omega ), \quad t\in[ t_0,\infty )_{\mathbb{T}} \\
X(t_0,\omega )=_PX_0(\omega )
\end{gather*}
is given by
\[
X(t,\omega )=_Pe_{\ominus a(\omega )}(t,t_0)X_0(\omega ), \quad t\in [ t_0,\infty )_{\mathbb{T}},
\]
where $\ominus a(\omega )=-\frac{a(\omega )}{1+\mu (t)a(\omega )}$ (see \cite{boh}) and $X^{\sigma }(t,\omega )=X(\sigma (t),\omega )$. Indeed, we have (see \cite{boh})
\begin{align*}
X^{\Delta }(t,\omega ) &=_P\Big(\frac{1}{e_{a(\omega )}(t,t_0)}\Big) ^{\Delta }X_0(\omega )=_P-\frac{a(\omega )}{e_{a(\omega )}(\sigma (t),t_0)}X_0(\omega ) \\
&=_P-a(\omega )e_{\ominus a(\omega )}(\sigma (t),t_0)X_0(\omega )=_P-a(\omega )X^{\sigma }(t,\omega ).
\end{align*}
\end{remark}

\begin{theorem}[Variation of Constants] \label{thm5}
Suppose that $a:\Omega \to \mathbb{R}$ is a positively regressive and bounded random variable, $X_0:\Omega \to \mathbb{R}$ is a bounded random variable, and $h(\cdot ,\cdot ):[t_0,\infty )_{\mathbb{T}}\times \Omega \to \mathbb{R}$ is an rd-continuous time scale stochastic process. If there is a constant $\nu >0$ such that $| h(t,\omega )| \leq _P\nu $ for all $t\in [ t_0,b)_{\mathbb{T}}$ with $b\in (t_0,\infty )_{\mathbb{T}}$, $t_0<\rho (b)$, then the initial-value problem
\begin{equation}
\begin{gathered}
X^{\Delta }(t,\omega )=_P-a(\omega )X^{\sigma }(t,\omega )+h(t,\omega ),\quad t\in [ t_0,\infty )_{\mathbb{T}} \\
X(t_0,\omega )=_PX_0(\omega ),
\end{gathered} \label{aivp}
\end{equation}
has a unique solution on $[t_0,\infty )_{\mathbb{T}}$ given by
\begin{equation}
X(t,\omega )=_Pe_{\ominus a(\omega )}(t,t_0)X_0(\omega )+\int_{t_0}^{t}e_{\ominus a(\omega )}(t,s)h(s,\omega )\Delta s, \quad t\in [ t_0,\infty )_{\mathbb{T}}. \label{aivp-s}
\end{equation}
\end{theorem}

\begin{proof}
Multiplying both sides of the equation in \eqref{aivp} by $e_{a(\omega )}(t,t_0)$, we have
\begin{align*}
(e_{a(\omega )}(t,t_0)X(t,\omega )) ^{\Delta }
&=_Pe_{a(\omega )}(t,t_0)X^{\Delta }(t,\omega )+a(\omega )e_{a(\omega )}(t,t_0)X^{\sigma }(t,\omega ) \\
&=_Pe_{a(\omega )}(t,t_0)[X^{\Delta }(t,\omega )+a(\omega )X^{\sigma }(t,\omega )] \\
&=_Pe_{a(\omega )}(t,t_0)h(t,\omega ).
\end{align*}
Next, we integrate both sides from $t_0$ to $t$ and infer that
\[
e_{a(\omega )}(t,t_0)X(t,\omega )-e_{a(\omega )}(t_0,t_0)X(t_0,\omega )=_P\int_{t_0}^{t}e_{a(\omega )}(s,t_0)h(s,\omega )\Delta s;
\]
that is,
\[
e_{a(\omega )}(t,t_0)X(t,\omega )=_PX_0(\omega )+\int_{t_0}^{t}e_{a(\omega )}(s,t_0)h(s,\omega )\Delta s.
\]
Since
\[
e_{a(\omega )}(t_0,t)=\frac{1}{e_{a(\omega )}(t,t_0)}=e_{\ominus a(\omega )}(t,t_0) \quad\text{and}\quad e_{a(\omega )}(t_0,t)e_{a(\omega )}(t,t_0)=1
\]
(see \cite[Theorem 2.36]{boh}), multiplying both sides of the last equality by $e_{a(\omega )}(t_0,t)$ we obtain \eqref{aivp-s}.
\end{proof}

\begin{example} \label{examp3} \rm
Let us consider $\Omega =(0,1)$, $\mathcal{F}$ the $\sigma $-algebra of all Borel subsets of $\Omega $, $P$ the Lebesgue measure on $\Omega $, and the following initial-value problem
\begin{equation}
\begin{gathered}
X^{\Delta }(t,\omega )=_P\omega X(t,\omega )+e_{\omega }(t,0), \quad t\in [ 0,\infty )_{\mathbb{T}} \\
X(0,\omega )=_P1-\omega .
\end{gathered} \label{ex-3}
\end{equation}
Then, by Theorems \ref{thm2} and \ref{thm3}, the initial value problem \eqref{ex-3} has a unique solution on $[0,\infty )_{\mathbb{T}}$, which, by Theorem \ref{thm4}, is given by
\[
X(t,\omega )=_P(1-\omega )e_{\omega }(t,0)+\int_0^{t}e_{\omega }(t,\sigma (s))e_{\omega }(s,0)\Delta s;
\]
that is,
\[
X(t,\omega )=_Pe_{\omega }(t,0)\Big[ 1-\omega +\int_0^{t}\frac{1}{1+\mu (s)\omega }\Delta s\Big] , \quad t\in [ 0,\infty )_{\mathbb{T}}.
\]
\end{example}

Next, consider two particular cases. If $\mathbb{T}=\mathbb{R}$, then $\mu (t)=0$ for all $t\in \mathbb{R}$, and $e_{\omega }(t,0)=e^{\omega t}$. Moreover, in this case we have
\[
\int_0^{t}\frac{1}{1+\mu (s)\omega }\Delta s=\int_0^{t}ds=t.
\]
It follows that the initial-value problem
\begin{gather*}
X^{\Delta }(t,\omega )=_P\omega X(t,\omega )+e^{\omega t}, \quad t\in [ 0,\infty ) \\
X(0,\omega )=_P1-\omega ,
\end{gather*}
has the solution $X(t,\omega )=(1-\omega +t)e^{\omega t}$, $t\in [ 0,\infty )$.

If $\mathbb{T}=\mathbb{N}$, then $\mu (n)=1$ for all $n\in \mathbb{N}$, and $e_{\omega }(n,0)=(1+\omega )^n$. Moreover, in this case we have
\[
\int_0^{n}\frac{1}{1+\mu (s)\omega }\Delta s=\sum_{s\in [ 0,n)}\frac{1}{1+\omega }=\frac{n}{1+\omega }.
\]
It follows that the difference initial-value problem
\begin{gather*}
X_{n+1}(\omega )=_P(1+\omega )X_n(\omega )+(1+\omega )^n, \quad n\in \mathbb{N} \\
X_0(\omega )=_P1-\omega ,
\end{gather*}
has the solution $X_n(\omega )=(1-\omega +\frac{n}{1+\omega })(1+\omega )^n$, $n\in \mathbb{N}$.

\begin{example} \label{examp4} \rm
Let us consider $\Omega =(0,1)$, $\mathcal{F}$ the $\sigma $-algebra of all Borel subsets of $\Omega $, $P$ the Lebesgue measure on $\Omega $, and the initial-value problem
\begin{equation}
\begin{gathered}
X^{\Delta }(t,\omega )=_P-\omega X^{\sigma }(t,\omega )+e_{\ominus \omega }(t,t_0),\quad t\in [ t_0,\infty )_{\mathbb{T}} \\
X(t_0,\omega )=_P1-\omega .
\end{gathered} \label{ex-4}
\end{equation}
By Theorem \ref{thm5}, the initial-value problem \eqref{ex-4} has a unique solution on $[t_0,\infty )_{\mathbb{T}}$, given by
\[
X(t,\omega )=_P(1-\omega )e_{\ominus \omega }(t,t_0)+\int_{t_0}^{t}e_{\ominus \omega }(t,s)e_{\ominus \omega }(s,t_0)\Delta s;
\]
that is,
\[
X(t,\omega )=_P(1-\omega -t_0+t) e_{\ominus \omega }(t,t_0), \quad t\in [ t_0,\infty )_{\mathbb{T}}.
\]
If $\mathbb{T}=h\mathbb{N}$ with $h>0$ and $t_0=0$, then $\mu (t)=h$ for all $t\in h\mathbb{N}$, and $e_{\ominus \omega }(t,0)=(1+\omega h)^{-t/h}$. It follows that the $h$-difference initial-value problem
\begin{gather*}
X_{t+h}(\omega )=_P\frac{1}{1+\omega h}X_t(\omega )+h(1+\omega h)^{-t/h-1}, \quad t\in h\mathbb{N} \\
X_0(\omega )=_P1-\omega ,
\end{gather*}
has the unique solution $X_t(\omega )=_P(1-\omega +t) (1+\omega h)^{-t/h}$, $t\in h\mathbb{N}$.

If $\mathbb{T}=2^{\mathbb{N}}=\{1,2,4,\dots \}$ and $t_0=1$, then $\mu (t)=t$ for all $t\in 2^{\mathbb{N}}$, and $e_{\ominus \omega }(t,1)=\prod_{s\in [ 1,t)}(1+\omega s)^{-1}$. It follows that the $2$-difference initial-value problem
\begin{gather*}
X_t(\omega )=_P(1+\omega t)X_{2t}(\omega )-t\prod_{s\in [ 1,t)}(1+\omega s)^{-1},\quad t\in 2^{\mathbb{N}} \\
X_1(\omega )=_P1-\omega ,
\end{gather*}
has the unique solution $X_t(\omega )=_P(t-\omega ) \prod_{s\in [ 1,t)}(1+\omega s)^{-1}$, $t\in 2^{\mathbb{N}}$.
\end{example}

\begin{thebibliography}{00}

\bibitem{agra} R. P. Agarwal, M. Bohner; \emph{Basic calculus on time scales and some of its applications}, Results Math. 35 (1999) 3--22.

\bibitem{agra2} R. P. Agarwal, M. Bohner, D. O'Regan, A. Peterson; \emph{Dynamic equations on time scales: a survey}, J. Comput.
Appl. Math. 141(1--2) (2002) 1--26.

\bibitem{boh} M. Bohner, A. Peterson; \emph{Dynamic Equations on Time Scales: An Introduction with Applications}, Birkh\"{a}user, Boston, 2001.

\bibitem{boh2} M. Bohner, A. Peterson; \emph{Advances in Dynamic Equations on Time Scales}, Birkh\"{a}user, Boston, 2003.

\bibitem{bohs} M. Bohner, S. Sanyal; \emph{The stochastic dynamic exponential and geometric Brownian motion on isolated time scales}, Communications in Mathematical Analysis 8(3) (2010) 120--135.

\bibitem{ch} K. L. Chung; \emph{Elementary Probability Theory with Stochastic Processes}, Springer, 1975.

\bibitem{gro} D. Grow, S. Sanyal; \emph{Brownian motion indexed by a time scale}, Stochastic Analysis and Applications. Accepted, November 2010.

\bibitem{guse} G. Sh. Guseinov; \emph{Integration on time scales}, J. Math. Anal. Appl. 285 (2003) 107--127.

\bibitem{guse1} G. Sh. Guseinov, B. Kaymakcalan; \emph{Basics of Riemann delta and nabla integration on time scales}, J. Difference Equations Appl. 8 (2002) 1001--1017.

\bibitem{hil-88} S. Hilger; \emph{Ein Ma\ss kettenkalk\"{u}l mit Anwendung auf Zentrumsmannigfaltigkeiten}, Ph.D. Thesis, Universit\"{a}t W\"{u}rzburg, 1988.

\bibitem{hil-90} S. Hilger; \emph{Analysis on measure chains--a unified approach to continuous and discrete calculus}, Results Math. 18 (1990) 18--56.

\bibitem{ol} B. K. \O ksendal; \emph{Stochastic Differential Equations: An Introduction with Applications}, 4th ed., Springer, 1995.

\bibitem{san} S. Sanyal; \emph{Stochastic Dynamic Equations}, PhD dissertation, Missouri University of Science and Technology, 2008.

\bibitem{til} C. C. Tisdell, A. H. Zaidi; \emph{Successive approximations to solutions of dynamic equations on time scales}, Communications in Applied Nonlinear Analysis 16(1) (2009) 61--87.

\bibitem{til1} C. C. Tisdell, A. H. Zaidi; \emph{Basic qualitative and quantitative results for solutions to nonlinear, dynamic equations on time scales with an application to economic modelling}, Nonlinear Analysis 68(11) (2008) 3504--3524.

\end{thebibliography}
\end{document}