\documentclass[reqno]{amsart}
\usepackage{hyperref}
\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations}, Vol. 2011 (2011), No. 159, pp. 1--14.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline
ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document}

\title[\hfilneg EJDE-2011/159\hfil Second-order differential inclusions]
{Stability of second-order differential inclusions}

\author[H. Gonz\'alez\hfil EJDE-2011/159\hfilneg]
{Henry Gonz\'alez}

\address{Henry Gonz\'alez \newline
Faculty of Light Industry and Environmental Protection Engineering,
Obuda University, 1034 Budapest, B\'ecsi \'ut 96/B, Hungary}
\email{gonzalez.henry@rkk.uni-obuda.hu}
\thanks{Submitted January 31, 2011. Published November 28, 2011.}
\subjclass[2000]{93D09, 34A60}
\keywords{Robust stability; stability radius; differential inclusions}

\begin{abstract}
For an arbitrary second-order stable matrix $A$, we calculate the maximum
positive value $R$ for which the differential inclusion
$$
\dot{x}\in F_{R}(x):=\{(A+\Delta)x:\ \Delta \in \mathbb{R}^{2\times 2},\
\|\Delta \| \leq R \}
$$
is asymptotically stable.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\allowdisplaybreaks

\section{Introduction}\label{sec.int}

Let $A$ be a second-order stable matrix (all the eigenvalues of $A$ have
negative real part) and let $R$ be a positive real number. For each vector $x$
in the plane we consider the set of vectors
\begin{equation}
F_{R}(x):=\{(A+\Delta)x:\ \Delta \in \mathbb{R}^{2\times 2},\ \|\Delta \| \leq R \},
\label{set}
\end{equation}
where $\|\cdot \|$ denotes the operator norm of a matrix. The objective of this
work is to study the global asymptotic stability (g.a.s.) of the
parameter-dependent differential inclusion
\begin{equation}
\dot{x} \in F_{R}(x). \label{inclusion}
\end{equation}
The main task is to compute the number
\begin{equation}
R_{i}(A)=\inf \{ R>0:\ \dot{x} \in F_{R}(x) \text{ is not g.a.s.}\}.
\label{incstabradius}
\end{equation}
This number is closely related to the robustness of stability of the linear
system $\dot{x}=Ax$ under unstructured real time-varying and nonlinear
perturbations. As in \cite{Hin1} we consider perturbed systems of the following
types:
\begin{equation}
\begin{gathered}
\Sigma_{\Delta}:\quad \dot{x}(t)=Ax(t)+\Delta x(t)\\
\Sigma_{N} :\quad \dot{x}(t)=Ax(t)+N(x(t))\\
\Sigma_{\Delta (t)} :\quad \dot{x}(t)=Ax(t)+\Delta (t) x(t)\\
\Sigma_{N(t)} :\quad \dot{x}(t)=Ax(t)+N(x(t),t),
\end{gathered} \label{perturbedsystems}
\end{equation}
where
\begin{itemize}
\item $\Delta \in \mathbb{R}^{2\times 2}$;
\item $N:\mathbb{R}^2 \to \mathbb{R}^2$, $N(0)=0$, $N$ is differentiable at
$0$ and locally Lipschitz, and there exists $\gamma \geq 0$ such that
$\| N(x)\| \leq \gamma \| x \|$ for all $x\in \mathbb{R}^2$;
\item $\Delta (\cdot) \in L^{\infty}(\mathbb{R}_+ , \mathbb{R}^{2\times 2})$;
\item $N(\cdot,\cdot):\mathbb{R}^2 \times \mathbb{R}_{+}\to \mathbb{R}^2$,
$N(0,t)=0$ for all $t\in \mathbb{R}_{+}$, $N(x,t)$ is locally Lipschitz in $x$
and continuous in $t$, and there exists $\gamma \geq 0$ such that
$\| N(x,t)\| \leq \gamma \| x \|$ for all $x\in \mathbb{R}^2$, $t\in \mathbb{R}_{+}$.
\end{itemize}
The corresponding sets of perturbations are denoted by
$\mathbb{R}^{2\times 2}$, $P_n (\mathbb{R})$, $P_t (\mathbb{R})$,
$P_{nt} (\mathbb{R})$, respectively.
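The set $F_R(x)$ in \eqref{set} is, for $x\ne 0$, just the closed disc of
radius $R\|x\|$ centred at $Ax$ (this is proved in Lemma \ref{lem1} below).
The following short Python sketch is only a numerical illustration of this
description and is not part of the argument; the matrix $A$, the level $R$ and
the point $x$ in it are arbitrary sample values.
\begin{verbatim}
# Illustration only: sample perturbations Delta with operator norm at most R
# and check that the vectors (A+Delta)x stay in the disc of radius R*||x||
# centred at Ax.  A, R and x below are arbitrary sample values.
import numpy as np

A = np.array([[-2.0, 1.0], [0.0, -3.0]])   # any stable 2x2 matrix
R = 0.5                                    # perturbation level
x = np.array([1.0, 2.0])

rng = np.random.default_rng(0)
for _ in range(1000):
    D = rng.normal(size=(2, 2))
    D *= R * rng.random() / np.linalg.norm(D, 2)   # now ||D|| <= R
    y = (A + D) @ x                                # an element of F_R(x)
    assert np.linalg.norm(y - A @ x) <= R * np.linalg.norm(x) + 1e-12
print("all sampled points of F_R(x) lie in the disc of radius R*||x|| about Ax")
\end{verbatim}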
As perturbation norms we choose
\begin{itemize}
\item $\| \Delta \|$, the operator norm of the matrix;
\item $\| N\|_n =\inf \{ \gamma > 0:\ \forall x\in \mathbb{R}^2:\
\| N(x)\| \leq \gamma \| x \| \}$, $N\in P_n (\mathbb{R})$;
\item $\| \Delta \|_t =\operatorname{ess\,sup}_{t\in \mathbb{R}_+} \| \Delta (t) \|$,
$\Delta \in P_t (\mathbb{R})$;
\item $\| N\|_{nt} =\inf \{ \gamma > 0:\ \forall t\in \mathbb{R}_+ \
\forall x\in \mathbb{R}^2:\ \| N(x,t)\| \leq \gamma \| x \| \}$,
$N\in P_{nt} (\mathbb{R})$.
\end{itemize}
Following \cite{Hin1} (see also \cite{Hin2,Hin3}), we define the stability
radii of $A$ with respect to the considered perturbation classes:
\begin{equation}
\begin{gathered}
R(A)=\inf \{ \| \Delta \|:\ \Delta \in \mathbb{R}^{2\times 2},\
\Sigma_{\Delta} \text{ is not g.a.s.} \},\\
R_n (A)=\inf \{ \| N \|_n:\ N \in P_n (\mathbb{R}),\ \Sigma_{N} \text{ is not g.a.s.} \},\\
R_t (A)=\inf \{ \| \Delta \|_t:\ \Delta \in P_t (\mathbb{R}),\
\Sigma_{\Delta(t)} \text{ is not g.a.s.} \},\\
R_{nt} (A)=\inf \{ \| N \|_{nt}:\ N \in P_{nt} (\mathbb{R}),\
\Sigma_{N(t)} \text{ is not g.a.s.} \}.
\end{gathered}
\end{equation}
For these stability radii it has been shown in \cite{Hin1} that
\begin{equation}
R(A)\geq R_{n} (A) \geq R_{t} (A) \geq R_{nt} (A). \label{radineq}
\end{equation}
In \cite{HinMot} it is proved that
\begin{equation}
R(A)=\min \big\{ \underline{\sigma}(A),-\frac{1}{2} \operatorname{tr}(A) \big\},
\label{rtimeinvariant}
\end{equation}
where $\underline{\sigma}(A)$ is the smallest singular value and
$\operatorname{tr}(A)$ is the trace of the matrix $A$.
In Section \ref{sec.FilAppl} we show that $R_{nt} (A)\geq R_i(A)$, so that,
based on this fact, \eqref{rtimeinvariant} and \eqref{radineq}, we have
\begin{equation}
R_i(A)\leq R_{nt}(A)\leq R_t(A)\leq R_n(A)\leq R(A), \label{radiiresult}
\end{equation}
and we can restrict the analysis of the asymptotic stability of the
differential inclusion \eqref{set}--\eqref{inclusion} to the values $R<R(A)$.

\section{Filippov's theorem}

In this section we recall a result of Filippov on the asymptotic stability of
planar differential inclusions
\begin{equation}
\dot{x}\in F(x), \quad x\in \mathbb{R}^2, \label{inclusionF}
\end{equation}
where the set-valued map $F$ satisfies the conditions
\begin{itemize}
\item[(i)] $F(x)$ is nonempty, bounded and closed for every $x\in\mathbb{R}^2$;
\item[(ii)] $F(x)$ is convex for every $x\in\mathbb{R}^2$;
\item[(iii)] $F(cx)=cF(x)$ for every $x\in\mathbb{R}^2$ and every $c>0$.
\end{itemize}
Writing $x=\rho\,(\cos\varphi,\sin\varphi)^{T}$ and setting
$u(\varphi)=(\cos\varphi,\sin\varphi)^{T}$,
$u^{\perp}(\varphi)=(-\sin\varphi,\cos\varphi)^{T}$, every solution of
\eqref{inclusionF} satisfies in polar coordinates
\begin{gather*}
\frac{\dot{\rho}(t)}{\rho(t)}=y_1(t),\\
\dot{\varphi}(t)=y_2(t),\quad (y_1(t),y_2(t))\in \widetilde{F}(\varphi(t)),
\end{gather*}
where
\[
\widetilde{F}(\varphi):=\big\{\big(\langle y,u(\varphi)\rangle,
\langle y,u^{\perp}(\varphi)\rangle\big):\ y\in F(u(\varphi))\big\}.
\]
Denote
\begin{gather*}
\widetilde {F}^{+} (\varphi):=\{(y_1 ,y_2) \in \widetilde {F} (\varphi): y_2 >0 \}, \\
\widetilde {F}^{-} (\varphi):=\{(y_1 ,y_2) \in \widetilde {F} (\varphi): y_2 <0 \}.
\end{gather*}
For $\varphi$ such that $\widetilde {F}^{+} (\varphi)\ne \emptyset$
(resp. $\widetilde {F}^{-} (\varphi)\ne \emptyset$), we put
\begin{equation}
K^{+}(\varphi):=\sup_{(y_1 ,y_2)\in \widetilde {F}^{+} (\varphi)} \frac{y_1}{|y_2 |},
\quad \Big(\text{resp. } K^{-}(\varphi):=\sup_{(y_1 ,y_2)\in \widetilde {F}^{-}
(\varphi)} \frac{y_1}{|y_2 |} \Big). \label{sup}
\end{equation}
By Filippov's theorem, the differential inclusion \eqref{inclusionF}
satisfying conditions (i)--(iii) is asymptotically stable if and only if for
all $x \ne 0$ the set $F(x)$ has no common points with the ray $c x$,
$0 \leq c< +\infty$, and, whenever the set $\widetilde {F}^{+} (\varphi)$
(resp. $\widetilde {F}^{-} (\varphi)$) is nonempty for almost all $\varphi$,
the inequality
\[
\int_{0}^{2 \pi} K^{+}(\varphi)\, d \varphi <0 \quad
\Big(\text{resp. } \int_{0}^{2 \pi} K^{-}(\varphi)\, d \varphi <0 \Big)
\]
holds.

\section{Application of Filippov's theorem}\label{sec.FilAppl}

From definition \eqref{set} we see that for all $R>0$ and all
$x\in \mathbb{R}^2$ the set $F_R (x)$ is nonempty, bounded, closed and convex
in the plane, and that $F_R(cx)=cF_R(x)$ for every $c>0$. So the differential
inclusion \eqref{set}--\eqref{inclusion} satisfies properties (i)--(iii) and
Filippov's theorem can be applied. The following lemma allows us to write the
set $F_R (x)$ in the form in which we will use it in the application of
Filippov's theorem.
\begin{lemma} \label{lem1}
For all $R>0$ and $x\in \mathbb{R}^2$,
\[
\big\{ \Delta x:\ \Delta \in \mathbb{R}^{2\times 2},\ \| \Delta \| \leq R \big\}=
\big\{ r \| x \| \begin{pmatrix}\cos \theta \\ \sin \theta \end{pmatrix} :\
0 \leq r \leq R,\ 0 \leq \theta < 2 \pi \big\}.
\]
\end{lemma}

\begin{proof}
Let $z=\Delta x$ with $\Delta \in \mathbb{R}^{2\times 2}$, $\| \Delta \| \leq R$.
Then $\|z\|=\|\Delta x\| \leq R\|x\|$, so there exist $r\in[0,R]$ and
$\theta \in [0,2\pi)$ such that
$z=r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta \end{pmatrix}$; hence
$z\in \{ r \| x \| \begin{pmatrix} \cos\theta \\ \sin \theta \end{pmatrix}:
0 \leq r \leq R,\ 0 \leq \theta < 2 \pi \}$.
Conversely, let
$z=r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta \end{pmatrix}$ with
$0 \leq r \leq R$, $0 \leq \theta < 2 \pi$. Then there exists
$\widetilde \Delta \in \mathbb{R}^{2\times 2}$ such that
$\widetilde \Delta x=r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta \end{pmatrix}$,
so $\| \widetilde \Delta x\|\leq R\|x\|$, and by the well-known Hahn--Banach
theorem $\widetilde \Delta$ may be chosen such that
$\| \widetilde \Delta \| \leq R$. Therefore
$z\in \{ \Delta x:\ \Delta \in \mathbb{R}^{2\times 2},\ \| \Delta \| \leq R \}$.
\end{proof}

As a direct consequence of this lemma, the inclusion
\eqref{set}--\eqref{inclusion} can be written in the form
\begin{equation}
\dot{x} \in \big\{ Ax+r\|x\| \begin{pmatrix} \cos\theta \\ \sin \theta \end{pmatrix} :\
0 \leq r \leq R,\ 0 \leq \theta < 2 \pi\big \} =F_R (x). \label{inclusiontheta}
\end{equation}
Changing to polar coordinates in \eqref{inclusiontheta} we obtain
\begin{gather*}
\frac{\dot{\rho} (t)}{\rho(t)}=y_1(t), \\
\dot{\varphi} (t)=y_2(t),\quad (y_1 (t),y_2 (t))\in \widetilde {F}_{R} (\varphi(t)),
\end{gather*}
where
\begin{gather*}
\widetilde {F}_{R} (\varphi) :=\big\{(y_1 (\varphi, \theta, r),
y_2 (\varphi, \theta, r)):\ 0 \leq r \leq R,\ 0 \leq \theta < 2 \pi \big\}, \\
y_1 (\varphi, \theta, r):=f_1(\varphi)+r \cos(\theta -\varphi), \\
y_2 (\varphi, \theta, r):=f_2(\varphi)+r \sin(\theta -\varphi),
\end{gather*}
and
\begin{gather}
f_1(\varphi):=a_{11} \cos^2 (\varphi)+(a_{12} +a_{21}) \sin(\varphi) \cos(\varphi)
+a_{22} \sin^2 (\varphi), \label{f1} \\
f_2(\varphi):=a_{21} \cos^2 (\varphi)+(a_{22} -a_{11}) \sin(\varphi) \cos(\varphi)
-a_{12} \sin^2 (\varphi). \label{f2}
\end{gather}
Using trigonometric identities we have
\begin{gather}
f_1(\varphi)=m_1 +n \sin 2(\varphi - \chi), \label{f12}\\
f_2(\varphi)=m_2 +n \cos 2(\varphi - \chi), \label{f22}
\end{gather}
where
\begin{equation}
m_1=\frac{a_{11}+a_{22}}{2}, \quad m_2=\frac{a_{21}-a_{12}}{2}, \quad
n=\sqrt{\Big(\frac{a_{11}-a_{22}}{2}\Big)^2+ \Big(\frac{a_{12}+a_{21}}{2}\Big)^2}
\label{m1m2n}
\end{equation}
and
\[
\cos 2\chi =\frac{a_{12}+a_{21}}{2n}, \quad \sin 2\chi =-\frac{a_{11}-a_{22}}{2n}.
\]
From expressions \eqref{f12}, \eqref{f22} it follows that
\[
\min \{ f_2 (\varphi ):\ \varphi \in [0, 2\pi ) \}= m_2-n, \quad
\max \{ f_2 (\varphi ):\ \varphi \in [0, 2\pi ) \}= m_2+n.
\]
For the corresponding sets $\widetilde {F}^{+}(\varphi)$ and
$\widetilde {F}^{-}(\varphi)$ that appear in Filippov's theorem, we have
\begin{gather*}
\widetilde {F}^{+}_{R}(\varphi)=\{(y_1 ,y_2) \in \widetilde {F}_R (\varphi): y_2 >0 \}, \\
\widetilde {F}^{-}_{R}(\varphi)=\{(y_1 ,y_2) \in \widetilde {F}_R (\varphi): y_2 <0 \}.
\end{gather*}
Denote
\begin{equation}
\begin{gathered}
R^+ (A):=-\min \{0,\min f_2 (\varphi) \}=\max \{ 0,n-m_2 \},\\
R^- (A):=\max \{0,\max f_2 (\varphi) \}=\max \{ 0,n+m_2 \}.
\end{gathered} \label{rplusminus}
\end{equation}

\begin{lemma}\label{boundofR}
Let $0<R<R(A)$. Then:
\begin{itemize}
\item[(a)] for every $x\ne 0$ the set $F_R(x)$ has no common points with the
ray $cx$, $0\le c<+\infty$;
\item[(b)] $\widetilde {F}^{+}_{R}(\varphi) \ne \emptyset$ for all
$\varphi \in [0,2 \pi)$ if and only if $R>R^{+}(A)$;
\item[(c)] $\widetilde {F}^{-}_{R}(\varphi) \ne \emptyset$ for all
$\varphi \in [0,2 \pi)$ if and only if $R>R^{-}(A)$.
\end{itemize}
\end{lemma}

\begin{proof}
(a) If $F_R(x)$ had a common point with the ray $cx$, $0\le c<+\infty$, for
some $x\ne 0$, there would exist $\Delta\in\mathbb{R}^{2\times 2}$ with
$\|\Delta\|\le R$ and $c\ge 0$ such that $(A+\Delta)x=cx$, so that $A+\Delta$
would have the nonnegative real eigenvalue $c$; this is impossible because
$\|\Delta\|\le R<R(A)$ and, by the definition of $R(A)$, the system
$\Sigma_\Delta$ is g.a.s.

(b) $\widetilde {F}^{+}_{R}(\varphi) \ne \emptyset$ for all
$\varphi \in [0,2 \pi)$ if and only if for every $\varphi \in [0,2 \pi)$ there
are $r\in[0,R]$ and $\theta \in [0,2 \pi)$ such that
$f_2(\varphi)+r \sin(\theta -\varphi)>0$, and this is true if and only if
$f_2 (\varphi)+R>0$ for all $\varphi \in [0,2 \pi)$, hence if and only if
either $f_2(\varphi)\geq 0$ for all $\varphi \in [0,2 \pi)$ or
$R>-\min \{ f_2 (\varphi):\ \varphi \in [0,2\pi) \}$, a condition equivalent to
assertion (b) of this lemma.

(c) $\widetilde {F}^{-}_{R}(\varphi) \ne \emptyset$ for all
$\varphi \in [0,2 \pi)$ if and only if for every $\varphi \in [0,2 \pi)$ there
are $r\in[0,R]$ and $\theta \in [0,2 \pi)$ such that
$f_2(\varphi)+r \sin(\theta -\varphi)<0$, and this is true if and only if
$f_2 (\varphi)-R<0$ for all $\varphi \in [0,2 \pi)$, hence if and only if
either $f_2(\varphi)\leq 0$ for all $\varphi \in [0,2 \pi)$ or
$R>\max \{ f_2 (\varphi):\ \varphi \in [0,2\pi) \}$, a condition equivalent to
assertion (c) of this lemma.
\end{proof}

We denote
\begin{equation}
K(\theta,\varphi,r) :=\frac{f_1(\varphi)+r \cos(\theta -\varphi)}
{f_2(\varphi)+r \sin(\theta -\varphi)}.\label{K}
\end{equation}
Then for $R\in (R^+ (A),R(A))$ the function $K^{+}(\varphi)$ that appears in
Filippov's theorem can be written as
\begin{equation}
K^{+}_{R}(\varphi)=\sup_{(r, \theta)\in [0,R]\times [0, 2 \pi)}
\{ K(\theta,\varphi,r):\ f_2(\varphi)+r \sin(\theta -\varphi) >0\}. \label{krplus1}
\end{equation}
Similarly, for $R\in (R^- (A),R(A))$ the function $K^{-}(\varphi)$ can be
written as
\begin{equation}
K^{-}_{R}(\varphi)=\sup_{(r, \theta)\in [0,R]\times [0, 2 \pi)}
\{ -K(\theta,\varphi,r):\ f_2(\varphi)+r \sin(\theta -\varphi) <0\}.\label{krminus1}
\end{equation}

\begin{lemma}\label{Kfunctions}
(a) For $R\in (R^+ (A),R(A))$ we have
\begin{equation}
K^{+}_{R}(\varphi)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)
+ f_2 ^2 (\varphi)-R^2}+R f_2(\varphi)}{f_2(\varphi)
\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}-R f_1(\varphi)}. \label{Kplus}
\end{equation}
(b) For $R\in (R^- (A),R(A))$ we have
\begin{equation}
K^{-}_{R}(\varphi)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}
-R f_2(\varphi)}{-f_2(\varphi)
\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-R^2}-R f_1(\varphi)}. \label{Kmin}
\end{equation}
\end{lemma}

\begin{proof}
First, for arbitrary $R\in (R^+ (A),R(A))$ we prove \eqref{Kplus}. Let
$\varphi \in [0,2\pi)$ and $r\in [0,R]$ be given, and let
$\theta_0 \in [0,2\pi)$ be such that $y_2 (\theta_0,\varphi,r)=0$. Then
$y_1(\theta_0,\varphi,r)<0$ by Lemma \ref{boundofR}(a), so
$K(\theta,\varphi,r)\to -\infty$ as $\theta \to \theta_0$ with
$y_2(\theta,\varphi,r)>0$; therefore for the calculation of the supremum in
\eqref{krplus1} we can consider only points in the interior of the set
$y_2 (\theta,\varphi,r)>0$. So the supremum is attained at a value $\theta$ for
which the partial derivative of $K(\theta,\varphi,r)$ with respect to $\theta$
is zero.
From this condition, after simplifications, we obtain
\begin{equation}
f_2(\varphi)\sin(\theta-\varphi)+f_1(\varphi)\cos(\theta-\varphi)+r=0,
\end{equation}
and solving this equation for $\sin(\theta-\varphi)$ and $\cos(\theta-\varphi)$,
\begin{gather}
\sin(\theta-\varphi)=\frac{-rf_2(\varphi)}{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}
\mp \frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}}
{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}, \label{sintheta}\\
\cos(\theta-\varphi)=\frac{-rf_1(\varphi)}{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}
\pm \frac{f_2(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}}
{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)}. \label{costheta}
\end{gather}
Substituting into the expression \eqref{K} of $K(\theta,\varphi,r)$ we obtain
\begin{equation}
K(\varphi,r)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}
\pm rf_2(\varphi)}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2} \mp rf_1(\varphi)}.
\label{Kplusmin}
\end{equation}
When the inequalities
$f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2}+rf_1(\varphi)>0$ and
$f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2}-rf_1(\varphi)>0$
both hold, comparing directly the two possible signs in \eqref{Kplusmin} we see
that the maximum value of $K(\varphi,r)$ is
\begin{equation}
K(\varphi,r)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}+rf_2(\varphi)}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}-rf_1(\varphi)},
\label{K+}
\end{equation}
and, taking into account that according to \eqref{krplus1} the function
\eqref{K+} is monotone increasing in $r$, we obtain assertion \eqref{Kplus} of
the lemma.
When one of the numbers
$$
f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2} +rf_1(\varphi),\quad
f_2(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}-rf_1(\varphi)
$$
is positive and the other negative, then for the maximum of $K(\varphi,r)$ we
have
\begin{equation}
K(\varphi,r)=\frac{f_1(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2}
-rf_2(\varphi)\operatorname{sign} f_1(\varphi)}
{f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2} +r |{f_1(\varphi)}|},
\label{Kplusother}
\end{equation}
but in this case
\begin{align*}
&\Big( f_2(\varphi)\sqrt{f_1 ^2 (\varphi)+ f_2 ^2 (\varphi)-r^2} +rf_1(\varphi)\Big)
\Big( f_2(\varphi)\sqrt{f_1 ^2 (\varphi) + f_2 ^2 (\varphi)-r^2} -rf_1(\varphi)\Big)\\
&=( f_2 ^2 (\varphi)-r^2 ) ( f_1 ^2 (\varphi)+ f_2 ^2 (\varphi))<0,
\end{align*}
so $(f_2 (\varphi)-r)(f_2 (\varphi)+r)<0$, from which it follows that there
exists ${\widetilde r}\in (0,R)$ such that $f_2 (\varphi)+{\widetilde r}=0$ or
$f_2 (\varphi)-{\widetilde r}=0$. We consider only the first case; the second
can be analyzed in the same way. Then for $\theta=\varphi+\frac{\pi}{2}$ we
have $(f_1 (\varphi)+{\widetilde r}\cos(\theta-\varphi),f_2(\varphi)
+{\widetilde r}\sin(\theta-\varphi)) =(f_1(\varphi),f_2 (\varphi)+{\widetilde r})
=(f_1(\varphi),0) \in \widetilde {F}_{R}(\varphi)$, and since $R<R(A)$,
Lemma \ref{boundofR}(a) gives $f_1(\varphi)<0$. Then
$\operatorname{sign} f_1(\varphi)=-1$, $|f_1(\varphi)|=-f_1(\varphi)$, and
\eqref{Kplusother} coincides with \eqref{K+}, so we arrive at assertion
\eqref{Kplus} as in the previous case. The proof of assertion (b) is
completely analogous.
\end{proof}

From Lemma \ref{boundofR} and Filippov's theorem we obtain the following
criterion. For $R>R^{+}(A)$ put
$I^{+}(R):=\int_0^{2\pi}K^{+}_{R}(\varphi)\,d\varphi$, and for $R>R^{-}(A)$ put
$I^{-}(R):=\int_0^{2\pi}K^{-}_{R}(\varphi)\,d\varphi$.

\begin{theorem}\label{aplicacionFilippov}
Let $A\in\mathbb{R}^{2\times 2}$ be a stable matrix and $0<R<R(A)$. The
differential inclusion \eqref{set}--\eqref{inclusion} is globally
asymptotically stable if and only if $I^{+}(R)<0$ whenever $R>R^{+}(A)$ and
$I^{-}(R)<0$ whenever $R>R^{-}(A)$.
\end{theorem}

\section{Calculation of the integrals $I^{+}(R)$ and $I^{-}(R)$}

In the computation of $I^{+}(R)$ and $I^{-}(R)$ we will use integrals of the
type
\begin{equation}
\int_{0}^{\infty}\frac{\sqrt{a^{2}+t^{2}}}{\sqrt{b^{2}+t^{2}}\,(p+t^{2})}\,dt,
\quad a>b, \label{int3tipo}
\end{equation}
which can be expressed through the complete elliptic integral of the third
kind $\Pi(\cdot,\cdot)$, with
\begin{equation}
\alpha^2 =1+\frac{p}{a^2},\qquad k^2=1-\frac{b^2}{a^2}.
\end{equation}
After rationalization of the denominators in \eqref{Kplus}, \eqref{Kmin} we
obtain
\begin{gather}
K_R^+ (\varphi)=\frac{f_1 (\varphi) f_2 (\varphi)
+R\sqrt{f_1^2 (\varphi)+f_2^2 (\varphi)-R^2}}{f_2^2 (\varphi)-R^2}, \label{kplusr}\\
K_R^- (\varphi)=\frac{-f_1 (\varphi) f_2 (\varphi)
+R\sqrt{f_1^2 (\varphi)+f_2^2 (\varphi)-R^2}}{f_2^2 (\varphi)-R^2}. \label{kminr}
\end{gather}
The rationalization can introduce singularities in the integrals, but taking
into account that the original integrals exist as proper integrals for the
considered values of $R$, we can calculate these integrals in the sense of the
Cauchy principal value. From Theorem \ref{aplicacionFilippov} and
\eqref{kplusr}, \eqref{kminr}, after decomposition into partial fractions we
have
\begin{align*}
I^+ (R)&=\frac {1}{2} \int_0^{2\pi } \Big( \frac{f_1 (\varphi)}{f_2 (\varphi)+R}
+\frac{f_1 (\varphi)}{f_2 (\varphi)-R} \\
&\quad - \frac{\sqrt{f_1^2 (\varphi) + f_2^2 (\varphi)-R^2}}{f_2 (\varphi)+R}
+\frac{\sqrt{f_1^2 (\varphi)+ f_2^2 (\varphi)-R^2}}{f_2 (\varphi)-R} \Big) d \varphi ,
\end{align*}
\begin{align*}
I^- (R)&=\frac {1}{2} \int_0^{2\pi } \Big( \frac{-f_1 (\varphi)}{f_2 (\varphi)+R}
+\frac{-f_1 (\varphi)}{f_2 (\varphi)-R} \\
&\quad -\frac{\sqrt{f_1^2 (\varphi) + f_2^2 (\varphi)-R^2}}{f_2 (\varphi)+R}+
\frac{\sqrt{f_1^2 (\varphi) + f_2^2 (\varphi)-R^2}}{f_2 (\varphi)-R} \Big) d \varphi .
\end{align*}
So if we define
\begin{gather}
I_1(R):=\frac {1}{2} \int_0^{2\pi } \frac{f_1 (\varphi)}{f_2 (\varphi)+R} d \varphi
=\frac {1}{2} \int_0^{2\pi } \frac{m_1}{f_2 (\varphi)+R} d \varphi, \label{I1} \\
I_2(R):=\frac {1}{2} \int_0^{2\pi } \frac{f_1 (\varphi)}{f_2 (\varphi)-R} d \varphi
=\frac {1}{2} \int_0^{2\pi } \frac{m_1}{f_2 (\varphi)-R} d \varphi, \label{I2} \\
I_3(R):=\frac {1}{2} \int_0^{2\pi } \frac{-\sqrt{f_1^2 (\varphi)
+ f_2^2 (\varphi)-R^2}}{f_2 (\varphi)+R} d \varphi, \label{I3} \\
I_4(R):=\frac {1}{2} \int_0^{2\pi } \frac{\sqrt{f_1^2 (\varphi)
+ f_2^2 (\varphi)-R^2}}{f_2 (\varphi)-R} d \varphi, \label{I4}
\end{gather}
we have
\begin{gather}
I^+ (R)=I_1 (R)+I_2 (R)+I_3 (R)+I_4 (R) , \label{Imas} \\
I^- (R)=-I_1 (R)-I_2 (R)+I_3 (R)+I_4 (R).
\label{Imenos}
\end{gather}

\begin{lemma} \label{integracion}
If $A\in \mathbb{R}^{2\times 2} $ is a stable matrix such that $n\ne 0$, then
for the integrals $I_k (R)$, $k=1,2,3,4$, in the sense of the Cauchy principal
value we have
\begin{gather}
I_1(R)=\begin{cases} 0 &\text{if } |m_2+R|<n,\\[4pt]
\dfrac{\pi m_1 \operatorname{sign}(m_2+R)}{\sqrt{(m_2+R)^2-n^2}} &\text{if } |m_2+R|>n;
\end{cases} \label{I12} \\
I_2(R)=\begin{cases} 0 &\text{if } |m_2-R|<n,\\[4pt]
\dfrac{\pi m_1 \operatorname{sign}(m_2-R)}{\sqrt{(m_2-R)^2-n^2}} &\text{if } |m_2-R|>n;
\end{cases} \label{I22}
\end{gather}
moreover
\begin{gather}
I_3(R)=0 \quad\text{if }|m_2+R|<n, \label{I3final} \\
I_4(R)=0 \quad \text{if } |m_2-R|<n,
\end{gather}
while for $|m_2+R|>n$ (respectively $|m_2-R|>n$) the integral $I_3(R)$
(respectively $I_4(R)$) is expressed through the complete elliptic integral of
the third kind $\Pi (\cdot,\cdot)$ by means of formula \eqref{int3tipo} and the
quantities
\begin{gather}
a(R)=\sqrt{\frac{{\overline \sigma}^2 (A)-R^2}{{\underline \sigma}^2 (A) -R^2}}, \\
\alpha_3 (R)=\frac{-2({\underline \sigma}^2 (A)-R^2)}{\sqrt{{\overline \sigma}^2(A)-R^2}
\big( m_2+R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}\big)}, \label{alpha3} \\
\beta_3 (R)=1-\frac{n m_1 i}{\sqrt{m_1^2+m_2^2} \sqrt{(m_2+R)^2-n^2}}, \label{beta3} \\
\tau_3 (R)=\frac{\frac{n m_1}{\sqrt{m_1^2+m_2^2}} +i \sqrt{(m_2+R)^2-n^2} }
{m_2+R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}} }, \label{tau3} \\
\alpha_4 (R)=\frac{2({\underline \sigma}^2 (A)-R^2)}
{\sqrt{{\overline \sigma}^2(A)-R^2} \big( m_2-R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}\big)}, \\
\beta_4 (R)=1-\frac{n m_1 i}{\sqrt{m_1^2+m_2^2} \sqrt{(m_2-R)^2-n^2}}, \\
\tau_4 (R)=\frac{\frac{n m_1}{\sqrt{m_1^2+m_2^2}} +i \sqrt{(m_2-R)^2-n^2}}
{m_2-R-\frac{n m_2}{\sqrt{m_1^2+m_2^2}}}.
\end{gather}
Here ${\underline \sigma}(A)$, ${\overline \sigma}(A)$ are the smallest and the
largest singular values of the matrix $A$, and $m_1$, $m_2$, $n$ are the
numbers given by \eqref{m1m2n}.
\end{lemma}

\begin{proof}
The integrands in $I_1(R)$ and $I_2(R)$ are simple rational functions whose
antiderivatives are given in terms of logarithmic or arctangent functions, so
evaluating the integrals in the sense of the Cauchy principal value we easily
obtain the results of the lemma for $I_1(R)$ and $I_2(R)$.

Now we explain how to compute the more complicated integral $I_3(R)$ (the
computation of $I_4(R)$ is completely similar). In the case $|m_2+R|<n$ an
analogous evaluation in the sense of the Cauchy principal value gives
$I_3(R)=0$. In the case $|m_2+R|>n$, from expressions \eqref{f12}, \eqref{f22}
we obtain
\begin{gather*}
f_1^2 (\varphi)+ f_2^2 (\varphi)-R^2=m_1^2+m_2^2
+n^2-R^2+2n\sqrt{m_1^2+m_2^2}\cos 2x ,\\
f_2(\varphi)+R=m_2+R+n \Big[ \frac{m_2}{\sqrt{m_1^2+m_2^2}} \cos 2x
- \frac{m_1}{\sqrt{m_1^2+m_2^2}}\sin 2x\Big],
\end{gather*}
where $x=\varphi-\chi-\psi$ and $\sin 2\psi=\frac{m_1}{\sqrt{m_1^2+m_2^2}}$,
$\cos 2\psi=\frac{m_2}{\sqrt{m_1^2+m_2^2}}$. Using these expressions we write
the integral in the form
\[
I_3(R)=-\frac{1}{4} \int_{0}^{4\pi}
\frac{\sqrt{m_1^2+m_2^2 +n^2-R^2+2n\sqrt{m_1^2+m_2^2}\cos 2x}}{m_2+R
+n \big[ \frac{m_2}{\sqrt{m_1^2+m_2^2}}\cos 2x
- \frac{m_1}{\sqrt{m_1^2+m_2^2}}\sin 2x\big]}\, dx .
\]
Now, by the change of the variable of integration $\tan x=t$ and using the
expressions for the smallest and the largest singular values of the matrix $A$,
\begin{gather*}
{\underline \sigma}^2(A)=m_1^2+m_2^2+n^2-2n\sqrt{m_1^2+m_2^2} , \\
{\overline \sigma}^2(A)=m_1^2+m_2^2+n^2+2n\sqrt{m_1^2+m_2^2} ,
\end{gather*}
we obtain
\[
I_3(R)=- \int_{-\infty}^{\infty}
\frac{ \sqrt{{\overline \sigma}^2 (A)-R^2 +({\underline \sigma}^2 (A)-R^2)t^2} }
{\sqrt{1+t^2}\,\big[ (m_2+R-\frac{nm_2}{\sqrt{m_1^2+m_2^2}})t^2
-\frac{2nm_1 t}{\sqrt{m_1^2+m_2^2}}+ m_2+R+ \frac{nm_2}{\sqrt{m_1^2+m_2^2}}\big]}
\;dt.
\]
Factoring the denominator,
\[
I_3(R)=- \frac{\sqrt{{\underline \sigma}^2(A)-R^2}}
{m_2+R-\frac{nm_2}{\sqrt{m_1^2+m_2^2}}} \int_{-\infty}^{\infty}
\frac{\sqrt{\frac{{\overline \sigma}^2 (A)-R^2}{{\underline \sigma}^2 (A)-R^2}+t^2} }
{\sqrt{1+t^2}(t-\tau_3(R))(t-{\overline \tau}_3(R))}\;dt,
\]
where $\tau_3(R)$ is given by \eqref{tau3}. Using the identity
\begin{equation}
\frac{1}{(t-\tau)(t-\overline \tau)}=2\Re \Big[ \frac{1}{\tau-{\overline \tau}}
\Big(\frac{\tau}{t^2-\tau^2} +\frac{t}{t^2-\tau^2}\Big) \Big] ,
\quad t\in\mathbb{R},
\end{equation}
and taking into account that the integral of an odd function over the real
line is zero, we obtain
\begin{align*}
&I_3(R)\\
&=- \frac{\sqrt{{\underline \sigma}^2 (A)-R^2}}
{m_2+R-\frac{nm_2}{\sqrt{m_1^2+m_2^2}}}\,
2\Re \int_{-\infty}^{\infty}
\frac{\sqrt{\frac{{\overline \sigma}^2 (A)-R^2}{{\underline \sigma}^2 (A)-R^2}+t^2}}
{\sqrt{1+t^2}}\, \frac{\tau_3(R)}{\tau_3(R)-{\overline \tau}_3(R)}\,
\frac{1}{t^2-\tau_3^2(R)} \;dt .
\end{align*}
Now, using expressions \eqref{tau3} and \eqref{beta3},
\[
\frac{\tau_3(R)}{\tau_3(R)-{\overline \tau}_3(R)}=\frac{1}{2}
\Big[ 1- \frac{nm_1 i}{\sqrt{m_1^2+m_2^2}\sqrt{(m_2+R)^2-n^2}}\Big]=
\frac{1}{2} \beta_3(R),
\]
so that
\[
I_3(R)=- \frac{\sqrt{{\underline \sigma}^2 (A)-R^2}}{m_2+R
-\frac{nm_2}{\sqrt{m_1^2+m_2^2}}} \Re \Big\{ \beta_3(R) \int_{0}^{\infty}
\frac{\sqrt{\frac{{\overline \sigma}^2 (A) -R^2}{{\underline \sigma}^2 (A)-R^2}+t^2}}
{\sqrt{1+t^2}}\, \frac{1}{t^2-\tau_3^2(R)} \;dt \Big\}.
\]
Finally, from formula \eqref{int3tipo} and expression \eqref{alpha3} we obtain
the expression \eqref{I3final}.
\end{proof}

\section{Calculation of the radius of stability for arbitrary matrices}
\label{general}

Let us now formulate some results on the integrals $I^+ (R)$,
$R\in (R^+ (A),R(A))$, and $I^- (R)$, $R\in (R^- (A),R(A))$, which allow us to
characterize the stable matrices $A\in \mathbb{R}^{2\times 2}$ such that
$R_i (A)=R(A)$ and to formulate an algorithm for the calculation of the number
$R_i (A)$.

\begin{lemma} \label{m2n0}
Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix such that $n=0$ or
$m_2=0$. Then $R_{i}(A)=R(A)$.
\end{lemma}

\begin{proof}
If $n=0$, then from \eqref{f12} and \eqref{f22} we have that $f_1(\varphi)=m_1$,
$f_2(\varphi)=m_2$ are constant functions. So if the differential inclusion
\eqref{inclusiontheta} loses its stability through a nonlinear perturbation
$N_{R}^+ (A,x)$ or $N_{R}^-(A,x)$, then in this case the perturbation is a
constant linear perturbation, and so from inequalities \eqref{radiiresult} we
have $R_{i}(A)=R(A)$.

If $m_2=0$, then $R^+(A)=n$, $R^-(A)=n$; thus for $R>n$ it follows from
\eqref{I12} and \eqref{I22} that $I_1(R)+I_2(R)=0$, and from \eqref{I3} and
\eqref{I4} that $I_3(R)<0$ and $I_4(R)<0$. Using the expressions \eqref{Imas},
\eqref{Imenos} we conclude that $I^+(R)<0$, $I^-(R)<0$, and from Theorem
\ref{aplicacionFilippov}, $R_{i}(A)=R(A)$.
\end{proof}

\begin{lemma} \label{solounaint}
Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix such that
$\max \{ R^- (A), R^+ (A) \}<R(A)$. Then for all
$R\in (\max \{ R^- (A), R^+ (A) \},R(A))$, in the case $m_2>0$ we have
$I^- (R)<0$, and in the case $m_2<0$ we have $I^+ (R)<0$.
\end{lemma}

\begin{proof}
Let $R\in (\max \{ R^- (A), R^+ (A) \},R(A))$. Then $f_2(\varphi)+R>0$ and
$f_2(\varphi)-R<0$ for all $\varphi \in [0, 2\pi)$, and from expressions
\eqref{I3} and \eqref{I4} we have that $I_3(R)<0$ and $I_4(R)<0$. Now if
$m_2>0$, then $m_2 +R>0$, $m_2-R<0$, $m_2 +R> |m_2-R|$, and so from the
expressions \eqref{I12} and \eqref{I22} it follows that $I_1(R)+I_2(R)>0$;
from this and \eqref{Imenos} we conclude $I^- (R)<0$.
The proof in the case $m_2<0$ is completely similar.
\end{proof}

\begin{theorem} \label{thm2}
Let $A\in \mathbb{R}^{2\times 2} $ be a stable matrix. The equality
$R_i (A)=R(A)$ holds if and only if from the inequality
$\max \{ R^- (A), R^+ (A) \}<R(A)$ it follows that $I^+(R(A))\leq 0$ in the
case $m_2>0$ and $I^-(R(A))\leq 0$ in the case $m_2<0$.
\end{theorem}

\begin{proof}
From Lemma \ref{m2n0} the assertion of the theorem holds in the cases $m_2=0$
or $n=0$; thus from now on we assume $m_2\ne 0$ and $n\ne 0$. In the case
$R^- (A)\geq R(A)$, $R^+ (A)\geq R(A)$, the conditions on the integrals in
Theorem \ref{aplicacionFilippov} are void for every $R<R(A)$, and so
$R_i(A)=R(A)$.

Now let $m_2>0$ (the case $m_2<0$ is treated in the same way, with $I^-$,
$R^-(A)$ in place of $I^+$, $R^+(A)$). If
$\max \{ R^- (A), R^+ (A) \}<R(A)$, then for every $R\in(R^+(A),R(A))$ the
condition on $I^-(R)$ in Theorem \ref{aplicacionFilippov} is either void (for
$R\leq R^-(A)$) or satisfied by Lemma \ref{solounaint}; hence the inclusion
\eqref{inclusiontheta} is g.a.s.\ for such $R$ if and only if $I^+(R)<0$.
Since $K^+_R(\varphi)$ is increasing in $R$ (see the proof of Lemma
\ref{Kfunctions}), the function $I^+(R)$ is increasing, and therefore
$I^+(R)<0$ for all $R\in(R^+(A),R(A))$ if and only if $I^+(R(A))\leq 0$;
consequently $R_i(A)=R(A)$ if and only if $I^+(R(A))\leq 0$. Finally, if
$\max \{ R^- (A), R^+ (A) \}\geq R(A)$ but one of $R^+(A)$, $R^-(A)$ is smaller
than $R(A)$, a similar analysis of the signs of the integrals $I_k(R)$ shows
that the corresponding integral condition in Theorem \ref{aplicacionFilippov}
is satisfied for every $R<R(A)$, and again $R_i(A)=R(A)$.
\end{proof}

\begin{lemma}
\begin{itemize}
\item[(i)] If $m_2>0$ and $R^+ (A)<R(A)$, then for $R>R^+ (A)$ sufficiently
near to $R^+ (A)$ we have $I^+ (R)<0$;
\item[(ii)] If $m_2<0$ and $R^- (A)<R(A)$, then for $R>R^- (A)$ sufficiently
near to $R^- (A)$ we have $I^- (R)<0$.
\end{itemize}
\end{lemma}

\begin{proof}
We prove only assertion (i); the proof of (ii) is similar. For $R>R^+(A)$
sufficiently near to $R^+(A)$ we have from \eqref{I12} that $I_1(R)<0$ and from
\eqref{I3} that $I_3(R)<0$. Furthermore, for $R$ sufficiently near to $R^+(A)$
we have $|m_2-R|<n$, so that by \eqref{I22} $I_2(R)=0$, while $I_4(R)$ remains
bounded; since $I_1(R)\to -\infty$ as $R\to R^+(A)$, we conclude that
$I^+(R)=I_1(R)+I_2(R)+I_3(R)+I_4(R)<0$ for all $R>R^+(A)$ sufficiently near to
$R^+(A)$.
\end{proof}

On the basis of Theorem \ref{thm2} and the preceding lemmas we obtain the
following algorithm for the calculation of $R_i(A)$ for an arbitrary stable
matrix $A\in\mathbb{R}^{2\times 2}$.
\begin{itemize}
\item[1] Calculate the numbers $m_1$, $m_2$, $n$ by \eqref{m1m2n}, the value
$R(A)$ by \eqref{rtimeinvariant}, and the values $R^+ (A)$, $R^- (A)$ by
\eqref{rplusminus};
\item[2] If $n=0$ or $m_2=0$, put $R_i(A)=R(A)$;
\item[3] If $\max \{ R^- (A), R^+ (A) \} \geq R(A)$, put $R_i(A)=R(A)$;
\item[4] If $\max \{ R^- (A), R^+ (A) \} <R(A)$ and $m_2>0$, calculate
$I^+ (R(A))$. If $I^+ (R(A))\leq 0$ then put $R_i(A)=R(A)$;
\item[5] If $\max \{ R^- (A), R^+ (A) \} <R(A)$ and $m_2<0$, calculate
$I^- (R(A))$. If $I^- (R(A))\leq 0$ then put $R_i(A)=R(A)$;
\item[6] If $\max \{ R^- (A), R^+ (A) \} <R(A)$, $m_2>0$ and $I^+ (R(A))>0$,
search $R_0 \in (R^+ (A),R(A))$ such that $I^+(R_0)<0$, and use the bisection
method in the interval $(R_0,R(A))$ to determine the root $R$ of the equation
$I^+ (R)=0$, and put $R_i(A)=R$;
\item[7] If $\max \{ R^- (A), R^+ (A) \} <R(A)$, $m_2<0$ and $I^- (R(A))>0$,
search $R_0 \in (R^- (A),R(A))$ such that $I^-(R_0)<0$, and use the bisection
method in the interval $(R_0,R(A))$ to determine the root $R$ of the equation
$I^- (R)=0$, and put $R_i(A)=R$.
\end{itemize}

\section{Examples}

In this section we apply the main results of this work to the calculation of
the stability radius $R_i (A)$.

Example 1. Let
\[
A=\begin{bmatrix} -220&-99\\ 181&-220 \end{bmatrix}.
\]
Simple calculations give $m_1=-220$, $m_2=140$, $n=41$,
${\underline \sigma}(A)=219.768$. So
$R(A)=\min \{{\underline \sigma}(A), -\frac{1}{2} \operatorname{tr}(A) \}=219.768$,
$R^{+}(A)=\max \{ 0, n-m_2\}=0$, $R^{-}(A)=\max \{ 0, n+m_2\}=181$, hence
$\max \{ R^{+}(A), R^{-}(A) \}<R(A)$ and $m_2>0$. Calculating the integral we
find
\[
I^{+}(R(A))>0,
\]
so from Theorem \ref{thm2} we have that $R_i (A)<R(A)$; determining the root of
the equation $I^{+}(R)=0$ by the bisection method, we can take
$R_i (A)= 214.555$.

Example 2. Let
\[
A=\begin{bmatrix} -220&-159\\ 241&-220 \end{bmatrix}.
\]
Then $m_1=-220$, $m_2=200$, $n=41$, ${\underline \sigma}(A)=256.321$. So
$R(A)=220$, $R^{+}(A)=0$, $R^{-}(A)=241$. Thus $R^{-}(A)>R(A)$ and the
assertion of Theorem \ref{thm2} implies that $R_i(A)=R(A)=220$.

Example 3. Let
\[
A=\begin{bmatrix} -220&-9\\ 91&-220 \end{bmatrix}.
\]
Then from the calculations we obtain $m_1=-220$, $m_2=50$, $n=41$,
${\underline \sigma}(A)=184.610$. So
$R(A)=\min \{ {\underline \sigma}(A), -\frac{1}{2} \operatorname{tr}(A) \}=184.610$,
$R^{+}(A)=0$, $R^{-}(A)=91$, $\max \{ R^{+}(A), R^{-}(A) \}
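For completeness, the elementary quantities used in steps 1--3 of the
algorithm and in the examples above can be computed with a few lines of code.
The following Python sketch is an illustration only; it assumes numpy and does
not evaluate the integrals $I^{\pm}(R)$ nor perform the bisection steps 4--7.
It reproduces the values of $m_1$, $m_2$, $n$, ${\underline\sigma}(A)$,
$R(A)$, $R^{+}(A)$, $R^{-}(A)$ reported in the examples.
\begin{verbatim}
# Sketch only: computes m1, m2, n, sigma_min(A), R(A), R^+(A), R^-(A)
# via formulas (m1m2n), (rtimeinvariant), (rplusminus).  The integrals
# I^+(R), I^-(R) and the bisection step are not implemented here.
import numpy as np

def radii(A):
    a11, a12 = A[0]
    a21, a22 = A[1]
    m1 = (a11 + a22) / 2
    m2 = (a21 - a12) / 2
    n = np.hypot((a11 - a22) / 2, (a12 + a21) / 2)
    sigma_min = np.hypot(m1, m2) - n          # smallest singular value
    RA = min(sigma_min, -(a11 + a22) / 2)     # R(A) = min{sigma_min, -tr(A)/2}
    Rplus = max(0.0, n - m2)                  # R^+(A)
    Rminus = max(0.0, n + m2)                 # R^-(A)
    return m1, m2, n, sigma_min, RA, Rplus, Rminus

for A in ([[-220, -99], [181, -220]],     # Example 1
          [[-220, -159], [241, -220]],    # Example 2
          [[-220, -9], [91, -220]]):      # Example 3
    print(radii(np.array(A, dtype=float)))
# Example 1 yields m1=-220, m2=140, n=41, sigma_min=219.768..., R(A)=219.768...
\end{verbatim}
Here the identity ${\underline\sigma}(A)=\sqrt{m_1^2+m_2^2}-n$ is used, which
follows from the expressions for ${\underline\sigma}^2(A)$,
${\overline\sigma}^2(A)$ given in the proof of Lemma \ref{integracion}.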