\documentclass[reqno]{amsart} \usepackage{graphicx} \usepackage{hyperref} \AtBeginDocument{{\noindent\small \emph{Electronic Journal of Differential Equations}, Vol. 2008(2008), No. 104, pp. 1--36.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2008 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2008/104\hfil Stability analysis of networks] {A general framework for stability analysis of gene regulatory networks with delay} \author[I. Shlykova, A. Ponosov, Y. Nepomnyashchikh, A. Shindiapin \hfil EJDE-2008/104\hfilneg] {Irina Shlykova, Arcady Ponosov,\\ Yury Nepomnyashchikh, Andrei Shindiapin} \address{Irina Shlykova \newline CIGENE - Centre for Integrative Genetics, \& Department of Mathematical Sciences and Technology, Norwegian University of Life Sciences, N-1430 \AA s, Norway} \email{irinsh@umb.no} \address{Arcady Ponosov \newline CIGENE - Centre for Integrative Genetics, \& Department of Mathematical Sciences and Technology, Norwegian University of Life Sciences, N-1430 \AA s, Norway} \email{arkadi@umb.no} \address{Yury Nepomnyashchikh \newline CIGENE - Centre for Integrative Genetics \& Department of Mathematics and Informatics, Eduardo Mondlane University, C.P. 257 Maputo, Mozambique} \email{yuvn2@yandex.ru} \address{Andrei Shindiapin \hfill\break CIGENE - Centre for Integrative Genetics \& Department of Mathematics and Informatics, Eduardo Mondlane University C.P. 257 Maputo Mozambique} \email{andrei.olga@tvcabo.co.mz} \thanks{Submitted May 20, 2008. Published August 6, 2008.} \subjclass[2000]{34K60, 92D10} \keywords{Gene regulation; delay equations; stability} \begin{abstract} A method to study asymptotic properties of solutions to systems of differential equations with distributed time-delays and Boolean-type nonlinearities (step functions) is offered. 
Such systems arise in many applications, but this paper deals with specific examples of such systems coming from genetic regulatory networks. A challenge is to analyze stable stationary points which belong to the discontinuity set of the system (thresholds). The paper describes an algorithm of localizing stationary points in the presence of delays as well as stability analysis around such points. The basic technical tool consists in replacing step functions by special smooth functions (``the tempered nonlinearities'') and investigating the systems thus obtained. \end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{remark}[theorem]{Remark} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{assumption}[theorem]{Assumption} \section{Introduction} \label{1sec} We study asymptotically stable steady states (stationary points) of the delay system \begin{equation}\label{Eq.MainSystem} \begin{gathered} \dot{x_i}=F_i(Z_{1}, \dots, Z_m)-G_i(Z_{1}, \dots, Z_m)x_i \\ Z_{k}=\Sigma(y_{i(k)}, \theta_k, q_k)\\ y_i(t)= (\Re_i x_i)(t) \quad (t\ge 0), \; i=1,\dots,n; \; k=1,\dots,m). \end{gathered} \end{equation} This system describes a gene regulatory network with autoregulation \cite{Mestl-95,PlahteKjogl,Plahte-94,Plahte-98,PoSh}, where changes in one or more genes happen slower than in the others, which causes delay effects in some of the variables. Let us now specify the main assumptions put on the entries in \eqref{Eq.MainSystem}. The functions $F_i$, $G_i$, which are affine in each $Z_k$ and satisfy $$ F_i(Z_{1}, \dots, Z_m)\ge 0, \quad G_i(Z_{1}, \dots, Z_m)>0 $$ for $0\le Z_k\le 1$, $k=1,\dots,m$, stand for the production rate and the relative degradation rate of the product of gene $i$, respectively, and $x_i$ denoting the gene product concentration. 
The input variables $y_i$ endow System \eqref{Eq.MainSystem} with feedbacks which, in general, are described by nonlinear Volterra (``delay'') operators $\Re_i$ depending on the gene concentrations $x_i(t)$. The delay effects in the model arise from the time required to complete transcription, translation and diffusion to the place of action of a protein \cite{deJong}. If $\Re_i$ is the identity operator, then $x_i=y_i$, and we obtain a non-delay variable. Non-delay regulatory networks, where $x_i=y_i$ for all $i=1,\dots ,n$, in their general form, i.e. where both production and degradation are regulated, were introduced in \cite{Mestl-95}.

\textbf{Remark.} Below we will use the notation ${^\nu}\!c_{i}$, $^{\nu}\!K_{i}$, $\alpha _{i}$, $ ^{\nu}v_{i}$, where the indexes $\nu$ and $i$ indicate the number of an item and an equation, respectively.

In this paper we assume $\Re_i$ to be integral operators of the form
\begin{equation}\label{Eq.DelayOperator}
(\Re_i x_i)(t)={^{0}}\!c_{i}\,x_{i}(t)+\int_{-\infty}^{t}K_i(t-s)x_i(s)ds, \quad t \ge 0, \; i=1,\dots,n,
\end{equation}
where
\begin{gather}\label{trickFuncKi}
K_i(u)=\sum_{\nu=1}^{p}{^{\nu}}\!\!c_{i} \cdot ^{\nu}\!\!\!K_{i}(u)\,,\\
\label{trickFuncK_ji}
^{\nu}\!\!K_{i}(u)=\frac{\alpha_i^{\nu}\cdot u^{{\nu}-1}}{({\nu}-1)!}e^{-\alpha_i u} \quad (i=1,\dots,n).
\end{gather}
The coefficients ${^\nu}\!c_{i}$ ($\nu=0,\dots,p$, $i=1,\dots,n$) are real nonnegative numbers satisfying
$$
\sum_{\nu=0}^{p}{^{\nu}}\!\!c_i=1
$$
for any $i=1,\dots,n$. It is also assumed that $\alpha_i >0$ for all $i=1,\dots,n$.
\begin{example}\label{exKernels} \rm
Let
\begin{gather}\label{Eq.WeakGenericKernel}
^1\!K(u)=\alpha e^{-\alpha u}, \quad \alpha >0 \quad \mbox{(the weak generic delay kernel)}, \\
\label{Eq.StrongGenericKernel}
{^2}\!K(u)=\alpha^2\cdot ue^{-\alpha u}, \quad \alpha >0 \quad \mbox{(the strong generic delay kernel)}.
\end{gather}
Kernels $^{1}\!K(u)$ and $^{2}\!K(u)$ $(\alpha=0.7)$ are illustrated in Figure~1 and Figure~2, respectively.
\begin{figure}[ht]
\begin{center}
\includegraphics[height=45mm]{fig1} %{ker1.eps}
\end{center}
\caption{Kernel $^{1}\!K(u)$}
\end{figure}
\begin{figure}[ht]
\begin{center}
\includegraphics[height=45mm]{fig2} %{ker2.eps}
\end{center}
\caption{Kernel $^{2}\!K(u)$}
\end{figure}
The function ${^1}\!K(u)$ is strictly decreasing, while ${^2}\!K(u)$ tends to zero for large positive $u$ and has a maximum at time $T=\frac{1}{\alpha}$. If \, ${^2}\!K(u)$ is sharper in the sense that the region around $T$ is narrower, then in the limit we can think of \, ${^2}\!K(u)$ as an approximation of the Dirac function $\delta(T-t)$, where
$$
\int _{-\infty}^{\infty} \delta(T-t)f(t)dt =f(T).
$$
\end{example}
The ``response functions'' $Z_{k}$ express the effect of the different transcription factors regulating the expression of the gene. Each $Z_k=Z_k(y_{i(k)})$ ($0\le Z_k\le 1$ for $y_{i(k)}\ge 0$) is \textit{a smooth function} depending on exactly one input variable $y_{i(k)}$ and on two other parameters: the threshold value $\theta_k$ and the steepness value $q_k\ge 0$. A gene may have more than one, or no thresholds. This is expressed in the dependence $i=i(k)$. If different $k$ correspond to the same $i$, then gene $i$ has more than one threshold. If some $i$ does not correspond to any $k$, then gene $i$ has no threshold. In the vicinity of the threshold value $\theta_k$ the response function $Z_k$ is increasing almost instantaneously from 0 to 1, i.e. gene $i(k)$ becomes activated very quickly. Thus, the response function is rather close to the step function that has the unit jump at the threshold $y_{i(k)}=\theta_k$. There are many ways to model response functions. The simplest way is to use the unit step functions which are either ``on'': $Z_k=1$, or ``off'': $Z_k=0$. It corresponds to $q_k=0$ ($k=1,\dots,m$) in the above notation.
In this case System \eqref{Eq.MainSystem} splits into a number of affine scalar delay systems, and it is usually an easy exercise (see Section \ref{2sec}) to find all their solutions explicitly. However, coupled together these simple systems can produce some complicated effects, especially when a trajectory approaches the switching domains, where a switching from one affine system to another occurs. Particularly sensitive is the stability analysis of the stationary points which belong to these switching domains. This may require the use of smooth approximations $Z_k(y_{i(k)})=\Sigma(y_{i(k)},\theta_k,q_k)$ (corresponding to the case $q_k>0$) of the step response functions. In this paper we will use approximations which were introduced in \cite{Plahte-94} and which are based on the so-called ``tempered nonlinearities'' or ``logoids'' (see the next section). This concept simplifies significantly the stability analysis of the steady states belonging to the discontinuity set of the system in the non-delay model \cite{Mestl-95}, \cite{Plahte-98}. As we will see, the logoid approach is also efficient in the delay case.

Let us stress that a ``real-world'' gene network is always smooth. The number of genes may, however, be rather large, so that a theoretical or a computer-based analysis of such networks can be complicated. That is why a simplified approach based on step response functions (``boolean nonlinearities'') is to be preferred. There are two main challenges one faces when using boolean functions. Firstly, one has to describe effects occurring in the vicinity of thresholds (e.g. sliding modes or steady states belonging to the discontinuity set of the system). Secondly, one needs to justify the transition from the simplified to the ``real-world'' model.

\section{Response functions}\label{2sec}

In this section we describe the properties of general logoid functions and look at some examples.
Let $Z=\Sigma(y,\theta,q)$ be any function defined for $y\in\mathbb{R}$, $\theta>0$, $0< q< q^0$ and $0\leq Z\leq1$. The following assumptions describe the response functions: \begin{assumption}\label{Assumption2.1} \rm $\Sigma(y,\theta,q)$ is continuous in $(y,q)\in {\mathbb{R}}\times (0,q^0)$ for all $\theta>0$, continuously differentiable w.r.t.(with respect to) $y\in {\mathbb{R}}$ for all $\theta>0, 0< q< q^0$, and $\frac{\partial}{\partial y}\Sigma(y,\theta,q)> 0$ on the set $\{y\in{\mathbb{R}}: 0<\Sigma(y,\theta,q)<1\}$ . \end{assumption} \begin{assumption}\label{Assumption2.2} \rm $\Sigma(y,\theta,q)$ satisfies $$ \Sigma(\theta,\theta,q)=0.5, \quad \Sigma(0,\theta,q)=0, \quad \Sigma(+\infty,\theta,q)=1 $$ for all $\theta>0$, $0< q< q^0$. \end{assumption} Clearly, Assumptions \ref{Assumption2.1}-\ref{Assumption2.2} imply that $Z=\Sigma(y,\theta,q)$ is non-decreasing in $y\in{\mathbb{R}}$ and strictly increasing in $y$ on the set $\{y\in{\mathbb{R}}: 0<\Sigma(y,\theta,q)<1\}$. The inverse function $y=\Sigma^{-1}(Z,\theta,q)$ w.r.t. $Z$, $\theta$ and $q$ being parameters, is defined for $Z\in(0,1)$, $\theta>0$, $0< q< q^0$, where it is strictly increasing in $Z$ and continuously differentiable w.r.t. $Z$. \begin{assumption}\label{Assumption2.3} \rm For all $\theta>0$, $\frac{\partial}{\partial Z}\Sigma^{-1}(Z,\theta,q)\to 0$ ($q\to 0$) uniformly on compact subsets of the interval $Z\in(0,1)$, and $\Sigma^{-1}(Z,\theta,q)\to\theta$ ($q\to 0$) pointwise for all $Z\in(0,1)$ and $\theta>0$. \end{assumption} \begin{assumption}\label{Assumption2.4} \rm For all $\theta>0$, the length of the interval $[y_1(q), y_2(q)]$, where $y_1(q):=\sup\{y\in{\mathbb{R}} : \Sigma(y,\theta,q)=0\} $ and $y_2(q):=\inf\{y\in{\mathbb{R}} : \Sigma(y,\theta,q)=1 \}$, tends to $0$ as $q\to 0$. 
\end{assumption}

\begin{proposition}\label{propPropertiesSigmoid}
If Assumptions \ref{Assumption2.1}-\ref{Assumption2.3} are satisfied, then the function $Z=\Sigma(y,\theta,q)$ has the following properties (see \cite{PonShinNep}):
\begin{enumerate}
\item If $q\to 0$, then $\Sigma^{-1}(Z,\theta,q)\to\theta$ uniformly on all compact subsets of the interval $Z\in (0,1)$ and every $\theta>0$;
\item if $q\to 0$, then $\Sigma(y,\theta,q)$ tends to 1 $(\forall y>\theta$), to 0 $(\forall y<\theta$), and to 0.5 (if $y=\theta$) for all $\theta>0$;
\item for any sequence $(y_n,\theta,q_n)$ such that $q_n\to 0$ and $\Sigma(y_n,\theta,q_n)\to Z^*$ for some $0<Z^*<1$, one has $y_n\to\theta$.
\end{enumerate}
\end{proposition}

\begin{proof}
Property 1 coincides with the first part of Assumption \ref{Assumption2.3}. To check Property 3, note that there exist $Z_1$, $Z_2$ such that $0<Z_1<Z^*<Z_2<1$ and $Z_n:=\Sigma(y_n,\theta,q_n)\in[Z_1,Z_2]$ for all sufficiently large $n$. As $y_n=\Sigma^{-1}(Z_n,\theta,q_n)$, Property 1 gives $y_n\to\theta$.

Let us prove Property 2. Assume, on the contrary, that $y<\theta$, while $Z_n:=\Sigma(y,\theta,q_n)$ does not tend to 0 for some sequence $q_n\to 0$. Passing, if necessary, to a subsequence, we may assume that $Z_n\ge \delta$ for some $\delta>0$. Since $\Sigma(y,\theta,q_n)\le \Sigma(\theta,\theta,q_n)=0.5$, this means that $Z_n\in [\delta, 0.5]$. As $y=\Sigma^{-1}(Z_n,\theta,q_n)$ for all $n$, this contradicts the uniform convergence of $\Sigma^{-1}(Z,\theta,q)\to \theta$ on the interval $[\delta, 0.5]$, as all $Z_n$ belong to it (see Property 1). A similar argument applies if $y$ satisfies $y>\theta$. Finally, $\Sigma(\theta,\theta,q)=0.5$ for all $q$ by Assumption \ref{Assumption2.2}.
\end{proof}

\begin{example}\label{Ex.HillFunc} \rm
Let $\theta>0$, $q>0$. \textit{The Hill function} is
$$
\Sigma(y,\theta, q):=\begin{cases}0 & \text{if } y<0\\ \frac{y^{1/q}}{y^{1/q}+\theta^{1/q}} & \text{if } y\ge 0. \end{cases}
$$
\end{example}
However, the Hill function does not satisfy Assumption \ref{Assumption2.4}, as it never reaches the value $Z=1$. This assumption is fulfilled for the following logoid function.
\begin{example}[\cite{Mestl-95,PlahteKjogl}]\label{Ex.LogoidFunc} \rm
Let
$$
\Sigma(y,\theta, q):=L\left(0.5+ \frac{y-\max\{\theta,\sigma(q)\}}{2\sigma(q)},\ \frac{1}{q}\right), \quad (\theta>0,\ 0<q<q^0),
$$
where
$$
L(u,p):=\begin{cases} 0 & \text{if } u<0\\ 1 & \text{if } u>1\\ \frac{u^{p}}{u^{p}+(1-u)^{p}}& \text{if } 0\le u\le 1 \end{cases}
$$
and $\sigma(q)\to +0$ if $q\to +0$.
\end{example}
The function $\Sigma$ assumes the value $\Sigma=1$ for all $y\geq \theta +\sigma (q)$ and the value $\Sigma=0$ for all $y\leq \theta -\sigma (q)$, so that $\sigma (q)$ is the distance from the threshold $\theta$ to the closest values of $y$, where the response function $\Sigma$ becomes $0$ (to the left of $\theta$) and $1$ (to its right). However, it should be noticed that by definition $\theta$ may assume arbitrary positive values, so that $\sigma(q)$ may formally be larger than $\theta$ for some $q$, eventually becoming less than $\theta$, because $\sigma(q)\to 0$ as $q\to 0$.

It is straightforward to check Assumptions \ref{Assumption2.1}-\ref{Assumption2.3} as well. Let us for instance verify the second part of Assumption \ref{Assumption2.3}. To do that, we keep fixed an arbitrary $Z\in (0,1)$, put $y_q=\Sigma^{-1}(Z,\theta,q)$ and choose any $\varepsilon>0$. Then there exists $q_\varepsilon>0$ such that $\sigma(q)<\varepsilon$ for $0<q<q_\varepsilon$; decreasing $q_\varepsilon$, if necessary, we may also assume that $\sigma(q)<\theta$ for these $q$. Since $0<Z<1$, the definition of the logoid implies that $\theta-\sigma(q)<y_q<\theta+\sigma(q)$, i.e. $|y_q-\theta|<\varepsilon$ for $0<q<q_\varepsilon$. Hence $\Sigma^{-1}(Z,\theta,q)\to\theta$ as $q\to 0$.

The tempered response functions have the following important property.
\begin{proposition}\label{Prop.StepLimit}
Let the function $\Sigma(y,\theta,q)$ satisfy Assumptions \ref{Assumption2.1}-\ref{Assumption2.4}. Then
\begin{enumerate}
\item If $y\ne \theta$, then $\Sigma(y,\theta, q)$ is equal to either $0$ or $1$ for sufficiently small $q >0$;
\item If $y\ne \theta$, then $\frac{\partial \Sigma}{\partial y}(y,\theta, q)=0 $ for sufficiently small $q >0$.
\end{enumerate}
\end{proposition}
\begin{proof}
According to Assumptions \ref{Assumption2.2}, \ref{Assumption2.4}, we have $\Sigma(\theta,\theta,q)=0.5$ and $|y_1(q)- y_2(q)|\to 0$ as $q\to 0$, where $y_1(q):=\sup\{y\in{\mathbb{R}} : \Sigma(y,\theta,q)=0\} $ and $y_2(q):=\inf\{y\in{\mathbb{R}} : \Sigma(y,\theta,q)=1 \}$. Then $\theta\in[y_1(q), y_2(q)]$. Let $y<\theta$ and put $\delta=\theta-y$. For sufficiently small $q$ we have $y_2(q)-y_1(q)<\delta$. Therefore $y=\theta-\delta\le y_2(q)-\delta<y_1(q)$, so that $\Sigma(y,\theta, q)=0$ for all sufficiently small $q>0$. Similarly, if $y>\theta$, then $\Sigma(y,\theta, q)=1$ for all sufficiently small $q>0$. This proves Property 1; Property 2 follows immediately, as $\Sigma(y,\theta,q)$ is then constant in a neighborhood of $y$.
\end{proof}
Thus, as $q\to 0$ the tempered response function approaches the unit step function
$$
\Sigma(y,\theta,0)=\begin{cases} 0 & \text{if } y<\theta\\ 1 & \text{if } y>\theta. \end{cases}
$$
In what follows we only use the tempered response functions (called logoids in \cite{Plahte-94}); i.e., functions satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4}. Thus, analysis based on the more traditional Hill function is not the subject of the present paper.
However, some of the results below are still valid for the response functions, which satisfy Assumptions \ref{Assumption2.1}-\ref{Assumption2.3}, but not necessarily Assumption \ref{Assumption2.4}.

\section{Obtaining a system of ordinary differential equations}\label{3sec}

A method to study System \eqref{Eq.MainSystem} is well-known in the literature, and it is usually called ``the linear chain trick'' (see e.g. \cite{Mc}). However, a direct application of this ``trick'' in its standard form is not suitable for our purposes, because we want each $Z_i$ to depend on $y_i$ only. Modifying the linear chain trick we can remove this drawback of the method. In fact, the idea of how it can be done comes from the general method of representing delay differential equations as systems of ordinary differential equations using certain integral transforms (the so-called ``$W$-transforms''). Those are much more general than the linear chain trick (see \cite{MPS} for further details). Let us also mention here the paper \cite{Dom} which demonstrates how such $W$-transforms can be applied to stability analysis of integro-differential equations. Finally, in \cite{Ber} it is shown how the $W$-transforms can be used in stability analysis without reducing delay equations to ordinary differential equations. The version of the linear chain trick used below was suggested in \cite{PoSh}. Here we only provide the final formula for the case of one delay operator (\ref{Eq.DelayOperator}), which is sufficient for our purposes. The formula follows from the general results proved in \cite{PoSh}, but they can also be checked by a straightforward calculation. This section is divided into three parts.
For a better understanding of the method we first (Subsection 3.1) consider a scalar equation \begin{equation}\label{Eq.Main} \begin{gathered} \dot{x}(t)=F(Z)-G(Z)x(t) \\ Z=\Sigma(y,\theta,q)\\ y(t)= (\Re x)(t), \quad (t\ge 0) \end{gathered} \end{equation} and a three-term delay operator \begin{equation}\label{Eq.DelayOperator-3term} (\Re x)(t)={^0}\!cx(t)+\int_{-\infty}^{t}K(t-s)x(s)ds, \quad t \ge 0, \end{equation} where $K(u)={^1}\!c\cdot {^1}\!K(u)+{^2}\!c\cdot {^2}\!K(u)$, ${^\nu}\!c \ge 0$ ($\nu=0,1,2$), ${^0}\!c+{^1}\!c+{^2}\!c=1$, where $t \ge 0$, and $^1\!K(u)$, ${^2}\!K(u)$ are defined by (\ref{Eq.WeakGenericKernel}) and (\ref{Eq.StrongGenericKernel}), respectively. The second part (Subsection 3.2) provides a reduction scheme for a rather general delay equation. Finally (Subsection 3.3), we use the second part to write down a system of ordinary differential equations which is equivalent to the main system \eqref{Eq.MainSystem}. %%%%First step \noindent {\bf Subsection 3.1}. In trying to replace the delay equation (\ref{Eq.Main}) with a system of ordinary differential equations, let us introduce new variables: \begin{equation}\label{Eq.Functions-w-i} {^1}\!w(t)=\int_{-\infty}^{t}{^1}\!K(t-s)x(s)ds, \hspace{0.5 truecm} {^2}\!w(t)=\int_{-\infty}^{t}{^2}\!K(t-s)x(s)ds, \end{equation} It is easy to see that ${^1}\dot{w}=-\alpha \cdot {^1}w+\alpha x$ and ${^2}\!\dot{w}=\alpha \cdot {^1}w-\alpha\cdot {^2}\!w$. This is used in the standard linear chain trick. Applying it we obtain $Z=\Sigma({^0}\!cx+{^1}\!c\cdot{^1}w+{^2}\!c\cdot{^2}\!w,\theta, q)$. By this, the response function $Z$ becomes a function of three variables, but we wanted only one. Therefore we will use the modified variables \begin{equation}\label{EqChangeOfVariables(Trick)} ^1\!v={^0}\!c\,x+{^1}\!c\cdot{^1}\!w+{^2}\!c\cdot{^2}\!w, \quad ^2\!v={^2}\!c\cdot{^1}\!w \end{equation} (We remark that $y=^1\!\!v$). 
Differentiating $^2\!v$ we obtain $$ \dot{^2\!v}={^2}\!c\cdot{^1}\!\dot{w}=\alpha(-{^2}\!c\cdot{^1}\!w+{^2}\!c\cdot x)=-\alpha \cdot ^2\!v +\alpha \cdot {^2}\!cx. $$ Similarly, \begin{align*} ^1 \!\dot{v} & ={^0}\!c\,\dot{x}+{^1}\!c\cdot {^1}\!\dot{w}+{^2}\!c\cdot {^2}\!\dot{w}\\ &={^0}\!c\,(F(Z)-G(Z)x)+\alpha (-{^1}\!c\cdot{^1}\!w+{^1}\!c\,x)+\alpha({^2}\!c\cdot{^1}\!w-{^2}\!c\cdot{^2}\!w)\\ & ={^0}\!c\,(F(Z)-G(Z)x)+\alpha (-{^1}\!c\cdot{^1}\!w+{^1}\!c\,x)+\alpha \cdot {^2}\!c\cdot{^1}\!w-\alpha(^1\!v-{^0}\!c\,x-{^1}\!c\cdot{^1}\!w)\\ & = {^0}\!c\,(F(Z)-G(Z)x)+\alpha x({^0}\!c+{^1}\!c)-\alpha \cdot ^1\!v+\alpha \cdot^2\!v.\\ \end{align*} Thus, we arrive at the following system of ordinary differential equations: \begin{equation}\label{Eq.MainTricked} \begin{gathered} \dot{x}=F(Z)-G(Z)x, \\ ^1\!\dot{v}={^0}\!c\,(F(Z)-G(Z)x)+\alpha x({^0}\!c+{^1}\!c)-\alpha \cdot ^1\!v+\alpha \cdot ^2\!v,\\ ^2\!\dot{v}=\alpha\cdot {^2}\!c\,x -\alpha \cdot^2\!v, \end{gathered} \end{equation} where $Z=\Sigma(y,\theta,q)$. This system is equivalent to (\ref{Eq.Main}) in the following sense. Assume that, (\ref{Eq.Main}) is also supplied with the initial condition \begin{equation}\label{Eq.InitialCondDelay} x(s)=\varphi (s), \quad s<0, \end{equation} where $\varphi : (-\infty,0]$ is a bounded, continuous function. Then, as shown above, the triple $(x(t), ^1\!\!v(t), ^2\!v(t))$, where $^1\!v,^2\!v$ are given by (\ref{EqChangeOfVariables(Trick)}) with ${^1}\!w, {^2}\!w$ defined by (\ref{Eq.Functions-w-i}), satisfies System (\ref{Eq.MainTricked}) and the initial conditions: \begin{equation}\label{Eq.InitialCondTricked} \begin{aligned} x(0)&=\varphi(0), \\ ^1\!v(0)&={^0}\!c\,\varphi(0)+\int_{-\infty}^{0}K(-s)\varphi(s)ds\\ &= {^0}\!c\,\varphi(0)+\int_{-\infty}^{0}({^1}\!c\, \alpha \,e^{\alpha s}-{^2}\!c \,\alpha^2\cdot s\,e^{\alpha s})\varphi(s)ds, \\ ^2\!v(0)&={^2}\!c\int_{-\infty}^{0}{^1}\!K(-s)\varphi(s)ds=\alpha \cdot {^2}\!c\int_{-\infty}^{0}e^{\alpha s}\varphi (s)ds . 
\end{aligned} \end{equation} Conversely, assume that $x(s)=\varphi(s)$ ($s<0$) for some bounded, continuous function $\varphi(s)$ and that the triple $(x(t), ^1\!v(t), ^2\!v(t))$ satisfies (\ref{Eq.MainTricked}) and (\ref{Eq.InitialCondTricked}). We want to check that $x(t)$ is a solution to (\ref{Eq.Main}). It is sufficient to show that $^1\!v(t)=(\Re x)(t)$. We consider first the more difficult case ${^2}\!c\ne 0$. Going back to \begin{equation}\label{w1w2} {^1}\!w=^2\!\!v\,({^2}\!c)^{-1},\,\,\, {^2}\!w=(^1\!v-{^0}\!c\,x-{^1}\!c\cdot{^1}\!w)({^2}\!c)^{-1} \end{equation} and using (\ref{Eq.MainTricked}) we easily obtain that ${^1}\!\dot{w}=-\alpha \cdot {^1}\!w + \alpha x, \ {^2}\!\dot{w}=-\alpha\cdot {^2}\!w + \alpha \cdot{^1}\!w$. The fundamental matrix $W(t)$ of the corresponding homogeneous system, i.e. the matrix-valued solution of the system with $\alpha x\equiv 0$, satisfying $W(0)=\begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix}$, is $$ W(t)=e^{-\alpha t}\begin{pmatrix} 1 & 0 \\ \alpha t & 1 \end{pmatrix}. $$ Hence \begin{gather*} {^1}\!w(t)=e^{-\alpha t}\cdot{^1}\!w(0)+\alpha\int_{0}^{t} e^{-\alpha (t- s)}x(s)ds, \\ {^2}\!w(t)=e^{-\alpha t}(\alpha t\cdot{^1}\!w(0)+{^2}\!w(0))+\alpha^2\int_{0}^{t} (t-s)e^{-\alpha (t- s)}x(s)ds. \end{gather*} From (\ref{Eq.InitialCondTricked}) and (\ref{w1w2}) we also deduce $$ {^1}\!w(0)=\alpha\int_{-\infty}^{0} e^{\alpha s}\cdot\varphi(s)ds, \quad {^2}\!w(0)=-\alpha^2\int_{-\infty}^{0} se^{\alpha s}\cdot\varphi(s)ds. $$ Evidently, this yields \begin{gather*} {^1}\!w(t)= \int_{-\infty}^{0}{^1}\!K(t-s)\varphi(s)ds+\int_{0}^{t}{^1}\!K(t-s)x(s)ds, \\ {^2}\!w(t)=\int_{-\infty}^{0}{^2}\!K(t-s)\varphi(s)ds + \int_{0}^{t}{^2}\!K(t-s)x(s)ds, \end{gather*} so that $^1\!v(t)={^0}\!c\,x(t)+{^1}\!c\cdot{^1}\!w(t)+{^2}\!c\cdot{^2}\!w(t) =(\Re x)(t)$. In the case $^2\!c=0$ (the weak generic delay kernel) $^1\!c >0$ since the system is supplied with delay effect. 
System (\ref{Eq.MainTricked}) then reads \begin{equation}\label{Eq.MainTricked-c-2=0} \begin{gathered} \dot{x}=F(Z)-G(Z)x\\ \dot{^1\!v}={^0}\!c\,(F(Z)-G(Z)x)+\alpha x-\alpha \cdot^1\!v. \end{gathered} \end{equation} The initial conditions in this case become \begin{equation}\label{Eq.InitialCondTricked-c-2=0} \begin{gathered} x(0)=\varphi(0) \\ ^1\!v(0)= {^0}\!c\,\varphi(0)+{^1}\!c \alpha\int_{-\infty}^{0} e^{\alpha s}\varphi(s)ds \end{gathered} \end{equation} Consider $^1\!w=(^1\!v-^0\!cx)(^1\!c)^{-1}$ and using (\ref{Eq.MainTricked}) we get $^1\!\dot{w}=-\alpha\cdot^1\!w+\alpha x$. Similarly to the first case we have that $^1\!v(t)={^0}\!c\,x(t)+{^1}\!c\cdot{^1}\!w(t)=(\Re x)(t)$, and we obtain the result. \begin{remark}\label{rem-c-2=0} \rm We can formally obtain (\ref{Eq.MainTricked-c-2=0}), (\ref{Eq.InitialCondTricked-c-2=0}) from (\ref{Eq.MainTricked}), (\ref{Eq.InitialCondTricked}) if we simply put ${^2}\!c=0$ in the system and in the initial conditions (\ref{Eq.InitialCondTricked}). Indeed, this will give $^2\!v(t)\equiv 0$ and hence (\ref{Eq.MainTricked-c-2=0}) and (\ref{Eq.InitialCondTricked-c-2=0}). By this, ${^2}\!c=0$ is a particular case of the general situation. \end{remark} \begin{remark}\label{rem-ODE-tricked} \rm In Section \ref{1sec} we observed that the assumption $^1\!v(t)\equiv x(t)$ for all $t\ge 0$ corresponds to the non-delay case. It is easy to see that the "tricked" system (\ref{Eq.MainTricked}) provides in this case two copies of the same non-delay equation. \end{remark} %%%%%%%Second step \noindent{\bf Subsection 3.2}. The second step consists in describing the modified linear chain trick for a quite arbitrary nonlinear delay equation. To simplify the notation, this step is performed for the scalar case, only. 
The following scalar nonlinear delay differential equation is considered: \begin{equation}\label{eq.basic} \dot{x}(t)=f(t, x(t), (\Re x)(t)), \quad t > 0 \end{equation} with the initial condition \begin{equation}\label{prehist} x(\tau)=\varphi (\tau), \quad \tau \le 0. \end{equation} The function $f(\cdot , \cdot, \cdot): [0,\infty)\times \mathbb{R}^2 \to \mathbb{R}$ has three properties. \begin{itemize} \item[(C1)] The function $f(\cdot,u,v)$ is measurable for any $u,v\in\mathbb{R}$. \item[(C2)] The function $f(\cdot,0,0)$ is bounded: $|f(t,0,0)|\le C$ ($t\ge 0$) for some constant $C$. \item[(C3)] The function $f$ is Lipschitz: There exists a constant $L$ such that \begin{equation}\label{f2} |f(t,^1\!u,^1\!v)-f(t,^2\!u,^2\!v)|\le L (|^1\!u-^2\!u|+|^1\!v-^2\!v|) \end{equation} for all $t\ge 0$, $^i\!u,^i\!v \in \mathbb{R}$. \end{itemize} Note that these three conditions imply that $|f(t,u,v)|\le L(|u|+|v|)+C$ for any $t \ge 0$ and $u,v\in\mathbb{R}$. The initial function $\varphi$ is bounded and measurable. The integral operator $\Re$ is assumed to be \begin{equation}\label{operator} (\Re x)(t)=\int_{-\infty}^{t}K(t-s)x(s)ds, \quad t>0, \end{equation} where \begin{gather}\label{trickFuncG} K(u)=\sum_{\nu=1}^{p}{^{\nu}}\!\!c \cdot^{\nu}\!\!K(u)\,,\\ \label{trickFuncG_j} ^{\nu}\!K(u)=\frac{\alpha^{\nu}\cdot u^{{\nu}-1}}{({\nu}-1)!}e^{-\alpha u}. \end{gather} The coefficients ${^\nu}\!c$ are real numbers, and it is also assumed that $\alpha >0$. Note that the operator (\ref{operator}) is a particular case of the operator (\ref{Eq.DelayOperator}) with $^{0}\!c=0$. If the initial function is defined on a finite interval $[-H,0]$, then one can put $x(\tau)=0$ for $\tau < -H$. The functions $^{\nu}\!K$ have the following properties: \begin{equation}\label{TrickPropertiesG} \begin{gathered} ^{\nu}\!K(\infty)=0, \\ ^{\nu}\!K(0)=0,\quad ({\nu}\ge 2.) \\ {^1}\!K(0)=\alpha \,. 
\end{gathered} \end{equation} It is also straightforward to show that \begin{equation}\label{TrickPropertiesG-1} \begin{gathered} \frac{d}{du}\,^{\nu}\!K(u)=\alpha \cdot ^{{\nu}-1}\!K(u) - \alpha\cdot ^{\nu}\!K(u) \quad ({\nu}\ge 2)\\ \frac{d}{du}\,^{\nu}\!K(u)=-\alpha \cdot^{\nu}\!K(u) \quad ({\nu}=1). \end{gathered} \end{equation} The classical linear chain trick (see e.g. \cite{Mc}) rewritten in the vector form would give \begin{equation}\label{trickVariables-z} ^{\nu}\!w(t)=\int_{-\infty}^{t}{^{\nu}}\!K(t-s)x(s)ds \quad ({\nu}=1, 2,\dots , p) \end{equation} yields \begin{equation}\label{TrickEq-x} (\Re x)(t) =\int_{-\infty}^{t}\sum_{{\nu}=1} ^{p}{^{\nu}}\!c \cdot^{{\nu}}\!K(t-s)x(s)ds = \sum_{{\nu}=1}^{p}{^{\nu}}\!c\cdot^{\nu}\!w(t), \end{equation} so that \begin{equation}\label{Eq.TrickPart1} \dot{x}(t)=f(t, x(t), \sum_{{\nu}=1}^{p}{{^\nu}\!c\cdot^{\nu}\!w(t)})=f(t, x(t), lw(t)), \end{equation} where \begin{equation}\label{Trick-functional} l= ({^1}\!c, \, {^2}\!c, \dots , ^p\!c), \end{equation} the coefficients ${^\nu}\!c$ being identical with the coefficients in (\ref{trickFuncG}). On the other hand, for ${\nu}\ge 2$ the functions $^{\nu}\!w$ satisfy $$ \frac{d}{dt}\,^{\nu}\!{w}(t) = \alpha \cdot^{{\nu}-1}\!w(t)-\alpha \cdot^{\nu}\!w(t), $$ while for ${\nu}=1$ one has $$ ^1\dot{w}(t) = -\alpha \cdot^1\!w(t)+\alpha x(t). $$ This gives the following system of ordinary differential equations: \begin{equation}\label{Trick-model} \dot{w}(t)=Aw(t)+\pi x(t), \quad t\ge 0, \end{equation} where \begin{equation}\label{matrixA} A= \begin{pmatrix} -\alpha & 0 & 0 & \dots & 0 \\ \alpha & -\alpha & 0 & \dots & 0 \\ 0 & \alpha & -\alpha & \dots & 0 \\ \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & \dots & \alpha & -\alpha \end{pmatrix} \quad\text{and}\quad %\label{Trick-func-p} \pi =\begin{pmatrix} \alpha \\ 0\\ \vdots\\ 0 \end{pmatrix}. 
\end{equation} Clearly, the system of ordinary differential equations (\ref{Eq.TrickPart1}), (\ref{Trick-model}) is equivalent to the delay differential equation (\ref{eq.basic}), (\ref{operator}). The initial condition (\ref{prehist}) can be rewritten in terms of the new functions as follows: \begin{equation}\label{Trick-func-q-scalar} ^{\nu}\!w(0)=\int_{-\infty}^{0}{^{\nu}}\!K(-\tau)\varphi(\tau)d\tau =(-1)^{{\nu}-1}\frac{\alpha ^{\nu}}{({\nu}-1)!}\int_{-\infty}^{0}\tau^{{\nu}-1}\cdot e^{\alpha \tau}\varphi (\tau)d\tau, \end{equation} ${\nu}=1, \dots , p$. As before, $x(0)=\varphi(0)$. The initial conditions (\ref{Trick-func-q-scalar}) can be represented in a vector form as well (see e.g. \cite{PoSh}): \begin{equation}\label{Trick-func-q-vector} w(0)=\int_{-\infty}^{0}e^{A(-\tau)}\pi\varphi(\tau)d\tau. \end{equation} As we already have mentioned, this classical version of the linear chain trick is not directly suitable for gene regulatory networks as the regulatory functions $Z_{i}$ depend only on one variable, while the "trick" gives a sum of the form (\ref{TrickEq-x}). That is why we use a modification of the linear chain trick, which is a particular case of the general reduction scheme introduced in \cite{MPS}. First of all, let us observe that the solution to the auxiliary system (\ref{Trick-model}) can be represented as follows: \begin{equation}\label{Eq.TrickW-funct} \begin{aligned} w(t) & = e^{At}w(0)+\int_0^t e^{A(t-s)}\pi x(s)ds\\ & =e^{At}\int_{-\infty}^{0}e^{A(-\tau)}\pi\varphi(\tau)d\tau +\int_0^t e^{A(t-s)}\pi x(s)ds \\ & =\int_{-\infty}^t e^{A(t-s)}\pi x(s)ds, \end{aligned} \end{equation} as $x(s)=\varphi(s)$ for $s\le 0$. Thus, \begin{equation}\label{Eq.DelayOperatorRepresentation} (\Re x)(t)=\int_{-\infty}^t \Big(\sum_{\nu=1}^p{^{\nu}}\!c\cdot^{\nu}\!w\Big)ds =l\int_{-\infty}^t e^{A(t-s)}\pi x(s)ds. \end{equation} This formula is a starting point for a modification of the linear chain trick which is used in this paper. 
Below we generalize (in a matrix form) the procedure described in Subsection 3.1. Let us put $$ ^1\!v=\sum_{\nu=1}^p {^{\nu}}\!c\cdot^{\nu}\!w, \quad ^{\nu}\!v=\sum_{j=1}^{p-\nu+1}{^{{j+\nu-1}}\!c}\cdot^{j}\!w \ \ (\nu=2,\dots,p). $$ Formally, the auxiliary system of the same form as in (\ref{Trick-model}) is exploited. However, the matrix $A$, the solution $w(t)$, the functionals $\pi$ and $l$ will be changed to $\tilde{A}=A^T$, \begin{equation}\label{3.26m} \begin{aligned} v(t) & = \int_{-\infty}^t e^{\tilde{A}(t-s)}\tilde{\pi} x(s)ds, \end{aligned} \end{equation} \begin{equation}\label{Trick-func-p-modified} \tilde{\pi} x=\alpha x \begin{pmatrix} {^1}\!c\\ ^{2}\!c\\ \vdots\\ ^p\!c \end{pmatrix} \end{equation} and %\label{Trick-functional-new} $ \tilde{l} = (1, 0, \dots , 0, 0)$, respectively. It is claimed, in other words, that System (\ref{eq.basic}) with Condition (\ref{prehist}) is equivalent to the following system of ordinary differential equations: \begin{equation}\label{Eq.trickedModified} \begin{gathered} \dot{x}(t)=f(t,x(t),^1\!\!v(t))\\ \dot{v}=\tilde{A}\cdot v+\tilde{\pi}x(t)\\ \end{gathered} \end{equation} with the initial conditions $x(0)=\varphi(0)$ and \begin{equation}\label{Trick-func-q-vector-modified} v(0)=\int_{-\infty}^{0}e^{\tilde{A}(-\tau)}\tilde{\pi}\varphi(\tau)d\tau. \end{equation} Note that, unlike the right-hand side in the classical linear chain trick (see (\ref{Eq.TrickPart1})), the right-hand side in (\ref{Eq.trickedModified}) depends only on two state variables: $x$ and $^1\!v$. This is crucial for applications which are of interest in this paper. To prove (\ref{Eq.trickedModified}), one needs to show that the representation (\ref{Eq.DelayOperatorRepresentation}) holds true if $A$, $\pi$ and $l$ are replaced by $\tilde{A}$, $\tilde{\pi}$ and $\tilde{l}$, respectively. 
This is done by writing down the fundamental matrix of the corresponding homogeneous system: \begin{equation}\label{Trick-FundamentalMatrix} Y(t) = e^{-\alpha t} \begin{pmatrix} 1 & \alpha t & \frac{(\alpha t)^{2}}{2!} & \dots & \frac{(\alpha t)^{p-1}}{(p-1)!} \\ 0& 1 & \alpha t & \dots & \frac{(\alpha t)^{p-2}}{(p-2)!} \\ 0& 0 & 1& \dots& \vdots \\ \vdots & \vdots & \ddots & \ddots & \alpha t \\ 0 & 0 & \dots & 0 & 1 \end{pmatrix}. \end{equation} Then a direct calculation proves the result. A similar argument gives (\ref{Trick-func-q-vector-modified}). \begin{remark}\label{remPositiveness} \rm Assume that $v(t)$ is a solution to $\dot{v}=\tilde{A}\cdot v+\tilde{\pi} x(t)$, $A$, $\tilde{\pi}$ are given by (\ref{matrixA}) and (\ref{Trick-func-p-modified}), respectively. If now ${^\nu}\!c\ge 0$ for $\nu =1,\dots p$, $^{\nu}\!v(0)\ge 0$ for $\nu =1,\dots p$, and $x(t)\ge 0$ for all $t\ge 0$, then $^{\nu}\!v(t)\ge 0$ for all $t\ge 0, \nu =1,\dots ,p$, as well. It follows easily from the representation formula for the solution $v(t)$ and the formula (\ref{Trick-FundamentalMatrix}) for the fundamental matrix. \end{remark} %%%%%%Third step \noindent{\bf Subsection 3.3}. Finally, let us now go back to the general delay system \eqref{Eq.MainSystem}. To simplify the notation we will write $Z$ for $(Z_1,\dots,Z_m)$. Below we use the formulas obtained in the second part of the section. First of all let us observe that the delay operators are now slightly different from those studied in the previous part of the section: one term is added, namely ${^0}\!c_{i}x_i$. However, this is not a big problem: we will replace $^1\!v$ by the input variable $y={^0}\!c\,x+^1\!\!v$ arriving, as we will see, at a slightly different system of ordinary differential equations. Indeed, differentiating $y$ gives $$ \dot{y}={^0}\!c\,\dot{x}+^1\!\!\dot{v}={^0}\!cf(t,x,y)-\alpha \cdot^1\!w+ \alpha\cdot ^2\!w+\alpha \cdot {^1}\!c\, x={^0}\!cf(t,x,y)-\alpha y+\alpha\cdot^2\!w+\alpha x({^0}\!c+{^1}\!c). 
$$ For the sake of notational simplicity we still want $y$ to coincide with the first coordinate $^1\!v$ of the vector $v$; we therefore redefine $^1\!v$ to be ${^0}\!c\,x$ plus the ``old'' $^1\!v$, so that $y=^1\!v$. For \eqref{Eq.MainSystem} this results in the following system of ordinary differential equations: \begin{equation}\label{Eq.Trick-model-general} \begin{gathered} \dot{x_i}(t)=F_i(Z)-G_i(Z)x_i(t) \\ \dot{v}_{i}(t)=A_{i}v_{i}(t)+\Pi_{i} (x_i(t)) \quad t >0 \\ Z_k=\Sigma(y_{i(k)},\theta_k, q_k), \quad y_i=^1\!\!v_{i} \quad (i=1,\dots ,n), \end{gathered} \end{equation} where \begin{equation}\label{matrixA-i} A_{i}= \begin{pmatrix} -\alpha_i & \alpha_i & 0 & \dots & 0 \\ 0 & -\alpha_i & \alpha_i & \dots & 0 \\ 0 & 0 & -\alpha_i & \dots & 0 \\ \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & \dots & 0 & -\alpha_i \end{pmatrix}, \quad {v}_{i}=\begin{pmatrix} ^1\!v_{i}\\ ^{2}\!v_{i}\\ \vdots\\ ^p\!v_{i}\\ \end{pmatrix}, \end{equation} and \begin{equation}\label{vectorPi} \Pi_{i} (x_i):=\alpha_i x_i\pi_{i} + {^0}\!c_{i}f_i(Z,x_i) \end{equation} with \begin{equation}\label{Eq.pi-i,f-i1} \pi_{i}:=\begin{pmatrix} {^0}\!c_{i}+{^1}\!c_{i}\\ ^{2}\!c_{i}\\ \vdots\\ ^p\!c_{i}\\ \end{pmatrix}, \quad f_i(Z,x_i):=\begin{pmatrix} F_i(Z)-G_i(Z)x_i\\ 0\\ \vdots\\ 0\\ \end{pmatrix}. \end{equation} Recall that, according to the assumptions on System \eqref{Eq.MainSystem}, $F_i, G_i$ are real functions which are affine in each $Z_k$ and which satisfy $F_i\ge 0, G_i\ge \delta>0$ for $0\le Z_k\le 1$. Note that the notation in (\ref{Eq.Trick-model-general}) is chosen in such a way that the first coordinate $^1\!v_{i}$ always coincides with the $i$-th input variable $y_i$. For the sake of simplicity the notation $^1\!v_{i}$ in (\ref{Eq.Trick-model-general}) will be kept in the sequel. Assume now that $Z_k=\mbox{const}$ ($k=1,\dots,m$). 
Then System (\ref{Eq.Trick-model-general}) becomes affine: \begin{equation}\label{Eq.MainSystem-Sect4InBoxes} \begin{gathered} \dot{x}_i(t)=\psi_i-\gamma_ix_i(t) \\ \dot{v}_{i}(t)=A_{i}v_{i}(t)+\bar\Pi_{i}( x_i(t)), \quad t >0, \quad i=1,\dots ,n, \end{gathered} \end{equation} where $y_i=^1\!\!v_{i}$, $\psi_i\ge 0, \gamma_i >0$, and \begin{equation}\label{vectorPi-insideboxes} \bar\Pi_{i} (x_i):= \alpha_i x_i \begin{pmatrix} {^0}\!c_{i}+{^1}\!c_{i}\\ ^{2}\!c_{i}\\ \vdots\\ ^p\!c_{i} \end{pmatrix} + {^0}\!c_{i}\begin{pmatrix} \psi_i-\gamma_i x_i\\ 0\\ \vdots\\ 0 \end{pmatrix}. \end{equation} \section{Some definitions}\label{SecFormalization} In this section we give a summary of some general notation and definitions related to geometric properties of System (\ref{Eq.Trick-model-general}) in the limit case ($q_k=0, \ k=1,\dots,m$). The notation and similar definitions in the non-delay case were introduced in the paper \cite{PlahteKjogl}. According to the idea described in the previous section System (\ref{Eq.Trick-model-general}) replaces the delay system \eqref{Eq.MainSystem}. The system of ordinary differential equations (\ref{Eq.Trick-model-general}) is more general than the system studied in \cite{PlahteKjogl} and may have different properties as well. For this reason, some definitions from \cite{PlahteKjogl} have to be revised. We start with the notation which we adopt from \cite{PlahteKjogl}. In what follows, it is assumed that \begin{itemize} \item $M:=\{1,\dots ,m\}, \ \ J:=\{1,\dots ,j\}, \ \ N:= \{1,\dots ,n\}, \ \ n\le j ,\ m\le j \\ (\mbox{i. 
e.} \ \ N \subset J, M\subset J);$ \item $R:=M-S \ \mbox{for a given} \ S\subset M;$ \item $A^B\ \mbox{consists of all functions}\ v: B\to A;$ \item $a_R:= (a_r)_{r\in R} \ \ (a_R\in A^R), \ \ \ a_S:= (a_s)_{s\in S} \ \ (a_S\in A^S);$ \end{itemize} The following system of ordinary differential equations, generalizing System \eqref{Eq.MainSystem} in the limit case ($q_k=0, \ k=1,\dots,m$), is used in this section \begin{equation}\label{Eq.GeneralizedOrdinarySystem} \dot{u}(t)=\Phi(Z,u(t)), \ \ \ t>0, \end{equation} where $u=(u_1,\dots, u_j)$, $Z=(Z_1,\dots, Z_m)$, $Z_k=\Sigma(u_{i(k)},\theta_k,0)$ for $k\in M$ (i.e. $Z_k$ is the unit step function with the threshold $\theta_k>0$), $i(k)$ is a function from $M$ to $N$. The function $\Phi: [0,1]^M\times \mathbb{R}^{J}\to\mathbb{R}^{J}$ is continuously differentiable in $Z\in [0,1]^M$ for all $u\in \mathbb{R}^{J}$ and affine in the vector variable $u\in \mathbb{R}^{J}$ for all $Z\in [0,1]^M$. These assumptions are, for example, fulfilled for System (\ref{Eq.Trick-model-general}) where $u_i$ coincides with $x_i$ for $i\in N$ and with one of the auxiliary variables $^{\nu}\!v_{i}$ (appropriately numbered) for $i\in J-N$. In fact, it is the only example which is of interest in this paper. However, System (\ref{Eq.GeneralizedOrdinarySystem}) is used to keep the notation under control. The assumptions imposed on System (\ref{Eq.GeneralizedOrdinarySystem}) are needed for the following reason: if one replaces the step functions $\Sigma(u_{i(k)},\theta_k,0)$ with the sigmoid functions $\Sigma(u_{i(k)},\theta_k,q_k)$ ($q_k \in (0,q^0)$), satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4} from Section \ref{2sec}, then for any $u^0\in \mathbb{R}^{J}$ there exists a unique (local) solution $u(t)$ to (\ref{Eq.GeneralizedOrdinarySystem}) satisfying $u(0)=u^0$. As $q_k >0$, the function $\Sigma(u_{i(k)},\theta_k,q_k)$ is smooth w.r.t. $u_{i(k)}$ for all $k\in M$, so that the unique solution does exist. 
Assume again that {all} $q_k=0$. Then the right-hand side of System (\ref{Eq.GeneralizedOrdinarySystem}) can be discontinuous, namely, if one or several $u_{i(k)}$ ($k\in M$) assume their threshold values $u_{i(k)}=\theta_k$. We associate a Boolean variable $B_k$ to each $Z_k$ by $B_k=0$ if $u_{i(k)}<\theta_k$ and $B_k=1$ if $u_{i(k)}>\theta_k$. Let $\Theta$ denote the set $\{u\in\mathbb{R}^J: \exists k\in M: u_{i(k)}=\theta_k\}$. This set contains all discontinuity points of the vector-function $$ f(\Sigma(u_{i(k)},\theta_k,0)_{k\in M}, (u_i)_{i\in J}) $$ and is equal to the space $\mathbb{R}^J$ minus a finite number of open, disjoint subsets of $\mathbb{R}^J$. Inside each of these subsets one has $Z_k=B_k$, where $B_k=0$ or $B_k=1$ for all $k\in M$, so that System (\ref{Eq.GeneralizedOrdinarySystem}) becomes affine: \begin{equation}\label{Eq.GeneralizedOrdinarySystemInBoxes} \dot{u}(t)=\Phi(B,u(t)):=A_Bu(t)+f_B, \quad t>0, \end{equation} where $B:=(B_k)_{k\in M}$ is a constant Boolean vector. The set of all Boolean vectors $ B=( B_1,\dots , B_m)$ (where $ B_k=0$ or $1$) will be denoted by $\{0, 1\}^M$. Thus, if the initial value of a possible solution belongs to one of these subsets, then the local existence and uniqueness result can be easily proved. The global existence problem is, however, more complicated (see e.g. \cite{PlahteKjogl}). This problem is not addressed here: global existence in the case of smooth response functions and local existence outside the discontinuity set in the case of the step functions are sufficient for our purposes. System (\ref{Eq.GeneralizedOrdinarySystem}) is studied below under the assumption $q_k=0, \ k\in M $, so that $Z_k=\Sigma(u_{i(k)},\theta_k,0)$. Assume that $u_{i(k)}$ has thresholds $\theta_k,\,\theta_l$ such that $\theta_k\neq \theta_l$ if $k\neq l$. The next three definitions can be found in \cite{PlahteKjogl}. 
\begin{definition}\label{defRegDomain} \rm Given a Boolean vector $ B\in \{0, 1\}^M$, the set $\mathcal{B}( B)$, which consists of all $u\in\mathbb{R}^J$, where $(Z_k(u_{i(k)}))_{k\in M}=B$, is called \emph{a regular domain} (or a box). \end{definition} \begin{remark}\label{remBoxNonempty} \rm If some variables $u_i$ have more than one threshold, then some Boolean vectors can generate empty boxes. The necessary and sufficient condition for $\mathcal{B}( B)$ to be non-empty reads as follows: $i(k)=i(l) \ \& \ \theta_k > \theta_l \ \Rightarrow B_k\le B_l$. This is because $\Sigma(u_{i(k)},\theta_k,0)\le \Sigma(u_{i(l)},\theta_l,0)$. \end{remark} Any box is an open subset of the space $\mathbb{R}^J$, as $\Sigma(\theta_k,\theta_k,0)=0.5$ (according to Assumption \ref{Assumption2.2}) excludes the value $u_{i(k)}=\theta_k$. Only the variables $u_1,\dots,u_n$ can have (one or more) thresholds, the others have no threshold at all. In the system (\ref{Eq.Trick-model-general}), these variables correspond to those that are different from any $y_i$ ($i\in N$), i.e. either to $x_i$ (if $x_i$ is ``delayed'' and thus different from $y_i$), or to one of the auxiliary variables $^{\nu}\!v_{i}$ with $\nu\ge 2$. \begin{definition}\label{defSingDomain} \rm Given a subset $S \subset M, S \ne \emptyset$ and a Boolean vector $ B_R\in \{0, 1\}^R$, where $R=M-S$, the set $\mathcal{SD}(\theta_S, B_R)$, which consists of all $u\in\mathbb{R}^J$, where $B_r=Z_r(u_{i(r)}) \ ({r\in R})$ and $u_{i(s)}=\theta_s \ (s\in S)$, is called \emph{a singular domain}. \end{definition} \begin{remark}\label{remSDnonempty} \rm Again, if some variables $u_i$ have more than one threshold, then some subsets $S$ can generate empty singular domains. 
The necessary and sufficient conditions for $\mathcal{SD}(\theta_S, B_R)$ to be non-empty are as follows: \begin{itemize} \item[(1)] $i(k)=i(l), \ k,l \in R, \ \& \ \theta_k > \theta_l \ \Rightarrow B_k\le B_l$, \item[(2)] $i(k)=i(l), \ k,l \in S \ \Rightarrow k=l$ (this is because any point can only belong to one threshold for each variable $u_i$), \item[(3)] $i(k)=i(l), \ k \in R, \,l \in S\ \& \ \theta_k > \theta_l \ \Rightarrow B_k=0$, \item[(4)] $i(k)=i(l), \ k \in R, \,l \in S\ \& \ \theta_k < \theta_l \ \Rightarrow B_k=1$. \end{itemize} \end{remark} Any $\mathcal{SD}(\theta_S, B_R)$ is an open subset of the linear manifold $\{u_N: u_{i(s)}=\theta_s, s\in S\}$. The boxes are separated by singular (switching) domains. A singular domain can be described by its singular variables $u_{i(s)} (s\in S)$ which have threshold values in $\mathcal{SD}$ and by its regular variables $u_{i(r)} (r\in R)$. The variables $u_{i(r)} (r\in R)$ never obtain their threshold values in $\mathcal{SD}$. \begin{definition}\label{defWall} \rm Given a number $\mu \in M$ and a Boolean vector $ B_R\in \{0, 1\}^R$, where $R=M\backslash\{\mu \}$, the singular domain $\mathcal{SD}(\theta_{\mu}, B_R)$ is called \emph{a wall}. \end{definition} In other words, a wall is a singular domain of codimension 1. It is always open, and it is nonempty since $i(k)\neq i(\mu)$ for all $k\in M\backslash \{\mu\}$ (Remark \ref{remSDnonempty}). \begin{example} \rm Consider the variables $u_1$ with the thresholds $\theta_1, \theta_2$ $(\theta_1< \theta_2)$ and $u_2$ with the threshold $\theta_3$. The phase space is then the union of six boxes, seven walls and two singular domains of codimension 2. Let us consider boxes. For the first box we have $u_1<\theta_1, u_1<\theta_2$ and $u_2>\theta_3$, the corresponding Boolean vector is $\{0,0,1\}$. Similarly we obtain five other boxes corresponding to the following Boolean vectors $\{1,0,1\}$, $\{1,1,1\}$, $\{0,0,0\}$, $\{1,0,0\}$, $\{1,1,0\}$ (see Figure~3). 
For example, the Boolean vectors $\{0,1,0\}$ and $\{0,1,1\}$ generate empty boxes. To describe walls between two adjacent boxes we should replace the only Boolean variable which is different for the two boxes. The wall between the boxes $\mathcal{B}(1,0,1)$ and $\mathcal{B}(1,1,1)$ is denoted by $\mathcal{SD}(1,\theta_2,1)$. For this wall one has $u_1>\theta_1$, $u_1=\theta_2$ and $u_2>\theta_3$. In addition, we have the following walls $\mathcal{SD}(\theta_1,0,1)$, $\mathcal{SD}(0,0,\theta_3)$, $\mathcal{SD}(\theta_1,0,0)$, $\mathcal{SD}(1,0,\theta_3)$, $\mathcal{SD}(1,\theta_2,0)$ and $\mathcal{SD}(1,1,\theta_3)$. The singular domains of codimension 2 are the limit points for four boxes. They are $u_1=\theta_1, u_2=\theta_3$ and $u_1=\theta_2, u_2=\theta_3$. But the subsets $\mathcal{SD}(\theta_1,1,1)$, $\mathcal{SD}(0,\theta_2,0)$ generate empty singular domains. \end{example} \begin{figure}[ht] \begin{center} \includegraphics[height=45mm]{fig3} %{boxe.eps}
\end{center} \caption{} \end{figure} System (\ref{Eq.GeneralizedOrdinarySystem}) can be regarded, at least in some situations, as a switching dynamical system. Inside any regular domain, it is an affine system of differential equations. Switching between domains can only occur if a trajectory hits a singular domain, usually a wall. But as it is demonstrated in \cite{PlahteKjogl}, sliding modes can press trajectories into singular domains of lower dimensions as well. It is also shown in \cite{PlahteKjogl} that in such cases the dynamics cannot be described by a simple indication of how the system switches between the regular domains. In the non-delay case walls can be either attractive (``black''), expelling (``white'') or ``transparent'' (see \cite{Plahte-94}). In the delay case, walls can also be of a mixed type. That is why the properties of ``blackness'', ``whiteness'' and ``transparency'' can now only be described locally, i.e. without using the focal points as in the non-delay case (see \cite{PlahteKjogl}). 
Consider the wall $\mathcal{SD}(\theta_{\mu}, B_R)$ which lies between the box $\mathcal{B}( B^0)$, where $Z_{\mu}=0$, and the box $\mathcal{B}( B^1)$, where $Z_{\mu}=1$. This gives two different systems (\ref{Eq.GeneralizedOrdinarySystemInBoxes}): for $B=B^0$ and $B=B^1$, respectively. Let $P$ be a point in a wall $\mathcal{SD}(\theta_{\mu}, B_R)$ and $u(t,\nu, P)$ be the solution to (\ref{Eq.GeneralizedOrdinarySystemInBoxes}) with $B=B^{\nu}$, which satisfies $u(0,\nu, P)=P$ ($\nu=0,1$). Denote by $\dot{u}_\mu(0,Z,P)$ the component number $\mu$ (which is orthogonal to the wall $u_\mu=\theta_\mu$) of the velocity vector $\dot{u}(t,Z,P)$ at $P$ for $t=0$ $(Z=0$ or $1)$. \begin{definition}\label{defwalltypes} \rm A point $P\in\mathcal{SD}(\theta_{\mu}, B_R)$ is called \begin{itemize} \item ``black'' if $\dot{u}_{\mu}(0,1,P)<0$ and $\dot{u}_{\mu}(0,0,P)>0$; \item ``white'' if $\dot{u}_{\mu}(0,1,P)>0$ and $\dot{u}_{\mu}(0,0,P)<0$; \item ``transparent'' if $\dot{u}_{\mu}(0,1,P)<0$ and $\dot{u}_{\mu}(0,0,P)<0$, or if $\dot{u}_{\mu}(0,1,P)>0$ and $\dot{u}_{\mu}(0,0,P)>0$. \end{itemize} \end{definition} \begin{definition}\label{defwalltypesContinued} \rm We say that a wall $\mathcal{SD}(\theta_{\mu}, B_R)$ is {black} (white, transparent) if any point in it, except for a nowhere dense set, is {black} (white, transparent). \end{definition} Exceptional points correspond to the trajectories that are not transversal to the hyperplane $u_{\mu}=\theta_{\mu}$, i.e. where $\dot{u}_{\mu}=0$. Clearly, at any transparent point the solution to (\ref{Eq.GeneralizedOrdinarySystem}) can be extended to some neighborhood of this point. Thus, at transparent points System (\ref{Eq.GeneralizedOrdinarySystem}) can be characterized as a switching dynamical system. However, at black points the system is of a more complicated nature (see \cite{PlahteKjogl}). 
\section{Stationary points}\label{5sec} We are studying the system of ordinary differential equations (\ref{Eq.Trick-model-general}), which is equivalent to the delay system \eqref{Eq.MainSystem}. The definitions from the previous section are now applied to (\ref{Eq.Trick-model-general}) without further comments. A very important advantage of the logoid nonlinearities, satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4}, unlike more general sigmoid nonlinearities, satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.3}, is the localization principle. Roughly speaking, we may remove all regular variables in the stability analysis, because they do not influence local properties of solutions around stationary points. This principle is of particular importance for delay systems (which are non-local). On the other hand, the localization principle helps to simplify both notation and proofs. It is easy to define stationary points for this system if $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ are all smooth ($q_k>0$). However, in this case the stability analysis and computer simulations may be cumbersome and time-consuming. To simplify the model, one uses the step functions $Z_k=\Sigma(y_{i(k)},\theta_k,0)$ and the corresponding limit system. The latter becomes, however, discontinuous if at least one $y_i$ assumes one of its threshold values. If a stationary point of the limit system does not belong to the discontinuity set, then the analysis of the dynamics of the perturbed smooth systems ($q_k>0, \ k=1,\dots,m$) is almost trivial (see below). If, however, a (well defined) stationary point of the perturbed system approaches the discontinuity set of the limit system, the corresponding dynamics may be subject to irregularities, and on the other hand, an independent and verifiable definition of a (stable and unstable) stationary point of the limit system should be given. 
The natural idea is to replace the step functions $Z_k=\Sigma(y_{i(k)},\theta_k,0)$ with smooth functions $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ ($q_k>0$, $k=1,\dots,m$), which leads to the following formal definition. \begin{definition}\label{defSSP} \rm A point $\hat{P}$ is called a stationary point for System (\ref{Eq.Trick-model-general}) with $Z_k=\Sigma(y_{i(k)},\theta_k,0)$ ($k\in M$) if for any set of functions $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ ($k\in M$), satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4} from Section \ref{2sec}, there exist a number $\varepsilon>0$ and points $P(q)$, $q=(q_1,\dots,q_m), \ q_k\in (0,\varepsilon)$ ($k\in M$) such that \begin{itemize} \item $P(q)$ is a stationary point for System (\ref{Eq.Trick-model-general}) with $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ ($k\in M$); \item $P(q)\to \hat{P}$ as $q\to 0$ (i.e. to the zero vector). \end{itemize} \end{definition} If the limit point $\hat{P}$ does not belong to the discontinuity set of System (\ref{Eq.Trick-model-general}), i.e. if $y_{i(k)}\ne\theta_k$ ($k\in M$), then $\hat{P}$ simply becomes a conventional stationary point for the limit system. To see it, we assume that $Z=B$ at $\hat{P}$ for some Boolean vector $B$. Then the coordinates of $\hat{P}$ satisfy \begin{equation}\label{Eq.ForRSP} \begin{gathered} F_i(B)-G_i(B)x_i = 0 \quad (i\in N)\\ A_{i}v_{i}+\Pi_{i} (x_i)=0. \\ \end{gathered} \end{equation} Here neither the delay operator $\Re$, nor the logoids $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ ($k\in M$, $q_k>0$), satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4} from Section \ref{2sec}, influence the position of the stationary point. Conversely, due to Assumption \ref{Assumption2.4} we have that $Z_k=B_k$ at $\hat{P}$ for sufficiently small $q_k>0$ and any $k\in M$. This is because $\hat{P}$ lies at a positive distance from the discontinuity set of the system. 
The smooth version of System (\ref{Eq.Trick-model-general}) in the vicinity of $\hat{P}$ will just be equal to the limit system, so that $P(q)=\hat{P}$ for sufficiently small $q$. The point $\hat{P}$ thus obtained is called a \textit{regular stationary point} (RSP) (see \cite{Glass}, \cite{Plahte-94}). It is also easy to calculate this point (and by this also $P(q)$): \begin{equation}\label{Eq.FocalPoints-Sect4} \begin{gathered} \hat{x}_i=F_i(B) G_i^{-1}(B), \\ \hat{v}_i=-(A_{i})^{-1}\Pi_{i}(\hat{x}_i) \quad (i\in N) \end{gathered} \end{equation} (the matrix $A_{i}$ is given by (\ref{matrixA-i}) and is therefore invertible). The situation is, however, different if $\hat{P}$ belongs to the discontinuity set. Such a $\hat{P}$ is called a \textit{singular stationary point} (SSP) (see \cite{Glass}, \cite{Plahte-94}). In this case we can only get rid of regular variables. In quite a similar way, we can define the notion of a stable stationary point (see e.g. \cite{Mestl-95}). \begin{definition}\label{defStableSSP} \rm A stationary point $\hat{P}$ for (\ref{Eq.Trick-model-general}) with $Z_k=\Sigma(y_{i(k)},\theta_k,0)$ ($k\in M$) is called asymptotically stable if for any set of functions $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ ($k\in M$), satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4} from Section \ref{2sec}, there exist a number $\varepsilon>0$ and points $P(q)$, $q=(q_1,\dots,q_m), \ q_k\in (0,\varepsilon)$ ($k\in M$) such that \begin{itemize} \item $P(q)$ is an asymptotically stable stationary point for System (\ref{Eq.Trick-model-general}) with $Z_k=\Sigma(y_{i(k)},\theta_k,q_k)$ ($k\in M$); \item $P(q)\to \hat{P}$ as $q\to 0$ (i.e. to the zero vector). \end{itemize} \end{definition} In what follows, a crucial role will be played by the Jacoby matrix $\frac{\partial}{\partial Z_{S}}F_S(Z)-\frac{\partial}{\partial Z_{S}}G_S(Z)y_{i(S)}$. 
The entry in the $s$-th row and the $\sigma$-th column of this matrix amounts to $\frac{\partial}{\partial Z_{\sigma}}F_{i(s)}(Z)-\frac{\partial}{\partial Z_{\sigma}}G_{i(s)}(Z)y_{i(s)}$. In other words, \begin{equation}\label{Eq.DetJacobian} \begin{aligned} J_S(Z_S,B_R,y_{i(S)})&= \frac{\partial}{\partial Z_{S}}F_S(Z_S,B_R) -\frac{\partial}{\partial Z_{S}}G_S(Z_S,B_R)y_{i(S)} \\ &= \Big[\frac{\partial}{\partial Z_{\sigma}}F_{i(s)}(Z_S,B_R)-\frac{\partial}{\partial Z_{\sigma}}G_{i(s)}(Z_S,B_R)y_{i(s)} \Big]_{s,\sigma\in S}. \end{aligned} \end{equation} Using Remark \ref{remSDnonempty} it is easy to see that if the singular domain $\mathcal{SD}(\theta_S, B_R)$ is not empty, then this Jacoby matrix is an $|S|\times|S|$-matrix. Below we will use Proposition 7.4 from the paper \cite{PoSh}: \begin{proposition}[\cite{PoSh}]\label{paper9} Given arbitrary $i\in N,\, x_i, y_i\in \mathbb{R}$, the system \begin{gather*} A_{i}v_{i}+\alpha_i x_i\pi_{i}=0 \\ ^1\!v_{i}=y_i, \end{gather*} where $A_i$ and $\pi_i$ are defined by (\ref{matrixA-i}) and (\ref{Eq.pi-i,f-i1}), respectively, has a solution $^1\!v_i, ^2\!v_i, \dots, ^p\!v_i$ if and only if $x_i=y_i$. In this case the solution is unique. \end{proposition} \begin{theorem}\label{thSSP} Assume that for some $S\subset M$ the system of algebraic equations \begin{equation}\label{Eq.ForSSP} \begin{gathered} F_{i(S)}(Z_S,B_R)-G_{i(S)}(Z_S,B_R)\theta_{i(S)} = 0, \\ F_{i(R)}(Z_S,B_R)-G_{i(R)}(Z_S,B_R)y_{i(R)}=0 \end{gathered} \end{equation} with the constraints \begin{equation}\label{Eq.ConstraintsSSP} \begin{gathered} 0<Z_s<1 \quad (s\in S), \\ Z_r(y_{i(r)})=B_r \quad (r\in R) \end{gathered} \end{equation} has a solution $\hat{Z}_S$, $\hat{y}_{i(R)}$ satisfying \begin{equation}\label{Eq.JacobianNe0} \det J_S(\hat{Z}_S,B_R,\theta_S)\neq 0. \end{equation} Then System (\ref{Eq.Trick-model-general}) with $Z_k=\Sigma(y_{i(k)},\theta_k,0)$ ($k\in M$) has a singular stationary point $\hat{P}$ belonging to the singular domain $\mathcal{SD}(\theta_S, B_R)$. \end{theorem} \begin{proof} First of all, we construct the point $\hat{P}$ from the solution of (\ref{Eq.ForSSP})-(\ref{Eq.ConstraintsSSP}): we put $\hat{x}_s=\hat{y}_s=\theta_s$ ($s\in S$), $\hat{x}_{i(r)}=\hat{y}_{i(r)}$ ($r\in R$), and let $\hat{v}_{i}$ ($i\in N$) be the (unique, due to Proposition \ref{paper9}) solutions of the systems \begin{equation}\label{Eq.Th1-06aux} \begin{gathered} A_{i}\hat{v}_{i}+\alpha_i \hat{x}_i\pi_{i}=0 \\ ^1\!\hat{v}_{i}=\hat{y}_i. \end{gathered} \end{equation} The idea of the rest of the proof is to replace the step functions $Z_s=\Sigma(y_s,\theta_s,0)$ with the smooth logoid functions $Z_s=\Sigma(y_s,\theta_s,q_s)$, $q_s>0$. Then, using the inverse sigmoid functions, we arrive at a system of functional equations w.r.t. $Z_s$ which is resolved by the implicit function theorem. This gives the values of $Z_s$ depending on the vector parameter $q=q_s \ (q_s\ge 0)$. Then we restore, step by step, the other variables, namely $y(q)$, $x(q)$ and finally, $^{\nu}\!v_{i}(q)$. All of them depend continuously on the parameter $q$. 
Letting $q_s$ go to zero gives SSP in the wall $\mathcal{SD}(\theta_S, B_R)$. To implement this idea we rewrite the stationarity conditions for the variables $y_S$ in the matrix form. It gives \begin{equation}\label{Eq.Th1-01} F_S(Z_S,B_R)-G_S(Z_S,B_R)y_S=0, \end{equation} which is an equation in $\mathbb{R}^S$. Originally, i.e. in (\ref{Eq.ForSSP}), it was assumed that $y_S=\theta_S$. If the step functions are replaced by smooth sigmoid functions, then this equality may be violated. However, we may assume without loss of generality that the regular variables satisfy $Z_R=B_R$ for sufficiently small $q$ (see Assumption \ref{Assumption2.4}). Let $Z_S=\Sigma(y_S,\theta_S,q):=(\Sigma(y_s,\theta_s,q_s))_{s\in S}$, where $q_s>0$. Due to Assumption \ref{Assumption2.1} from Section \ref{2sec} the inverse function $y_S=\Sigma^{-1}(Z_S,\theta_S,q)$ is continuously differentiable with respect to $Z_s\in (0,1)$, $s\in S$. Putting it into (\ref{Eq.Th1-01}) produces \begin{equation}\label{Eq.Th1-02} F_S(Z_S,B_R)-G_S(Z_S,B_R)\Sigma^{-1}(Z_S,\theta_S,q)=0. \end{equation} The Jacoby matrix of the left-hand side with respect to $Z_S$ is equal to \begin{equation}\label{Eq.Th1-03} \begin{aligned} &\frac{\partial}{\partial Z_{S}}F_S(Z_S,B_R) -\frac{\partial}{\partial Z_{S}}\big( G_S(Z_S,B_R) \big) \Sigma^{-1}(Z_S,\theta_S,q) \\ &-G_S(Z_S,B_R)\frac{\partial}{\partial Z_{S}}\big( \Sigma^{-1}(Z_S,\theta_S,q) \big ). \end{aligned} \end{equation} According to Assumptions \ref{Assumption2.1}-\ref{Assumption2.2} from Section \ref{2sec} and assumptions on $F, G$ listed in Introduction, this is a continuous function w.r.t. $(Z_S,q)$, if $0<Z_s<1$ and $q_s\ge 0$ ($s\in S$). At $q=0$ Equation (\ref{Eq.Th1-02}) coincides with the first equation in (\ref{Eq.ForSSP}), and the Jacoby matrix (\ref{Eq.Th1-03}) coincides with $J_S(\hat{Z}_S,B_R,\theta_S)$, which is nondegenerate due to (\ref{Eq.JacobianNe0}). By the implicit function theorem, Equation (\ref{Eq.Th1-02}) therefore has a solution $Z_S(q)$, defined for all sufficiently small $q_s\ge 0$, which is continuous in $q$ and satisfies $Z_S(0)=\hat{Z}_S$, where $\hat{Z}_S$ solves (\ref{Eq.ForSSP})-(\ref{Eq.ConstraintsSSP}) for $q=0$. This function satisfies $0<\hat{Z}_s<1$ for all $s\in S$. 
Now, put \begin{equation}\label{Eq.Th1-05} \begin{gathered} x_s(q)=y_s(q)=\Sigma^{-1}(Z_S(q),\theta_s,q_s) \quad (s\in S) \\ x_{i(r)}(q)=y_{i(r)}(q)=F_{i(r)}(Z_S(q), B_R)G_{i(r)}^{-1}(Z_S(q), B_R) \quad (r\in R) \end{gathered} \end{equation} and for an arbitrary $i\in N$ consider the following system for the auxiliary variables $v_{i}$: \begin{equation}\label{Eq.Th1-06} \begin{gathered} A_{i}v_{i}+\Pi_{i}x_i(q)=0 \\ ^1\!v_{i}=y_i(q), \end{gathered} \end{equation} where $$ \Pi_{i} (x_i(q)):=\alpha_i x_i(q)\pi_{i} + {^0}\!c_{i}f_i(Z_S(q),B_R,x_i(q)) $$ and $$ f_i(Z_S(q),B_R,x_i(q))=(F_i(Z_S(q),B_R)-G_i(Z_S(q),B_R)x_i(q),0,\dots 0)^T. $$ By construction, $F_i(Z_S(q),B_R)-G_i(Z_S(q),B_R)x_i(q)=0$ for all $i\in N$, so that \begin{equation}\label{Eq.Th1-06a} \begin{gathered} A_{i}v_{i}+\alpha_i x_i(q)\pi_{i}=0 \\ ^1\!v_{i}=y_i(q). \end{gathered} \end{equation} Applying again Proposition \ref{paper9} gives the only solution $v_{i}(q)$ to (\ref{Eq.Th1-06a}). By this, all the coordinates of the stationary point $P(q)$ for $q_s>0$, $s\in S$ are calculated. Let now $q\to 0$. It is already shown that $Z_S(q)\to \hat{Z}_S$. Using again Proposition \ref{propPropertiesSigmoid} part(1) gives $$ \hat{y}_S:=\lim_{q\to 0}y_s(q)=\lim_{q\to 0}\Sigma^{-1}(Z_S(q),\theta_S,q)=\theta_S. $$ This and (\ref{Eq.Th1-05}) justify also the equalities \begin{gather*} \hat{x}_S:=\lim_{q\to 0}x_S(q)=\lim_{q\to 0}y_S(q)=\theta_S ,\\ \hat{y}_{i(R)}:=\lim_{q\to 0}y_{i(R)}(q)=\lim_{q\to 0}x_{i(R)}(q):=\hat{x}_{i(R)}. \end{gather*} Finally, $v_{i}(q)\to \hat{v}_{i}$ which solves Equation (\ref{Eq.Th1-06aux}), where $\hat{x}_i=\hat{y}_i$ for all $i\in N$. By this, it is shown that the point $\hat{P}$, constructed at the very beginning of the proof, is the limit point for $P(q)$, $q\to 0$, the latter being stationary points for System (\ref{Eq.Trick-model-general}) with $Z_s=\Sigma(y_s,\theta_s,q_s)$ ($q_s>0$, $s\in S$). The proof is complete. 
\end{proof} Let $\Gamma$ be a parameter space for System (\ref{Eq.ForSSP})-(\ref{Eq.ConstraintsSSP}); i.e., $\Gamma$ is the set of all coefficients of the polynomials $F_i$, $G_i$ and thresholds $\theta_i$ such that (1) $F_i>0$, $G_i>0$ for $0\le Z_k\le 1$ $(k\in M)$, (2) $\theta_i>0$. The functions $F_i$, $G_i$ are continuous in $Z_k$ and $\theta_k$. Therefore, if the number of parameters equals $p$, then $\Gamma$ is an open subset of the space $\mathbb{R}^p$. Consider the subset $\Gamma_{S} \subset \Gamma$ ($S$ is a fixed subset of $M$, $B_R$ is a fixed Boolean vector, corresponding to the singular domain $\mathcal{SD}(\theta_S, B_R)$), such that there exists at least one solution to System (\ref{Eq.ForSSP})-(\ref{Eq.ConstraintsSSP}). By $\Gamma_{S}^0 \subset \Gamma_S$ we denote the set where $\det J_S(\hat{Z}_S, B_R,\theta_S)=0$. \begin{theorem} \label{thm5.5} If $\mathcal{SD}(\theta_S, B_R)\neq \emptyset$, then $\Gamma_{S}-\Gamma_{S}^0$ is an open and dense subset of $\Gamma_{S}$. \end{theorem} \begin{proof} First let us prove that $\Gamma_{S}-\Gamma_{S}^0$ is open in $\Gamma_{S}$. Suppose we have (\ref{Eq.JacobianNe0}) for some $\gamma_0 \in \Gamma_{S}$. Take $\gamma \in \Gamma_{S}$ to be sufficiently close to $\gamma_0 $. Using the implicit function theorem for the first equation of System (\ref{Eq.ForSSP}), we observe that for any $\gamma$, which is sufficiently close to $\gamma_0$, the equation is solvable in the vicinity of $\hat{Z}_S$. Moreover, the condition (\ref{Eq.JacobianNe0}) and the first condition in (\ref{Eq.ConstraintsSSP}) are fulfilled. Let us denote this solution by $\hat{Z}_S(\gamma)$. Using the second equation in (\ref{Eq.ForSSP}) we obtain $\hat{y}_{i(R)}(\gamma)$ from the formula $$ \hat{y}_{i(r)}(\gamma)=\frac{F_{i(r)}(\hat{Z}_S(\gamma), B_R)}{G_{i(r)} (\hat{Z}_S(\gamma), B_R)}. 
$$ The function $\hat{y}_{i(r)}(\gamma)$ is continuous in $\gamma$ (we can use a smaller neighborhood of $\gamma_0$ if needed), therefore the second condition in (\ref{Eq.ConstraintsSSP}) is fulfilled also. Thus, $\Gamma_{S}-\Gamma_{S}^0$ is open in $\Gamma_{S}$. Now we will show that $\Gamma_{S}-\Gamma_{S}^0$ is dense in $\Gamma_{S}$. Let $\gamma_1\in \Gamma_S$, $\hat{Z}_S$ be the corresponding solution of the first equation of System (\ref{Eq.ForSSP}). Suppose that the condition (\ref{Eq.JacobianNe0}) is not fulfilled for $\hat{Z}_S$ and that there exists a vicinity $\mathfrak{O}$ of $\gamma_1$ such that $\det J_S=0$ for any $\gamma \in \mathfrak{O}$ and for any solution of System (\ref{Eq.ForSSP})-(\ref{Eq.ConstraintsSSP}). Put $\xi_s=Z_s-\hat{Z}_s$ $ (s\in S)$. It follows from Remark \ref{remSDnonempty} that $\mathcal{SD}(\theta_S, B_R)\neq \emptyset$, so that $i$ is an injective function on $S$. It can be assumed that $i(s)=s$ for all $s\in S$ and $S=\{1,\dots,|S|\}$. Then the system $$f_s(\xi_S)=F_s(\xi_S, B_R)-G_s(\xi_S, B_R)\theta_{i(s)}=0$$ has the zero solution $\hat{\xi}_s=0$ $(s\in S)$. The left-hand side of each equation is a polynomial which is affine in each variable $\xi_s$ $(s\in S)$, i.e. $$ f_s(\xi_1,\dots,\xi_{|S|})=a_{1s}\xi_1+a_{2s}\xi_2+\dots+a_{ss}\xi_s + \sum_{p\geq 2} {A_{s_1\dots s_p}\xi_{s_1}\dots\xi_{s_p}}. $$ Obviously, $\det J_S(0,B_R,\theta_S)=\det(a_{ij})_{i,j\in S}=0$. Consider the perturbed coefficients $a_{ij}+\varepsilon_{ij}$ $(\varepsilon_{ij}\neq0)$. In this case, $\hat{\xi}_s=0$ is still a solution with the Jacoby matrix $J_{S,\varepsilon}(0,B_R,\theta_S)=(a_{ij}+\varepsilon_{ij})_{i,j\in S}$. We assumed before that $\det J_{S,\varepsilon}=0$ for any sufficiently small $\varepsilon_{ij}$ ($i,j\in S$). However, it is well-known that in each neighborhood of a singular $n\times n$-matrix there exists a nonsingular matrix. This contradiction proves the theorem. 
\end{proof} \begin{remark} \rm Condition \eqref{Eq.JacobianNe0} guarantees the uniqueness of the solution $(\hat{Z}_S, \hat{y}_{i(R)})$ in its vicinity. \end{remark} In a similar way, we define the notion of a stable stationary point (see e.g. \cite{Mestl-95}). \begin{remark}\label{remCoordinatesP-0} \rm The coordinates $\hat{x}_i, \hat{y}_i$, $^{\nu}\!\hat{v}_i$ ($i\in N, \nu = 1,\dots ,p$) of the stationary point $\hat{P}$ for System (\ref{Eq.Trick-model-general}) with $Z_i=\Sigma(y_i,\theta_i,0)$ ($i\in N$) satisfy \begin{enumerate} \item $\hat{x}_{i(r)}=\hat{y}_{i(r)}$, $Z_r(\hat{y}_{i(r)})=B_r$ \ ($r\in R$); \item $\hat{x}_s=\hat{y}_s=\theta_s$ \ ($s\in S$); \item $A_{i}\hat{v}_i+\alpha_i\pi_{i} \hat{x}_i=0$ \ ($i\in N$). \end{enumerate} \end{remark} \begin{example} \label{exa5.8} \rm Consider the delay equation \begin{gather*} \dot{x}=2-2Z-x \\ Z=\Sigma (y, 1, q) \\ y(t)=^{0}\!\!cx(t)+^{1}\!\!c\!\int_{-\infty}^{t}{^{1}}\!K(t-s)x(s)ds. \end{gather*} Assume that $^{0}\!c\geq 0,\, ^{0}\!c+^{1}\!\!c=1$, $q\geq0$, $\Sigma(y,1,q)$ is the logoid function, given in Example \ref{Ex.LogoidFunc}, and $^{1}\!K(u)$ is the weak generic delay kernel given by $(1.5)$. Using the non-delay representation (\ref{Eq.Trick-model-general}), we obtain the system: \begin{gather*} \dot{x}=2-2Z-x \\ ^1\!\dot{v}=^{0}\!\!c\,(2-2Z-x)+\alpha x -\alpha \cdot^1\!v \\ Z=\Sigma (y, 1, q). \end{gather*} Let us apply Theorem \ref{thSSP} to this system. Solving the equation $2-2Z-1=0$, corresponding to (\ref{Eq.ForSSP}), we obtain $\hat{Z}_S=0.5$, where we also have $\det J_S=-2\neq0$. Thus, the point $\hat{P}(1,1)$ is SSP. The coordinates of points $P(q)(x_k,y_k)$ can be found from the system \begin{gather*} 2-2\frac{(0.5+ \frac{y_k-1}{2\delta(q_k)})^{\frac{1}{q_k}}}{(0.5+\frac{y_k-1}{2\delta(q_k)})^{\frac{1}{q_k}}+(0.5-\frac{y_k-1}{2\delta(q_k)})^{\frac{1}{q_k}}}-y_k=0,\\ x_k=y_k, \end{gather*} where $q_k\in(0,\varepsilon)$ $(k\in M)$. Assume that $\delta(q_k)=q_k$. 
The relation between $q_k$ and $y_k$ is shown in Figure~4. \end{example} \begin{figure}[ht] \begin{center} \includegraphics[height=45mm]{fig4} %{yandq.eps}
\end{center} \caption{} \end{figure} \begin{example} \label{exa5.9} \rm Consider the system \begin{gather*} \dot{x_1}=Z_1-Z_1Z_2-\gamma_1x_1\\ \dot{x_2}=1-Z_1Z_2-\gamma_2x_2\\ y_1=x_1\\ y_2=\int_{-\infty}^{t}{^1}\!K(t-s)x_2(s)ds, \end{gather*} where $\gamma_1$, $\gamma_2$ are positive parameters, $Z_i=\Sigma(y_i,\theta_i,q)$, $(q\geq 0)$, $(i=1,2)$ are logoid functions, given in Example \ref{Ex.LogoidFunc}. Assume that the thresholds $\theta_1=\theta_2=1$ and the parameters $\gamma_1=0.6$, $\gamma_2=0.9$. The model has four walls $\mathcal{SD}(1,\theta_2)$, $\mathcal{SD}(\theta_1,1)$, $\mathcal{SD}(\theta_1,0)$ and $\mathcal{SD}(0,\theta_2)$. Let us apply Theorem \ref{thSSP} to this system. For the first wall $\mathcal{SD}(1,\theta_2)$ System (\ref{Eq.ForSSP}) will be $$ \begin{gathered} F_{2}(Z_2,1)-G_{2}(Z_2,1)\theta_{2} = 0 \\ F_{1}(Z_2,1)-G_{1}(Z_2,1)\hat{y}_{1}=0, \end{gathered} $$ or $$ \begin{gathered} 1-\hat{Z}_2-0.9\,\theta_{2} = 0 \\ 1-\hat{Z}_2-0.6\,\hat{y}_{1}=0. \end{gathered} $$ The solution $\hat{y}_1=1.5, \hat{Z}_2=0.1$ satisfies the constraints (\ref{Eq.ConstraintsSSP}). For $\mathcal{SD}(\theta_1,1)$ System (\ref{Eq.ForSSP}) becomes $$ \begin{gathered} 1-\hat{Z}_1-0.9\,\hat{y}_{2} = 0 \\ 1-\hat{Z}_1-0.6\,\theta_{1}=0, \end{gathered} $$ but the solution $\hat{y}_2=2/3, \hat{Z}_1=0.4$ does not belong to this wall. The same conclusion holds for the singular domains $\mathcal{SD}(\theta_1,0)$ and $\mathcal{SD}(0,\theta_2)$. The Jacoby determinant (\ref{Eq.DetJacobian}) for the wall $\mathcal{SD}(1,\theta_2)$ will be $$ \det J_2(\hat{Z}_2,\theta_2)= \det\Big(\frac{\partial}{\partial Z_{2}}F_2(Z_2,1) -\frac{\partial}{\partial Z_{2}}G_2(Z_2,1)y_2\Big) =-1\neq0. 
This means that the system has one stationary point $\hat{P}\in \mathcal{SD}(1,\theta_2)$ for $q=0$ (and thus stationary points for small $q> 0$) with the coordinates $x_1=y_1=1.5,\,x_2=y_2=1$.
\end{definition} \begin{definition}\label{zerosolution3.10st} \rm The zero solution of (\ref{eq.basic}) is stable if for any $\varepsilon>0$ there exists $\delta>0$ such that \begin{equation}\label{stable3.10} \sup_{t>0}|x(t)|<\varepsilon \end{equation} as soon as $$\sup_{\tau\leq0}|\varphi(\tau)|<\delta,$$ where $\varphi(\tau)$ is the initial function for $x(t)$ . \end{definition} \begin{definition}\label{zerosolution3.29st} \rm The zero solution of (\ref{Eq.trickedModified}) is stable if for any $\varepsilon>0$ there exists $\delta>0$ such that \begin{equation}\label{stable3.29} |x(t)|+|v(t)|<\varepsilon \end{equation} as soon as $|x(0)|<\delta$, $|v(0)|<\delta$. \end{definition} \begin{theorem}\label{stability equivalent} Suppose that $x(t)=0$ is a solution of Equation (\ref{eq.basic}), where $f$ satisfies the conditions (C1)-(C3) from Subsection 3.2 and $\Re $ is given by (\ref{operator})-(\ref{trickFuncG_j}). Then the exponential stability (instability) of $x(t)=0$ is equivalent to the exponential stability (instability) of the zero solution of System (\ref{Eq.trickedModified}), where $A,\,\, \tilde{\pi}$ are given by (\ref{matrixA}) and (\ref{Trick-func-p-modified}), respectively. \end{theorem} \begin{proof} First we consider the case of exponential stability. Assume that the solution $(x(t),v(t))$ of (\ref{Eq.trickedModified}) satisfies (\ref{exponstable3.29}). The matrix $A$ is stable ($\mbox{Re}\, \lambda\!\leq\!\!-\kappa_1$ for all eigenvalues of the matrix $A$, ($\kappa_1>0$)). Then we have \begin{equation}\label{norm} \|e^{At}\|\leq Ne^{-\kappa_1t}\,\,\,(t\geq0). \end{equation} Put $\delta_1=\frac{\delta \kappa_1}{\|\tilde{\pi}\|N}$, where $\delta> 0$, such as we have (\ref{exponstable3.29}) while $|x(0)|< \delta,\,|v(0)|<\delta$. 
If $$ \sup_{\tau\leq0}|\varphi(\tau)|\leq\delta_1, $$ then (\ref{Eq.trickedModified}) gives \begin{align*} |x(t)|\leq |x(t)|+|v(t)| &\leq M e^{-\kappa t}(|x(0)|+|v(0)|) \\ &\leq M e^{-\kappa t}(|\varphi(0)| + \frac{N \|\tilde{\pi}\|}{\kappa_1} \sup_{\tau\leq0}|\varphi(\tau)|)\\ &=\bar{M}e^{-\kappa t}\sup_{\tau\leq0}|\varphi(\tau)|, \end{align*} since $x(0)=\varphi(0)$, $|x(0)|<\delta$, $|v(0)|<\delta$ and $$ |v(0)|\leq \frac{N\|\tilde{\pi}\|}{\kappa_1} \sup_{\tau\leq0}|\varphi(0)|. $$ Therefore, the solution $x(t)$ of (\ref{Eq.trickedModified}) satisfies (\ref{zerosolution3.10exst}). Now assume that we have the estimate (\ref{zerosolution3.10exst}) for the solution $x(t)$. Using (\ref{3.26m}) we obtain \begin{align*} &|x(t)|+|v(t)| \\ &\leq M e^{-\kappa t}\sup_{\tau\leq0}|\varphi(\tau)| +|e^{A^{T}t}v(0)|+\big|\int_{0}^{t}e^{A^T(t-s)}\tilde{\pi} x(s)ds\big|\\ &\leq M e^{-\kappa t}\sup_{\tau\leq0}|\varphi(\tau)| +Ne^{-\kappa_1 t}|v(0)| +\int_{0}^{t}e^{-\kappa_1(t-s)}\|\tilde{\pi}\|M e^{-\kappa s}ds \cdot\sup_{\tau\leq0}|\varphi(\tau)|\\ &\leq M e^{-\kappa t}\sup_{\tau\leq0}|\varphi(\tau)| +\frac{\|\tilde{\pi}\|M}{|\kappa-\kappa_1|}|e^{-\kappa_1 t} -e^{-\kappa t}|\sup_{\tau\leq0}|\varphi(\tau)| +Ne^{-\kappa_1 t}|v(0)|. \end{align*} (we may assume that $\kappa \neq \kappa_1$, otherwise we can use a smaller $\kappa_1$). Taking $\bar{\kappa}=\min \{\kappa,\kappa_1\}>0$ we arrive at $$ |x(t)|+|v(t)|\leq \bar{M} e^{-\bar{\kappa} t}\sup_{\tau\leq0}|\varphi(\tau)|+Ne^{-\kappa_1 t}|v(0)|. $$ The last estimate holds for any $\varphi$ which gives the solution $x(t)$ of Equation (\ref{eq.basic}). However, another $\varphi$ may give the same solution $x(t)$, $t\geq0$. Let us use this fact. Equation (\ref{eq.basic}) and System (\ref{Eq.trickedModified}) are equivalent. 
Thus, $\varphi_1(\tau)$ and $\varphi_2(\tau)$ give the same solution $x(t)$ of Equation (\ref{eq.basic}) if and only if $\varphi_1(0)=\varphi_2(0)$ $(=x(0))$ and $\int_{0}^{\infty}e^{\tilde{A}\tau}\varphi_1(-\tau)d\tau=\int_{0}^{\infty}e^{\tilde{A}\tau}\varphi_2(-\tau)d\tau$ $(=v(0))$, as the pair $(x(0),v(0))$ completely determines the solution of System (\ref{Eq.trickedModified}). Let $\varphi(\tau)$ is equal to a constant vector $\varphi_0$ on the interval $(-\infty,0)$ and $\varphi(0)=x(0)$. Also assume that $\varphi_0$ satisfies the equation $$ v(0)=\int_{0}^{\infty}e^{\tilde{A}\tau}\tilde{\pi} \varphi_0 d\tau=\bar{A}\tilde{\pi} \varphi_0, $$ where $\bar{A}=\int_{0}^{\infty}e^{\tilde{A}\tau}d\tau$. According to (\ref{Trick-func-q-vector-modified}), all eigenvalues of the matrix $A$ are equal to $\int_{0}^{\infty}e^{-\alpha t}dt=\frac{1}{\alpha}\neq 0$. Thus, $\bar{A}$ is an invertible matrix. Let $\tilde{\pi}^\sharp$ be a left inverse matrix to $\tilde{\pi} $. Assume that $\varphi_0=\pi^{\sharp}\bar{A}^{-1}v(0)$, so that \begin{align*} \sup_{s\leq0}|\varphi(s)| &=\max \{|\varphi_0|;|x(0)|\}\\ &\leq\max \{\|\pi^{\sharp}\|\cdot\|\bar{A}^{-1}\|\cdot |v(0)|;|x(0)|\}\\ &\leq c_1(|v(0)|+|x(0)|). \end{align*} Substituting $\varphi$ just defined we obtain \begin{align*} |x(t)|+|v(t)| &\leq \bar{M} e^{-\bar{\kappa}t}c_1(|v(0)|+|x(0)|) +Ne^{-\kappa t}|v(0)| \\ & \leq M_2e^{-\bar{\kappa} t}(|v(0)|+|x(0)|),\quad t\geq0 \end{align*} for sufficiently small $|v(0)|$ and $|x(0)|$. This gives the estimate (\ref{exponstable3.29}). Continuing the proof of the theorem we look now at the property of instability. If the zero solution $x(t)$ of System (\ref{eq.basic}) is unstable then, obviously, the zero solution of System (\ref{Eq.trickedModified}) is unstable as well, since $x(t)$ is part of this solution. Assume that the zero solution of System (\ref{Eq.trickedModified}) is unstable. Suppose that this solution is stable in the first component, i.e. 
the relation (\ref{stable3.10}) is satisfied. From (\ref{3.26m}) and (\ref{norm}) we obtain \begin{align*} |v(t)|&\leq Ne^{-\kappa t}|v(0)| +\int_{0}^{t}Ne^{-\kappa_1(t-s)}\|\tilde{\pi} \|\varepsilon ds\\ &\leq Ne^{-\kappa t}|v(0)|+\frac{\varepsilon\|\tilde{\pi}\|N}{\kappa_1} (1-e^{-\kappa_1t})\\ &< Ne^{-\kappa t}|v(0)|+\frac{\varepsilon\|\tilde{\pi}\|N}{\kappa_1} \end{align*} for $$ \sup_{\tau\leq0}|\varphi(\tau)|<\delta. $$ Letting $\varepsilon_1>0$ be fixed, let us choose $\varepsilon$ such that $\frac{\varepsilon\|\tilde{\pi}\|N}{\kappa_1}<\frac{\varepsilon_1}{2}$ and construct $\delta$ such that the estimate (\ref{stable3.10}) holds for this $\varepsilon$. Moreover, assume that $\delta<\frac{\varepsilon}{2N}$. Now for any $v(0)$ and $x(0)$, satisfying $|x(0)|<\delta$ and $|v(0)|<\delta$, we get $|v(t)|<\varepsilon_1$ for all $t>0$. It means that the zero solution is stable in both components. This contradiction completes the proof. \end{proof} Let us now formulate a stability result for System \eqref{Eq.MainSystem} with $Z_k$ given by the logoid function $(k=1,\dots,m)$. Let $S\subset M$ and $B_R$ be fixed. We are looking for SSP in the singular domain $\mathcal{SD}(S, B_R)$. Assume that the conditions of Theorem \ref{thSSP} are fulfilled, i.e. there exists an isolated stationary point $\hat{P}\in \mathcal{SD}(S, B_R)$. Consider the reduced system \begin{equation}\label{reducedSystem} \begin{gathered} \dot{\mathbf {x}}_s=\mathbf {F}_s(\mathbf {Z}_{s})-\mathbf {G}_s(\mathbf {Z}_{s})\mathbf {x}_s \\ \mathbf {Z}_{s}=\Sigma(\mathbf {y}_{s}, \theta_s, q_s)\\ \mathbf {y}_s(t)= (\Re_s x_s)(t), \quad (s\in S), \end{gathered} \end{equation} where $\mathbf{F}_s(\mathbf {Z}_{s})= F_{i(s)}(Z_s, B_R)$, $\mathbf{G}_s(\mathbf {Z}_{s})=G_{i(s)}(Z_s, B_R)$. \begin{theorem}[localization principle]\label{SspOfReducedSystem} Suppose that the conditions of Theorem \ref{thSSP} are fulfilled. Then System (\ref{reducedSystem}) has an isolated stationary point $\mathbf{\hat{P}}$. 
The point $\mathbf{\hat{P}}$ is asymptotically stable (unstable) if and only if $\hat{P}$ is asymptotically stable (unstable) for System \eqref{Eq.MainSystem}. \end{theorem} \begin{proof} We use Theorem \ref{thSSP}, where we put $S=N$, $R=\emptyset$, $i(s)=s$, $\mathbf{\hat{Z}_s}=\hat{Z}_s$ and obtain a condition of existence of SSP for System (\ref{reducedSystem}). According to Theorem \ref{stability equivalent}, it is equivalent to study stability properties of this point for System \eqref{Eq.MainSystem} and for System (\ref{Eq.Trick-model-general}). According to Proposition 7.4 from the paper \cite{PoSh} , we have that $\hat{x}_i=\hat{y}_i$ for a stationary point. Therefore, $x_i(q)$ is close to $y_i(q)$ for a small $q$. Then $\Sigma({y}_{i(r)}, \theta_r, q_r)=B_r$ for all $r\in R$ and System (\ref{Eq.Trick-model-general}) becomes quasi-triangular: \begin{equation}\label{kvaziSystem} \begin{gathered} \dot{\xi}=A(\xi), \\ \dot{\eta}=B(\xi,\eta), \end{gathered} \end{equation} where $\xi=(x_S,v_S)^T$, $\eta=(x_{i(R)},v_{i(R)})^T$, \begin{gather*} A(\xi)=(F_S(Z_S,B_R)-G_S(Z_S,B_R)x_S; A_Sv_S+\Pi_S(x_S))^T, \\ B(\xi,\eta)=(F_{i(R)}(Z_S,B_R)-G_{i(R)}(Z_S,B_R)x_{i(R)}; A_{i(R)}v_{i(R)}+\Pi_{i(R)}(x_{i(R)}))^T. \end{gather*} Clearly, the first equation coincides with System (\ref{reducedSystem}). Assume that the stationary point $\mathbf{\hat{P}}$ for (\ref{reducedSystem}) is asymptotically stable. According to Theorem \ref{thSSP} the stationary point $\hat{P}$ of System (\ref{kvaziSystem}) is $\hat{P}=(\mathbf{\hat{P}},\hat{Q}), $ where $\hat{Q}$ is a coordinate vector corresponding to $\eta$. Since System (\ref{reducedSystem}) is asymptotically stable and the matrix $A$ is differentiable, the zero solution of the linearized equation is asymptotically stable, as well. Let us linearize the whole System (\ref{kvaziSystem}) around the stationary point $\hat{P}$. Clearly, the Jacoby matrix there will be quasi-triangular. 
This implies that it is sufficient to check stability properties of the second quasi-diagonal matrix. It is, however, easy to see that this matrix coincides with the stable matrix $A_{i(R)}$ given by (\ref{matrixA-i}). Thus, the whole matrix $A$ is stable too, so that the solution $\hat{P}$ of System (\ref{kvaziSystem}) is asymptotically stable, i.e. the stationary solution of System \eqref{Eq.MainSystem} is asymptotically stable, as well (in fact, exponential stable). If the stationary solution of (\ref{reducedSystem}) is unstable then the stationary solution of (\ref{kvaziSystem}) is unstable a fortiori. \end{proof} \begin{example} \label{exa6.7} \rm Consider the system from Example 5.8. The reduced system in the wall $\mathcal{SD}(1,\theta_2)$ will be \begin{gather*} \dot{x_1}=1-Z_2-0.6x_1,\\ \dot{x_2}=1-Z_2-0.9x_2,\\ y_2=\int_{-\infty}^{t}{^1}\!K(t-s)x_2(s)ds. \end{gather*} Using Theorem \ref{thSSP} to find a stationary point, we obtain the following system: \begin{gather*} 1-\hat{Z}_2-0.9\,\theta_{2} = 0 \\ 1-\hat{Z}_2-0.6\,\hat{y}_{1}=0. \end{gather*} Solving this system, we get the same solution $\hat{y}_1=1.5$, $\hat{Z}_2=0.1$ as in Example \ref{exa5.8}. \end{example} \begin{remark}\label{jacobianmatrix} \rm The reduced system for System (\ref{Eq.Trick-model-general}) is given by \begin{equation}\label{Eq.Trick-model-generalreduced} \begin{gathered} \dot{\mathbf {x}}_s(t)=\mathbf {F}_s(\mathbf {Z}_s)-\mathbf {G}_s(\mathbf {Z}_s)\mathbf {x}_s(t) \\ \dot{\mathbf{v}}_{s}(t)=A_{s}\mathbf{v}_{s}(t)+\mathbf{\Pi}_{s} (\mathbf {x}_s(t)), \quad t >0 \\ \mathbf {Z}_s=\Sigma(\mathbf{y}_{s},\theta_s, q_s), \quad \mathbf {y}_s=^1\!\!\mathbf{v}_{s} \quad , \end{gathered} \end{equation} where $\mathbf{x}_s= x_{i(s)}$, $\mathbf{v}_s=v_{i(s)}$, $\mathbf{F}_s(\mathbf {Z}_{s})= F_{i(s)}(Z_s, B_R)$, $\mathbf{G}_s(\mathbf {Z}_{s})=G_{i(s)}(Z_s, B_R)$ $(i=1,\dots,n,\,\, s=1,\dots,\sigma$, $\sigma=|S|)$. 
Notice that this reduced system is equal to the reduced system (\ref{reducedSystem}). Then the Jacoby matrix for System (\ref{reducedSystem}) will be \begin{equation}\label{eqJacobianreduced} J:=\begin{pmatrix} XX & XV_1 & XV_2 & XV_3 & \dots & XV_\sigma \\ V_1X & V_1V_1 & V_1V_2 & V_1V_3 & \dots & V_1V_\sigma\\ V_3X& V_3V_1 & V_3V_2 & V_3V_3 & \dots & V_3V_\sigma \\ \vdots & \vdots & \vdots & \vdots & \vdots & \vdots \\ V_\sigma X & V_\sigma V_1 & V_\sigma V_2 & V_\sigma V_3 & \dots & V_\sigma V_\sigma \end{pmatrix}, \end{equation} where $$ XX:=\begin{pmatrix} -g_1 & 0 & 0 & \dots & 0 \\ 0 & -g_2 & 0 & \dots & 0\\ 0& 0 & -g_3 & \dots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 \dots & -g_\sigma \\ \end{pmatrix}, $$ $$ V_jV_j:=\begin{pmatrix} -\alpha_j+^0\!\!c_jJ_j & \alpha_j & 0 & \dots & 0 \\ 0 & -\alpha_j & \alpha_j & \dots & 0\\ 0& 0 & -\alpha_j & \dots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 \dots & -\alpha_j \\ \end{pmatrix}, $$ $V_jV_k=\emptyset$ if $j\neq k$ $(j,k=1,\dots,\sigma)$, $$ XV_1:=\begin{pmatrix} J_1 & 0 & 0 & \dots & 0 \\ 0 & 0 & 0 & \dots & 0\\ 0& 0 & 0 & \dots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 \dots & 0 \end{pmatrix}, \quad XV_2:=\begin{pmatrix} 0 & 0 & 0 & \dots & 0 \\ 0 & J_2 & 0 & \dots & 0\\ 0& 0 & 0 & \dots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 \dots & 0 \end{pmatrix},\dots, $$ $$ XV_\sigma:=\begin{pmatrix} 0 & 0 & 0 & \dots & 0 \\ 0 & 0 & 0 & \dots & 0\\ 0& 0 & 0 & \dots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 \dots & J_\sigma \end{pmatrix}, \quad V_1X:=\begin{pmatrix} a_1 & 0 & 0 & \dots & 0 \\ \alpha_1\,^2\!c_1 & 0 & 0 & \dots & 0\\ \alpha_1\,^3\!c_1& 0 & 0 & \dots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ \alpha_1\,^p\!c_1 & 0 & 0 & 0 \dots & 0 \end{pmatrix}, $$ $$V_2 X:=\begin{pmatrix} 0 & a_2 & 0 & \dots & 0 \\ 0 & \alpha_2\,^2\!c_2 & 0 & \dots & 0\\ 0& \alpha_2\,^3\!c_2 & 0 & \dots & 0 
\\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & \alpha_2\,^p\!c_2 & 0 & 0 \dots & 0 \end{pmatrix},\dots, V_\sigma X:=\begin{pmatrix} 0 & 0 & 0 & \dots & a_\sigma \\ 0 & 0 & 0 & \dots & \alpha_\sigma\,^2\!c_\sigma\\ 0& 0 & 0 & \dots & \alpha_\sigma\,^3\!c_\sigma \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & 0 \dots & \alpha_\sigma\,^p\!c_\sigma \end{pmatrix}, $$ $J_s=(\frac{\partial}{\partial \mathbf{Z}_{s}}\mathbf{F}_{s}(\mathbf{Z}_s) -\mathbf{x}_{s}\frac{\partial}{\partial \mathbf{Z}_{s}}\mathbf{G}_{s}(\mathbf{Z}_s))\frac{\partial \mathbf{Z}_{s}}{\partial y_{s}}$, $a_s=\alpha_s(^0\!c_s+^1\!\!c_s)-\mathbf{G}_s(\mathbf{Z}_s)^0\!c_s$, $g_s=\mathbf{G}_s(Z_s)$, $(s=1,\dots,\sigma,\,\,\,\sigma=|S|)$. \end{remark} \section{Stability analysis of SSP in the black wall} In Section 4 we mentioned that the system can have 3 types singular domains (white, black and transparent). In this section we study a stable singular points therefore we will focus only on stationary points in the black wall. In the non-delay case any regular stationary point is always asymptotically stable as soon as it exists. This is due to the assumptions $G_i>0$. Stability of the matrix $ J_S(Z_S,B_R,\theta_{S})$ (see (\ref{Eq.DetJacobian})) provides, then, asymptotic stability of singular stationary points (see e.g. \cite{Plahte-98} for delays). Including delays leads to more involved stability conditions. We study here Equation (\ref{Eq.Main}) \begin{gather*} \dot{x}(t)=F(Z)-G(Z)x(t) \\ Z=\Sigma(y,\theta,q)\\ y(t)= (\Re x)(t) \ \ \ (t\ge 0)\\ \end{gather*} with the delay operator given by (\ref{Eq.DelayOperator-3term}) $$ (\Re x)(t)={^0}\!cx(t)+\int_{-\infty}^{t}K(t-s)x(s)ds, \quad t \ge 0, $$ where $K(u)={^1}\!c\cdot{^1}\!K(u)+{^2}\!c\cdot{^2}\!K(u)$, $^{\nu}\!c\ge 0$ ($\nu=0,1,2$), ${^0}\!c+{^1}\!c+{^2}\!c=1$. 
According to the localization principle presented in the previous section the stability analysis below is, in fact, valid for an arbitrary number of genes $x_i$, where only one gene $x$ becomes activated (i.e. $y$ assumes its threshold value) at any time. Applying the generalized linear chain trick, we arrive at System (\ref{Eq.MainTricked}) \begin{gather*} \dot{x}=F(Z)-G(Z)x \\ ^1\!\dot{v}={^0}\!c\,(F(Z)-G(Z)x)+\alpha x({^0}\!c+{^1}\!c)-\alpha\cdot ^1\!v+\alpha \cdot^2\!v\\ ^2\!\dot{v}=\alpha\cdot {^2}\!c\,x -\alpha \cdot^2\!v, \end{gather*} where $Z=\Sigma(y,\theta,q)$. The equivalence of Systems (\ref{Eq.Main}) and (\ref{Eq.MainTricked}) is a particular case of equivalence of Systems \eqref{Eq.MainSystem} and (\ref{Eq.Trick-model-general}) (or, in general, Systems (\ref{eq.basic}) and (\ref{Eq.Trick-model-general})).We first present two theorems. \begin{theorem}\label{th-c-0-ne-0} Let ${^0}\!c>0$ in (\ref{Eq.DelayOperator-3term}) and let the equation \begin{equation}\label{eq-Z-Scalar} F(Z)-G(Z)\theta=0 \end{equation} have a solution $^0\!Z$ satisfying $0<^0\!\!\!Z<1$. Then the point $\hat{P}(^0\!x,^0\!\!(^1\!v),^0\!\!(^2\!v))$, where $^0\!x=^0\!\!\!(^1\!v)=\theta, \ ^0\!(^2\!v)={^2}\!c\,\theta$, will be asymptotically stable if $ D< 0$, and unstable if $ D>0$, where \begin{equation}\label{eq-J-scalar} D=F'(Z)-G'(Z)\theta \end{equation} is independent of $Z$ (as both $F$ and $G$ are affine). \end{theorem} \begin{proof} In the course of the proof we keep fixed an arbitrary logoid function $Z=\Sigma(y,\theta,q)$, $q>0$, satisfying Assumptions \ref{Assumption2.1}-\ref{Assumption2.4}. Let $P(q)(x(q),^1\!v(q),^2\!v(q))$ be the corresponding approximating stationary points from Definition \ref{defStableSSP}, which converge to $\hat{P}$ as $q\to 0$. (Below $y(q)$ replaces $^1\!v(q)$ to simplify the notation.) Then $$ Z(q):=\Sigma(y(q),\theta,q)\to \Sigma(^0\!y,\theta,0):=\hat{Z} $$ due to Assumption \ref{Assumption2.1}. 
As $P(q)$ is a stationary point for (\ref{Eq.Main}) with $Z=\Sigma(y,\theta,q)$ for sufficiently small $q> 0$, we have $F(Z(q))-G(Z(q))x(q)=0$. Letting $q\to +0$, we obtain the equality $F(\hat{Z})-G(\hat{Z})\theta=0$. From the assumptions of the theorem it follows, however, that $F(^0\!Z)-G(^0\!Z)\theta=0$. As the functions $F(Z)$ and $G(Z)$ are affine in $Z$, the function $F(Z)-G(Z)\theta$ is affine as well and, moreover, it is not constant because $\det{D}\ne 0$. This implies that $\hat{Z}=^0\!\!\!Z$. In particular, \begin{equation}\label{eqZ-qgoesToZ-bar} Z(q)=\Sigma(y(q),\theta,q)\to ^0\!\!\!Z \ \ (q\to 0). \end{equation} According to Definition \ref{defStableSSP}, we have to look at the Jacoby matrix $J(q)$ of the smooth system (\ref{Eq.Main}) with $Z=\Sigma(y,\theta,q)$, $q>0$, evaluated at the stationary point $P(q)$. Evidently, \begin{equation}\label{Eq.thStableSSP-03} J(q):=\begin{pmatrix} -g(q) &D(q)d(q)&0\\ \alpha({^0}\!c+{^1}\!c)-{^0}\!cg(q) & -\alpha +{^0}\!cD(q)d(q)& \alpha \\ \alpha \,{^2}\!c & 0 & -\alpha \end{pmatrix}, \end{equation} where we, to simplify the notation, put \begin{equation}\label{yslovia} g(q):=G(Z(q)),\quad D(q):=F'(Z(q))-G'(Z(q))x(q), \quad d(q):= \frac{\partial \Sigma}{\partial y}(y(q),\theta,q). \end{equation} Clearly, \begin{equation}\label{ysloviaq0} g(q)\to G(^0\!Z), \quad D(q)\to D, \quad d(q)\to +\infty \end{equation} as $q\to 0$. The challenge is to study spectral properties of the matrix $J(q)$ as $q\to 0$. This is done in the paper \cite{PonShinNep}. The final result says that if $D<0$, then the matrix $J(q)$ is stable for small positive $q$, and if $D>0$, then the matrix $J(q)$ is unstable for small positive $q$. This completes the proof of the theorem. \end{proof} \begin{theorem}\label{th-c-0=0} Let ${^0}\!c=0$ in (\ref{Eq.DelayOperator-3term}) and let the equation (\ref{eq-Z-Scalar}) have a solution $^0\!Z$ satisfying $0<^0\!\!\!Z<1$. 
Then the point $\hat{P}(^0\!x,^0\!\!(^1\!v),^0\!(^2\!v))$, where $^0\!x=^0\!\!(^1\!v)=\theta, \ ^0\!(^2\!v)={^2}\!c\,\theta$, has the following properties \begin{enumerate} \item If $D>0$, then $\hat{P}$ is unstable. \item If $D<0$, ${^1}\!c= 0$, then $\hat{P}$ is unstable. \item If $D<0$, ${^1}\!c\ne 0$ and $G(^0\!Z)<\alpha{({^1}\!c)}^{-1}(1-2\,{^1}\!c)$, then $\hat{P}$ is unstable. \item If $D<0$, ${^1}\!c\ne 0$ and $G(^0\!Z)>\alpha{({^1}\!c)}^{-1}(1-2\,{^1}\!c)$, then $\hat{P}$ is asymptotically stable spiral point. \end{enumerate} Here $D$ is again given by (\ref{eq-J-scalar}). \end{theorem} \begin{proof} Setting ${^0}\!c=0$ in (\ref{Eq.thStableSSP-03}) produces \begin{equation}\label{Eq.thStableSSP-03-c0=0} J(q)=\begin{pmatrix} -g(q) &D(q)d(q)&0\\ \alpha\cdot {^1}\!c & -\alpha & \alpha \\ \alpha \cdot{^2}\!c & 0 & -\alpha \end{pmatrix}, \end{equation} which has no limit as $q\to 0$. The asymptotical analysis of the matrix $J(q)$ yields the following (see \cite{PonShinNep}): if $D<0$, ${^1}\!c\ne 0$ and $\alpha{({^1}\!c)}^{-1}(1-2\,{^1}\!c)0$ in (\ref{Eq.DelayOperator-3term}) and that for some finite sequence $B_i$ ($i=2,\dots n$) consisting of $0$ or $1$ the system \begin{equation}\label{Eq.ConstraintsSSPWall} \begin{gathered} F_1(Z_1,B_R)-G_1(Z_1,B_R)\theta_1=0 \\ 00$, then SSP $\hat{P}$ is unstable. $\bar{D}$ is given by $$ \bar{D}=\frac{\partial}{\partial Z_1}F_1(Z_1,B_R) -\frac{\partial}{\partial Z_1}G_1(Z_1,B_R). $$ \end{corollary} \begin{corollary}\label{th-c-0=0-n-equations} Assume that ${^0}\!c=0$ in (\ref{Eq.DelayOperator-3term}) and that for some finite sequence $B_i$ ($i=2,\dots n$) consisting of $0$ or $1$ the system (\ref{Eq.ConstraintsSSPWall}) has a solution $^0\!Z_1$, $^0\!{x}_i \ (i\ge 2)$. 
Then the point $\hat{P}=(^0\!{x}_1,\dots, ^0\!\!{x}_n, ^0\!\!(^1\!v), ^0(^2\!v))$, where $^0\!{x}_1=^0\!\!(^1\!v)=\theta_1$ and $^0\!(^2\!v)={^2}\!c\,\theta_1$ is an unstable SSP for System (\ref{Eq.Trick-model-general}) with $Z_i=\Sigma(y_i,\theta_i,0)$ ($i=1,\dots,n$) in the following cases: \begin{enumerate} \item If $\bar{D}>0$. \item If $\bar{D}<0$, ${^1}\!c= 0$. \item If $\bar{D}<0$, ${^1}\!c\ne 0$ and $G(^0\!Z_1)<\alpha{({^1}\!c)}^{-1}(1-2\,{^1}\!c)$. \end{enumerate} If $\bar{D}<0$, ${^1}\!c\ne 0$ and $G(^0\!Z_1)>\alpha{({^1}\!c)}^{-1}(1-2\,{^1}\!c)$, then $\hat{P}$ is an asymptotically stable SSP. \end{corollary} The proof of Corollaries \ref{th-c-0-ne-0-n-equations} and \ref{th-c-0=0-n-equations} is followed from Theorems \ref{SspOfReducedSystem}, \ref{th-c-0-ne-0} and \ref{th-c-0=0}. Let consider now a more general case. We study System (\ref{Eq.Main}) with the delay operator \begin{equation}\label{Eq.DelayOperatorwithouti} (\Re x)(t)=^{0}\!\!\!cx(t)+\int_{-\infty}^{t}K(t-s)x(s)ds, \quad t \ge 0, \ \ \end{equation} where \begin{gather}\label{trickFuncK} K(u)=\sum_{\nu=1}^{n}{^{\nu}}\!\!c \cdot ^{\nu}\!\!K(u)\,,\\ \label{trickFuncK_j} ^{\nu}\!\!K(u)=\frac{\alpha^{\nu}\cdot u^{{\nu}-1}}{({\nu}-1)!}e^{-\alpha u}\,. \end{gather} The coefficients ${^\nu}\!c$ $(\nu=0,\dots,n)$ are real nonnegative numbers satisfying \linebreak $\sum_{\nu=0}^{n}{^{\nu}}\!\!c=1$. It is also assumed that $\alpha >0$ . Let us put \begin{equation}\label{Eq.Functions-w-ip} ^{\nu}\!w(t)=\int_{-\infty}^{t}{^{\nu}\!K(t-s)x(s)}ds, \end{equation} where $t \ge 0$. Below we summarize the ideas we presented in Section 3. Let us put \begin{equation}\label{EqChangeOfVariables(Trick)p} ^1\!v=^0\!\!cx+\sum_{\nu=1}^n {^{\nu}}\!c\cdot^{\nu}\!w, \quad ^{\nu}\!v=\sum_{j=1}^{n-\nu+1}{^{{j+\nu-1}}\!c}\cdot^{j}\!w \quad (\nu=2,\dots,n). \end{equation} In particular, $^{n}\!v=^{n}\!\!c\cdot ^1\!w$. 
Then \begin{equation}\label{Eq.MainSystemTricked-n1} \begin{gathered} \dot{x}(t)=F(Z)-G(Z)x(t) \\ \dot{\mathbf{v}}(t)=A\mathbf{v}(t)+\Pi (x(t)), \quad t >0 \\ Z=\Sigma(y,\theta, q), \quad y=^1\!\!v, \end{gathered} \end{equation} where \begin{equation}\label{matrixAn} A= \begin{pmatrix} -\alpha & \alpha & 0 & \dots & 0 \\ 0 & -\alpha & \alpha & \dots & 0 \\ 0 & 0 & -\alpha & \dots & 0 \\ \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & \dots & 0 & -\alpha \end{pmatrix}, \mathbf{v}=\begin{pmatrix} ^1\!v\\ ^2\!v\\ \vdots\\ ^n\!v\\ \end{pmatrix}, \end{equation} and \begin{equation}\label{vectorPi2} \mathbf\Pi (x):=\alpha x\mathbf{\pi} + ^0\!\!c\,\mathbf{f}(Z,x) \end{equation} with \begin{equation}\label{Eq.pi-i,f-i} \mathbf{\pi}=\begin{pmatrix} ^0\!c+^1\!\!c\\ ^2\!c\\ \vdots\\ ^n\!c \end{pmatrix}, \quad \mathbf{f}(Z,x):=\begin{pmatrix} F(Z)-G(Z)x\\ 0\\ \vdots\\ 0 \end{pmatrix}. \end{equation} In this case we get the system of ordinary differential equations: \begin{equation}\label{Eq.MainSystemTricked-n} \begin{gathered} \dot{x}=F(Z)-G(Z)x \\ ^1\!\dot{v}=^0\!\!c\,(F(Z)-G(Z)x)+\alpha x(^0\!c+^1\!\!c)-\alpha \cdot^1\!v+\alpha \cdot^2\!v\\ ^2\!\dot{v}=\alpha \cdot^2\!cx-\alpha\cdot ^2\!v+\alpha \cdot^3\!v\\ ^3\!\dot{v}=\alpha \cdot^3\!cx -\alpha \cdot^3\!v+\alpha \cdot^4\!v, \\ \dots\\ ^{n-1}\!\dot{v}=\alpha \cdot^{n-1}\!c\,x -\alpha \cdot^{n-1}\!v+\alpha \cdot^{n}\!v, \\ ^{n}\!\dot{v}=\alpha\cdot ^{n}\!cx -\alpha\cdot^{n}\!v, \\ \end{gathered} \end{equation} where $Z=\Sigma(y,\theta,q)$. In this case, $\sum_{k=0}^n {^k\!c}=1$. This system is equivalent to (\ref{Eq.Main}). Consider the Jacoby matrix of System (\ref{Eq.MainSystemTricked-n}) to study the asymptotical stability of System (\ref{Eq.Main}). 
The $(n+1)\times(n+1)$ Jacoby matrix of the system (\ref{Eq.MainSystemTricked-n}) reads \begin{equation}\label{Eq.thStableSSP-pn} J(q):=\begin{pmatrix} -g(q) &D(q)d(q)&0&0 &0& \dots & 0 & 0\\ \alpha(^0\!c+^1\!c)-^0\!\!cg(q) & -\alpha +^0\!\!cD(q)d(q)& \alpha &0 & 0 & \dots & 0 & 0 \\ \alpha \cdot^2\!c & 0 & -\alpha & \alpha & 0 & \dots & 0 & 0\\ \alpha \cdot^3\!c & 0 & 0 & -\alpha & \alpha & \dots & 0 & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots\\ \alpha \cdot^{n-1}\!c & 0 & 0 & 0 & 0 & \dots & -\alpha & \alpha \\ \alpha \cdot^n\!c & 0 & 0 & 0 & 0 & \dots & 0 & -\alpha \end{pmatrix}, \end{equation} where $ g(q)$, $D(q)$, $d(q)$ are given by (\ref{yslovia}). Let us introduce the property (AS): $$ (\exists \varepsilon>0)\; (\forall q\in (0,\varepsilon)),\; J(q) \mbox{ is stable }. $$ For study this property we will use the Routh-Hurwitz condition. \begin{proposition} \label{prop7.5} Let the equation (\ref{eq-Z-Scalar}) have a solution $^0\!Z$ satisfying $0<^0\!\!\!Z<1$ and $D\neq 0$. Then the point $\hat{P}(^0\!x,\,^0\!(^1\!v),\dots,^0\!(^n\!v))$, where $^0\!x=^0\!(^1\!v)=\theta$, $^0\!(^i\!v)=(1-\sum_{k=0}^{i-1} {^k\!c})\theta$, $(i=2,\dots,n)$ is SSP for System (\ref{Eq.Main}) with operator $\Re$ given by (\ref{Eq.DelayOperatorwithouti})-(\ref{Eq.Functions-w-ip}). The point $\hat{P}$ is stable if and only if the property (AS) is true. \end{proposition} \subsection*{The Routh-Hurwitz condition} Let \begin{equation} \label{1} P(\lambda)=\lambda^{n+1}+A_1\lambda^n+A_2\lambda^{n-1}+\dots A_n\lambda+A_{n+1} \end{equation} be the characteristic polynomial of the matrix $J(q)$ given by (\ref{Eq.thStableSSP-pn}) multiplied by $(-1)^{n+1}$, i.e. $(-1)^{n+1} \det(J(q)-\lambda I)$. 
Let \begin{equation} \label{2} R(q):= \left(\begin{array}{ccccccccccc} A_1 & 1 & 0 & 0 & 0 & \dots & 0 & 0 & 0 & 0 & 0 \\ A_3 & A_2 & A_1 & 0 & 0 & \dots & 0 & 0 & 0 & 0 & 0 \\ A_5 & A_4 & A_3 & A_2 & A_1 & \dots & 0 & 0 & 0 & 0 & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \vdots & \vdots\\ 0 & 0 & 0 & 0 & 0 & \dots & A_{n+1} & A_{n} & A_{n-1} & A_{n-2} & A_{n-3} \\ 0 & 0 & 0 & 0 & 0 & \dots & 0 & 0 & A_{n-3} & A_{n-2} & A_{n-1} \\ 0 & 0 & 0 & 0 & 0 & \dots & 0 & 0 & 0 & 0 & A_{n+1} \end{array}\right) \end{equation} be its Hurwitz matrix. A necessary and sufficient condition for $J(q)$ to be stable is that all principle leading minors of this matrix are strictly positive, i.e. \begin{equation} \label{3} \Delta_k>0, \quad k=1,2,\dots,n+1 \end{equation} \begin{remark} \label{r1} \rm It is well-known that $A_k$ is a sum $C_n^k$ of all principle leading minors of order $k$ of the matrix $J(q)$ multiplied by $(-1)^k$. In particular, $A_1=-\mathop{\rm tr}J(q)$, $A_{n+1}=(-1)^{n+1}\det J(q)$. For example, for $n=1$, \begin{gather*} R(q)=\begin{pmatrix} A_1 & 1\\ 0 & A_2 \end{pmatrix} =\begin{pmatrix} -\mathop{\rm tr} J(q) & 1\\ 0 & \det J(q) \end{pmatrix}, \\ \Delta_1=-\mathop{\rm tr} J(q), \quad \Delta_2=A_1A_2=-\mathop{\rm tr} J(q)\cdot \det J(q). \end{gather*} Thus, condition (\ref{3}) becomes the well-known criterion of stability for a two dimensional matrix : $\mathop{\rm tr} J(q)<0$, $\det J(q)>0$. \end{remark} \begin{theorem} $D<0$ is necessary for (AS). In particular, if $D >0$ then $\hat{P}$ is unstable. \end{theorem} \begin{proof} According to the Routh-Hurwitz condition, if the matrix $J(q)$ is stable then $\Delta_1=A_1>0$. By calculation we get $$ A_1(q)=-\mathop{\rm tr} J(q)=g(q)-^0\!\!c \cdot \det J(q)d(q)+n\alpha. $$ From this formula and the condition (\ref{yslovia}) we obtain $$ \mathop{\rm sgn}(A_1(q))=-\mathop{\rm sgn}(\det J(q))=-\mathop{\rm sgn}(D) $$ for all small $q>0$. 
Therefore, $\Delta_1>0$ if and only if $D<0$ for all small $q>0$. \end{proof} For $n,k=0,1,2,\dots$ we put $$ C_n^k=\frac{n!}{k!(n-k)!}\quad \mbox{if} \quad k\le n; \qquad C_n^k=0 \quad \mbox{if} \quad k>n. $$ \begin{lemma} \label{l1} The coefficients of the characteristic polynomial $P(\lambda)$ for the equation \eqref{1} will be \begin{equation} \label{4} A_k=C_n^k\alpha^k+C_n^{k-1}\alpha^{k-1}g(q)-\alpha^{k-1}d(q)\det J(q) \sum_{j=1}^k C_{n+1-j}^{k-j}\cdot^{j-1}\!c, \end{equation} $k=1,2,\dots,n+1$; in particular, \begin{gather*} A_1=n\alpha+g(q)-d(q)\det J(q)^0\!c,\\ A_2=C_n^2\alpha^2+n\alpha g(q)-\alpha d(q)\det J(q)(n\cdot^0\!c+^1\!\!c),\\ A_{n+1}=\alpha^ng(q)-\alpha^nd(q)\det J(q) \sum_{j=0}^n {^j\!c}=\alpha^n\,g(q)-\alpha^n\,d(q)\det J(q). \end{gather*} \end{lemma} The proof of the above lemma is performed by direct calculation of $P(\lambda)=\det(\lambda I-J(q))$. It is omitted. Assume that the operator in System (\ref{Eq.Main}) is given by $$ (\Re x)(t)=^{0}\!\!\!cx(t)+\int_{-\infty}^{t}K(t-s)x(s)ds, \quad t \ge 0, $$ where $K(u)=\sum_{\nu=1}^{n}{^{\nu}}\!\!c \cdot ^{\nu}\!\!K(u)$, $\sum_{\nu=0}^{n}{^{\nu}}\!\!c=1$, ($\nu=0,\dots,n$). The analytical formulas for $n=2,3,4,5$ can be obtained with the help of Mathematica. The results for the case $n=2$ are shown in Theorems \ref{th-c-0-ne-0} and \ref{th-c-0=0}. For $n=3$ the Jacoby matrix is \begin{equation} \label{e7.24} J(q):=\begin{pmatrix} -g(q) &D(q)d(q)&0&0 \\ \alpha(^0\!c+^1\!\!c)-^0\!\!cg(q) & -\alpha +^0\!\!cD(q)d(q)& \alpha &0 \\ \alpha \cdot^2\!c & 0 & -\alpha & \alpha \\ \alpha \cdot^3\!c & 0 & 0 & -\alpha \end{pmatrix}, \end{equation} where $ g(q), \, D(q),\,d(q)$ are given by (\ref{yslovia}). \begin{proposition} \label{r2} If \begin{itemize} \item[(1)] $^0\!c>0$, $D<0$ and $9\,^0\!c^2+^1\!\!c\,(2\,^1\!c+^2\!\!c)+^0\!\!c\,(9\,^1\!c+3\,^2\!c-1)>0$ or \item[(2)] $^0\!c=0$, $D<0$ and $\alpha(^1\!c-^2\!\!c)+^1\!\!c\,\,G(^0\!Z)>0$, \end{itemize} then the point $\hat{P}$ is asymptotically stable. 
\end{proposition} For $n=4$ the Jacoby matrix \begin{equation} \label{e7.25} J(q):=\begin{pmatrix} -G(^0\!Z) &D(q)d(q)&0&0&0 \\ \alpha(^0\!c+^1\!c)-^0\!\!cg(q) & -\alpha +^0\!\!cD(q)d(q)& \alpha &0 &0 \\ \alpha \cdot^2\!c & 0 & -\alpha & \alpha &0\\ \alpha \cdot^3\!c & 0 & 0 & -\alpha & \alpha \\ \alpha \cdot^4\!c & 0 & 0 & 0 & -\alpha \\ \end{pmatrix}, \end{equation} where $ g(q), \, D(q),\,d(q)$ are given by (\ref{yslovia}). \begin{proposition} If \begin{itemize} \item[(1)] $^0\!c>0$, $D<0$, $20\,^0\!c^2+^1\!\!c\,(3\,^1\!c+^2\!\!c)+^0\!\!c\,(15\,^1\!c+2\,^2\!c-^3\!\!c)>0$ and $80\,^0\!c^3+8\,^0\!c^2\,(15\,^1\!c+6\,^2\!c+2\,^3\!c-2)+^1\!\!c\,(9\,^1\!c^2+^2\!\!c\,(2\,^2\!c+^3\!\!c)+^1\!\!c\,(9\,^2\!c+3\,^3\!c-1))+\linebreak ^0\!\!c\,(57\,^1\!c^2+4\,^2\!c^2-^3\!\!c^2+4\,^1\!c\,(10\,^2\!c+3\,^3\!c-2))>0$ or \item[(2)] $^0\!c=0$, $D<0$, $\alpha(^1\!c-^2\!c)+^1\!cG(^0\!Z)>0$ and $9\,^1\!c^2+^2\!c\,(2\,^2\!c+^3\!c)+^1\!\!c\,(9\,^2\!c+3\,^3\!c-1)>0$, \end{itemize} then the point $\hat{P}$ is asymptotically stable. \end{proposition} For $n=5$ \begin{equation} J(q):=\begin{pmatrix} -G(^0\!Z) &D(q)d(q)&0&0&0 &0\\ \alpha(^0\!c+^1\!c)-^0\!\!cg(q) & -\alpha +^0\!\!cD(q)d(q)& \alpha &0 &0 &0\\ \alpha \cdot^2\!c & 0 & -\alpha & \alpha &0&0\\ \alpha \cdot^3\!c & 0 & 0 & -\alpha & \alpha&0 \\ \alpha \cdot^4\!c & 0 & 0 & 0 & -\alpha &\alpha\\ \alpha \cdot^5\!c & 0 & 0 & 0 & 0 &-\alpha\\ \end{pmatrix}, \end{equation} where $ g(q), \, D(q),\,d(q)$ are given by (\ref{yslovia}). 
\begin{proposition} \label{prop7.11} If \begin{itemize} \item[(1)] $^0\!c>0$, $D<0$, $40\,^0\!c^2+^1\!\!c\,(4\,^1\!c+^2\!\!c)+^0\!\!c\,(24\,^1\!c +2\,^2\!c-^3\!\!c)>0$, $275\,^0\!c^3+^0\!c\,(139\,^1\!c^2+(2\,^2\!c-^3\!\!c)(3\,^2\!c+^3\!\!c) +^1\!\!c\,(64\,^2\!c-2^3\!\!c-10\,^4\!c+1)) +^1\!\!c\,(20\,^1\!c^2+\,^2\!\!c(3\,^2\!c+^3\!\!c) +^1\!\!c\,(15\,^2\!c+2\,^3\!c-^4\!\!c))+5\,^0\!c^2\,(66\,^1\!c +13\,^2\!c-4\,^3\!c-5\,^4\!c+1)>0$ and $1375\,^0\!c^4+50\,^0\!c^2\,(55\,^1\!c+23\,^2\!c+9^3\!\!c+3\,^4\!c-7) +^0\!\!c^2\,(2015\,^1\!\!c^2+225\,^2\!c^2+30\,^3\!\!c-45\,^3\!c^2-1 +5\,^2\!c\,(13\,^3\!c-2\,^4\!c-6)+10\,^4\!c-70\,^3\!c\,^4\!c-25\,^4\!c^2 +10\,^1\!c\,(157\,^2\!c+57\,^3\!c+18\,^4\!c-35))+ ^1\!\!c\,(80\,^1\!c^3+8\,^1\!c^2\,(15\,^2\!c+6\,^3\!c +2\,^4\!c-2)+^2\!\!c\,(9\,^2\!c^2+^3\!\!c\,(2\,^3\!c+^4\!\!c) +^2\!\!c\,(9\,^3\!c+3\,^4\!c-1\,))+\linebreak ^1\!c\,(57\,^2\!c^2+4\,^3\!c^2-^4\!\!c^2+4\,^2\!c\,(10\,^3\!c +3\,^4\!c-2)))+^0\!c\,(656\,^1\!c^3+2\,^1\!c^2\,(374\,^2\!c +140\,^3\!c+47\,^4\!c-64)+(2\,^2\!c-^3\!c)(9\,^2\!c^2+^3\!\!c\,(2\,^3\!c +^4\!\!c)+^2\!c\,(9\,^3\!c+3\,^4\!c-1))+^1\!c\,(231\,^2\!c^2 +^2\!c\,(123\,^3\!c +34\,^4\!c-36)+2\,(4\,^3\!c-4\,^3\!c^2+^4\!\!c-11\,^3\!c\,^4\!c -5\,^4\!c^2)))>0$ or \item[(2)] $^0\!c=0$, $D<0$, $\alpha(^1\!c-^2\!c)+^1\!\!cG(^0\!Z)>0$, $20\,^1\!c^2+^2\!\!c\,(3\,^2\!c+^3\!\!c)+^1\!\!c\,(15\,^2\!c +2\,^3\!c-^4\!\!c)>0$ and $80\,^1\!c^3+8\,^1\!c^2\,(15\,^2\!c+6\,^3\!c+2\,^4\!c-2) +^2\!\!c(9\,^2\!c^2+^3\!\!c\,(2\,^3\!c+^4\!\!c)+^2\!c\,(9\,^3\!\!c +3\,^4\!c-1))+ ^1\!\!c\,(57\,^2\!c^2+4\,^3\!c^2-^4\!\!c^2+4\,^2\!c\,(10\,^3\!c +3\,^4\!c-2))>0$, $G(^0\!Z)>0$, \end{itemize} then the point $\hat{P}$ is asymptotically stable. \end{proposition} \subsection*{Acknowledgment} This work was supported by the National Programme for Research for Functional Genomics in Norway (FUGE) in the Research Council of Norway and by the Norwegian Council of Universities' Committee for Development Research and Education (NUFU), grant no. PRO 06/02. 
\begin{thebibliography}{99} \bibitem{Ber} {L. M. Berezansky}; \emph{Development of N. V. Azbelev's W-method for stability problems for solutions of linear functional differential equations}, Differential Equations, v. 22 (1986), 521-529. \bibitem{Dom} {A. Domoshnitsky and Ya. Goltser}; \emph{One approach to study of stability of integro-differential equations}, Nonlinear analysis: Theory, Methods and Applications, v. 47 (2001), 3885-3896. \bibitem{deJong} {H. de Jong}; \emph{Modeling and simulation of genetic regulatory systems: a literature review}, J. Comp. Biol., v. 9 (2002), 67-104. \bibitem{Glass} {L. Glass and S. A. Kauffman }; \emph{The logical analysis of continuous, non-linear biochemical control networks}, J. Theor. Biol., v. 39 (1973), 103-129. \bibitem{Mc} {N. McDonald}; \emph{Time lags in biological models}, Lect. Notes in Biomathematics, Springer-Verlag, Berlin-Heidelberg-New York, 1978, 217\,p. \bibitem{Mestl-95} {T. Mestl, E. Plahte, and S. W. Omholt}; \emph{A mathematical framework for describing and analysing gene regulatory networks}, J. Theor. Biol., v. 176 (1995), 291-300. \bibitem{MPS} {J. J. Miguel, A. Ponosov, and A. Shindiapin A.}; \emph{The W-transform links delay and ordinary differential equations}, J. Func. Diff. Eq., v. 9 (4) (2002), 40-73. \bibitem{PlahteKjogl} {E. Plahte and S. Kj{\o}glum}; \emph{Analysis and generic properties of gene regulatory networks with graded response functions}, Physica D, v. 201, no. 1-2 (2005), 150-176. \bibitem{Plahte-94} {E. Plahte, T. Mestl, and S. W. Omholt}; \emph{Global analysis of steady points for systems of differential equations with sigmoid interactions}, Dynam. Stabil. Syst., v. 9, no. 4 (1994), 275-291. \bibitem{Plahte-98} {E. Plahte, T. Mestl, and S. W. Omholt}; \emph{A methodological basis for description and analysis of systems with complex switch-like interactions}, J. Math. Biol., v. 36 (1998), 321-348. \bibitem{PoSh} {A. 
Ponosov}; \emph{Gene regulatory networks and delay differential equations}. Electron. J. Diff. Eqns., Conference 12, 2005, pp. 117-141. \bibitem{PonShinNep} {A. Ponosov, A. Shindiapin A., Yu. Nepomnyashchikh}; \emph{Stability analysis of systems with switch-like interactions and distributed delays: a case study}, Func. Diff. Eqs. v. 13, no. 3-4 (2006), pp. 539-570. \bibitem{Thomas} {E. H. Snoussi, R. Thomas}; \emph{Logical identification of all steady states: the concept of feedback loop characteristic states}, Bull. Math. Biol., v. 55 (1993), 973-991. \end{thebibliography} \end{document}