\documentclass[reqno]{amsart}
\usepackage{hyperref}
\AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2007(2007), No. 68, pp. 1--23.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2007 Texas State University - San Marcos.} \vspace{9mm}}
\begin{document}
\title[\hfilneg EJDE-2007/68\hfil Maximum principles] {Maximum principles, sliding techniques and applications to nonlocal equations}
\author[J. Coville\hfil EJDE-2007/68\hfilneg] {J\'er\^ome Coville}
\address{J\'er\^ome Coville \newline Laboratoire CEREMADE\\ Universit\'e Paris Dauphine\\ Place du Mar\'echal De Lattre De Tassigny\\ 75775 Paris Cedex 16, France}
\curraddr{Centro de Modelamiento Matem\'atico\\ UMI 2807 CNRS-Universidad de Chile\\ Blanco Encalada 2120 - 7 Piso\\ Casilla 170 - Correo 3\\ Santiago, Chile}
\email{coville@dim.uchile.cl}
\thanks{Submitted August 7, 2006. Published May 10, 2007.}
\thanks{Supported by the Ceremade Universit\'e Paris Dauphine and by the CMM-Universidad \hfill\break\indent de Chile on an Ecos-Conicyt project}
\subjclass[2000]{35B50, 47G20, 35J60}
\keywords{Nonlocal diffusion operators; maximum principles; sliding methods}
\begin{abstract}
This paper is devoted to the study of maximum principles holding for some nonlocal diffusion operators defined on (half-) bounded domains, and to their applications to the qualitative behavior of solutions of some nonlinear problems. It is shown that, as in the classical case, the nonlocal diffusion operators considered satisfy weak and strong maximum principles. Uniqueness and monotonicity of solutions of nonlinear equations are therefore expected, as in the classical case. A simple proof of these qualitative behaviors and of the weak/strong maximum principles is presented first. An optimal condition for the operator $\mathcal{M}[u]:=J\star u -u$ to satisfy a strong maximum principle is also obtained. The proofs of uniqueness and monotonicity essentially rely on the sliding method and the strong maximum principle.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{proposition}[theorem]{Proposition}

\section{Introduction and Main results}
This article is devoted to maximum principles and sliding techniques used to obtain the uniqueness and the monotone behavior of positive solutions of the following problem
\begin{equation}\label{p1}
\begin{gathered}
J\star u -u -cu'+ f(u)=0 \quad \text{in } \Omega \\
u=u_0 \quad\text{in } \mathbb{R} \setminus \Omega
\end{gathered}
\end{equation}
where $\Omega\subset \mathbb{R}$ is a domain, $J$ is a continuous non-negative function such that $\int_{\mathbb{R}}J(z)\,dz=1$ and $f$ is a Lipschitz continuous function. Such a problem arises in the study of so-called {\it traveling fronts} (solutions of the form $u(x,t)=\phi(x+ct)$) of the following nonlocal phase-transition problem
\begin{equation}
\frac{\partial u}{\partial t}- (J\star u - u) = f(u) \quad\text{in } \mathbb{R}\times\mathbb{R}^+. \label{moPT}
\end{equation}
The constant $c$ is called the speed of the front and is usually unknown.
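Indeed, substituting the traveling-front ansatz $u(x,t)=\phi(x+ct)$ into \eqref{moPT} and writing $\xi=x+ct$, so that $\partial_t u=c\phi'(\xi)$ and $(J\star u-u)(x,t)=(J\star\phi-\phi)(\xi)$, the profile $\phi$ is seen to satisfy
$$
J\star \phi-\phi-c\phi'+f(\phi)=0\quad\text{in }\mathbb{R},
$$
which is \eqref{p1} posed on $\Omega=\mathbb{R}$.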
In such a model, $J(x-y)\,dy$ represents the probability that an individual located at $y$ migrates to the position $x$; the operator $J\star u-u$ can then be viewed as a diffusion operator. This kind of equation was initially introduced in 1937 by Kolmogorov, Petrovskii and Piskunov \cite{KPP,F1} as a way to derive the Fisher equation (i.e., \eqref{mord} below with $f(s)=s(1-s)$)
\begin{equation}
\frac{\partial U}{\partial t}= U_{xx} + f(U) \quad\text{for } (x,t) \in \mathbb{R}\times\mathbb{R}^+. \label{mord}
\end{equation}
In the literature, much attention has been drawn to reaction-diffusion equations like \eqref{mord}, as they have proved to give a robust and accurate description of a wide variety of phenomena, ranging from combustion to bacterial growth, nerve propagation or epidemiology. For more information, we point the interested reader to the following articles and the references therein: \cite{AW,BL,BLL,F1,Fi,GK,KPP,M,ZFK}.

Equation \eqref{p1} can be seen as a nonlocal version of the well-known semi-linear elliptic equation
\begin{equation}\label{p2}
\begin{gathered}
u'' -cu'+ f(u)=0 \quad\text{in }\Omega,\\
u=u_0 \quad\text{on }\partial \Omega.
\end{gathered}
\end{equation}
When $\Omega=(r,R)$, it is well known \cite{BN1,BN2,Ve} that the positive solution of \eqref{p2} is unique and monotone provided that $u_0(r)\neq u_0(R)$ are zeros of $f$. More precisely, assume that $u_0(r)=0$ and $u_0(R)=1$ are respectively a sub- and a super-solution of \eqref{p2}; then

\begin{theorem}[\cite{BN1,BN2,Ve}] \label{thm1.1}
Any smooth solution $u$ of
\begin{equation}\label{p3}
\begin{gathered}
u'' -cu'+ f(u)=0 \quad\text{in }(r,R)\\
u(r)=0,\quad u(R)=1,
\end{gathered}
\end{equation}
is unique and monotone.
\end{theorem}

\begin{remark} \label{rmk1.2} \rm
The above theorem holds as well if one replaces $0$ and $1$ by any constant sub- and super-solutions of \eqref{p3}.
\end{remark}

\begin{remark} \label{rmk1.3} \rm
Obviously, by interchanging $0$ and $1$, $u$ will be a decreasing function.
\end{remark}

Since \eqref{p1} shares many properties with \eqref{p2}, we expect to obtain similar results. Indeed, assume that $u_0(r)=0$ and $u_0(R)=1$ are respectively a sub- and a super-solution of \eqref{p1}; then one has

\begin{theorem} \label{pmmg.thmi}
Let $\Omega=(r,R)$ for some reals $r<0<R$. Assume that $J(a)>0$ and $J(b)>0$ for some reals $a<0<b$. Then any smooth solution $u$ of
\begin{equation}\label{p4}
\begin{gathered}
J\star u -u -cu'+ f(u)=0 \quad\text{in }\Omega\\
u=0 \quad\text{in } (-\infty,r],\qquad u=1 \quad\text{in } [R,+\infty),
\end{gathered}
\end{equation}
is unique and monotone.
\end{theorem}

On the whole real line, a similar statement holds for monostable nonlinearities.

\begin{theorem} \label{pmmg.thmsi}
Assume that $f$ satisfies $f(0)=f(1)=0$, $f>0$ in $(0,1)$, $f'(0)>0$ and $f$ non-increasing near $1$. Assume that $J$ has compact support and is even. Then any smooth solution $u$ of
\begin{equation}\label{p8}
\begin{gathered}
J\star u -u -cu'+ f(u)=0 \quad\text{in }\mathbb{R}\\
u(-\infty)=0,\quad u(+\infty)=1,
\end{gathered}
\end{equation}
is unique (up to translation) and monotone.
\end{theorem}

\begin{remark} \label{rmk1.4} \rm
In both situations, bistable and monostable, the behavior of $u$ is governed by the assumption made on $f$ near the values $u(\pm \infty)$.
\end{remark}

\begin{remark} \label{rmk1.5} \rm
All the above theorems hold if we replace $0$ and $1$ by any constants $\alpha$ and $\beta$ which are respectively a sub- and a super-solution of \eqref{p1}.
\end{remark}

\subsection{General comments}
Equation \eqref{moPT} also appears in other contexts, in particular in the Ising model and in some lattice models involving discrete diffusion operators. I point the interested reader to the following references for deeper explanations: \cite{BFRW,CG,M,Sch,W1}. A significant part of this paper is devoted to maximum and comparison principles holding for \eqref{p3}, \eqref{p4} and some nonlinear operators.
I obtain weak and strong maximum principles for these problems. These maximum principles are analogues of the classical maximum principles for elliptic problems that can be found in \cite{GT,PW}. I have so far only investigated the one-dimensional case. Maximum and comparison principles in higher dimensions for various types of nonlocal operators are currently under investigation and appear to remain largely open questions.

As a first consequence of this investigation on maximum principles, I obtain a generalized version of Theorem \ref{pmmg.thmi}. More precisely, I prove the following result.

\begin{theorem} \label{pmmg.thmig}
Let $\Omega=(r,R)$ for some reals $r<0<R$ and assume that $J$ satisfies conditions (H1)--(H2) below. Then any smooth solution $u$ of \eqref{p4} is unique and monotone.
\end{theorem}

In fact, the argument applies to any operator $L$ satisfying the following conditions:
\begin{enumerate}
\item For all $h>0$ we have $L[U_h](x)\le L[U](x +h)$ for all $x \in \mathbb{R}$, where $U_h(\cdot):=U(\cdot+h)$.
\item For any positive constant $v$ we have $L[v]\le 0$.
\item If $u$ achieves a global minimum (resp. a global maximum) at some point $\xi$ then the following holds:
\begin{itemize}
\item Either $L[u](\xi)> 0$ (resp. $L[u](\xi)< 0$),
\item Or $L[u](\xi)= 0 $ and $u$ is identically constant.
\end{itemize}
\end{enumerate}
Such conditions are easily verified by the operator $J\star u -u$ when $J$ is even. In the present note, I establish a necessary and sufficient condition on $J$ for the above conditions to hold. This therefore generalizes Theorem \ref{pmmg.bis} to a new class of kernels.

\subsection{Methods and plan}
The techniques used to prove Theorems \ref{pmmg.thmi} and \ref{pmmg.thmsi} are mainly based on an adaptation to the nonlocal situation of the sliding techniques introduced by Berestycki and Nirenberg \cite{BN1} to obtain the uniqueness and monotonicity of solutions of \eqref{p2}. These techniques crucially rely on maximum and comparison principles which hold for the considered operators. In the first two sections, I study some maximum principles and comparison principles satisfied by operators of the form
\begin{equation}
\int_{\Omega}J(x-y)u(y)\,dy - u.
\end{equation}
Then in the last two sections, using sliding methods, I deal with the proofs of Theorems \ref{pmmg.thmi} and \ref{pmmg.thmsi}.

\section{Maximum principles}\label{pmmgslinear}
In this section, I prove several maximum principles holding for integrodifferential operators defined respectively in bounded and unbounded domains. I have divided this section into two subsections, devoted respectively to maximum principles in bounded domains and in unbounded domains. We start with some notation that we will use constantly throughout this paper. Let $\mathcal{L},\mathcal{S},\mathcal{M}$ be the following operators:
\begin{align}
&\mathcal{L}[u]:=\int_\Omega J(x-y)u(y)dy -u +c(x)u,\quad \text{when } \Omega=(r,R), \label{defL}\\
&\mathcal{S}[u]:=\int_\Omega J(x-y)u(y)dy -u,\quad \text{when } \Omega=(r,+\infty)\,\text{ or }\,\Omega=(-\infty,r), \label{defS}\\
&\mathcal{M}[u]:=\int_\mathbb{R} J(x-y)u(y)dy -u :=J\star u -u,\label{defM}
\end{align}
where $J\in C^0(\mathbb{R})\cap L^1(\mathbb{R})$ is such that $\int_{\mathbb{R}}J=1$ and $c(x)\in C^0(\Omega)$ is such that $c(x)\le0$.

\subsection{Maximum principles in bounded domains}
Throughout this subsection, $\Omega$ will always refer to $\Omega=(r,R)$ for some $r<R$. For constants $\alpha,\beta$, set
\begin{equation}
h^-_\alpha(x):=\alpha\int_{-\infty}^{r}J(x-y)\,dy,\qquad
h^+_\beta(x):=\beta\int_{R}^{+\infty}J(x-y)\,dy .
\end{equation}
We start with the following weak maximum principle.
\begin{theorem}[Weak maximum principle]\label{pmmgwmp}
Let $u\in C^{0}(\bar\Omega)$ be such that
\begin{gather*}
\mathcal{L}[u]+h^-_\alpha +h^+_\beta \geq 0 \quad\text{in } \Omega,\\
u(r)\geq \alpha,\quad u(R)\geq \beta,
\end{gather*}
and assume that $\max_{\bar\Omega}u\ge 0$. Then
$$
\max_{\bar\Omega}u=\max_{\partial\Omega}u.
$$
\end{theorem}
\begin{proof}
Define the extension
\begin{equation}\label{pmmgwmpdtu}
\tilde u(x):=
\begin{cases}
u(x) & \text{in } \bar\Omega,\\
u(r) & \text{in } (-\infty,r),\\
u(R) & \text{in } (R,+\infty).
\end{cases}
\end{equation}
Since $u(r)\ge\alpha$ and $u(R)\ge\beta$, in $\Omega$ the function $\tilde u$ satisfies
\begin{equation}\label{pmmgwmpsous}
J\star\tilde u-\tilde u+c(x)\tilde u\ \ge\ \mathcal{L}[u]+h^-_\alpha+h^+_\beta\ \ge\ 0.
\end{equation}
Set $\gamma^+:=\max_{\mathbb{R}\setminus\Omega}\tilde u=\max_{\partial\Omega}u$ and argue by contradiction, assuming that $\max_{\bar\Omega}u>\gamma^+$. Then $u$, and hence $\tilde u$, achieves its maximum over $\bar\Omega$ at some point $x_0\in\Omega$ with $u(x_0)>\gamma^+$. By definition of $\tilde u$, we have $\tilde u(x_0)=\max_{\mathbb{R}}\tilde u>\gamma^+$.
Therefore, at $x_0$, $\tilde u$ satisfies
\begin{gather}
\int_{\mathbb{R}}J(x_0-y)[\tilde u(y)-\tilde u(x_0)]\,dy\leq 0,\label{pmmgwmpmax2}\\
c(x_0)\tilde u(x_0)\leq 0.\label{pmmgwmpmax3}
\end{gather}
Combining now \eqref{pmmgwmpmax2}-\eqref{pmmgwmpmax3} with \eqref{pmmgwmpsous}, we end up with
$$
0\leq\underbrace{J\star\tilde u (x_0)-\tilde u(x_0)}_{\leq 0} + \underbrace{c(x_0)\tilde u(x_0)}_{\leq 0}\leq 0.
$$
Therefore,
$$
J\star\tilde u (x_0)-\tilde u(x_0) =\int_{\mathbb{R}}J(x_0-y)[\tilde u(y)-\tilde u(x_0)]\,dy=0.
$$
Hence, for all $y \in x_0 - \mathop{\rm supp}(J)$, $\tilde u(y)=\tilde u(x_0)$. In particular, $\tilde u(y)=\tilde u(x_0)$ for all $y \in x_0 - [a,b]$, for some $a<b$ with $[a,b]\subset \mathop{\rm supp}(J)$. Repeating this computation at the points $x_0-a$ and $x_0-b$ and iterating, since $\Omega$ is bounded we eventually reach a point $y_0\in\mathbb{R}\setminus\Omega$ such that $\tilde u(y_0)=\tilde u(x_0)>\gamma^+$, which contradicts the definition of $\gamma^+$. Therefore, following the above argumentation, we can prove that
\begin{equation}
\max_{\bar\Omega}\tilde u \le \max_{\mathbb{R}\setminus \Omega}\tilde u. \label{pmmgwmpmax4}
\end{equation}
Hence,
$$
\max_{\partial \Omega}u\le \max_{\bar\Omega}u \le \max_{\mathbb{R}\setminus \Omega}\tilde u=\max_{\partial \Omega}u.
$$
\end{proof}

\begin{remark} \label{rmk2.1b} \rm
Note that the weak maximum principle also holds when $h_\alpha^-$ and $h_\beta^+$ are replaced by any functions $g^-$ and $g^+$ satisfying $h^-_{\alpha}\geq g^-$ and $h^+_{\beta}\geq g^+$.
\end{remark}

\begin{remark} \label{rmk2.2} \rm
When $c(x)\equiv 0$, the assumption $\max_{\bar \Omega}u\ge 0$ is not needed to have
$$
\max_{\partial\Omega} u\le\max_{\bar\Omega}u\le \max_{\mathbb{R}\setminus \Omega} \tilde u=\max_{\partial\Omega} u.
$$
Indeed, this assumption is only needed to guarantee that \eqref{pmmgwmpmax3} holds; when $c(x)\equiv 0$, \eqref{pmmgwmpmax3} trivially holds.
\end{remark}

Next, we give a sufficient condition on $J$ and $\Omega$ such that $\mathcal{L}$ satisfies a strong maximum principle. Assume that $J$ satisfies the following conditions:
\begin{itemize}
\item[(H1)] $\Omega\cap \mathbb{R}^+\neq \emptyset$ and $\Omega\cap\mathbb{R}^-\neq\emptyset$;
\item[(H2)] There exists $b>a\ge 0$ such that $[-b,-a]\cup[a,b]\subset \mathop{\rm supp}(J)\cap \Omega$.
\end{itemize}
Then we have the following strong maximum principle.

\begin{theorem}[Strong Maximum principle] \label{pmmgsmp}
Let $u\in C^{0}(\bar \Omega) $ be such that
\begin{gather*}
\mathcal{L}[u]+h^-_\alpha +h^+_\beta \geq 0 \quad\text{in } \Omega \quad \text{(resp. $\mathcal{L}[u]+h^-_\alpha +h^+_\beta \leq 0$ in $\Omega$)},\\
u(r)\geq \alpha \quad \text{(resp. $u(r)\leq \alpha$)}, \\
u(R)\geq \beta \quad \text{(resp. $u(R)\leq \beta$)}.
\end{gather*}
Assume that $J$ satisfies (H1)--(H2). Then $u$ cannot achieve a non-negative maximum (resp. non-positive minimum) in $\Omega$ without being constant (in which case $u(r)=u(R)$).
\end{theorem}

From these two maximum principles we immediately obtain the following practical corollary.

\begin{corollary} \label{pmmgcp}
Assume that $J$ satisfies (H1)--(H2). Let $u\in C^{0}(\bar \Omega)$ be such that
\begin{gather*}
\mathcal{L}[u]+h^-_\alpha+h^+_\beta\geq 0 \quad \text{in } \Omega,\\
u(r)=\alpha\leq 0, \\
u(R)=\beta\leq 0.
\end{gather*}
Then either $u < 0$ or $u\equiv 0$.
\end{corollary}

\begin{remark} \label{rmk2.2b} \rm
Similarly, if $\mathcal{L}[u]\leq 0$, $u(r)=\alpha\geq 0$ and $u(R)=\beta\geq 0$, then $u$ is either positive or identically $0$.
\end{remark}

The proof of the corollary is a straightforward application of these two theorems. Now let us prove the strong maximum principle.
\begin{proof}[Proof of Theorem \ref{pmmgsmp}]
The proof in the other cases being similar, I only treat the case of a continuous function $u$ satisfying
\begin{gather*}
\mathcal{L}[u]+h^-_\alpha+h^+_\beta\geq 0 \text{ in } \Omega\\
u(r)\geq \alpha \\
u(R)\geq \beta.
\end{gather*}
Assume that $u$ achieves a non-negative maximum in $\Omega$ at $x_0$. Using the weak maximum principle yields
\begin{gather*}
u(x_0)=\max\{u(r),u(R)\},\\
\tilde u(x_0)=u(x_0)=\max_{\bar\Omega}u=\max_{\partial \Omega}u =\max_{\mathbb{R}\setminus\Omega}\tilde u,
\end{gather*}
where $\tilde u$ is defined by \eqref{pmmgwmpdtu}. Therefore, $\tilde u$ achieves a global non-negative maximum at $x_0$. To obtain $u\equiv u(x_0)$, we show that $\tilde u\equiv \tilde u(x_0)$. The latter is obtained via a connectedness argument. Let $\Gamma$ be the set
$$
\Gamma =\{x\in\Omega\,|\,\tilde u (x)=\tilde u (x_0)\}.
$$
We will show that it is a nonempty open and closed subset of $\Omega$ for the induced topology. Since $\tilde u$ is a continuous function, $\Gamma$ is a closed subset of $\Omega$. Let us now show that $\Gamma$ is an open subset of $\Omega$. Let $x_1\in \Gamma$; then $\tilde u$ achieves a global non-negative maximum at $x_1$. Arguing as in the proof of the weak maximum principle, we get
$$
J\star\tilde u (x_1)-\tilde u(x_1)=\int_{\mathbb{R}}J(x_1-y)[\tilde u(y) -\tilde u(x_1)]\,dy=0.
$$
Since $\tilde u$ achieves a global maximum at $x_1$, we have $\tilde u(y)-\tilde u(x_1)\leq 0$ for all $y \in \mathbb{R}$. Therefore, for all $y \in x_1 - \mathop{\rm supp}(J)$, $\tilde u(y)=\tilde u(x_1)$. In particular, for all $y \in x_1 + [-b,-a]\cup [a,b]$, $\tilde u(y)=\tilde u(x_1)$. We are led to consider the following two cases:
\begin{itemize}
\item $x_1 + b \in \Omega$: In this case, we repeat the previous computation with $x_1+b$ instead of $x_1$ to get $\tilde u(y)=\tilde u(x_1)$ for all $y \in (x_1 +b) + [-b,-a]\cup[a,b]$. Now, from the assumption on $J$ and $\Omega$, we have $x_1 +a\in \Omega$. Repeating the previous computation with $x_1+a$ instead of $x_1$, it follows that $\tilde u(y)=\tilde u(x_1)$ for all $y \in (x_1 +a)+ [-b,-a]\cup[a,b]$. Combining these two results yields $\tilde u(y)=\tilde u(x_1)$ for all $y \in x_1 + [-b+a,b-a]$.
\item $ x_1 + b \not\in \Omega$: In this case, using the assumption on $a$ and $b$, it is easy to see that $x_1-b$ and $x_1-a$ are in $\Omega$. Using the above arguments, we end up with $\tilde u(y)=\tilde u(x_1)$ for all $y \in (x_1 -b) + [-b,-a]\cup[a,b]$ and $\tilde u(y)=\tilde u(x_1)$ for all $y \in (x_1 -a)+ [-b,-a]\cup[a,b]$. Again, combining these two results yields $\tilde u(y)=\tilde u(x_1)$ for all $y \in x_1 + [-b+a,b-a]$.
\end{itemize}
In both cases we have $\tilde u(y)=\tilde u(x_1)$ on $(x_1 + (-(b-a),b-a))\cap \Omega$, which implies that $\Gamma$ is an open subset of $\Omega$. Since $\Omega$ is connected and $\Gamma$ is nonempty, open and closed in $\Omega$, we conclude that $\Gamma=\Omega$, i.e., $u\equiv u(x_0)$ in $\Omega$.
\end{proof}

\begin{remark} \label{rmk2.3} \rm
Observe that the strong maximum principle relies on the possibility of ``covering'' $\Omega$ with closed sets.
\end{remark}

When $h^-_\alpha + h^+_\beta$ has a sign, we can improve the strong maximum principle. Indeed, in that case we have the following result.

\begin{theorem} \label{pmmgsmp2}
Let $u\in C^{0}(\bar \Omega) $ be such that
\begin{equation}
\mathcal{L}[u] \geq 0 \quad\text{in } \Omega\quad \text{(resp. $\mathcal{L}[u] \leq 0$ in $\Omega$)}.
\end{equation}
Assume that $J$ satisfies (H1)--(H2). Then $u$ cannot achieve a non-negative maximum (resp. non-positive minimum) in $\Omega$ without being constant.
\end{theorem}

\begin{proof}
The proof follows the lines of that of Theorem \ref{pmmgsmp}. Since $\int_{\mathbb{R}}J(z)\,dz=1$, we can rewrite $\mathcal{L}[u]$ in the following way:
\begin{equation}
\mathcal{L}[u]=\int_{r}^{R}J(x-y)[u(y)-u(x)]dy + \widetilde c(x)u,
\end{equation}
where $\widetilde c(x)= c(x)-h^-_1 -h^+_1\leq c(x)\leq 0$. Therefore, if $u$ achieves a non-negative maximum at $x_0$ in $\Omega$, then at this maximum we have
$$
0\leq\underbrace{\int_{r}^{R}J(x_0-y)[u (y) - u(x_0)]\,dy}_{\leq 0} + \underbrace{\widetilde c(x_0)u(x_0)}_{\leq 0}\leq 0,
$$
and in particular
\begin{equation}
\int_{r}^{R}J(x_0-y)[u(y)-u(x_0)]\,dy=0.
\end{equation}
We now argue as in Theorem \ref{pmmgsmp} to obtain $u\equiv u(x_0)$.
\end{proof}

\subsection{Maximum principles in unbounded domains}
In this subsection, I deal with maximum principles in unbounded domains. Throughout this subsection, $\Omega$ will refer to $(r,+\infty)$ or $(-\infty,r)$ for some $r\in\mathbb{R}$. We also assume that $\mathop{\rm supp}(J)\cap \Omega\neq\emptyset$. Provided that $J$ satisfies the following condition
\begin{itemize}
\item[(H3)] $\mathop{\rm supp}(J)\cap \mathbb{R}^+\neq \emptyset$ and $\mathop{\rm supp}(J)\cap \mathbb{R}^-\neq \emptyset$,
\end{itemize}
one can show that the strong maximum principle (Theorem \ref{pmmgsmp}) holds as well for the operators $\mathcal{S}$ and $\mathcal{M}$. More precisely, let $\Omega:=(r,+\infty)$ or $(-\infty,r)$; we have the following result.

\begin{theorem} \label{pmmgsmp3}
Let $u\in C^{0}(\mathbb{R}) $ be such that
$$
\mathcal{M}[u] \geq 0 \quad\text{in } \Omega \quad \text{(resp. $\mathcal{M}[u] \leq 0$ in $\Omega$)}.
$$
Assume that $J$ satisfies (H3). Then $u$ cannot achieve a global maximum (resp. global minimum) in $\Omega$ without being constant.
\end{theorem}

As a special case of Theorem \ref{pmmgsmp3}, we have the following theorem.

\begin{theorem} \label{pmmgsmp5}
Let $u\in C^{0}(\bar \Omega) $ be such that
\begin{gather*}
\mathcal{S}[u] + h_\alpha \geq 0 \quad\text{in } \Omega \quad \text{(resp. $\mathcal{S}[u] + h_\alpha \leq 0$ in $\Omega$)}\\
u(r)\geq \alpha \quad \text{(resp. $u(r)\leq \alpha$)},
\end{gather*}
where $h_\alpha=\alpha\int_{\mathbb{R}\setminus\Omega}J(x-y)\,dy$. Assume that $J$ satisfies (H3). Then $u$ cannot achieve a global maximum (resp. global minimum) in $\Omega$ without being constant.
\end{theorem}

Indeed, let us define $\tilde u$ by
\begin{equation}
\tilde u(x) :=
\begin{cases}
u(x)& \text{in } \Omega\\
u(r) & \text{in } \mathbb{R}\setminus \Omega
\end{cases}\label{tildu}
\end{equation}
and observe that in $\Omega$, $\tilde u$ satisfies
$$
\mathcal{M}[\tilde u]=\mathcal{S}[u] +u(r)\int_{\mathbb{R}\setminus\Omega}J(x-y)\,dy.
$$
Hence
$$
\mathcal{M}[\tilde u] \geq \Big( u(r)\int_{\mathbb{R}\setminus\Omega}J(x-y)\,dy-h_{\alpha}\Big) \ge 0
$$
(resp.
$$
\mathcal{M}[\tilde u] \leq \Big( u(r)\int_{\mathbb{R}\setminus\Omega}J(x-y)\,dy-h_{\alpha}\Big) \le 0).
$$
From Theorem \ref{pmmgsmp3}, $\tilde u$ cannot achieve a global maximum (resp. global minimum) in $\Omega$ without being constant. Using the definition of $\tilde u$, we easily get that $u$ cannot achieve a global maximum (resp. global minimum) in $\Omega$ without being constant. When $\Omega=\mathbb{R}$, the following statement holds.

\begin{theorem} \label{pmmgsmp4}
Let $u\in C^{0}(\mathbb{R}) $ be such that
$$
\mathcal{M}[u] \geq 0 \quad \text{in } \mathbb{R}\quad \text{(resp. $\mathcal{M}[u] \leq 0$ in $\mathbb{R}$)}.
$$
Assume that $J$ satisfies (H3). Then $u$ cannot achieve a non-negative maximum (resp. non-positive minimum) in $\mathbb{R}$ without being constant.
\end{theorem}

In fact, (H3) is optimal to obtain a strong maximum principle for $\mathcal{M}$. Indeed, we have the following result.

\begin{theorem} \label{cns}
Let $J\in C^0(\mathbb{R})$. Then $\mathcal{M}$ satisfies the strong maximum principle (i.e. Theorem \ref{pmmgsmp4}) if and only if (H3) is satisfied.
\end{theorem}

\begin{proof}[Proof of Theorem \ref{pmmgsmp3}]
The argument being similar in the other cases, I only deal with $\Omega:=(r,+\infty)$. Assume that $u$ achieves a global maximum in $\Omega$ at some point $x_0$. At $x_0$, we have
$$
0\le \mathcal{M}[u](x_0) \le 0.
$$
Hence, $ u(y)= u(x_0)$ for all $y\in x_0-\mathop{\rm supp}(J)$. Using (H3), we have in particular
\begin{equation} \label{recov0}
u(y)= u(x_0) \quad\text{for all } y\in \Big(x_0-[-d,-c]\cup[a,b]\Big)\cap \Omega,
\end{equation}
for some positive reals $a,b,c,d$. We now proceed in two steps. First, we show that there exists $r_0$ such that $ u= u(x_0)$ in $[r_0,+\infty)$. Then, we show that $ u\equiv u(x_0)$ in $\bar \Omega$.
\subsection*{Step 1}
Since $x_0 \in \Omega$, then $x_0 + [c,d] \subset \Omega$ and $u(y)=u(x_0)$ for all $y\in x_0 +[c,d]$. We can repeat this argument with $x_0+c$ and $x_0+d$ to obtain $u(y)=u(x_0)$ for all $y\in x_0 +[nc,nd]$ with $n\in\{0,1,2\}$. By induction, we easily see that
\begin{equation}
u(y)=u(x_0) \quad \text{for all } y\in \cup_{n\in \mathbb{N}} \Big( x_0 +[nc,nd]\Big). \label{recovn}
\end{equation}
Choose $n_0$ so that $1<n_0\frac{d-c}{c}$. Then for $n\ge n_0$ the consecutive intervals $x_0+[nc,nd]$ overlap, and \eqref{recovn} yields $u\equiv u(x_0)$ in $[r_0,+\infty)$, where $r_0:=x_0+n_0c$.
\subsection*{Step 2}
Let $x\in\bar\Omega$ and let $p\in\mathbb{N}$ be such that $x+pb>r_0$. Such $p$ exists since $b>0$. From Step 1, we have $u(x+pb)=u(x_0)$. Repeating the previous argument yields
$$
u(y)= u(x_0) \quad\text{for all } y\in \big(x+pb - [-d,-c]\cup[a,b]\big)\cap\Omega.
$$
In particular, $ u(x+(p-1)b)= u(x_0)$. Using induction, we easily get that $ u(x)= u(x_0)$; thus
$$
u\equiv u(x_0)\quad \text{in } \bar \Omega.
$$
\end{proof}

Observe that, up to minor changes, the previous argument also proves Theorem \ref{pmmgsmp4}. Let us now show Theorem \ref{cns}. For the sake of simplicity, we present an alternative proof of Theorem \ref{pmmgsmp4} suggested by Pascal Autissier.

\begin{proof}[Proof of Theorems \ref{pmmgsmp4} and \ref{cns}]\quad
\subsection*{Necessary Condition}
If this condition fails, then $\mathop{\rm supp}(J)\subset \mathbb{R}^-$ or $\mathop{\rm supp}(J)\subset \mathbb{R}^+$. Assume first that $\mathop{\rm supp}(J)\subset \mathbb{R}^-$. Let $u$ be a non-decreasing function which is constant in $\mathbb{R}^+$. Then a simple computation shows that $\mathcal{M}[u]=J\star u -u \geq 0$, and $u$ achieves a global maximum without being constant. Hence $\mathcal{M}$ does not satisfy the strong maximum principle. If $\mathop{\rm supp}(J)\subset \mathbb{R}^+$, a similar argument holds: taking $v$ a non-increasing function which is constant in $\mathbb{R}^-$, we obtain $\mathcal{M}[v]\geq 0$, and $v$ achieves a global maximum without being constant. This ends the proof of the necessary condition.
\subsection*{Sufficient Condition}
Since $J$ is continuous, from (H3) there exist positive reals $a<b$ and $c<d$ such that $[-d,-c]\cup[a,b]\subset \mathop{\rm supp}(J)$. Assume that $\mathcal{M}[u]\geq 0$ and $u$ achieves a global maximum at some point $x_0$. Let $\Gamma$ be the following set
$$
\Gamma=\{y\in\mathbb{R}\,|\,u(y)=u(x_0)\}.
$$
Since $u$ is continuous, $\Gamma$ is a nonempty closed subset of $\mathbb{R}$.
Since $\mathcal{M}[u](x_0)\geq 0$, $J\geq 0$ and $u(y)-u(x_0)\leq 0$ for all $y \in \mathbb{R}$, at $x_0$ the function $u$ satisfies
$$
\mathcal{M}[u](x_0)=\int_{\mathbb{R}}J(x_0-y)[u(y)-u(x_0)]\,dy=0.
$$
Hence, $(x_0 - [-d,-c]\cup[a,b]) \subset \Gamma$. Let us choose $-C \in [-d,-c]$ and $A\in [a,b]$ such that $ \frac{A}{C} \in \mathbb{R}\setminus \mathbb{Q}$. This is always possible since $[-d,-c]$ and $[a,b]$ have nonempty interiors. Therefore $x_0 + C \in \Gamma$ and $x_0 - A \in \Gamma$. Now, repeating this argument at $x_0+C$ and $x_0-A$ leads to $(x_0 + C - [-d,-c]\cup[a,b]) \subset \Gamma$ and $(x_0 - A - [-d,-c]\cup[a,b]) \subset \Gamma$. Thus,
$$
\{x_0+pC-qA\,|\,(p,q)\in\{0,1,2\}^2\}\subset \Gamma.
$$
By induction, we then have
$$
\{x_0+pC-qA\,|\,(p,q)\in\mathbb{N}^2\}\subset \Gamma.
$$
Since $ \frac{A}{C} \in \mathbb{R}\setminus \mathbb{Q}$, the set $\{x_0+pC-qA\,|\,(p,q)\in\mathbb{N}^2\}$ is dense in $\mathbb{R}$. Hence, $\Gamma=\mathbb{R}$, since it is closed and contains a dense subset of $\mathbb{R}$; that is, $u\equiv u(x_0)$.
\end{proof}

\subsection{Some remarks and general comments}
We can easily extend all the above arguments to operators of the form
$$
\mathcal{L} + \mathcal{E},\quad \mathcal{S} +\mathcal{E},\quad \mathcal{M} +\mathcal{E}
$$
where $\mathcal{E}$ is any elliptic operator, which can be degenerate. Thus $\mathcal{L} + \mathcal{E}$, $\mathcal{S} +\mathcal{E}$, $\mathcal{M} +\mathcal{E} $ also satisfy maximum principles.

\begin{remark} \label{rmk2.4} \rm
In such a case, the regularity required for $u$ has to be adjusted to the considered operator.
\end{remark}

Maximum principles can also be obtained for nonlinear operators of the form
$$
\mathcal{L}[g(\cdot)],\quad \mathcal{S}[g(\cdot)],\quad \mathcal{M}[g(\cdot)],
$$
where $g$ is a smooth increasing function. In that case, we simply use the fact that $g[u(y)]-g[u(x)]=0$ implies $u(y)=u(x)$. For example, assume that
$$
\mathcal{L}[g(u)]\geq 0 \quad \text{in } \Omega.
$$
If $u$ achieves a global non-negative maximum at $x_0$ then $u$ satisfies
$$
0\leq \underbrace{\int_{r}^{R}J(x_0-y)\big(g[u(y)] - g[u(x_0)]\big)\,dy}_{\leq 0} + \underbrace{g[u(x_0)]\big(h^-_{1}(x_0) +h^+_{1}(x_0)-1\big)}_{\leq 0}\leq 0.
$$
Hence, $g[u(y)]-g[u(x_0)]=0$ for $y \in (x_0 -\mathop{\rm supp}(J))\cap \Omega$. Using the strict monotonicity of $g$, we obtain $u(y)=u(x_0)$ for $y \in (x_0 -\mathop{\rm supp}(J))\cap \Omega$. Then, we are reduced to the linear case.

\begin{remark} \label{rmk2.5} \rm
The nonlinear operator $\mathcal{M}[g(\cdot)]$ appears naturally in models of propagation of information in neural networks; see \cite{EMc,M}.
\end{remark}

\begin{remark} \label{rmk2.6} \rm
When $g$ is decreasing, the nonlinear operators $\mathcal{L}[g(\cdot)]$, $\mathcal{S}[g(\cdot)]$, and $\mathcal{M}[g(\cdot)]$ satisfy some strong maximum principle. For example, assume that
$$
\mathcal{L}[g(u)]\geq 0 \quad \text{in } \Omega.
$$
Then $u$ cannot achieve a non-positive global minimum without being constant. Note that in this case, it is a global minimum rather than a global maximum which is required.
\end{remark}

Recently, Cortazar et al. \cite{CER} introduced another type of nonlinear diffusion operator,
$$
\mathcal{R}[u]:= \int_{\mathbb{R}}J\Big(\frac{x-y}{u(y)}\Big)\,dy -u.
$$
Assuming that $J$ is increasing in $\mathbb{R}^-\cap \mathop{\rm supp}(J)$ and decreasing in $\mathbb{R}^+\cap \mathop{\rm supp}(J)$, they prove that $\partial_{t}-\mathcal{R} $ satisfies a parabolic comparison principle.
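As a quick check (using only the normalization $\int_{\mathbb{R}}J=1$ assumed throughout and the positivity of the constant), positive constants are equilibria of $\mathcal{R}$: for any constant $k>0$, the change of variables $z=(x-y)/k$ gives
$$
\mathcal{R}[k](x)=\int_{\mathbb{R}}J\Big(\frac{x-y}{k}\Big)\,dy-k
=k\int_{\mathbb{R}}J(z)\,dz-k=0,
$$
in analogy with the identity $\mathcal{M}[k]=0$ for the linear operator.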
One can show that $\mathcal{R}[g(\cdot)]$ also satisfies a strong maximum principle provided that $g$ is a positive increasing function. Indeed, assume that
$$
\mathcal{R}[g(u)]\geq 0 \quad \text{in } \mathbb{R}.
$$
If $u$ achieves a global positive maximum at $x_0$ then
\begin{gather*}
\frac{x_0-y}{g(u(y))}\ge\frac{x_0-y}{g(u(x_0))} \quad\text{when } x_0-y>0, \\
\frac{x_0-y}{g(u(y))}\le\frac{x_0-y}{g(u(x_0))} \quad\text{when } x_0-y<0.
\end{gather*}
Using the assumption made on $J$, we have for every $y\in \mathbb{R}$,
$$
\Big[J\Big(\frac{x_0-y}{g(u(y))}\Big)-J\Big(\frac{x_0-y}{g(u(x_0))}\Big)\Big] \le 0.
$$
Therefore $u$ satisfies
$$
0\le\int_{-\infty}^{+\infty}\Big[J\Big(\frac{x_0-y}{g(u(y))}\Big) -J\Big(\frac{x_0-y}{g(u(x_0))}\Big)\Big]\,dy\leq 0.
$$
Hence, $g[u(y)]-g[u(x_0)]=0$ for $y \in x_0 -\mathop{\rm supp}(J)$. Using the strict monotonicity of $g$, we obtain $u(y)=u(x_0)$ for $y \in x_0 -\mathop{\rm supp}(J)$. Then, we are reduced to the linear case. This density-dependent operator can be viewed as a nonlocal version of the classical porous-medium operator.

A consequence of the proofs of the strong maximum principles is the following characterization of the global extrema of $u$.

\begin{lemma} \label{pmmgMProp}
Assume $J$ satisfies (H3). Let $u$ be a smooth ($C^0$) function. If $u$ achieves a global minimum (resp. a global maximum) at some point $\xi$ then the following holds:
\begin{itemize}
\item Either $\mathcal{M}[u](\xi)> 0$ (resp. $\mathcal{M}[u](\xi)< 0$),
\item Or $\mathcal{M}[u] (\xi)= 0 $ and $u$ is identically constant.
\end{itemize}
\end{lemma}

\begin{remark} \label{rmk2.7} \rm
An easy adaptation of the proof shows that Lemma \ref{pmmgMProp} holds for $u$ piecewise continuous with a finite number of discontinuities.
\end{remark}

\begin{remark} \label{rmk2.8} \rm
Lemma \ref{pmmgMProp} holds as well for $\mathcal{M}+\mathcal{E}$, $\mathcal{L}$, $\mathcal{L}+\mathcal{E}$, $\mathcal{S}$, $\mathcal{S}+\mathcal{E}$ and $\mathcal{R}$, provided that the considered operator satisfies a strong maximum principle.
\end{remark}

\section{Comparison principles}\label{pmmgscp}
In this section I deal with comparison principles satisfied by the operators $\mathcal{L}$, $\mathcal{S}$ and $\mathcal{M}$. This property often comes as a corollary of a maximum principle. Here we present two comparison principles which are not a direct application of the maximum principle. The first is a linear comparison principle; the second concerns a nonlinear comparison principle satisfied by $\mathcal{S}$. This section is divided into two subsections, each one devoted to a comparison principle.

\subsection{Linear Comparison principle}
\begin{theorem}[Linear Comparison Principle] \label{pmmgcompl}
Let $u$ and $v$ be two smooth functions ($C^{0}(\mathbb{R})$) and $\omega$ a connected subset of $\mathbb{R}$. Assume that $u$ and $v$ satisfy the following conditions:
\begin{itemize}
\item $\mathcal{M}[v] \geq 0 $ in $\omega \subset \mathbb{R}$,
\item $\mathcal{M}[u] \leq 0 $ in $\omega \subset \mathbb{R}$,
\item $u\geq v $ in $\mathbb{R} \setminus \omega$,
\item if $\omega$ is an unbounded domain, assume also that $\lim_{|x|\to \infty} (u-v) \geq 0$.
\end{itemize}
Then $u\geq v $ in $\mathbb{R}$.
\end{theorem}

\begin{proof}
Let us first assume that $\omega$ is bounded. Let $w=u-v$; then $w$ satisfies:
\begin{itemize}
\item $w\geq 0$, $w\not \equiv 0$ in $\mathbb{R} \setminus\omega$,
\item $\mathcal{M}[w] \leq 0$ in $ \omega$.
\end{itemize}
Let us define the quantity $\gamma:=\inf_{\mathbb{R}\setminus \omega}w$.
Now, we argue by contradiction. Assume that $w$ achieves a negative minimum at $x_{0}$. By assumption, $x_{0}\in \omega $ and it is a global minimum of $w$. So, at this point, $w$ satisfies
$$
0\geq \mathcal{M}[w](x_{0})= (J\star w -w)(x_{0})=\int_{\mathbb{R}}J(x_{0}-z) (w(z)-w(x_{0}))dz\ge 0.
$$
It follows that $w(y)= w(x_{0})$ for all $y\in x_{0}-\mathop{\rm supp}(J)$. Hence, for some reals $a<b$, we have the following alternative:
\begin{itemize}
\item Either $(\mathbb{R}\setminus\omega) \cap (x_0 - [a,b])\neq \emptyset $, and then we have a contradiction since there exists $y\in (\mathbb{R}\setminus\omega)\cap (x_0-[a,b])$ such that $0\leq \gamma\leq w(y)=w(x_0)<0$.
\item Or $(\mathbb{R}\setminus\omega) \cap \left(x_0 - [a,b] \right)= \emptyset $, and then $\left(x_0 - [a,b] \right)\subset\subset \omega$.
\end{itemize}
In the latter case, arguing as in the proof of Theorem \ref{pmmgwmp}, we can repeat the previous computation at the points $x_0-b$ and $x_0-a$, and using induction we obtain
\begin{gather*}
(\mathbb{R}\setminus\omega) \cap \left(x_0 - [na,nb] \right)\neq \emptyset, \\
\forall \; y \in x_0 - [na,nb],\quad w(y)=w(x_0),
\end{gather*}
for some positive $n\in \mathbb{N}$. Thus $0\leq \gamma\leq w(x_0)<0$, which is a contradiction. In the case where $\omega$ is unbounded, since by assumption $\lim_{|x|\to \infty}w\geq 0$, there exists a compact subset $\omega_1$ such that $x_0\in \omega_1$ and $w(x_0)<\inf_{\mathbb{R}\setminus \omega_1}w$. Then the above argument holds with $\mathbb{R}\setminus \omega_1$ instead of $\mathbb{R}\setminus \omega$.
\end{proof}

\subsection{Nonlinear Comparison Principle}
In this subsection, I obtain the following nonlinear comparison principle.

\begin{theorem}[Nonlinear comparison principle] \label{nlcp}
Assume that $\mathcal{M}$, defined by \eqref{defM}, satisfies (H3), that $\Omega=(r,+\infty)$ for some $r\in\mathbb{R}$, and that $f\in C^{1}(\mathbb{R})$ satisfies $f'_{|(\beta, +\infty) }<0$. Let $z$ and $v$ be smooth ($C^0(\mathbb{R})$) functions satisfying
\begin{gather}
\mathcal{M}[z] + f(z)\geq 0 \quad \text{in } \Omega,\label{nlcpsisub}\\
\mathcal{M}[v] + f(v)\leq 0 \quad \text{in } \Omega,\label{nlcpsisup}\\
\lim_{x\to +\infty} z(x)\leq \beta, \quad \lim_{x\to +\infty} v(x)\geq \beta,\label{nlcpsibc0} \\
z(x)\leq \alpha,\quad v(x)\geq \alpha \quad \text{when } x\leq r\label{nlcpsibc1}.
\end{gather}
If in $[r,+\infty)$, $z<\beta$ and $v> \alpha$, then there exists $\tau\in \mathbb{R}$ such that $z\leq v_\tau$ in $\mathbb{R}$, where $v_\tau(\cdot):=v(\cdot+\tau)$. Moreover, either $z<v_\tau$ in $\Omega$ or $z\equiv v_\tau$ in $\Omega$.
\end{theorem}

Before giving the proof, let us fix some notation. Let $\epsilon>0$ be such that $f'(s)<0$ for $s\ge \beta -\epsilon$. Choose $\delta\le \frac{\epsilon}{4}$ positive such that
\begin{equation}
f'(p)<-2\delta \quad \forall p \quad \text{such that } \beta-p <\delta. \label{pmmg.si1}
\end{equation}
If $\lim_{x \to +\infty}z(x)=\beta$, choose $ M>0$ such that:
\begin{gather}
\beta-v(x) <\frac {\delta}{2}\ \ \forall x >M, \label{pmmg.si2}\\
\beta-z(x) <\frac {\delta}{2}\ \ \forall x >M. \label{pmmg.si3}
\end{gather}
Otherwise, we choose $M$ such that
\begin{equation} \label{pmmg.siM}
v(x)>z(x)\quad \forall x>M.
\end{equation}
The proof of this theorem follows ideas developed by the author in \cite{Co1} for convolution operators. It essentially relies on the following technical lemma, which will be proved later on.

\begin{lemma} \label{pmmg.silemslid}
Let $z$ and $v$ be respectively smooth positive sub- and supersolutions satisfying \eqref{nlcpsisub}-\eqref{nlcpsibc1}.
If there exist positive constants $a\leq \frac{\delta}{2}$ and $b$ such that $z$ and $v$ satisfy
\begin{gather}
v(x+b)>z(x)\quad \forall x \in [r,M+1], \label{pmmg.sib1}\\
v(x+b)+a>z(x)\quad \forall x \in \Omega, \label{pmmg.sib2}
\end{gather}
then $v(x+b)\geq z(x)$ for all $x \in \mathbb{R}$.
\end{lemma}

\begin{proof}[Proof of Theorem \ref{nlcp}]
Observe that if $\inf_{\mathbb{R}}v\geq \sup_{\mathbb{R}}z$ then $v\geq z$ trivially holds. In the sequel, we assume that $\inf_{\mathbb{R}} v < \sup_{\mathbb{R}} z$. Assume for a moment that Lemma \ref{pmmg.silemslid} holds. To prove Theorem \ref{nlcp}, by construction of $M$ and $\delta$, we just have to find an appropriate constant $b$ which satisfies \eqref{pmmg.sib1} and \eqref{pmmg.sib2}, and to show that either $v_\tau>z$ in $\Omega$ or $z\equiv v_\tau$ in $\Omega$. Since $v$ and $z$ satisfy $z<\beta$ and $v> \alpha$ in $[r,+\infty)$, using \eqref{nlcpsibc0}-\eqref{nlcpsibc1} we can find a constant $D$ such that on the compact set $[r,M+1]$ we have, for every $b\geq D$,
$$
v(x+b)>z(x)\quad \forall x \in [r,M+1].
$$
Now, we claim that there exists $b\geq D$ such that $v(x+b)+\frac{\delta}{2}>z(x)$ for all $x \in \mathbb{R} $. If not, then we have
\begin{equation} \label{pmmg.sineg}
\text{for all $b\geq D$ there exists $x(b)$ such that $v(x(b)+b)+\frac{\delta}{2}\leq z(x(b))$}.
\end{equation}
Since $v\geq \alpha$ and $v$ satisfies \eqref{nlcpsibc1}, we have
\begin{equation} \label{pmmg.si6}
v(x+b)+\frac{\delta}{2}> z(x) \quad \text{for all $b>0$ and $ x \leq r$}.
\end{equation}
Take now a sequence $(b_n)_{n\in\mathbb{N}}$ which tends to $+\infty$. Let $x(b_n)$ be the point defined by \eqref{pmmg.sineg}. Thus we have for that sequence
\begin{equation} \label{pmmg.si7}
v(x(b_n)+b_n)+\frac{\delta}{2}\leq z(x(b_n)).
\end{equation}
According to \eqref{pmmg.si6} and the choice of $D$, we have $x(b_n)\geq M+1$. Therefore, the sequence $x(b_n)+b_n$ converges to $+\infty$. Passing to the limit in \eqref{pmmg.si7}, we get
\[
\beta+\frac{\delta}{2}\leq \lim_{n\to +\infty} v(x(b_n)+b_n) +\frac{\delta}{2}\leq \limsup_{n\to +\infty} z(x(b_n)) \leq \beta,
\]
which is a contradiction. Therefore there exists $b>D$ such that
$$
v(x+b)+\frac{\delta}{2}>z(x)\quad \forall x \in \Omega.
$$
Since we have found our appropriate constants $a=\frac{\delta}{2}$ and $b$, we can apply Lemma \ref{pmmg.silemslid} to obtain
$$
v(x+\tau )\geq z(x)\quad \forall x \in \mathbb{R},
$$
with $\tau=b$. It remains to prove that in $\Omega$ either $v_\tau> z$ or $z \equiv v_\tau$. We argue as follows. Let $w:=v_\tau - z $; then either $w>0$ in $\Omega$ or $w$ vanishes at some point $x_0 \in \Omega$. If such an $x_0$ exists, then at this point we have $w(x)\geq w(x_0)=0$ and
\begin{equation}
0\leq \mathcal{M}[w](x_0) \leq f(z(x_0))-f(v(x_0+\tau))=f(z(x_0))-f(z(x_0))= 0.
\end{equation}
Then, using the argument in the proof of Theorem \ref{pmmgsmp3}, we obtain $w\equiv 0$ in $\bar \Omega$, which means $v_\tau \equiv z$ in $\bar \Omega$. This ends the proof of Theorem \ref{nlcp}.
\end{proof}

Let us now turn our attention to the proof of the technical Lemma \ref{pmmg.silemslid}.

\begin{proof}[Proof of Lemma \ref{pmmg.silemslid}]
Let $v$ and $z$ be respectively a super- and a subsolution of \eqref{nlcpsisub}-\eqref{nlcpsibc1} satisfying \eqref{pmmg.si2} and \eqref{pmmg.si3} or \eqref{pmmg.siM}. Let $a>0$ be such that
\begin{equation} \label{pmmg.sia1}
v(x+b)+a>z(x)\quad \forall x \in \Omega.
\end{equation}
Note that for $b$ defined by \eqref{pmmg.sib1} and \eqref{pmmg.sib2}, any $a \geq \frac{\delta}{2}$ satisfies \eqref{pmmg.sia1}. Define
\begin{equation} \label{pmmg.sia2}
a^*=\inf\{ a>0 : v(x+b)+a>z(x)\;\forall x \in \Omega\}.
\end{equation}
We claim that
\begin{claim}
$a^*=0$.\label{pmmg.sicla1}
\end{claim}
Observe that Claim \ref{pmmg.sicla1} implies that $v(x+b)\geq z(x)$ for all $x \in \Omega$, which is the desired conclusion.
\end{proof}

\begin{proof}[Proof of Claim \ref{pmmg.sicla1}]
We argue by contradiction. If $a^*>0$, since $\lim_{x\to + \infty}v(x+b)+a^* -z(x)\geq a^*>0$ and $v(x+b) -z(x)+a^*\geq a^*>0$ for $x\leq r$, there exists $x_0 \in \Omega$ such that $v(x_0+b)+a^*=z(x_0)$. Let $w(x):= v(x+b)+a^*-z(x)$; then
\begin{equation} \label{pmmg.simin}
0=w(x_{0})=\min_{\mathbb{R}} w(x).
\end{equation}
Observe that $w$ also satisfies the following equations:
\begin{gather}
\mathcal{M}[w] \leq f(z(x))-f(v(x+b)), \label{pmmg.sieqp}\\
w(+ \infty )\geq a^*, \label{pmmg.sicl1} \\
w(x)\geq a^* \quad\text{for } x\leq r. \label{pmmg.sicl2}
\end{gather}
By assumption, $v(x+b)>z(x)$ in $(-\infty,M+1]$. Hence $x_0>M+1$. Let us define
\begin{equation}
Q(x):=f(z(x))-f(v(x+b)). \label{pmmg.siq}
\end{equation}
Computing $Q$ at $x_0$, it follows that
\begin{equation} \label{pmmg.siq+}
Q(x_0)=f(v(x_0+b)+a^*)-f(v(x_0+b))\leq 0,
\end{equation}
since $x_0>M+1$, $f$ is non-increasing for $s\geq \beta- \epsilon$, $a^*>0$ and $\beta-\epsilon< \beta- \frac{\delta}{2}\leq v(x+b)$ for $x>M$. Combining \eqref{pmmg.sieqp}, \eqref{pmmg.simin} and \eqref{pmmg.siq+} yields
$$
0\le\mathcal{M}[w](x_0)\leq Q(x_0)\leq 0.
$$
Following the argument of Theorem \ref{pmmgsmp3}, we end up with $w\equiv 0$ in $\Omega$, which contradicts \eqref{pmmg.sicl1}. Hence $a^*=0$, which ends the proof of Claim \ref{pmmg.sicla1}.
\end{proof}

\begin{remark} \label{rmk3.1} \rm
The previous analysis only holds for linear operators. It fails for operators such as $\mathcal{M}[g(\cdot)]$ or $\mathcal{R}$.
\end{remark}

\begin{remark} \label{pmmg.rem.nlcp} \rm
The regularity assumption on $f$ can be relaxed. Indeed, the above proof holds as well with $f$ continuous and non-increasing in $(\beta-\epsilon,+\infty)$ for some positive $\epsilon$.
\end{remark}

\section{Sliding techniques and applications}\label{pmmgssta}
In this section, using sliding techniques, I prove the uniqueness and monotonicity of the positive solutions of the following problem:
\begin{gather}
\int_{r}^{R}J(x-y)g(u(y))\,dy+f(u)+t^-_\alpha +t^+_\beta =0 \quad \text{in } \Omega,\label{pmmgopern}\\
u(r)= \alpha, \label{pmmgopernbc-} \\
u(R)= \beta, \label{pmmgopernbc+}
\end{gather}
where $t^-_\alpha=g(\alpha)\int^{r}_{-\infty}J(x-y)dy$, $t^+_\beta=g(\beta)\int^{\infty}_{R}J(x-y)dy$ and $g$ is an increasing function. We also assume that $f$ is a continuous function and that $J$ satisfies (H1)--(H2). More precisely, I prove the following result.

\begin{theorem}\label{opn.thm}
Let $\alpha < \beta$. Assume that $f\in C^0$. Then any solution $u$ of \eqref{pmmgopern}-\eqref{pmmgopernbc+} satisfying $\alpha<u<\beta$ in $\Omega$ is unique and monotone.
\end{theorem}

\begin{remark} \rm
Similarly, if $\alpha>\beta$, then any solution $u$ of \eqref{pmmgopern}-\eqref{pmmgopernbc+} satisfying $\beta<u<\alpha$ in $\Omega$ is unique and monotone.
\end{remark}

\begin{proof}[Proof of Theorem \ref{opn.thm}]
\subsection*{Monotonicity}
Let $u$ be a solution of \eqref{pmmgopern}-\eqref{pmmgopernbc+} with $\alpha<u<\beta$ in $\Omega$, and extend it to $\mathbb{R}$ by
\begin{equation}
\tilde u(x):=
\begin{cases}
\alpha & \text{for } x\le r,\\
u(x) & \text{in } \Omega,\\
\beta & \text{for } x\ge R.
\end{cases}
\end{equation}
Set $\mathcal{N}[w]:=\int_{\mathbb{R}}J(x-y)g(w(y))\,dy$. With this notation, $\tilde u$ satisfies
\begin{equation}\label{pmmgeqn}
\mathcal{N}[\tilde u]+f(\tilde u)=0\quad\text{in }\Omega,
\end{equation}
and, for $\tau\ge 0$, the translate $\tilde u_\tau(\cdot):=\tilde u(\cdot+\tau)$ satisfies
\begin{equation}\label{pmmgeqntau}
\mathcal{N}[\tilde u_\tau]+f(\tilde u_\tau)=0\quad\text{in }(r-\tau,R-\tau).
\end{equation}
Define
\begin{equation}
\tau^*=\inf \{\tau\ge 0 : \tilde u_{\tau}>\tilde u \text{ in } \Omega\}.
\end{equation}
Observe that $\tau^*$ is well defined since, for any $\tau > R-r$, by assumption and the definition of $\tilde u$, we have $\tilde u\le \tilde u_\tau$ in $\mathbb{R}$ and $\tilde u<\tilde u_\tau$ in $\Omega$. Hence $\tau^*\le R-r$. We now show that $\tau^*=0$. Observe that by proving the claim below we obtain the monotonicity of the solution $u$.
\begin{claim}
$\tau^* =0$.
\end{claim}

\begin{proof}[Proof of the claim]
We argue by contradiction. Assume that $\tau^*>0$; then, since $\tilde u$ is a continuous function, we have $\tilde u \le \tilde u_{\tau^*}$ in $\mathbb{R}$. Let $w:=\tilde u_{\tau^*}-\tilde u$. From the definition of $\tau^*$ and the continuity of $\tilde u$, $w$ must achieve a non-positive minimum at some point $x_0$ in $\Omega$. Namely, since $w\ge 0$, we have $w(x_0)=0$. We are now led to consider the following two cases:
\begin{itemize}
\item Either $x_0 \in [R-\tau^*,R)$,
\item Or $x_0 \in (r,R-\tau^*)$.
\end{itemize}
We will see that in both cases we end up with a contradiction. First assume that $x_0 \in [R-\tau^*,R)$. Since $\tau^*>0$, using the definition of $\tilde u$ we have $\tilde u_{\tau^*}\equiv\beta$ in $[R-\tau^*,R)$. We therefore get a contradiction since $0=w(x_0)=\beta-\tilde u(x_0)>0$. In the other case, $w$ achieves its minimum in $(r,R-\tau^*)$. Now, using \eqref{pmmgeqn} and \eqref{pmmgeqntau}, at $x_0$ we have
\begin{equation}
\mathcal{N}[\tilde u_{\tau^*}]-\mathcal{N}[\tilde u] =\int_{-\infty}^{+\infty}J(x_0-y)[g(\tilde u_{\tau^*}(y)) -g(\tilde u(y))]\,dy = 0.
\end{equation}
Since $g$ is increasing and $\tilde u_{\tau^*}\ge \tilde u$, it follows that $g(\tilde u_{\tau^*}(y))-g(\tilde u(y))=0$ for all $y \in x_0 -\mathop{\rm supp}(J)$. Using the monotone increasing property of $g$ yields $w(y)=\tilde u_{\tau^*}(y)-\tilde u(y)=0$ for all $y \in x_0 -\mathop{\rm supp}(J)$. Arguing now as in Theorem \ref{pmmgsmp}, we end up with $w\equiv 0$ in $[r,R-\tau^*]$. Hence, $0=w(r)=\tilde u(r+\tau^*)-\alpha>0$ since $\tau^*>0$, which is our desired contradiction. Thus $\tau^* =0$, which ends the proof of the claim and the proof of the monotonicity of $\tilde u$.
\end{proof}

\subsection*{Uniqueness}
We now prove that problem \eqref{pmmgopern}-\eqref{pmmgopernbc+} has a unique solution. Let $u$ and $v$ be two solutions of \eqref{pmmgopern}-\eqref{pmmgopernbc+}. From the previous subsection, without loss of generality, we can assume that $u$ and $v$ are monotone increasing in $\Omega$, and we extend $u$ and $v$ by continuity to all of $\mathbb{R}$ by $\tilde u$ and $\widetilde v$. We prove that $\tilde u\equiv \widetilde v$ in $\mathbb{R}$, which gives us $u\equiv v$ in $\Omega$. As in the above subsection, we use the sliding method to prove it. Let us define
\begin{equation} \label{pmmgumin}
\tau^{**}=\inf \{\tau\ge 0 : \widetilde v_{\tau}>\tilde u \text{ in } \Omega\}.
\end{equation}
Observe that $\tau^{**}$ is well defined since, for any $\tau > R-r$, by assumption and the definition of $\tilde u$, we have $\tilde u\le \widetilde v_\tau$ in $\mathbb{R}$ and $\tilde u<\widetilde v_\tau$ in $\Omega$. Therefore $\tau^{**}\le R-r$. Following now the argument of the above subsection with $\widetilde v_{\tau^{**}}$ instead of $\tilde u_{\tau^*}$, it follows that $\tau^{**}=0$. Hence, $\widetilde v \ge \tilde u$. Since $u$ and $v$ are solutions of \eqref{pmmgopern}-\eqref{pmmgopernbc+}, the same analysis holds with $\tilde u$ replaced by $\widetilde v$. Thus, $\widetilde v \le \tilde u$, which yields $\tilde u\equiv \widetilde v$.
\end{proof}

\begin{remark} \label{rmk4.1} \rm
Theorem \ref{opn.thm} holds for the operator $\mathcal{R}$ introduced by Cortazar et al. \cite{CER}.
\end{remark}

\section{Qualitative properties of solutions of integrodifferential equations in unbounded domains \label{pmmg.ssi}}
In this section, I study the properties of solutions of the problem
\begin{equation}
\begin{gathered}
\mathcal{S}[u] +f(u)+h_\alpha(x)=0 \quad \text{in } \Omega,\\
u(r)=\alpha < \beta, \\
u(x)\to \beta \quad \text{as } x \to +\infty,
\end{gathered}
\label{pmmgsi.eq0}
\end{equation}
where $\mathcal{S}$ is defined by \eqref{defS} with $J$ satisfying (H3), $h_\alpha(x)=\alpha\int_{-\infty}^r J(x-y)dy$, $\Omega:=(r,+\infty)$ for some $r\in \mathbb{R}$, and $f\in C^{1}(\mathbb{R})$ satisfies $f'_{|[\beta, +\infty) }<0$. For \eqref{pmmgsi.eq0}, I prove the following result.

\begin{theorem} \label{pmmgsi.thm}
Any smooth $(C^0)$ solution of \eqref{pmmgsi.eq0} satisfying $\alpha<u<\beta$ in $\Omega$ is unique and monotone increasing in $\Omega$.
\end{theorem}

As in the previous section, we extend such a solution $u$ to all of $\mathbb{R}$ by setting $u=\alpha$ in $(-\infty,r]$, so that the extended function satisfies
\begin{equation}
\begin{gathered}
\mathcal{M}[u] +f(u)=0 \quad \text{in } \Omega,\\
u=\alpha \quad \text{in } (-\infty,r],\\
u(x)\to \beta \quad \text{as } x \to +\infty.
\end{gathered}
\label{pmmgsi.eq1}
\end{equation}
Theorem \ref{pmmgsi.thm} is then a consequence of the following result.

\begin{theorem} \label{pmmgsi.thm0}
Any smooth $(C^0)$ solution $u$ of \eqref{pmmgsi.eq1} satisfying $\alpha<u<\beta$ in $\Omega$ is unique and monotone increasing in $\Omega$.
\end{theorem}

The proof of the monotonicity is divided into three steps. For $\tau\ge 0$, write $u_\tau(\cdot):=u(\cdot+\tau)$.
\begin{enumerate}
\item Show that there exists $\tau>0$ such that $u_\tau\geq u$ in $\mathbb{R}$.
\item Show that $u_{\widetilde\tau}\geq u$ for all $\widetilde\tau\ge\tau$.
\item Show that
$$
\inf\{\tau >0 : \forall \widetilde \tau >\tau,\; u(x+\widetilde \tau)\geq u(x)\; \forall x \in \mathbb{R}\}=0.
$$
\end{enumerate}
We easily see that the last step provides the conclusion.

\noindent\textbf{Step One:}
The first step is a direct application of the nonlinear comparison principle, i.e. Theorem \ref{nlcp}. Since $u$ is a sub- and a super-solution of \eqref{pmmgsi.eq1}, one has $u_\tau \ge u$ for some positive $\tau$.

\noindent\textbf{Step Two:}
We achieve the second step with the following proposition.

\begin{proposition} \label{pmmgsi.prop1}
Let $u$ be a solution of \eqref{pmmgsi.eq1}. If there exists $\tau\ge 0$ such that $u_{\tau}\geq u$, then for all $\widetilde \tau\ge \tau $ we have $u_{\widetilde \tau}\geq u$.
\end{proposition}

Indeed, using the first step we have $u_\tau\ge u$ for some $\tau>0$. Step Two is then a direct application of Proposition \ref{pmmgsi.prop1}. The proof of Proposition \ref{pmmgsi.prop1} is based on the following two technical lemmas.

\begin{lemma} \label{pmmgsi.lemstrict}
Let $u$ be a solution of \eqref{pmmgsi.eq1} and $\tau >0$ be such that $u_{\tau}\geq u$. Then $u(x+\tau)> u(x)$ for all $x \in \bar\Omega$.
\end{lemma}

\begin{lemma}\label{pmmgsi.lem1}
Let $u$ be a solution of \eqref{pmmgsi.eq1} and $\tau >0$ be such that
\begin{gather*}
u_{\tau}\geq u,\\
u(x+\tau)> u(x)\quad \forall x \in \bar\Omega.
\end{gather*}
Then there exists $\epsilon_0(\tau)>0 $ such that for all $\widetilde \tau \in [\tau,\tau+\epsilon_0]$, $u_{\widetilde\tau}\ge u$.
\end{lemma}

\begin{proof}[Proof of Proposition \ref{pmmgsi.prop1}]
Assume that the two technical lemmas hold and that we can find a positive $\tau$ such that
$$
u(x+\tau)\geq u(x)\quad \forall x \in \mathbb{R}.
$$
Using Lemmas \ref{pmmgsi.lemstrict} and \ref{pmmgsi.lem1}, we can construct an interval $[\tau,\tau +\epsilon]$ such that $u_{\widetilde \tau}\geq u$ for all $\widetilde \tau \in [\tau,\tau +\epsilon]$. Let us define the quantity
\begin{equation} \label{pmmgsi.eq3}
\bar \gamma =\sup \{ \gamma : \forall \hat \tau \in [\tau,\gamma ],\; u_{\hat \tau}\geq u\}.
\end{equation}
We claim that $\bar \gamma =+\infty$. If not, $\bar \gamma < +\infty$ and by continuity we have $u_{\bar\gamma}\geq u$. Recall that, from the definition of $\bar \gamma$, we have
\begin{equation} \label{pmmgsi.eq4}
u_{\hat\tau}\geq u \quad \forall \hat \tau \in [\tau,\bar \gamma ].
\end{equation}
Therefore, to get a contradiction, it is sufficient to construct $\epsilon_0$ such that
\begin{equation} \label{pmmgsi.eq5}
u_{\bar \gamma +\epsilon}\geq u, \quad \forall \epsilon \in [0,\epsilon_0] .
\end{equation}
Since $\bar \gamma>0$ and $u_{\bar \gamma}\ge u$, we can apply Lemma \ref{pmmgsi.lemstrict} to obtain
\begin{equation}
u(x+\bar\gamma)> u(x)\ \ \forall \ \ x \in \bar\Omega. \label{pmmgsi.eq6}
\end{equation}
Now we apply Lemma \ref{pmmgsi.lem1} to find the desired $\epsilon_0>0$, which contradicts the definition of $\bar\gamma$. Therefore $\bar\gamma=+\infty$, that is,
$$
u_{\hat\tau}\geq u, \quad \forall \hat \tau \in [\tau,+\infty),
$$
which proves Proposition \ref{pmmgsi.prop1}.
\end{proof}

Let us now turn our attention to the proofs of the technical lemmas.

\begin{proof}[Proof of Lemma \ref{pmmgsi.lemstrict}]
Using the argument in the proof of the nonlinear comparison principle (Theorem \ref{nlcp}), one has: either
\begin{equation} \label{pmmgsi.eq7}
u(x+\tau)> u(x)\quad \forall x \in \bar\Omega,
\end{equation}
or $u_\tau \equiv u$ in $\bar \Omega$. The latter is impossible, since for any positive $\tau$,
$$
\alpha=u(r) < u(r+\tau)=u_\tau(r).
$$
\end{proof}

\begin{proof}[Proof of Lemma \ref{pmmgsi.lem1}]
Let $u$ be a solution of \eqref{pmmgsi.eq1} and assume that
\begin{gather*}
u_{\tau}\geq u,\\
u(x+\tau)> u(x)\ \ \forall x \in \bar\Omega,
\end{gather*}
for a given $\tau>0$. Choose $M, \delta$ and $\epsilon$ such that \eqref{pmmg.si1}-\eqref{pmmg.si3} hold. Since $u$ is continuous, we can find $\epsilon_0$ such that for all $\epsilon\in[0,\epsilon_0]$ we have
$$
u(x+\tau+\epsilon)> u(x) \quad \text{for } x \in [r,M+1].
$$
Choose $\epsilon_1$ such that for all $\epsilon\in[0,\epsilon_1]$ we have
$$
u(x+\tau+\epsilon)+\frac{\delta}{2}> u(x)\quad \forall x \in \bar\Omega.
$$
Let $\epsilon_3=\min\{\epsilon_0,\epsilon_1\}$. Observe that for all $\epsilon\in[0,\epsilon_3]$, $b:=\tau +\epsilon$ and $a=\frac{\delta}{2}$ satisfy the assumptions \eqref{pmmg.sib1} and \eqref{pmmg.sib2} of Lemma \ref{pmmg.silemslid}. Applying now Lemma \ref{pmmg.silemslid} for each $\epsilon\in[0,\epsilon_3]$, we get $u_{\tau+\epsilon}\geq u$. Thus, we end up with
$$
u_{\widetilde \tau}\geq u, \quad \forall\, \widetilde \tau \in [\tau,\tau+\epsilon_3],
$$
which completes the proof of Lemma \ref{pmmgsi.lem1}.
\end{proof}

\noindent\textbf{Step Three:}
From the first step and Proposition \ref{pmmgsi.prop1}, we can define the quantity
\begin{equation} \label{pmmgsi.inf}
\tau^*=\inf\{\tau >0 : \forall \widetilde \tau >\tau,\; u_{\widetilde\tau}\geq u\}.
\end{equation}
We claim that
\begin{claim}
$\tau^*=0$. \label{pmmgsi.cla1}
\end{claim}
Observe that this claim implies the monotonicity of $u$, which concludes the proof of Theorem \ref{pmmgsi.thm0}.

\begin{proof}[Proof of Claim \ref{pmmgsi.cla1}]
We argue by contradiction and suppose that $\tau^*> 0$. We will show that, for $\epsilon$ small enough, we have
$$
u_{\tau^* -\epsilon}\geq u.
$$
Using Proposition \ref{pmmgsi.prop1}, we will then have
$$
u_{\widetilde \tau}\geq u \quad \forall\, \widetilde \tau \geq\tau^* -\epsilon,
$$
which contradicts the definition of $\tau^*$. Now, we start the construction. By definition of $\tau^*$ and using continuity, we have $u_{\tau^*}\geq u $. Therefore, from Lemma \ref{pmmgsi.lemstrict}, we have
$$
u(x+\tau^*)> u(x), \quad \text{for all } x \in \bar\Omega.
$$
Thus, in the compact set $[r,M+1]$, we can find $\epsilon_1>0$ such that
$$
\forall \epsilon \in [0,\epsilon_1),\quad u(x+\tau^*-\epsilon) > u(x) \quad \text{in }[r,M+1].
$$
Since
$$
u_{\tau^*}+\frac{\delta}{2}>u \quad \text{in } \bar \Omega,
$$
and $\lim_{x\to +\infty}(u_{\tau^*}-u) =0$, we can choose $\epsilon_2$ such that for all $\epsilon \in [0,\epsilon_2)$ we have
$$
u(x+\tau^*-\epsilon)+\frac{\delta}{2} > u(x) \quad \text{for all } x \in \bar \Omega.
$$
Let $\epsilon \in (0,\epsilon_3)$, where $\epsilon_3=\min\{\epsilon_1,\epsilon_2\}$. We can then apply Lemma \ref{pmmg.silemslid} with $u_{\tau^*-\epsilon}$ and $u$ to obtain the desired result.
\end{proof}

\subsection{Uniqueness}
The uniqueness of the solution of \eqref{pmmgsi.eq1} essentially follows from the argument of Step 3 above. Let $u$ and $v$ be two solutions of \eqref{pmmgsi.eq1}. Using the nonlinear comparison principle, we can define the real number
\begin{equation} \label{pmmgsi.tinf}
\tau^{**}=\inf\{\tau\ge 0 \,|\, u_\tau \geq v \},
\end{equation}
and make the following claim.
\begin{claim}
$\tau^{**}= 0$. \label{pmmgsi.tcla1.1}
\end{claim}
\begin{proof}
In this context, the argument of Step 3 above holds as well, using $u_{\tau^{**}}$ and $v$ instead of $u_{\tau^{*}}$ and $u$.
\end{proof}
Thus $u\ge v$. Since $u$ and $v$ are both solutions, interchanging $u$ and $v$ in the above argument yields $v\ge u$. Hence, $u\equiv v$, which proves the uniqueness of the solution.

\begin{remark} \label{rmk5.1} \rm
Since the proof of Theorem \ref{pmmgsi.thm0} mostly relies on the application of the nonlinear comparison principle, by Remark \ref{pmmg.rem.nlcp} the assumption made on $f$ can be relaxed.
\end{remark}

\subsection*{Acknowledgments}
I would like to warmly thank Professor Pascal Autissier for enlightening discussions and his constant support. I would also like to thank Professor Louis Dupaigne for his precious advice.

\begin{thebibliography}{00}

\bibitem{AB} Giovanni Alberti and Giovanni Bellettini.
\newblock A nonlocal anisotropic model for phase transitions. {I}. {T}he optimal profile problem.
\newblock {\em Math. Ann.}, 310(3): 527--560, 1998.

\bibitem{AW} D.~G. Aronson and H.~F. Weinberger.
\newblock Multidimensional nonlinear diffusion arising in population genetics.
\newblock {\em Adv. in Math.}, 30(1): 33--76, 1978.

\bibitem{BFRW} Peter~W. Bates, Paul~C. Fife, Xiaofeng Ren, and Xuefeng Wang.
\newblock Traveling waves in a convolution model for phase transitions.
\newblock {\em Arch. Rational Mech. Anal.}, 138(2): 105--136, 1997.

\bibitem{BL} H.~Berestycki and B.~Larrouturou.
\newblock Quelques aspects math\'ematiques de la propagation des flammes pr\'em\'elang\'ees.
\newblock In {\em Nonlinear partial differential equations and their applications. Coll\`ege de France Seminar, Vol.\ X (Paris, 1987--1988)}, volume 220 of {\em Pitman Res. Notes Math. Ser.}, pages 65--129. Longman Sci. Tech., Harlow, 1991.

\bibitem{BLL} H.~Berestycki, B.~Larrouturou, and P.-L. Lions.
\newblock Multi-dimensional travelling-wave solutions of a flame propagation model.
\newblock {\em Arch. Rational Mech. Anal.}, 111(1): 33--49, 1990.

\bibitem{BN1} H.~Berestycki and L.~Nirenberg.
\newblock On the method of moving planes and the sliding method.
\newblock {\em Bol. Soc. Brasil. Mat. (N.S.)}, 22(1): 1--37, 1991.

\bibitem{BN2} Henri Berestycki and Louis Nirenberg.
\newblock Travelling fronts in cylinders.
\newblock {\em Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire}, 9(5): 497--572, 1992.

\bibitem{CC} Jack Carr and Adam Chmaj.
\newblock Uniqueness of travelling waves for nonlocal monostable equations.
\newblock {\em Proc. Amer. Math. Soc.}, 132(8): 2433--2439 (electronic), 2004.

\bibitem{Ch} Xinfu Chen.
\newblock Existence, uniqueness, and asymptotic stability of traveling waves in nonlocal evolution equations.
\newblock {\em Adv. Differential Equations}, 2(1): 125--160, 1997.

\bibitem{CG} Xinfu Chen and Jong-Sheng Guo.
\newblock Uniqueness and existence of traveling waves for discrete quasilinear monostable dynamics. \newblock {\em Math. Ann.}, 326(1): 123--146, 2003. \bibitem{CER} Carmen Cortazar, Manuel Elgueta, and Julio~D. Rossi. \newblock A nonlocal diffusion equation whose solutions develop a free boundary. \newblock {\em Ann. Henri Poincar\'e}, 6(2): 269--281, 2005. \bibitem{CD} J{\'e}r{\^o}me Coville and Louis Dupaigne. \newblock Propagation speed of travelling fronts in non local reaction-diffusion equations. \newblock {\em Nonlinear Anal.}, 60(5): 797--819, 2005. \bibitem{Co1} Jérôme Coville. \newblock On the monotone behavior of solution of nonlocal reaction-diffusion equation. \newblock {\em Ann. Mat. Pura Appl. (4)}, To appear, 2005. \bibitem{Co2} Jérôme Coville. \newblock \'equation de réaction diffusion nonlocale. \newblock {\em Thése de L'Université Pierre et Marie Curie}, Nov. 2003. \bibitem{DGP} A.~De~Masi, T.~Gobron, and E.~Presutti. \newblock Travelling fronts in non-local evolution equations. \newblock {\em Arch. Rational Mech. Anal.}, 132(2): 143--205, 1995. \bibitem{DOPT1} A.~De~Masi, E.~Orlandi, E.~Presutti, and L.~Triolo. \newblock Uniqueness and global stability of the instanton in nonlocal evolution equations. \newblock {\em Rend. Mat. Appl. (7)}, 14(4): 693--723, 1994. \bibitem{EMc} G.~Bard Ermentrout and J.~Bryce McLeod. \newblock Existence and uniqueness of travelling waves for a neural network. \newblock {\em Proc. Roy. Soc. Edinburgh Sect. A}, 123(3): 461--478, 1993. \bibitem{F1} Paul~C. Fife. \newblock {\em Mathematical aspects of reacting and diffusing systems}, volume~28 of {\em Lecture Notes in Biomathematics}. \newblock Springer-Verlag, Berlin, 1979. \bibitem{F2} Paul~C. Fife. \newblock An integrodifferential analog of semilinear parabolic {PDE}s. \newblock In {\em Partial differential equations and applications}, volume 177 of {\em Lecture Notes in Pure and Appl. Math.}, pages 137--145. Dekker, New York, 1996. \bibitem{Fi} R.~A. Fisher. \newblock {\em The genetical theory of natural selection}. \newblock Oxford University Press, Oxford, variorum edition, 1999. \newblock Revised reprint of the 1930 original, Edited, with a foreword and notes, by J. H.\ Bennett. \bibitem{GT} David Gilbarg and Neil~S. Trudinger. \newblock {\em Elliptic partial differential equations of second order}. \newblock Classics in Mathematics. Springer-Verlag, Berlin, 2001. \newblock Reprint of the 1998 edition. \bibitem{GK} Brian~H. Gilding and Robert Kersner. \newblock {\em Travelling waves in nonlinear diffusion-convection reaction}. \newblock Progress in Nonlinear Differential Equations and their Applications, 60. Birkh\"auser Verlag, Basel, 2004. \bibitem{KPP} A.~N. Kolmogorov, I.~G. Petrovsky, and N.~S. Piskunov. \newblock \'etude de l'\'equation de la diffusion avec croissance de la quantit\'e de mati\`ere et son application \`a un probl\`eme biologique. \newblock {\em Bulletin Universit\'e d'\'Etat \`a Moscow (Bjul. Moskowskogo Gos. Univ)}, S\'erie Internationale(Section A): 1--26, 1937. \bibitem{M} J.~D. Murray. \newblock {\em Mathematical biology}, volume~19 of {\em Biomathematics}. \newblock Springer-Verlag, Berlin, second edition, 1993. \bibitem{PW} Murray~H. Protter and Hans~F. Weinberger. \newblock {\em Maximum principles in differential equations}. \newblock Prentice-Hall Inc., Englewood Cliffs, N.J., 1967. \bibitem{Sch} Konrad Schumacher. \newblock Travelling-front solutions for integro-differential equations. {I}. \newblock {\em J. Reine Angew. Math.}, 316: 54--70, 1980. 
\bibitem{S} Panagiotis~E. Souganidis. \newblock Interface dynamics in phase transitions. \newblock In {\em Proceedings of the International Congress of Mathematicians, Vol.\ 1, 2 (Z\"urich, 1994)}, pages 1133--1144, Basel, 1995. Birkh\"auser. \bibitem{Ve} Jos{\'e}~M. Vega. \newblock On the uniqueness of multidimensional travelling fronts of some semilinear equations. \newblock {\em J. Math. Anal. Appl.}, 177(2): 481--490, 1993. \bibitem{W1} H.~F. Weinberger. \newblock Long-time behavior of a class of biological models. \newblock {\em SIAM J. Math. Anal.}, 13(3): 353--396, 1982. \bibitem{ZFK} J.~B. Zeldovich and D.~A. Frank-Kamenetskii. \newblock A theory of thermal propagation of flame. \newblock {\em Acta Physiochimica URSS}, S\'erie Internationale(9), 1938. \end{thebibliography} \end{document}