\documentclass[reqno]{amsart} \usepackage{hyperref} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2004(2004), No. 27, pp. 1--13.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2004 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2004/27\hfil A radially symmetric anti-maximum principle] {A radially symmetric anti-maximum principle and applications to fishery management models} \author[Junping Shi\hfil EJDE-2004/27\hfilneg] {Junping Shi} \address{Junping Shi \hfill\break Department of Mathematics, College of William and Mary, Williamsburg, VA 23185, USA, and \hfill\break Department of Mathematics, Harbin Normal University, Harbin, Heilongjiang, China} \email{shij@math.wm.edu} \date{} \thanks{Submitted December 16, 2003. Published February 25, 2004.} \subjclass[2000]{34B05, 34B24, 92D25} \keywords{Anti-maximum principle, Sturm-Liouville comparison lemma, \hfill\break\indent nonlinear boundary value problem} \begin{abstract} For a boundary-value problem of an ordinary differential equation, we prove that the anti-maximum principle holds when the forcing term satisfies an integral inequality. As applications, we consider linear and nonlinear models arising from fishery management problems. \end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \section{Introduction} The maximum principle is one of the most important tools to study linear and nonlinear elliptic equations. Let $L$ be a uniformly elliptic operator, \begin{equation} Lu=\sum_{i,j=1}^n a_{ij} \frac{\partial^2 u}{\partial x_i \partial x_j}+\sum_{i=1}^n a_i \frac{\partial u}{\partial x_i}+au, \end{equation} where $a_{ij}\in C(\overline{\Omega})$, $a_{ij}=a_{ji}$, and $\sum_{i,j=1}^n a_{ij}(x) \xi^i \xi^j>0$ for $x\in \overline{\Omega}$ and $\xi=(\xi^i)\in \mathbb{R}^n\backslash \{0\}$, and $a_i,a\in L^{\infty}(\Omega)$. We consider a Dirichlet boundary-value problem \begin{equation}\label{0.0} \begin{gathered} Lu+\lambda m u=f, \quad x\in \Omega, \\ u=0, \quad x\in \partial\Omega, \end{gathered} \end{equation} where $m\in L^{\infty}(\Omega)$. The maximum principle holds if, for $f>0$, the solution $u$ of \eqref{0.0} (if it exists) is negative. It is known that the maximum principle holds for any $f>0$ if and only if $\lambda<\lambda_1$, the principal eigenvalue of the homogeneous equation $L\phi+\lambda m \phi=0$. When $\lambda$ crosses $\lambda_1$, it was proved by Cl\'{e}ment and Peletier \cite{CP} (for the case $m\equiv 1$) and Hess \cite{H} (for sign-changing $m$) that for $f>0$ and $\lambda\in (\lambda_1,\lambda_1+\delta_f)$, the solution $u$ of \eqref{0.0} is positive. This phenomenon is called the anti-maximum principle. More general anti-maximum principles are proved in \cite{B}, \cite{T}, \cite{CS}, \cite{AG} and \cite{S}. In particular, the author of the present paper shows in \cite{S} that the set of nontrivial solutions of the equation $Lu+\lambda m u=(\lambda-\lambda_1)^2 f$ near $(\lambda_1,0)$ is a curve $\{(\lambda,u(\lambda))\}\subset \mathbb{R}\times C^{2,\alpha}(\overline{\Omega})$, and $u(\lambda)\approx -(\lambda-\lambda_1)\phi_1$ where $\phi_1$ is a multiple of the positive principal eigenfunction. That provides an explanation of the transition from the maximum principle to the anti-maximum principle.
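As a simple explicit illustration of this transition, consider the one-dimensional problem
\[
u''+\lambda u=1, \quad r\in (-1,1), \qquad u(-1)=u(1)=0,
\]
for which $\lambda_1=\pi^2/4$ and $\lambda_2=\pi^2$. For $\lambda>0$ with $\cos\sqrt{\lambda}\ne 0$, the solution is
\[
u(\lambda,r)=\frac{1}{\lambda}\Big(1-\frac{\cos(\sqrt{\lambda}\,r)}{\cos\sqrt{\lambda}}\Big).
\]
Since $\cos$ is strictly decreasing on $[0,\pi]$, we have $\cos(\sqrt{\lambda}\,r)>\cos\sqrt{\lambda}$ for $|r|<1$ and $0<\lambda<\pi^2$; as $\cos\sqrt{\lambda}>0$ for $0<\lambda<\pi^2/4$ and $\cos\sqrt{\lambda}<0$ for $\pi^2/4<\lambda<\pi^2$, it follows that $u(\lambda,r)<0$ on $(-1,1)$ when $0<\lambda<\lambda_1$ (the maximum principle), while $u(\lambda,r)>0$ on $(-1,1)$ when $\lambda_1<\lambda<\lambda_2$. In this example the anti-maximum principle in fact persists on the whole interval $(\lambda_1,\lambda_2)$, which is the phenomenon studied below.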
In this paper, we discuss the question of whether the anti-maximum principle holds for $\lambda>\lambda_1$ beyond a small interval $(\lambda_1,\lambda_1+\delta_f)$ for certain $f$. In particular, we are interested in determining for which $f$ the anti-maximum principle holds for all $\lambda\in (\lambda_1,\lambda_2)$, where $\lambda_2$ is the second eigenvalue of $L\phi+\lambda m \phi=0$. (In general, $\lambda_2$ may not be a real number, but in the situations we will consider, $L$ is always self-adjoint, so $\lambda_2$ is real.) For general $f>0$ the anti-maximum principle obviously fails as $\lambda\to \lambda_2^-$, since the solution $u_f$ of \eqref{0.0} satisfies $u_f\approx (\lambda-\lambda_2)^{-1}\phi_2$ (a sign-changing function) if $\int_{\Omega}f \phi_2 dx\ne 0$. Thus a necessary condition for the anti-maximum principle to be extended up to $\lambda=\lambda_2$ is that $\int_{\Omega}f \phi_2 dx= 0$. For many domains with symmetry, it can be proved that $\phi_1$ is symmetric, and $\phi_2$ is anti-symmetric. Thus the necessary condition can be fulfilled if $f$ has the same symmetry as $\Omega$, and such situations arise often in applications. So we will consider the anti-maximum principle when $\Omega$, $L$ and $f$ have a compatible symmetry. First we consider the one-dimensional case: a Sturm-Liouville boundary-value problem \begin{equation}\label{1.1} \begin{gathered}[c] [p(r)u']'+s(r)u +\lambda q(r) u=f(r), \quad r\in (-1,1),\\ u(-1)=u(1)=0. \end{gathered} \end{equation} Here $p,p',s,q,f$ are continuous, $p$ and $q$ are positive, and $p,s,q,f$ are even functions. For the homogeneous equation, it is well-known that the principal eigenfunction $\phi_1$ is an even function with one sign, and the eigenfunction $\phi_2$ corresponding to $\lambda_2$ is an odd function. For $f$ satisfying an integral constraint \eqref{2.2c}, we show that the anti-maximum principle holds for $\lambda\in (\lambda_1,\lambda_2]$. In the special case of \begin{equation}\label{1.2} \begin{gathered} u''+\lambda u=f(r), \quad r\in (-1,1), \\ u(-1)=u(1)=0, \end{gathered} \end{equation} we show that the anti-maximum principle holds for all $\lambda \in (\lambda_1,\lambda_2]$ if\\ $\int_0^1 f(r) \cos(\pi r) dr\ge 0$. In \cite{K}, Korman proved that the anti-maximum principle holds for $\lambda=\lambda_2$ and $f$ satisfying the same integral inequality. Our result is much more general (for the general Sturm-Liouville problem \eqref{1.1} instead of \eqref{1.2}), and the proof is also different. We point out that an alternative proof of the result can also be given via the Green's function of the problem, by using the ideas in, for example, Schr\"oder \cite{Sc}. Our second result is about the radially symmetric solutions of a Schr\"{o}dinger type equation \begin{equation}\label{1.3} \begin{gathered} \Delta u +K(x)u+\lambda V(x)u =f(x),\quad x\in B^n, \\ u = 0, \quad x\in \partial B^n, \end{gathered} \end{equation} where $B^n$ is the unit ball in $\mathbb{R}^n$ with $n\ge 2$, and $V,K$ and $f$ are positive and radially symmetric. The principal eigenfunction $\phi_1$ of the homogeneous equation is radially symmetric and is of one sign. We show that the anti-maximum principle holds for $\lambda\in (\lambda_1,\lambda^*]$ for some $\lambda^*<\lambda_2$ when an integral inequality is satisfied.
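Recall that for a radially symmetric function $u=u(r)$, $r=|x|$, the Laplacian reduces to
\[
\Delta u=u''(r)+\frac{n-1}{r}\,u'(r)=\frac{1}{r^{n-1}}\big(r^{n-1}u'(r)\big)';
\]
this identity is what converts the radially symmetric problem \eqref{1.3} into a one-dimensional equation of Sturm-Liouville type.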
Note that the solution $u_f$ of \eqref{1.3} in this case is also radially symmetric, and thus satisfies a Sturm-Liouville problem: \begin{equation}\label{1.4} \begin{gathered} (r^{n-1}u')'+r^{n-1}K(r)u+\lambda r^{n-1}V(r)u=r^{n-1}f(r), \quad r\in (0,1), \\ u'(0)=u(1)=0. \end{gathered} \end{equation} \begin{figure}[ht] \begin{center} \setlength{\unitlength}{1mm} \begin{picture}(50,38)(0,0) \put(0,0){\line(1,0){45}} \put(-.85,35){$\uparrow$} \put(0,0){\line(0,1){35}} \put(44,-.85){$\rightarrow$} \qbezier(0,0)(70,14)(0,32) \put(34.5,0){$|$} \put(33,-3){$c_2$} \put(45,-3){$c$} \put(12,21){$u_1(\cdot,c)$} \put(12,8){$u_2(\cdot,c)$} \put(-4,36){$u$} \end{picture} \end{center} \caption{Precise bifurcation diagram for $\lambda_1<a<\lambda_2$} \end{figure} Finally, we consider a nonlinear boundary-value problem arising from fishery management models: \begin{equation}\label{1.5} \begin{gathered} u''+au-bu^2-ch(x)=0, \quad x\in (-1,1), \\ u(-1)=u(1)=0, \end{gathered} \end{equation} where $a>0$, $b\ge 0$, $c>0$, and $h(x)$ is an even non-negative function on $[-1,1]$. The solutions of \eqref{1.5} are the steady state solutions of a reaction-diffusion equation (with spatial dimension $n=1$) \begin{equation}\label{1.6} \begin{gathered} \frac{\partial u}{\partial t}=\Delta u +au-bu^2-ch(x), \quad (t,x)\in (0,T)\times\Omega; \\ u(t, x) = 0, \quad (t,x)\in (0,T)\times\partial \Omega; \\ u(0,x)=u_0(x)\ge 0, \quad x\in \Omega, \end{gathered} \end{equation} where $\Omega$ is a smooth domain in $\mathbb{R}^n$. Here $u(t,x)$ is the population density of a fish species, $\Omega\subset \mathbb{R}^n$ ($n\ge 1$) is the habitat of the fish; the population is assumed to have a logistic growth when $b>0$, and a Malthusian growth when $b=0$; $c\cdot h(x)$ represents the harvesting effect, and we assume that $h(x)\ge 0$ for $x\in \overline{\Omega}$ and $\max_{x\in \overline{\Omega}} h(x)=1$ (thus $h(\cdot)$ determines the spatial fishing pattern, and $c$ determines the quantity of the fishing). The steady state solutions of \eqref{1.6} were studied by Oruganti, Shivaji and the author in \cite{OSS}. In particular, it was proved in \cite{OSS} that when $b>0$ and $a\in (\lambda_1,\lambda_1+\delta)$ for some $\delta>0$, there exists $c_2>0$ such that \eqref{1.6} has exactly two positive steady state solutions when $c\in (0,c_2)$, exactly one positive steady state solution when $c=c_2$, and no non-negative steady state solution when $c>c_2$ (see Figure 1). This result is valid for a general smooth domain $\Omega$ but only for $a\in (\lambda_1,\lambda_1+\delta)$, since we use a perturbation argument and the classical anti-maximum principle. In Section 4, we will show that a similar result can be extended to $a\in (\lambda_1,\lambda_2)$ and the one-dimensional case \eqref{1.5} with a more restrictive but natural $h(x)$, by using the ideas in Section 2 and a bifurcation approach. We will also consider the case when $b=0$ (Malthusian growth). \section{One-Dimensional Problem} We consider a linear non-homogeneous Sturm-Liouville boundary-value problem \begin{equation}\label{2.1} \begin{gathered}[c] [p(r)u']'+s(r)u+ \lambda q(r) u=f(r), \quad r\in (-1,1), \\ u(-1)=u(1)=0. \end{gathered} \end{equation} Here we assume that \begin{enumerate} \item [(A1)] $p,p',s,q,f$ are continuous in $[-1,1]$; \item [(A2)] $p,s,q,f$ are all even functions, {\it i.e.}, $g(-r)=g(r)$ for $g=p,s,q,f$; \item [(A3)] $q(r)> 0$ for $r\in [-1,1]$; \item [(A4)] $p(r)> 0$ for $r\in [-1,1]$; \item [(A5)] $f(r)\ge 0$ for $r\in [-1,1]$.
\end{enumerate} It is well-known that if (A1), (A3) and (A4) are satisfied, then the homogeneous equation \begin{equation}\label{2.2} \begin{gathered}[c] [p(r)\phi']'+s(r)\phi+\lambda q(r) \phi=0, \quad r\in (-1,1),\\ \phi(-1)=\phi(1)=0, \end{gathered} \end{equation} has a sequence of eigenvalues $\{\lambda_i\}_{i=1}^{\infty}$, such that $\lambda_i<\lambda_{i+1}$, $\lim_{i\to\infty}\lambda_i=\infty$, and the eigenfunction $\phi_i$ corresponding to $\lambda_i$ changes sign exactly ($i-1$) times in $(-1,1)$, and all zeros of $\phi_i$ are simple. If in addition (A2) is satisfied, one can easily show that $\phi_1$ is an even function of one sign on $[-1,1]$, and $\phi_2$ is an odd function on $[-1,1]$ which only changes sign at $r=0$. Similarly, the homogeneous equation with no-flux boundary condition \begin{equation}\label{2.1a} \begin{gathered}[c] [p(r)\phi']'+s(r)\phi+\lambda q(r) \phi=0, \quad r\in (-1,1), \\ \phi'(-1)=\phi'(1)=0, \end{gathered} \end{equation} has a sequence of eigenvalues $\{\lambda_i^N\}_{i=1}^{\infty}$, such that $\lambda_i^N<\lambda_{i+1}^N$, $\lim_{i\to\infty}\lambda_i^N=\infty$, and the eigenfunction $\phi_i^N$ corresponding to $\lambda_i^N$ satisfies that $(\phi_i^N)'$ changes sign exactly ($i-1$) times in $(-1,1)$, and all zeros of $(\phi_i^N)'$ are simple. We recall a standard Sturm comparison lemma (see, for example, \cite{OS}). \begin{lemma} \label{lem:6.1} Let $Lu(t)=[p(t)u'(t)]'+q(t)u(t)$, where $p(t)$ and $q(t)$ are continuous in $[a,b]$ and $p(t)\ge 0$, $t \in [a,b]$. Suppose $Lw(t)=0$, $w \not\equiv 0$. \begin{enumerate} \item If there exists $v \in C^2[a,b]$ such that $v(t)\ge 0$ and $Lv(t)\le (\not \equiv) 0$, then $w$ has at most one zero in $[a,b]$. \item If there exists $v \in C^2[a,b]$ such that $v(t)\ge 0$ and $Lv(t)\ge (\not \equiv) 0$, and $v(a)=v(b)=0$, then $w$ has at least one zero in $(a,b)$. \end{enumerate} \end{lemma} To study the solution set $(\lambda,u)$ of \eqref{2.1}, we first collect a few well-known facts about the solutions of \eqref{2.1}: \begin{lemma}\label{lem:6.2} Assume that (A1)-(A5) are satisfied. \begin{enumerate} \item For any $\lambda\ne \lambda_i$, \eqref{2.1} has a unique solution $u(\lambda,r)$, which is an even function; \item For $\lambda=\lambda_1$, \eqref{2.1} has no solution; \item For $\lambda=\lambda_2$, \eqref{2.1} has infinitely many solutions, and it has a unique even solution $u(\lambda,\cdot)$; \item When $\lambda<\lambda_1$, then $u(\lambda,r)<0$ for $r\in (-1,1)$, $u_r(\lambda,-1)<0$; \item There exists $\delta_f>0$ such that for $\lambda\in (\lambda_1,\lambda_1+\delta_f)$, $u(\lambda,r)>0$ for $r\in (-1,1)$, $u_r(\lambda,-1)>0$. \end{enumerate} \end{lemma} Consider the homogeneous equation \begin{equation}\label{2.2a} \begin{gathered}[c] [p(r)\varphi']'+s(r)\varphi+\lambda q(r) \varphi=0, \quad r\in (-1,1),\\ \varphi'(0)=0, \quad \varphi(0)=1>0. \end{gathered} \end{equation} From the existence and uniqueness of the solution of the initial value problem, \eqref{2.2a} has a unique solution $\varphi(\lambda,r)$ for any $\lambda>0$, and $\varphi(\lambda,r)$ is an even function. Moreover, from Lemma \ref{lem:6.1}, when $\lambda_1<\lambda\le \lambda_2$, there exists $r_0\in (0,1)$ such that $\varphi(\lambda,r)$ satisfies \begin{equation}\label{2.2b} \varphi(\lambda,r)(r_0-r)>0, \quad r\in (0,1)\backslash \{r_0\}. \end{equation} The function $\varphi(\lambda,r)$ will play an important role in our main result.
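For instance, in the constant-coefficient case $p(r)=q(r)\equiv 1$ and $s(r)\equiv 0$ (treated in detail after Theorem \ref{thm:6.3} below), one computes $\varphi(\lambda,r)=\cos(\sqrt{\lambda}\,r)$, $\lambda_1=\pi^2/4$ and $\lambda_2=\pi^2$; for $\lambda\in (\pi^2/4,\pi^2]$ the unique zero of $\varphi(\lambda,\cdot)$ in $(0,1)$ is
\[
r_0=\frac{\pi}{2\sqrt{\lambda}}\in \big[\tfrac12,1\big),
\]
and $\varphi(\lambda,r)>0$ for $0\le r<r_0$ while $\varphi(\lambda,r)<0$ for $r_0<r\le 1$, in agreement with \eqref{2.2b}.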
We say that \eqref{2.1} satisfies the \textit{anti-maximum principle for $f$} if for $\lambda\in (\lambda_1,\lambda_2]$ the even solution $u(\lambda,r)>0$ for $r\in (-1,1)$. Our main result in this section is as follows. \begin{theorem} \label{thm:6.3} Assume that (A1)-(A5) are satisfied, and $u(\lambda,r)$ is the unique even solution of \eqref{2.1} when $\lambda\in (\lambda_1,\lambda_2]$. Let $\varphi(\lambda,r)$ be the solution of \eqref{2.2a}. Then \eqref{2.1} satisfies the anti-maximum principle for $f$ at $\lambda \in (\lambda_1,\lambda_2]$ if and only if \begin{equation}\label{2.2c} \int_{-1}^1 f(r)\varphi(\lambda,r) dr=2\int_0^1 f(r)\varphi(\lambda,r) dr\ge 0. \end{equation} Moreover $u_r(\lambda,1)<0$ if $\int_0^1 f(r)\varphi(\lambda,r) dr> 0$, and $u_r(\lambda,1)=0$ if \\ $\int_0^1 f(r)\varphi(\lambda,r) dr=0$. \end{theorem} \begin{proof} From the symmetry of the solution, $u(r)=u(\lambda,r)$ satisfies \begin{equation}\label{2.3} [p(r)u']'+s(r)u+\lambda q(r) u=f(r), \; r\in (0,1), \;\;u'(0)=u(1)=0. \end{equation} We denote $Lu=[p(r)u']'+s(r)u+\lambda q(r)u$. Suppose that $\psi$ is the solution of the initial value problem \begin{equation}\label{2.4} \begin{gathered}[c] [p(r)\psi']'+s(r)\psi+\lambda q(r) \psi=0, \quad r\in (-1,1), \\ \psi(-1)=0, \quad \psi'(-1)=a>0\,. \end{gathered} \end{equation} We claim that $\psi$ has exactly one zero in $(-1,1)$, and $\psi(1)\le 0$. First we assume that $\lambda_1<\lambda<\lambda_2$. We may assume that $r\phi_2(r)>0$ for $r\ne 0$ (replacing $\phi_2$ by $-\phi_2$ if necessary); then on $(-1,0)$, $-\phi_2\ge 0$ and $L(-\phi_2)=(\lambda_2-\lambda)q\phi_2\le 0$. Hence by Lemma \ref{lem:6.1} part 1, $\psi$ has no zero in $[-1,0]$ besides $r=-1$. Similarly, $\phi_2>0$ and $L\phi_2=(\lambda-\lambda_2)q\phi_2<0$ for $r\in (0,1)$. Thus $\psi$ has at most one zero in $[0,1]$, and has at most two zeros in $[-1,1]$. On the other hand, $\phi_1>0$, $L\phi_1=(\lambda-\lambda_1)q\phi_1>0$, and $\phi_1(-1)=\phi_1(1)=0$, so from Lemma \ref{lem:6.1} part 2, $\psi$ has at least one zero in $(-1,1)$. Therefore $\psi$ has exactly one zero in $(-1,1)$, and $\psi(1)< 0$ if $\lambda_1<\lambda<\lambda_2$. If $\lambda=\lambda_2$, then $\psi=k \phi_2$ for some $k>0$ from the uniqueness of solutions of the initial value problem, which implies that $\psi$ has exactly one zero in $(-1,1)$, and $\psi(1)= 0$. Let $I_+=\{r\in(-1,1):u(\lambda,r)>0\}$. Then $I_+$ is the union of countably many disjoint open sub-intervals of $(-1,1)$. For each $(a,b)\subset I_+$, $u>0$ and $Lu=f>0$ in $(a,b)$, and $u(a)=u(b)=0$, thus by Lemma \ref{lem:6.1} part 2, $\psi$ has at least one zero in $(a,b)$. However, from the claim above, $\psi$ has exactly one zero in $(-1,1)$. Hence $I_+$ has at most one connected component. Since $u(\lambda,\cdot)$ is even, the only connected component of $I_+$ must be symmetric about $r=0$. Therefore $I_+$ must satisfy one of the following three cases: (a) $I_+=\emptyset$; (b) $I_+=(-r_0,r_0)$ for some $r_0\in (0,1)$; or (c) $I_+=(-1,1)$. Case (a) is not possible: if $I_+=\emptyset$, then $-u\ge 0$ and $L(-u)=-f\le 0$ in $[-1,1]$, which would imply that $\psi$ has at most one zero in $[-1,1]$ by Lemma \ref{lem:6.1} part 1, but $\psi$ has two zeros (including $r=-1$) in $[-1,1)$. Therefore, for any $\lambda\in (\lambda_1,\lambda_2]$, $u(\lambda,r)$ is either positive in $(-1,1)$, or $u(\lambda,r)>0$ when $|r|< r_0$ and $u(\lambda,r)\le 0$ when $1>|r|>r_0$. If $u_r(\lambda,1)<0$, then $u(\lambda,r)>0$ for all $r\in (-1,1)$; and if $u_r(\lambda,1)>0$, then the latter case occurs.
If $u_r(\lambda,1)=0$, then from the equation, $p(1)u_{rr}(\lambda,1)=f(1)>0$, so $u$ is positive in a left neighborhood of $r=1$, and consequently positive in $(-1,1)$. Therefore, $u(\lambda,r)>0$ in $(-1,1)$ if and only if $u_r(\lambda,1)\le 0$. From \eqref{2.1} and \eqref{2.2a}, we have \begin{equation}\label{2.7} p(1)\varphi(\lambda,1)u_r(\lambda,1)=[pu'\varphi-p\varphi'u]|_0^1=\int_0^1 \varphi(\lambda,r)f(r)dr. \end{equation} Since $\varphi(\lambda,1)<0$ for any $\lambda\in (\lambda_1,\lambda_2]$ and $p(1)>0$, $u_r(\lambda,1)\le 0$ is equivalent to $\int_0^1 \varphi(\lambda,r)f(r) dr\ge 0$. The last statement in the theorem is also clear from \eqref{2.7}. \end{proof} We illustrate the result in Theorem \ref{thm:6.3} with the special case of $p(r)=q(r)\equiv 1$ and $s(r)\equiv 0$: \begin{equation}\label{2.10} u''+\lambda u=f(r), \;\;r\in (-1,1), \;\; u(-1)=u(1)=0. \end{equation} The homogeneous equation \eqref{2.2a} becomes \begin{equation}\label{2.11} \begin{gathered} \varphi''+\lambda \varphi =0, \quad r\in (-1,1), \\ \varphi'(0)=0, \quad \varphi(0)=1, \end{gathered} \end{equation} and it is easy to calculate that $\varphi(\lambda,r)=\cos (\sqrt{\lambda}r)$. The eigenvalues of \begin{equation}\label{2.12} \begin{gathered} \phi''+\lambda \phi =0, \quad r\in (-1,1), \\ \phi(-1)=\phi(1)=0, \end{gathered} \end{equation} are $\lambda_i=i^2\pi^2/4$ ($i\in \mathbb{N}$), and the corresponding eigenfunctions are $\phi_{2i-1}(r)=\cos[(2i-1)\pi r/2]$ and $\phi_{2i}(r)=\sin(i \pi r)$. The condition \eqref{2.2c} now becomes \begin{equation}\label{2.13} \int_0^1 f(r) \cos (\sqrt{\lambda}r)dr \ge 0. \end{equation} We observe that the family of functions $\{\cos(\sqrt{\lambda} r):\pi^2/4<\lambda\le \pi^2\}$ satisfies \begin{equation}\label{2.14} \frac{\partial (\cos(\sqrt{\lambda}r))}{\partial \lambda}=-\frac{r\sin (\sqrt{\lambda}r)}{2\sqrt{\lambda}}<0, \end{equation} for $r\in (0,1)$ and $\lambda \in (\pi^2/4,\pi^2]$. We define a functional \begin{equation}\label{2.15} I(\lambda,f)= \int_0^1 f(r) \cos (\sqrt{\lambda}r)dr, \end{equation} for $\lambda \in [\pi^2/4,\pi^2]$. Then for any even positive function $f$, $I(\lambda,f)$ is decreasing in $\lambda$. Hence we obtain a stronger result for \eqref{2.10}: \begin{theorem} Suppose that $f\in C^0[-1,1]$, $f(-r)=f(r)$ and $f(r)\ge (\not\equiv) 0$ for $|r|\le 1$. Let $u(\lambda,r)$ be the unique even solution of \eqref{2.10} for $\lambda \in (\pi^2/4,\pi^2]$. \begin{enumerate} \item If $ \int_0^1 f(r) \cos (\pi r)dr\ge 0$, then $u(\lambda,r)>0$ for $r\in (-1,1)$ and all $\lambda \in (\pi^2/4,\pi^2]$; \item If $ \int_0^1 f(r) \cos (\pi r)dr< 0$, then there exists $\lambda^*\in (\pi^2/4,\pi^2)$ such that $u(\lambda,r)>0$ for $r\in (-1,1)$ and $\lambda \in (\pi^2/4, \lambda^*]$, and $u(\lambda,r)$ changes sign exactly twice in $(-1,1)$ for $\lambda\in (\lambda^*,\pi^2]$. \end{enumerate} \end{theorem} \section{Radially Symmetric Problem} In this section, we consider the anti-maximum principle for the equation \begin{equation}\label{3.1} \begin{gathered} \Delta u +K(x)u+\lambda V(x)u =f(x),\quad x\in B^n, \\ u = 0, \quad x\in \partial B^n, \end{gathered} \end{equation} where $B^n$ is the unit ball in $\mathbb{R}^n$, $n\ge 2$. We assume that $V$, $K$ and $f$ satisfy \begin{enumerate} \item[(B1)] $V,K,f\in C(\overline{B^n})$; \item[(B2)] $V,K$ and $f$ are radially symmetric; \item[(B3)] $V(x)>0$ for $x\in B^n$; \item[(B4)] $f(x)>0$ for $x\in B^n$.
\end{enumerate} For the homogeneous equation \begin{equation}\label{3.2} \begin{gathered} \Delta \phi +K(x)\phi+\lambda V(x)\phi =0, \quad x\in B^n, \\ \phi= 0, \quad x\in \partial B^n, \end{gathered} \end{equation} it is well-known that the principal eigenfunction $\phi_1$ is of one sign and is radially symmetric. In general the second eigenvalue $\lambda_2$ is not simple. It was shown in \cite{LN} that three cases can happen for the solution space $W$ of $[\Delta +K(x)+\lambda_2 V(x)]\phi=0$: \begin{enumerate} \item $\mathop{\rm dim}(W)=1$, $W=\mathop{\rm span}\{\phi_2\}$, and $\phi_2$ is radially symmetric; \item $\mathop{\rm dim}(W)=n$, $W=\mathop{\rm span}\{\psi_i\equiv \psi(|x|)x_i |x|^{-1}:i=1,2,\cdots,n\}$, where $x=(x_1,x_2,\cdots,x_n)$; \item $\mathop{\rm dim}(W)=n+1$, $W=\mathop{\rm span}[\{\psi_i\equiv \psi(|x|)x_i|x|^{-1}:i=1,2,\cdots,n\}\cup \{\phi_2(|x|)\}]$. \end{enumerate} For example, for $K(x)\equiv 0$ and $V(x)\equiv 1$, it is well-known that the second case above occurs, {\it i.e.} all eigenfunctions are anti-symmetric with respect to a hyperplane through the origin. Indeed, we can define $\lambda_i^R$ to be the $i$-th eigenvalue with a radially symmetric eigenfunction, and $\lambda_i^n$ to be the $i$-th eigenvalue with a non-radial eigenfunction. Then $\lambda_1=\lambda_1^R$, and the three cases above correspond to $\lambda_2=\lambda_2^R<\lambda_1^n$, $\lambda_2=\lambda_1^n<\lambda_2^R$, and $\lambda_2=\lambda_2^R=\lambda_1^n$, respectively. In all three cases, when $\lambda\in (\lambda_1,\lambda_2)$, \eqref{3.1} has a unique solution $u(x)$ which is also radially symmetric. Thus the solution $u$ satisfies \begin{equation}\label{3.3} \begin{gathered} (r^{n-1}u')'+r^{n-1}K(r)u+\lambda r^{n-1}V(r)u=r^{n-1}f(r), \quad r\in (0,1), \\ u'(0)=u(1)=0. \end{gathered} \end{equation} We define $\lambda^*$ to be the principal eigenvalue of the equation \begin{equation}\label{3.4} \begin{gathered} (r^{n-1}\varphi')'+r^{n-1}K(r)\varphi+\lambda r^{n-1}V(r)\varphi=0, \quad r\in (0,1), \\ \varphi(0)=\varphi(1)=0. \end{gathered} \end{equation} Then by applying Theorem \ref{thm:6.3}, we obtain the following result. \begin{theorem} \label{thm:3.1} Suppose that (B1)-(B4) are satisfied. Let $u(\lambda,x)$ be the unique (radially symmetric) solution of \eqref{3.1} for $\lambda\in (\lambda_1,\lambda^*]$, and let $\psi$ be the unique radially symmetric solution of the linear equation \begin{equation} \label{3.5} \begin{gathered} \Delta \psi +K(x)\psi+\lambda V(x)\psi =0, \quad x\in B^n, \\ \psi(0)= 1, \quad \nabla \psi(0)=0. \end{gathered} \end{equation} Then \eqref{3.1} satisfies the anti-maximum principle for $f$ at $\lambda \in (\lambda_1,\lambda^*]$ if and only if \begin{equation}\label{3.6} \int_{B^n} \psi(x) f(x) dx\ge 0. \end{equation} Moreover the outward normal derivative $\partial u/\partial\nu<0$ on $\partial B^n$ if $\int_{B^n} \psi(x) f(x) dx>0$, and $\nabla u=0$ on $\partial B^n$ if \\ $\int_{B^n} \psi(x) f(x) dx=0$. \end{theorem} \begin{proof} We consider the boundary-value problem \begin{equation}\label{3.7} \begin{gathered} (p(r)u')'+s(r)u+\lambda q(r)u=p(r)f(r), \quad r\in (-1,1), \\ u(-1)=u(1)=0, \end{gathered}\end{equation} where \begin{equation}\label{3.8} \begin{gathered} p(r)=\begin{cases} r^{n-1}, & r\in [0,1]; \\ (-r)^{n-1}, & r\in [-1,0]. \\ \end{cases}\\ s(r) = \begin{cases} r^{n-1}K(r), & r\in [0,1]; \\ (-r)^{n-1}K(r), & r\in [-1,0]. \\ \end{cases} \\ q(r) = \begin{cases} r^{n-1}V(r), & r\in [0,1]; \\ (-r)^{n-1}V(r), & r\in [-1,0].
\\ \end{cases} \end{gathered} \end{equation} Then the proof of Theorem \ref{thm:6.3} can be carried over, although $p(0)=q(0)=0$. \end{proof} Note that $\lambda^*$ is not an eigenvalue associated with the PDE operator $\Delta+K(x)+\lambda V(x)$. In fact, $\lambda^*$ is the second eigenvalue of the homogeneous problem associated with \eqref{3.7}: \begin{equation}\label{3.9} \begin{gathered} (p(r)\phi')'+s(r)\phi+\lambda q(r)\phi=0, \quad r\in (-1,1), \\ \phi(-1)=\phi(1)=0, \end{gathered} \end{equation} while $\lambda_2^R$ is the third eigenvalue of \eqref{3.9}. Thus $\lambda^*<\lambda_2^R$. On the other hand, it is well-known (see \cite{LN}) that the eigenfunction $\theta$ corresponding to $\lambda_1^n$ is of the form $\eta (r)\cdot (x_i/|x|)$, and $\eta$ satisfies \begin{equation}\label{3.10} \begin{gathered} (r^{n-1}\eta')'+r^{n-1}K(r)\eta+\lambda r^{n-1}V(r)\eta-(n-1)r^{n-2}\eta=0, \quad r\in (0,1), \\ \eta(0)=\eta(1)=0. \end{gathered} \end{equation} Comparing the variational characterizations of the principal eigenvalues of \eqref{3.10} and \eqref{3.4}, whose eigenfunctions $\eta$ and $\varphi$ are positive, we can see that $\lambda^*<\lambda_1^n$. Thus $\lambda^*<\lambda_2=\min (\lambda_2^R,\lambda_1^n)$. It would be an interesting question whether the anti-maximum principle holds for all $\lambda\in (\lambda_1,\lambda_2)$ as in the one-dimensional case. \section{Applications to Fishery Management Problems} In this section, we consider the equation \begin{equation}\label{4.1} \begin{gathered} u''+au-bu^2-c h(x)=0, \quad x\in (-1,1), \\ u(-1)=u(1)=0, \end{gathered} \end{equation} where $a>0$, $b\ge 0$, $c>0$, and $h(x)(\not\equiv 0)$ is an even non-negative function on $[-1,1]$ such that $\max_{x\in [-1,1]}h(x)=1$. First, as a direct application of the result in Section 2, we consider the case $b=0$. Then the solution $u$ gives the asymptotic spatial distribution of the fish population, when the population has a uniform Malthusian growth and is harvested at a constant rate $ch(x)$. In this case, when $a\ne \lambda_i$, \eqref{4.1} has a unique solution $u$, and we are interested in the question whether $u$ is positive. If that is the case, then it is not hard to show that for any sufficiently large initial population $u_0$, the population will approach this equilibrium distribution as $t\to\infty$, and thus the population will not become extinct. From Theorem \ref{thm:6.3}, we have the following result. \begin{proposition}\label{pro:4.1} Suppose that $a\in (\lambda_1,\lambda_2)$ and $b=0$, and $h(x)$ is an even function. Then the unique solution $u(x)$ of \eqref{4.1} is positive in $(-1,1)$ if and only if \begin{equation}\label{4.2} \int_{-1}^{1} h(x) \cos(\sqrt{a}x) dx=2\int_{0}^{1} h(x) \cos(\sqrt{a}x) dx\ge 0. \end{equation} \end{proposition} In particular, $u(x)$ is positive whenever \begin{equation}\label{4.3} h\in C^1[-1,1]\quad\mbox{and}\quad h'(x)\le 0\quad\mbox{for }x\in (0,1), \end{equation} since \eqref{4.3} implies \eqref{4.2} for $a\in (\lambda_1,\lambda_2)$: setting $x_0=\pi/(2\sqrt{a})\in (1/2,1)$, we have $h(x)\ge h(x_0)$ where $\cos(\sqrt{a}x)\ge 0$ and $h(x)\le h(x_0)$ where $\cos(\sqrt{a}x)\le 0$, so that $\int_{0}^{1} h(x) \cos(\sqrt{a}x) dx\ge h(x_0)\int_{0}^{1} \cos(\sqrt{a}x) dx=h(x_0)\sin(\sqrt{a})/\sqrt{a}\ge 0$. The assumption \eqref{4.3} is reasonable in the fishery business since fishermen tend to catch more fish in the interior part of the habitat, where the fish population density is higher. Note that $u(x)$ must also be an even function because of the uniqueness of the solution. Next we consider the case when $a\in (\lambda_1,\lambda_2)$, $b>0$ (logistic growth) and \eqref{4.3} is satisfied. Suppose that $u$ is a non-negative solution of \eqref{4.1}.
Then $u$ is \textit{stable} if all eigenvalues $\mu_i(u)$ of \begin{equation}\label{4.4} \varphi''+(a-2bu) \varphi=-\mu \varphi, \quad \varphi(-1)=\varphi(1)=0, \end{equation} are positive, and otherwise it is \textit{unstable}. For an unstable solution $u$, the \textit{Morse index} $M(u)$ is defined as the number of negative eigenvalues of \eqref{4.4}. It is well-known that the eigenvalues $\mu_i(u)$ can be rearranged into an increasing order: $\mu_1<\mu_2<\mu_3<\cdots\to\infty$. A solution $u$ of \eqref{4.1} is \textit{degenerate} if $\mu_i(u)=0$ for some integer $i$, and otherwise it is \textit{non-degenerate}. Our main result reads as follows. \begin{theorem} \label{thm:4.1} Suppose that $b>0$, $a\in (\lambda_1, \lambda_2)$, $h(-x)=h(x)$ and $h(x)\ge 0$ for $x\in [0,1]$, and we assume that \eqref{4.3} holds. Then there exists $c_2>0$ such that \begin{enumerate} \item \eqref{4.1} has exactly two positive solutions $u_1(\cdot,c)$ and $u_2(\cdot,c)$ for $c\in (0,c_2)$, exactly one positive solution $u_1(\cdot,c)$ for $c=c_2$, and no non-negative solution for $c>c_2$; \item $u_i(c,-x)=u_i(c,x)$ and $\partial_x u_i (c,x)<0$ for $c\in (0,c_2]$, $x\in (0,1]$ and $i=1,2$; \item The Morse index $M(u_1(\cdot,c))=0$ (stable) and $M(u_2(\cdot,c))=1$ for $c\in [0,c_2)$, and $u_1(\cdot,c_2)$ is degenerate with $\mu_1(u_1(\cdot,c_2))=0$; \item All solutions lie on a smooth curve $\Sigma$. In the $(c,u)$ space, $\Sigma$ starts from $(0,0)$, continues to the right, reaches the unique turning point at $c=c_2$ where it turns back, then continues to the left without any turning until it reaches $(0,v_{a})$, where $v_{a}$ is the unique positive solution of \eqref{4.1} with $c=0$ (see Figure 1). \end{enumerate} \end{theorem} To prove this theorem, we first prove some lemmas. \begin{lemma}\label{lem:4.2} Suppose that $b>0$, $a\in (\lambda_1, \lambda_2)$, $h(-x)=h(x)$ and $h(x)\ge 0$ for $x\in [0,1]$, and $u$ is a non-negative solution of \eqref{4.1}. Then \begin{enumerate} \item $\mu_2(u)>0$, thus the Morse index $M(u)$ is either $0$ or $1$; \item If $u$ is a degenerate solution, then $M(u)=0$ and the eigenfunction $w$ of $\mu_1(u)=0$ can be chosen to be positive. \end{enumerate} \end{lemma} \begin{proof} From the variational characterization of $\mu_2(u)$: \begin{align*} \mu_2(u) &=\inf_{T}\sup_{\varphi\in T} \frac{\int_{-1}^{1}[\varphi'^2-(a-2bu)\varphi^2] dx}{\int_{-1}^{1}\varphi^2 dx}\\ &>\inf_{T}\sup_{\varphi\in T} \frac{\int_{-1}^{1}[\varphi'^2-a\varphi^2] dx}{\int_{-1}^{1}\varphi^2 dx}=\lambda_2-a>0, \end{align*} where the infimum is taken over all two-dimensional subspaces $T$ of $H^1_0(-1,1)$. If $u$ is a degenerate solution, then $\mu_1(u)=0$ since $\mu_2(u)>0$, and $w$ can be chosen to be positive from the well-known result for the principal eigenfunction. \end{proof} \begin{lemma}\label{lem:4.3} Suppose that $b>0$, $a\in (\lambda_1, \lambda_2)$, $h(-x)=h(x)$ and $h(x)\ge 0$ for $x\in [0,1]$, $h(x)$ satisfies \eqref{4.3}, and $u$ is a non-negative even solution of \eqref{4.1}. Then either $u'(x)<0$ for $x\in (0,1]$ or there exists $x_1\in (0,1)$ such that $u'(x)\ge 0$ in $(0,x_1)$ and $u'(x)<0$ in $(x_1,1]$.
\end{lemma} \begin{proof} Since $\mu_2(u)>0$ from Lemma \ref{lem:4.2}, similarly to the argument for the function $\psi$ in the proof of Theorem \ref{thm:6.3}, we can show that the solution of \begin{equation}\label{4.7} \begin{gathered} \Psi''+(a-2bu)\Psi =0, \quad x\in (-1,1), \\ \Psi(-1)=0, \quad \Psi'(-1)=k>0, \end{gathered} \end{equation} changes sign at most once in $(-1,1)$; moreover $\Psi(1)>0$ if $u$ is stable, $\Psi(1)<0$ if $M(u)=1$, and $\Psi(1)=0$ if $\mu_1(u)=0$ (in that case, $\Psi=w$). On the other hand, $u'$ satisfies \begin{equation}\label{4.8} \begin{gathered} (u')''+(a-2bu)u'=ch'(x), \quad x\in (-1,1), \\ u'(0)=0. \end{gathered} \end{equation} At $x=1$, $u''(1)=ch(1)\ge 0$ and $u\ge 0$, thus $u'(x)\le 0$ on $(1-\delta,1]$ for some $\delta>0$. If $u'(x)\equiv 0$ on $(1-\delta_1,1]$ for some $\delta_1>0$, then $h(x)\equiv 0$ on $(1-\delta_1,1]$, and a contradiction can be reached by the Hopf boundary lemma. Thus we can assume that $u'(x)<0$ on $(1-\delta_1,1)$. Let $x_1$ be the first zero of $u'$ to the left of $x=1$. If $x_1=0$, then $u'(x)<0$ on $(0,1)$; if in addition $u'(1)=0$, then $L(u')=ch'\le 0$, where $L\phi=\phi''+(a-2bu)\phi$, $u'<0$ on $(0,1)$, and $u'(0)=u'(1)=0$; thus by Lemma \ref{lem:6.1} part 2 (applied to $-u'$), $\Psi$ has at least one zero in $(0,1)$. From the symmetry of $u$ and $h$, $\Psi$ has at least one zero in $(-1,0)$. Thus $\Psi$ has at least two zeros in $(-1,1)$. That is a contradiction. Thus $u'(1)<0$, and $u'(x)<0$ for $x\in (0,1]$. If $x_1>0$, then for the same reason as above, $u'(1)<0$. If there exists another interval $(x_2,x_3)\subset (0,1)$ such that $u'(x)<0$ on $(x_2,x_3)$ and $u'(x_2)=u'(x_3)=0$, then on the interval $(x_2,x_3)$, $L(u')=ch'\le 0$ and $u'\le 0$, so by Lemma \ref{lem:6.1} part 2 (applied to $-u'$), $\Psi$ has at least one zero in $(x_2,x_3)$, and we reach a similar contradiction as in the last paragraph. Hence $u'(x)\ge 0$ on $[0,x_1)$. \end{proof} \begin{proof}[Proof of Theorem \ref{thm:4.1}] From \cite{OSS} page 3610 Theorem 3.2, there exists $c_2>0$ such that \eqref{4.1} has a maximal solution $u_1(c,x)$ for $c\in (0,c_2)$. Moreover, from the proof of Theorem 3.2 in \cite{OSS}, $\Sigma_1=\{(c,u_1(c,\cdot)):c\in (0,c_2)\}$ is a smooth curve and $\partial_c u_1(c,x)<0$ for $x\in (-1,1)$. On the other hand, from \cite{OSS} Theorem 3.3, \eqref{4.1} has a second solution $u_2(c,x)$ for $c\in (0,c_3)$ for some $c_3\le c_2$, and $u_2(c,x)=cw(x)+o(|c|)$ as $c\to 0^+$, where $w$ is the unique solution of $w''+aw=h(x)$, $w(-1)=w(1)=0$. Since $a\in (\lambda_1,\lambda_2)$ and \eqref{4.3} implies \eqref{4.2}, Proposition \ref{pro:4.1} shows that $w(x)>0$ for $x\in (-1,1)$. Hence $u_2(c,x)=cw(x)+o(|c|)>0$ for $c\in (0,c_3)$ and $x\in (-1,1)$. We can use the implicit function theorem to continue the solution branch $\Sigma_2=\{(c,u_2(c,\cdot)):c\in (0,c_3)\}$ as long as the linearized operator $\phi\mapsto\phi''+(a-2bu_2)\phi$ is non-degenerate. Suppose $\Sigma_2$ can be extended to $c=c_4>c_3$ such that $u_2(c,x)$ is non-degenerate for $c\in (0,c_4)$. We claim that $u_2(c,x)>0$ for $c\in (0,c_4)$ and $x\in (-1,1)$ and $\partial_x u_2(c,x)\ne 0$ when $x=\pm 1$. The function $\partial_c u_2(c,x)$ satisfies the equation \begin{equation}\label{4.11} \begin{gathered} v''+[a-2bu_2(c,\cdot)]v=h(x), \quad x\in (-1,1), \\ v(-1)=v(1)=0. \end{gathered} \end{equation} The Morse index $M(u_2(c,\cdot))=M(u_2(0,\cdot))=1$ from the implicit function theorem. Thus all conditions of Theorem \ref{thm:6.3} except \eqref{2.2c} are satisfied for \eqref{4.11}, since $M(u_2(c,\cdot))=1$ means that $\lambda=0$ lies between the first two eigenvalues of the homogeneous problem associated with \eqref{4.11}, and $h$ satisfies \eqref{4.3}.
Although Theorem \ref{thm:6.3} cannot be applied here since the integral condition is hard to check, as long as $\lambda\in (\lambda_1,\lambda_2)$ the set $I_+=\{x:\partial_c u_2(c,x)>0\}$ must be one of the two cases (b) or (c) listed in the proof of Theorem \ref{thm:6.3}. In either case, $0\in I_+$ and thus $\partial_c u_2(c,0)>0$ for all $c\in (0,c_4)$. In particular, $u_2(c,0)>0$ for all $c\in (0,c_4)$. Suppose that $u_2(c,x)>0$ fails for some $c\in (0,c_4)$ and $x\in (-1,1)$. Then $c_5=\inf\{c>0:u_2(c,x)\le 0 \text{ for some $x$} \}>0$ since $u_2(c,x)>0$ for $x\in (-1,1)$ and $c\in (0,c_3)$. At $c=c_5$, either there exists $x_1\in (-1,1)$ such that $u_2(c_5,x_1)=0$ or $\partial_x u_2(c_5,\pm 1)=0$. The latter case cannot happen from Lemma \ref{lem:4.3} since $u_2(c_5,x)$ is a non-negative solution of \eqref{4.1}. In the former case, it can only happen when $x=0$ is a local minimum of $u_2(c_5,\cdot)$ from Lemma \ref{lem:4.3}, thus $x_1=0$. But this cannot happen since we have shown that $u_2(c,0)>0$ for all $c\in (0,c_4)$. Therefore such $c_5$ does not exist, and the claim holds. At $c=c_4$, $u_2(c_4,x)=\lim_{c\to c_4^-}u_2(c,x)\ge 0$ exists for $x\in (-1,1)$. From the Schauder estimates, we can show that $u_2(c,\cdot)\to u_2(c_4,\cdot)$ in $C^2[-1,1]$, thus $u_2(c_4,\cdot)$ is a non-negative solution of \eqref{4.1} when $c=c_4$. Moreover, since $\partial_x u_2(c,1)< 0$, we have $u_2(c_4,x)>0$. From the definition of $c_4$, $u_2(c_4,x)$ is degenerate, and from Lemma \ref{lem:4.2}, $\mu_1(u_2(c_4,\cdot))=0$ and the principal eigenfunction $w$ can be assumed to be positive. Hence a bifurcation theorem of Crandall-Rabinowitz \cite{CR} can be applied here as in the proof of Theorem 3.2 in \cite{OSS}, and the solution curve near $u_2(c_4,\cdot)$ can be written as $(c(s),u(s,\cdot))$ for $s\in (-\delta,\delta)$, with $c(s)=c_4+\frac12 c''(0)s^2+o(s^2)$, $u(s)=u_2(c_4,\cdot)+sw+o(|s|)$, and \begin{equation}\label{4.12} c''(0)=-\frac{2b\int_{-1}^1 w^3(x) dx}{\int_{-1}^1 h(x) w(x) dx}<0. \end{equation} Thus the solution continuum which contains $\Sigma_2$ (which we will still call $\Sigma_2$) is a curve which turns at $(c_4,u_2(c_4,\cdot))$. For $c\in (c_4-\delta_1,c_4)$, \eqref{4.1} has another solution $u_3(c,\cdot)$ on $\Sigma_2$. We denote $\Sigma_2^-=\{(c,u_2(c,\cdot)):c\in (0,c_4)\}$, and $\Sigma_2^+=\{ (c,u_3(c,\cdot)):c\in (c_4-\delta_1,c_4)\}$. $\Sigma_2^+$ can also be extended via the implicit function theorem, and from the change of stability theorem in \cite{CR}, $u_3$ is stable ($\mu_1(u_3)>0$). In fact $\Sigma_2^+$ can be extended for all $c\in (0,c_4)$ with $u_3$ always non-degenerate and stable. Suppose not; then there is $c_6\in (0,c_4)$ such that $u_3(c_6,\cdot)$ is degenerate. Then $\mu_1(u_3(c_6,\cdot))=0$ and the corresponding eigenfunction $w>0$, thus the bifurcation theorem can be applied at $(c_6,u_3(c_6,\cdot))$ and \eqref{4.12} holds; but then $c$ would attain a local maximum along the solution curve at $(c_6,u_3(c_6,\cdot))$, which is impossible since the continuation approaches this point from the right. Hence $\Sigma_2^+$ can be extended to $\{(c,u_3(c,\cdot)):c\in (0,c_4)\}$ and also to $c=0$. Moreover, we can show that $\partial_c u_3(c,x)<0$ from the proof of \cite{OSS} Theorem 3.2 since $u_3$ is stable. Thus $u_3(0,x)=\lim_{c\to 0^+} u_3(c,x)$ is a non-negative solution of \eqref{4.1} when $c=0$, which is the classical logistic equation. It is well-known that \eqref{4.1} with $c=0$ has a unique positive solution $v_a$ (see \cite{OSS} Section 2.3), and the branch $\Sigma_1$ emanates from $(0,v_a)$.
Therefore $\Sigma_2^+$ must coincide with $\Sigma_1$, $c_2=c_4$, and $u_1(c,x)=u_3(c,x)$ for $c\in (0,c_2)$. If there were any other solution for some $c\in (0,c_2)$, then the same continuation and bifurcation arguments as above would produce another solution branch reaching $c=0$. But \eqref{4.1} has only two non-negative solutions $v_a$ and $0$ when $c=0$, so no other solution exists. This concludes the proof. \end{proof} \subsection*{Remarks} (1) The results in Theorem \ref{thm:4.1} hold for a general smooth domain $\Omega$ in $\mathbb{R}^n$ and all $h\ge 0$, but only for $a\in (\lambda_1,\lambda_1+\delta)$ for some $\delta>0$. That is proved in \cite{OSS}. \\ (2) We point out that although $h(x)$ is an even function, the solution $u(x)$ of \eqref{4.1} may not satisfy $u'(x)<0$ for $x\in (0,1)$ as in the classical Gidas-Ni-Nirenberg theorem \cite{GNN}, since here $h'(x)\le 0$ on $(0,1)$. The result in \cite{GNN} applies when $h'(x)\ge 0$; thus $u_1$ or $u_2$ may be the two-peak type of solution described in Lemma \ref{lem:4.3}. \subsection*{Acknowledgement} This research is partially supported by NSF grant DMS-0314736, College of William and Mary summer research grants, and a grant from Science Council of Heilongjiang Province, China. \begin{thebibliography}{00} \bibitem{AG} Arcoya, David and G\'amez, Jos\'e L., {\it Bifurcation theory and related problems: anti-maximum principle and resonance}. Comm. Partial Differential Equations, {\bf 26} (2001), no. 9-10, 1879--1911. \bibitem{B} Birindelli, Isabeau, {\it Hopf's lemma and anti-maximum principle in general domains}. J. Differential Equations, {\bf 119} (1995), no. 2, 450--472. \bibitem{CP} Cl\'{e}ment, Ph. and Peletier, L. A., {\it An anti-maximum principle for second-order elliptic operators}. J. Differential Equations, {\bf 34} (1979), no. 2, 218--229. \bibitem{CS} Cl\'ement, Ph. and Sweers, G., {\it Uniform anti-maximum principles}. J. Differential Equations, {\bf 164} (2000), no. 1, 118--154. \bibitem{CR} Crandall, Michael G. and Rabinowitz, Paul H., {\it Bifurcation, perturbation of simple eigenvalues and linearized stability}. Arch. Rational Mech. Anal., {\bf 52} (1973), 161--180. \bibitem{GNN} Gidas, B.; Ni, Wei Ming and Nirenberg, L., {\it Symmetry and related properties via the maximum principle}. Comm. Math. Phys., {\bf 68} (1979), no. 3, 209--243. \bibitem{H} Hess, Peter, {\it An anti-maximum principle for linear elliptic equations with an indefinite weight function}. J. Differential Equations, {\bf 41} (1981), no. 3, 369--374. \bibitem{K} Korman, Philip, {\it Monotone approximations of unstable solutions}. J. Comput. Appl. Math., {\bf 136} (2001), no. 1-2, 309--315. \bibitem{LN} Lin, Chang-Shou and Ni, Wei-Ming, {\it A counterexample to the nodal domain conjecture and a related semilinear equation}. Proc. Amer. Math. Soc., {\bf 102} (1988), no. 2, 271--277. \bibitem{OSS} Oruganti, Shobha; Shi, Junping and Shivaji, Ratnasingham, {\it Diffusive logistic equation with constant yield harvesting, I: Steady states}. Trans. Amer. Math. Soc., {\bf 354} (2002), no. 9, 3601--3619. \bibitem{OS} Ouyang, Tiancheng and Shi, Junping, {\it Exact multiplicity of positive solutions for a class of semilinear problem, II}. J. Differential Equations, {\bf 158} (1999), no. 1, 94--151. \bibitem{Sc} Schr\"oder, Johann, {\it Operator inequalities}. Mathematics in Science and Engineering, \textbf{147}. Academic Press, Inc. [Harcourt Brace Jovanovich, Publishers], New York-London, 1980. \bibitem{S} Shi, Junping, {\it Anti-maximum principle and bifurcation}, submitted.
\bibitem{T} Tak\'a\v c, Peter, {\it An abstract form of maximum and anti-maximum principles of Hopf's type}. J. Math. Anal. Appl., {\bf 201} (1996), no. 2, 339--364. \end{thebibliography} \end{document}