\documentclass[reqno]{amsart} \usepackage{hyperref} \usepackage{graphicx} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2006(2006), No. 05, pp. 1--12.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2006 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2006/05\hfil Asymptotic profile] {Asymptotic profile of a radially symmetric solution with transition layers for an unbalanced bistable equation} \author[H. Matsuzawa\hfil EJDE-2006/05\hfilneg] {Hiroshi Matsuzawa} \address{Hiroshi Matsuzawa \hfill\break Numazu National College of Technology \\ Ooka 3600, Numazu-city, Shizuoka 410-8501, Japan} \email{hmatsu@numazu-ct.ac.jp} \date{} \thanks{Submitted August 31, 2005. Published January 11, 2006.} \subjclass[2000]{35B40, 35J25, 35J55, 35J50, 35K57} \keywords{Transition layer; Allen-Cahn equation; bistable equation; unbalanced} \begin{abstract} In this article, we consider the semilinear elliptic problem $$ -\varepsilon^{2}\Delta u=h(|x|)^2(u-a(|x|))(1-u^2) $$ in $B_1(0)$ with the Neumann boundary condition. The function $a$ is a $C^1$ function satisfying $|a(x)|<1$ for $x\in [0,1]$ and $a'(0)=0$. In particular we consider the case $a(r)=0$ on some interval $I\subset [0,1]$. The function $h$ is a positive $C^1$ function satisfying $h'(0)=0$. We investigate the asymptotic profile of a global minimizer of the corresponding energy functional as $\varepsilon\to 0$. We follow the variational procedure used in \cite{DS}, with a few modifications prompted by the presence of the function $h$. \end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \section{Introduction and Statement of Main Results} In this article, we consider the boundary value problem \begin{equation} \label{Pe} \begin{gathered} -\varepsilon^2\Delta u=h(|x|)^2(u-a(|x|))(1-u^2) \quad\text{in }B_1(0) \\ \frac{\partial u}{\partial \nu}=0 \quad\text{on } \partial B_1(0) \end{gathered} \end{equation} where $\varepsilon$ is a small positive parameter, $B_1(0)$ is the unit ball in $\mathbb{R}^N$ centered at the origin, the function $a$ is a $C^1$ function on $[0,1]$ satisfying $-1<a(r)<1$ for $r\in[0,1]$ and $a'(0)=0$, and the function $h$ is a positive $C^1$ function on $[0,1]$ satisfying $h'(0)=0$. In particular we consider the case where $a(r)=0$ on some interval $I=[l_1,l_2]\subset[0,1]$. For the case $h\equiv 1$, Dancer and Yan \cite{DS} showed that if $a(r)<-1/2$ for $l_1-r>0$ small and $a(r)>1/2$ for $r-l_2>0$ small, then a global minimizer of the corresponding functional has a transition layer near $l_1$, that is, the minimum point of $r^{N-1}$ on $I$ (see \cite[Theorem 1.3]{DS}). In this sense, our results are a natural extension of the results in \cite{DS}. Throughout this paper we follow the variational procedure used in \cite{DS}, with a few modifications prompted by the presence of the function $h$. The energy functional corresponding to \eqref{Pe} is \[ J_{\varepsilon}(u)=\int_{B_1(0)}\frac{\varepsilon^2}{2}|\nabla u|^2-F(|x|,u)\,dx, \] where $F(|x|,u)=\int_{-1}^u f(|x|,s)ds$ and $f(|x|,u)=h(|x|)^2(u-a(|x|))(1-u^2)$. It is easy to see that the following minimization problem has a minimizer: \begin{equation}\label{mini} \inf\{J_{\varepsilon}(u)|u\in H^1(B_1(0))\}. \end{equation} Let $A_-=\{x\in B_1(0)|a(|x|)<0\}$ and $A_+=\{x\in B_1(0)|a(|x|)>0\}$. In this paper, we will analyze the profile of the minimizer of \eqref{mini}, and prove the following results. \begin{theorem} \label{thm1.1} Let $u_{\varepsilon}$ be a global minimizer of \eqref{mini}.
Then $u_{\varepsilon}$ is radially symmetric and \[ u_{\varepsilon}\to \begin{cases} 1, &\mbox{uniformly on each compact subset of } A_-, \\ -1, &\mbox{uniformly on each compact subset of } A_+, \end{cases} \] as $\varepsilon\to 0$. In particular $u_{\varepsilon}$ converges uniformly near the boundary of $B_1(0)$, that is, if $a(r)<0$ on $[r_0,1]$ for some $r_0\in(0,1)$, then $u_{\varepsilon}\to 1$ uniformly on $\overline{B_1(0)}\backslash B_{r_0}(0)$, and if $a(r)>0$ on $[r_0,1]$ for some $r_0\in(0,1)$, then $u_{\varepsilon}\to -1$ uniformly on $\overline{B_1(0)}\backslash B_{r_0}(0)$. Moreover, for any $0<r_1<r_2<1$ such that $a(r)\neq 0$ for $r_1-r>0$ small and for $r-r_2>0$ small, and $a(r)=0$ if $r\in [r_1,r_2]$, we have: \begin{itemize} \item[(i)] If $a(r)<0$ for $r_1-r>0$ small and $a(r)>0$ for $r-r_2>0$ small, then for any small $\eta>0$ and for any small $\theta>0$, there exists a positive number $\varepsilon_0$ which has the following properties: \begin{itemize} \item[(a)] For all $\varepsilon\in (0, \varepsilon_0]$, there exist $t_{\varepsilon,1}<t_{\varepsilon,2}$ such that \begin{gather*} u_{\varepsilon}(r)>1-\eta \quad \text{for } r\in [r_1-\theta,t_{\varepsilon,1}), \\ u_{\varepsilon}(t_{\varepsilon,1})=1-\eta, \\ u_{\varepsilon}(t_{\varepsilon, 2})=-1+\eta, \\ u_{\varepsilon}(r)<-1+\eta \quad\text{for } r\in (t_{\varepsilon,2},r_2+\theta]. \end{gather*} \item[(b)] The function $u_{\varepsilon}(r)$ is decreasing on the interval $(t_{\varepsilon,1},t_{\varepsilon, 2})$. \item[(c)] The inequality $0<t_{\varepsilon,2}-t_{\varepsilon,1}\le C\varepsilon$ holds for some constant $C>0$ independent of $\varepsilon$. \item[(d)] If $t_{\varepsilon_j,1}$, $t_{\varepsilon_j,2}\to \overline{t}$ for some positive sequence $\{\varepsilon_j\}$ converging to zero as $j\to\infty$, then $\overline{t}$ satisfies $h(\overline{t})\overline{t}^{N-1}=\min_{s\in [r_1, r_2]}h(s) s^{N-1}$. \end{itemize} \item[(ii)] If $a(r)>0$ for $r_1-r>0$ small and $a(r)<0$ for $r-r_2>0$ small, then for each small $\eta>0$ and for each small $\theta>0$, there exists a positive number $\varepsilon_0$ which has the following properties: \begin{itemize} \item[(a)] For each $\varepsilon\in (0, \varepsilon_0]$, there exist $t_{\varepsilon,1}<t_{\varepsilon,2}$ such that \begin{gather*} u_{\varepsilon}(r)<-1+\eta \quad\text{for } r\in [r_1-\theta,t_{\varepsilon,1}), \\ u_{\varepsilon}(t_{\varepsilon,1})=-1+\eta, \\ u_{\varepsilon}(t_{\varepsilon, 2})=1-\eta, \\ u_{\varepsilon}(r)>1-\eta \quad\text{for } r\in (t_{\varepsilon,2},r_2+\theta]. \end{gather*} \item[(b)] The function $u_{\varepsilon}(r)$ is increasing in $(t_{\varepsilon,1},t_{\varepsilon, 2})$. \item[(c)] The inequality $0<t_{\varepsilon,2}-t_{\varepsilon,1}\le C\varepsilon$ holds for some constant $C>0$ independent of $\varepsilon$. \item[(d)] If $t_{\varepsilon_j,1}$, $t_{\varepsilon_j,2}\to \overline{t}$ for some positive sequence $\{\varepsilon_j\}$ converging to zero as $j\to\infty$, then $\overline{t}$ satisfies $h(\overline{t})\overline{t}^{N-1}=\min_{s\in [r_1, r_2]}h(s) s^{N-1}$. \end{itemize} \end{itemize} \end{theorem} \begin{figure}[ht] \begin{center} \includegraphics[width=0.7\textwidth]{fig1} \end{center} \caption{Profile of the global minimizer $u_{\varepsilon}$} \end{figure} \subsection*{Remarks.} \begin{itemize} \item Note that the results (a)--(c) in both cases (i) and (ii) are not related to the presence of the function $h$. The effect of the presence of $h$ appears only in result (d) of (i) and (ii). \item If $\min_{s\in [r_1,r_2]}s^{N-1}h(s)$ is attained at a unique point $\overline{t}$, we can show that $t_{\varepsilon,1}$, $t_{\varepsilon,2}\to \overline{t}$ as $\varepsilon\to 0$ without taking subsequences. \item If the function $r^{N-1}h(r)$ is constant on $[r_1, r_2]$, it is a very difficult problem to determine the location of the point $\overline{t}\in [r_1,r_2]$. \end{itemize} This paper is organized as follows: In Section 2, we present some preliminary results. In Section 3, we prove the main theorem. \section{Preliminary Results} Let $D$ be a bounded domain in $\mathbb{R}^N$.
Let $\overline{f}(x,t)$ be a function defined on $\overline{D}\times\mathbb{R}$ which is bounded on $\overline{D}\times[-1,1]$. Suppose $\overline{f}$ is continuous in $t\in\mathbb{R}$ for each $x\in\overline{D}$ and measurable in $x\in D$ for each $t\in\mathbb{R}$. We also assume \begin{equation}\label{comp1} \begin{gathered} \overline{f}(x,t)>0 \quad\text{for } x\in\overline{D},\; t<-1;\\ \overline{f}(x,t)<0 \quad\text{for } x\in\overline{D},\; t>1. \end{gathered} \end{equation} Consider the minimization problem \begin{equation}\label{comp2} \inf\left\{\overline{J}_{\varepsilon}(u,D) :=\int_{D}\frac{\varepsilon^2}{2}|\nabla u|^2-\overline{F}(x, u)dx: u-\eta\in H^1_0(D)\right\}, \end{equation} where $\eta\in H^1(D)$ with $-1\le \eta\le 1$ on $D$ and \[ \overline{F}(x,t)=\int_{-1}^t \overline{f}(x,s)ds. \] We can prove the next two lemmas by methods similar to those in \cite{DS}. For the reader's convenience, we prove these lemmas in this section. \begin{lemma}\label{complem1} Suppose that $\overline{f}(x,t)$ satisfies \eqref{comp1}. Let $u_{\varepsilon}$ be a minimizer of \eqref{comp2}. Then $-1\le u_{\varepsilon}\le 1$ on $D$. \end{lemma} \begin{proof} We prove $-1\le u_{\varepsilon}$ on $D$. Let $M=\{x:u_{\varepsilon}(x)<-1\}$. Define $\tilde{u}_{\varepsilon}$ by \[ \tilde{u}_{\varepsilon}(x)=\begin{cases} u_{\varepsilon}(x) & \text{if } x\in D\backslash M \\ -1 &\text{if } x\in M. \end{cases} \] Since $u_{\varepsilon}(x)=\eta\ge -1$ on $\partial D$, we see that $M$ is compactly contained in $D$. Thus $\tilde{u}_{\varepsilon}-\eta\in H^1_0(D)$. If the measure $m(M)$ of $M$ is positive, we have $\overline{J}_{\varepsilon}(\tilde{u}_{\varepsilon}, D) <\overline{J}_{\varepsilon}(u_{\varepsilon},D)$. Because $u_{\varepsilon}$ is a minimizer, we see $m(M)=0$, where $m(A)$ denotes the Lebesgue measure of the set $A$. Thus $u_{\varepsilon}\ge -1$. Similarly we can prove that $u_{\varepsilon}\le 1$. \end{proof} \begin{lemma}\label{complem2} Suppose that $\overline{f}_1(x,t)$ and $\overline{f}_2(x,t)$ both satisfy \eqref{comp1} and the same regularity assumptions as $\overline{f}$ above. Assume that $\eta_i\in H^1(D)$ satisfy $-1\le\eta_i\le 1$ on $D$ for $i=1,2$. Let $u_{\varepsilon,i}$ be a corresponding minimizer of {\rm \eqref{comp2}}, where $\overline{f}=\overline{f}_i$ and $\eta=\eta_i$, $i=1,2$. Suppose that $\overline{f}_1(x,t)\ge\overline{f}_2(x,t)$ for all $(x,t)\in\overline{D}\times [-1,1]$ and $1\ge \eta_1\ge \eta_2\ge -1$. Then $u_{\varepsilon,1}\ge u_{\varepsilon, 2}$. \end{lemma} \begin{proof} Let $M=\{x\in D:u_{\varepsilon, 2}>u_{\varepsilon,1}\}$. Define $\varphi_{\varepsilon}=(u_{\varepsilon,2}-u_{\varepsilon,1})^+$. Since $\eta_1\ge \eta_2$, we have $\varphi_{\varepsilon}\in H^1_0(D)$. Set $\overline{F}_i(x,u)=\int_{-1}^u \overline{f}_i(x,s)ds$.
Since $u_{\varepsilon,i}$ is a minimizer of \[ J_{\varepsilon,i}(u):=\int_{D}\frac{\varepsilon^2}{2}|\nabla u|^2 -\overline{F}_i(x,u)dx \] and $\varphi_{\varepsilon}=0$ for $x\in D\backslash M$, we have \begin{align*} 0 &\le J_{\varepsilon, 1}(u_{\varepsilon, 1} +\varphi_{\varepsilon})-J_{\varepsilon, 1}(u_{\varepsilon, 1}) \\ & = \int_{M}\frac{\varepsilon^2}{2}(|\nabla (u_{\varepsilon, 1} +\varphi_{\varepsilon})|^2-|\nabla u_{\varepsilon,1}|^2)dx -\int_{M}\int_{u_{\varepsilon,1}}^{u_{\varepsilon,1} +\varphi_{\varepsilon}}\overline{f}_1(x,s)ds\,dx \\ & \le \int_{M}\frac{\varepsilon^2}{2}(|\nabla (u_{\varepsilon, 1} +\varphi_{\varepsilon})|^2-|\nabla u_{\varepsilon,1}|^2)dx -\int_{M}\int_{u_{\varepsilon,1}}^{u_{\varepsilon,1} +\varphi_{\varepsilon}}\overline{f}_2(x,s)ds\,dx \\ & = J_{\varepsilon,2}(u_{\varepsilon,2})-J_{\varepsilon,2}(u_{\varepsilon,2} -\varphi_{\varepsilon})\le 0. \end{align*} This implies that $u_{\varepsilon, 1}+\varphi_{\varepsilon}$ is also a minimizer of $J_{\varepsilon,1}(u)$. Let $L>0$ be large enough such that $\overline{f}_1(x,t)+Lt$ is strictly increasing in $t$ for $x\in\overline{D}$, $t\in[-1,1]$. From \[ -\varepsilon^2\Delta (u_{\varepsilon,1}+\varphi_{\varepsilon}) =\overline{f}_1(x,u_{\varepsilon,1}+\varphi_{\varepsilon}), \] we obtain \[ -\varepsilon^2\Delta\varphi_{\varepsilon} =\overline{f}_1(x,u_{\varepsilon,1}+\varphi_{\varepsilon}) -\overline{f}_1(x,u_{\varepsilon,1}). \] Thus \[ -\varepsilon^2\Delta\varphi_{\varepsilon}+L\varphi_{\varepsilon} =\overline{f}_1(x,u_{\varepsilon,1}+\varphi_{\varepsilon}) +L(u_{\varepsilon,1}+\varphi_{\varepsilon}) -(\overline{f}_1(x,u_{\varepsilon,1})+Lu_{\varepsilon,1})>0 \] in $M$. Fix $z_0\in M$. Let $x_0\in\partial M$ be such that $|x_0-z_0|={\rm dist}(z_0,\partial M)$. Using the strong maximum principle and Hopf's lemma in $B_{{\rm dist}(z_0, \partial M)}(z_0)$, we obtain that $\frac{\partial\varphi_{\varepsilon}}{\partial\nu}(x_0)<0$, where $\nu=(x_0-z_0)/|x_0-z_0|$. But $\varphi_{\varepsilon}\ge 0$ in $D$ and $\varphi_{\varepsilon}(x)=0$ for $x\notin M$, so $x_0$ is a minimum point of $\varphi_{\varepsilon}$ and hence $\frac{\partial\varphi_{\varepsilon}}{\partial\nu}(x_0)=0$. This is a contradiction. Thus we obtain $M=\emptyset$. \end{proof} \section{Proof of Main Theorem} To prove Theorem \ref{thm1.1}, the following proposition is used as the first step. \begin{proposition} \label{prop3.1} Let $u_{\varepsilon}$ be a global minimizer of the problem \eqref{mini}. Then $u_{\varepsilon}$ satisfies \[ u_{\varepsilon}\to \begin{cases} 1 & \mbox{uniformly on each compact subset of } A_-, \\ -1 & \mbox{uniformly on each compact subset of } A_+, \end{cases} \] as $\varepsilon\to 0$. \end{proposition} \begin{proof} Let $x_0\in A_-$. Choose $\delta>0$ small so that $B_{\delta}(x_0)\subset\subset A_-$. Take $b\in (\max_{z\in\overline{B_{\delta}(x_0)}}a(z), 0)$. Define $f_{x_0,\delta,b}(t)=(\min_{z\in B_{\delta}(x_0)} h(|z|)^2)(t-b)(1-t^2)$. Then for $x\in \overline{B_{\delta}(x_0)}$, $t\in [-1, 1]$, we have $f(|x|,t)\ge f_{x_0,\delta,b}(t)$. Let $u_{\varepsilon,x_0,\delta,b}$ be the minimizer of \[ \inf\Big\{\int_{B_{\delta}(x_0)}\frac{\varepsilon^2}{2}|\nabla u|^2-F_{x_0, \delta,b}(u)dx: u+1\in H^1_0(B_{\delta}(x_0))\Big\}, \] where $F_{x_0,\delta,b}(t)=\int_{-1}^t f_{x_0,\delta,b}(s)ds$. It follows from Lemmas \ref{complem1} and \ref{complem2} that \[ u_{\varepsilon, x_0, \delta,b}(x)\le u_{\varepsilon}(x)\le 1, \quad \text{for } x\in B_{\delta}(x_0). \]
Since $\int_{-1}^1 f_{x_0,\delta,b}(s)ds>0$, it follows from \cite{CP,CS} that $u_{\varepsilon,x_0,\delta,b}(x)\to 1$ as $\varepsilon\to 0$ uniformly in $B_{\delta/2}(x_0)$, thus $u_{\varepsilon}(x)\to 1$ as $\varepsilon \to 0$ uniformly in $B_{\delta/2}(x_0)$. Since any compact subset of $A_-$ is covered by finitely many such balls $B_{\delta/2}(x_0)$, we obtain the uniform convergence $u_{\varepsilon}\to 1$ on compact subsets of $A_-$. The statement for $A_+$ is proved in the same way. \end{proof} To prove the rest of Theorem \ref{thm1.1}, we need the following proposition and lemma. \begin{proposition}\label{rad} Let $u$ be a local minimizer of the problem \[ \inf\Big\{\int_{B_1(0)}\frac{1}{2}|\nabla u|^2-G(|x|,u)dx: u\in H^1(B_1(0))\Big\}. \] Here $G(r,t)=\int_{-1}^t g(r,s)ds$, $g(r,t)$ is $C^1$ in $t\in\mathbb{R}$ for each $r\ge 0$, $g(r,t)$ and $g_t(r,t)$ are measurable on $[0,+\infty)$ for each $t\in\mathbb{R}$, $g(r,t)<0$ if $t<-1$ or $t>1$ and $|g(r,t)|+|g_t(r,t)|$ is bounded on $[0, k]\times [-2,2]$ for any $k>0$. Then $u$ is radial, i.e., $u(x)=u(|x|)$. \end{proposition} The proof of the above proposition can be found in \cite[Proposition 2.6]{DS}. \begin{lemma}\label{seed3} Let $0<\eta<1$ be any fixed constant and suppose that $w$ satisfies \begin{gather*} -w_{zz}=w(1-w^2) \quad \text{on } \mathbb{R}, \\ w(0)=-1+\eta\quad \mbox{(resp. $w(0)=1-\eta$)}, \\ w(z)\le -1+\eta\quad\mbox{(resp. $w(z)\ge 1-\eta$)} \quad \text{for }z\le 0, \\ w \mbox{ is bounded on } \mathbb{R}. \end{gather*} Then $w$ is the unique solution of \begin{gather*} -w_{zz}=w(1-w^2) \quad\text{on } \mathbb{R}, \\ w(0)=-1+\eta\quad\mbox{(resp. $w(0)=1-\eta$)}, \\ w'(z)>0\quad \mbox{(resp. $w'(z)<0$)}\quad \text{for } z\in\mathbb{R}, \\ w(z)\to\pm 1\quad\mbox{(resp. $w(z)\to \mp 1$)} \quad\text{as } z\to\pm\infty. \end{gather*} \end{lemma} The proof of the above lemma can be found in \cite{Ma}. Now we prove the rest of Theorem \ref{thm1.1}. \begin{proof}[Proof of Theorem \ref{thm1.1}] For the sake of simplicity, we give the proof for the case where $a(r)<0$ on $[0,r_1)$, $a(r)=0$ on $[r_1, r_2]$ and $a(r)>0$ on $(r_2,1]$ for some $0<r_1<r_2<1$. By Proposition \ref{rad}, $u_{\varepsilon}$ is radially symmetric, so we write $u_{\varepsilon}=u_{\varepsilon}(r)$ with $r=|x|$.

\subsection*{Part 1.} First we show that $u_{\varepsilon}\to -1$ uniformly on $\overline{B_1(0)}\backslash B_{r_2+\tau}(0)$ for each small $\tau>0$. Let $\tau>0$ be small. We note that, by Proposition \ref{prop3.1}, $u_{\varepsilon}\to -1$ uniformly on $\overline{B_{1-\tau}(0)}\backslash B_{r_2+\tau}(0)$ as $\varepsilon\to 0$. Now we claim that $u_{\varepsilon}(r)\le u_{\varepsilon}(1-\tau)=:T_{\varepsilon}$ for $r\in [1-\tau, 1]$. We define the function $\tilde{u}_{\varepsilon}$ by \[ \tilde{u}_{\varepsilon}(r)=\begin{cases} u_{\varepsilon}(r) & \mbox{if } r\in [0, 1-\tau], \\ u_{\varepsilon}(r) & \mbox{if } u_{\varepsilon}(r)<T_{\varepsilon} \mbox{ and } r\in [1-\tau,1], \\ T_{\varepsilon} & \mbox{if } u_{\varepsilon}(r)\ge T_{\varepsilon} \mbox{ and } r\in [1-\tau,1]. \end{cases} \] Since $a(r)>0$ near $r=1$ and $T_{\varepsilon}\to -1$ as $\varepsilon\to 0$, we have $F(|x|,T_{\varepsilon})-F(|x|,t)\ge 0$ for $\varepsilon>0$ and $\tau>0$ small, $|r-1|$ small and $t\ge T_{\varepsilon}$. Hence we obtain $J_{\varepsilon}(\tilde{u}_{\varepsilon})<J_{\varepsilon}(u_{\varepsilon})$ if the measure of the set $\{r: u_{\varepsilon}(r)>T_{\varepsilon}\ {\rm and}\ r\in [1-\tau, 1]\}$ is positive. Since $u_{\varepsilon}$ is a global minimizer, this measure must be zero, and the claim follows. Hence $-1\le u_{\varepsilon}(r)\le T_{\varepsilon}$ for $r\in[1-\tau,1]$, and since $T_{\varepsilon}\to -1$, $u_{\varepsilon}\to -1$ uniformly on $\overline{B_1(0)}\backslash B_{r_2+\tau}(0)$ as $\varepsilon\to 0$.

\subsection*{Part 2.} We note that for any $t_1$, $t_2$ with $0\le t_1<t_2\le 1$, $u_{\varepsilon}$ is a minimizer of the following problem \[ \inf\{J_{\varepsilon}(u, B_{t_2}(0)\backslash\overline{B_{t_1}(0)}): u-u_{\varepsilon}\in H^1_0(B_{t_2}(0)\backslash\overline{B_{t_1}(0)})\}, \] where \[ J_{\varepsilon}(u,M)=\int_M\frac{\varepsilon^2}{2}|\nabla u|^2-F(|x|,u)dx \] for any open set $M$. Let $m_{\varepsilon,t_1,t_2}$ be the minimum value of this minimization problem. In this part we show that $u_{\varepsilon}$ has exactly one layer near the interval $[r_1, r_2]$. \smallskip \noindent{\bf Step 2.1.} First we estimate the energy of a transition layer. Let $\eta>0$ and $\theta>0$ be small numbers. Since $u_{\varepsilon}\to 1$ uniformly on $[0, r_1-\theta]$ and $u_{\varepsilon}\to -1$ uniformly on $[r_2+\theta,1-\theta]$, we can find $\overline{r}_{\varepsilon}\in (r_1-\theta, r_2+\theta)$ such that $u_{\varepsilon}(r)\ge 1-\eta$ if $r\in [0, \overline{r}_{\varepsilon}]$, $u_{\varepsilon}(r)<1-\eta$ for $r-\overline{r}_{\varepsilon}>0$ small.
Let $\tilde{r}_{\varepsilon}>\overline{r}_{\varepsilon}$ be such that $u_{\varepsilon}(r)\le -1+\eta$ if $r\in [\tilde{r}_{\varepsilon},1-\theta]$, $u_{\varepsilon}(r)>-1+\eta$ for $\tilde{r}_{\varepsilon}-r>0$ small. We may assume that $\overline{r}_{\varepsilon}\to \overline{r}\in [r_1, r_2]$ and $\tilde{r}_{\varepsilon}\to\tilde{r}\in[r_1,r_2]$ as $\varepsilon\to 0$. We employ the so-called blow-up argument. Let $v_{\varepsilon}(t)=u_{\varepsilon}(\varepsilon t+\overline{r}_{\varepsilon})$. Then \[ -v_{\varepsilon}''-\varepsilon\frac{N-1}{\varepsilon t+\overline{r}_{\varepsilon}}v_{\varepsilon}'=f(\varepsilon t+\overline{r}_{\varepsilon},v_{\varepsilon}), \] $-1\le v_{\varepsilon}\le 1$ and $v_{\varepsilon}(0)=1-\eta$. Since $\overline{r}_{\varepsilon}\to\overline{r}\in [r_1, r_2]$, it is easy to see that $v_{\varepsilon}\to v$ in $C^1_{\rm loc}(\mathbb{R})$, where \[ -v''=h(\overline{r})^2(v-v^3),\quad t\in\mathbb{R}, \] and $v(t)\ge 1-\eta$ for $t\le 0$. If we set $v(t)=V(h(\overline{r})t)$, the function $V(t)$ satisfies \begin{equation} \begin{gathered} -V''=V-V^3 \quad \text{on } \mathbb{R}, \\ V(0)=1-\eta, \\ V(t)\ge 1-\eta \quad \text{for } t\le 0. \end{gathered} \end{equation} Hence by Lemma \ref{seed3}, the function $V$ is the unique solution of \begin{equation}\label{seed} \begin{gathered} -V''=V-V^3 \quad\text{on } \mathbb{R}, \\ V(0)=1-\eta, \\ V'(t)<0 \quad \text{for } t\in\mathbb{R}, \\ V(t)\to \pm 1 \quad\text{as } t\to\mp\infty. \end{gathered} \end{equation} Thus, we can find an $R>0$ large, such that $v(R)=-1+\eta$. Since $v_{\varepsilon}\to v$ in $C^1_{\rm loc}(\mathbb{R})$, we can find an $R_{\varepsilon}\in (R-1, R+1)$, such that $v_{\varepsilon}'(r)<0$ if $r\in [0, R_{\varepsilon}]$ and $v_{\varepsilon}(R_{\varepsilon})=-1+\eta$. Hence $u_{\varepsilon}'(r)<0$ if $r\in[\overline{r}_{\varepsilon}, \overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon}]$ and $u_{\varepsilon}(\overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon})=-1+\eta$. Then we have \begin{equation}\label{1lay} \begin{aligned} & J_{\varepsilon}(u_{\varepsilon}, B_{\overline{r}_{\varepsilon} +\varepsilon R_{\varepsilon}}(0)\backslash \overline{B_{\overline{r}_{\varepsilon}}(0)}) \\ &=\omega_{N-1}(\overline{r}_{\varepsilon}^{N-1}+o_{\varepsilon}(1)) \int_{\overline{r}_{\varepsilon}}^{\overline{r}_{\varepsilon} +\varepsilon R_{\varepsilon}}\left(\frac{\varepsilon^2}{2}|u_{\varepsilon}'|^2 -F(t, u_{\varepsilon})\right)dt \\ &=\omega_{N-1}(\overline{r}_{\varepsilon}^{N-1}+o_{\varepsilon}(1)) \varepsilon\int_0^{R_{\varepsilon}}\left(\frac{1}{2}|v_{\varepsilon}'|^2 -F(\varepsilon t+\overline{r}_{\varepsilon}, v_{\varepsilon})\right)dt \\ &=\omega_{N-1}(\overline{r}_{\varepsilon}^{N-1}+o_{\varepsilon}(1)) (\beta_{h(\overline{r})}+O(\eta)+o_{\varepsilon}(1))\varepsilon, \end{aligned} \end{equation} where $\omega_{N-1}$ is the area of the unit sphere in $\mathbb{R}^N$, $o_{\varepsilon}(1)\to 0$ as $\varepsilon\to 0$, $\beta_{h(s)}$ is the positive value defined by \begin{align*} \beta_{h(s)} &=\int_{-\infty}^{+\infty} \Big(\frac{1}{2}|w_{h(s)}'(t)|^2+h(s)^2\frac{(w_{h(s)}^2-1)^2}{4}\Big)dt \\ &=h(s)\int_{-\infty}^{+\infty}\Big(\frac{1}{2}|V'(t)|^2+\frac{(V(t)^2-1)^2}{4}\Big)dt \\ &=h(s)\beta_1 \end{align*} and $w_{h(s)}(t)=V(h(s)t)$ for $s\in[0,1]$. We note that although the function $V$ depends on $\eta$, the value \[ \beta_1=\int_{-\infty}^{+\infty}\Big(\frac{1}{2}|V'(t)|^2+\frac{(V(t)^2-1)^2}{4}\Big)dt \] is independent of $\eta$. \smallskip \noindent{\bf Step 2.2.} We claim $u_{\varepsilon}$ has exactly one layer near the interval $[r_1, r_2]$.
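Before proving this claim, we record for orientation the explicit form of $V$ and the value of $\beta_1$; this value is not used in the sequel, but it makes the energy comparisons below concrete. The bounded decreasing solution of $-V''=V-V^3$ is a translate of $-\tanh(t/\sqrt{2})$; with the normalization $V(0)=1-\eta$ we have $V(t)=\tanh\big((t_0-t)/\sqrt{2}\big)$, where $t_0=\sqrt{2}\,\mathop{\rm arctanh}(1-\eta)$. Along this solution $\frac{1}{2}|V'|^2=\frac{(V^2-1)^2}{4}$, so, substituting $v=V(t)$,
\[
\beta_1=\int_{-\infty}^{+\infty}\Big(\frac{1}{2}|V'(t)|^2+\frac{(V(t)^2-1)^2}{4}\Big)dt
=\int_{-1}^{1}\frac{1-v^2}{\sqrt{2}}\,dv=\frac{2\sqrt{2}}{3},
\]
which is indeed independent of $\eta$.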
To show $u_{\varepsilon}$ has exactly one layer near the interval $[r_1, r_2]$, it is sufficient to prove the following claim.

\noindent{\bf Claim.} $\tilde{r}_{\varepsilon}=\overline{r}_{\varepsilon} +\varepsilon R_{\varepsilon}$.

Suppose that the claim is not true. Then we can find a $t_{\varepsilon}>\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon$ such that $u_{\varepsilon}(r)<-1+\eta$ if $r\in (\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon,t_{\varepsilon})$, $u_{\varepsilon}(t_{\varepsilon})=-1+\eta$. Thus we can use the blow-up argument again at $t_{\varepsilon}$ to deduce that there is a $\tilde{t}_{\varepsilon}=t_{\varepsilon}+\varepsilon \tilde{R}_{\varepsilon}$ with $u_{\varepsilon}'(r)>0$ if $r\in (t_{\varepsilon}, \tilde{t}_{\varepsilon})$, $u_{\varepsilon}(\tilde{t}_{\varepsilon})=1-\eta$. We may assume that $t_{\varepsilon},\tilde{t}_{\varepsilon}\to\overline{t}$ as $\varepsilon\to 0$ for some $\overline{t}\in [r_1-\theta, r_2+\theta]$. Moreover \begin{equation}\label{2lay} J_{\varepsilon}(u_{\varepsilon}, B_{\tilde{t}_{\varepsilon}}(0)\backslash \overline{B_{t_{\varepsilon}}(0)})=\omega_{N-1}(t_{\varepsilon}^{N-1}+o_{\varepsilon}(1))(\beta_{h(\overline{t})}+O(\eta)+o_{\varepsilon}(1))\varepsilon. \end{equation} Now we claim $\tilde{t}_{\varepsilon}\ge r_1$. Suppose $\tilde{t}_{\varepsilon}<r_1$. For $a\in(-1,1)$ set $F_a(v)=\int_{-1}^{v}(s-a)(1-s^2)ds$, so that $F(r,u)=h(r)^2F_{a(r)}(u)$. Then for $t>0$ small and $s\in[-1+t,1-t]$, \begin{equation} \label{cont} \begin{aligned} & F_a(1-t)-F_a(s) \\ &=F_0(1-t)-F_0(s)+F_a(1-t)-F_0(1-t)-F_a(s)+F_0(s) \\ &=-\big[\frac{(v^2-1)^2}{4}\big]_{s}^{1-t}-a\int_{s}^{1-t}(1-v^2)dv. \end{aligned} \end{equation} Thus it follows from \eqref{cont} that if $a<0$, then \begin{equation}\label{cont2} F_a(1-t)-F_a(s)>0 \end{equation} for $s\in [-1+t,1-t]$. Define \[ \overline{u}_{\varepsilon}(r):=\begin{cases} 1-\eta & r\in [\overline{r}_{\varepsilon},\overline{r}_{\varepsilon} +R_{\varepsilon}\varepsilon]\cup[t_{\varepsilon},\tilde{t}_{\varepsilon}], \\ -u_{\varepsilon}(r) & r\in [\overline{r}_{\varepsilon} +R_{\varepsilon}\varepsilon, t_{\varepsilon}]. \end{cases} \] By the assumption that $\tilde{t}_{\varepsilon}<r_1$, we have $a(r)<0$ on $[\overline{r}_{\varepsilon},\tilde{t}_{\varepsilon}]$. On $[\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon, t_{\varepsilon}]$ we have $-1\le u_{\varepsilon}\le -1+\eta$ and hence \[ F(r,-u_{\varepsilon}(r))-F(r,u_{\varepsilon}(r)) =-a(r)h(r)^2\int_{u_{\varepsilon}(r)}^{-u_{\varepsilon}(r)}(1-s^2)ds\ge 0, \] while on $[\overline{r}_{\varepsilon},\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon]\cup[t_{\varepsilon},\tilde{t}_{\varepsilon}]$ we have $-1+\eta\le u_{\varepsilon}\le 1-\eta$ and, by \eqref{cont2}, $F(r,1-\eta)\ge F(r,u_{\varepsilon}(r))$. Therefore the function which equals $\overline{u}_{\varepsilon}$ on $(\overline{r}_{\varepsilon},\tilde{t}_{\varepsilon})$ and equals $u_{\varepsilon}$ elsewhere is admissible for the minimization problem of Part 2 on $B_{\tilde{t}_{\varepsilon}}(0)\backslash\overline{B_{\overline{r}_{\varepsilon}}(0)}$ and has strictly smaller energy than $u_{\varepsilon}$, since the potential term does not increase while the positive gradient energy of the transition layers of $u_{\varepsilon}$ is removed. This contradicts the minimality of $u_{\varepsilon}$ stated in Part 2, and hence $\tilde{t}_{\varepsilon}\ge r_1$. Combining \eqref{1lay} and \eqref{2lay} with a lower estimate of the energy of $u_{\varepsilon}$ on the remaining part of $B_{\tilde{r}_{\varepsilon}}(0)\backslash\overline{B_{\overline{r}_{\varepsilon}}(0)}$, we obtain \begin{equation}\label{lower} m_{\varepsilon,\overline{r}_{\varepsilon},\tilde{r}_{\varepsilon}} \ge \omega_{N-1}\big(\overline{r}_{\varepsilon}^{N-1}\beta_{h(\overline{r})} +t_{\varepsilon}^{N-1}\beta_{h(\overline{t})}\big)\varepsilon +O(\eta\varepsilon)+o(\varepsilon). \end{equation} Next we estimate $m_{\varepsilon,\overline{r}_{\varepsilon},\tilde{r}_{\varepsilon}}$ from above by a test function with a single layer. Let $R>0$ be such that $V(h(\overline{r})R)=-1+\eta$ (as in Step 2.1), where $V$ is the unique solution of \eqref{seed}. Define $\overline{u}_{\varepsilon}$ by \begin{equation}\label{testfunction} \overline{u}_{\varepsilon}(r):=\begin{cases} V(h(\overline{r})\frac{r-\overline{r}_{\varepsilon}}{\varepsilon}) & r\in [\overline{r}_{\varepsilon},\overline{r}_{\varepsilon}+\varepsilon R] \\ -1+\eta-\frac{\eta}{\varepsilon}(r-\overline{r}_{\varepsilon}-\varepsilon R) & r\in [\overline{r}_{\varepsilon}+\varepsilon R, \overline{r}_{\varepsilon} +\varepsilon R+\varepsilon] \\ -1 & r\in[\overline{r}_{\varepsilon}+\varepsilon R+\varepsilon, \tilde{r}_{\varepsilon}-\varepsilon] \\ -1+\frac{\eta}{\varepsilon}(r-\tilde{r}_{\varepsilon}+\varepsilon) & r\in[\tilde{r}_{\varepsilon}-\varepsilon,\tilde{r}_{\varepsilon}] \end{cases} \end{equation} Now we note that $|F(r,t)|=O(\eta)$ for $r\in [\overline{r}_{\varepsilon},\tilde{r}_{\varepsilon}]$ and $-1\le t\le -1+\eta$.
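This bound follows directly from the definition of $F$: since $F(r,-1)=0$ and $f$ is bounded on $[0,1]\times[-1,1]$, for $-1\le t\le -1+\eta$ we have
\[
|F(r,t)|=\Big|\int_{-1}^{t}f(r,s)\,ds\Big|
\le (t+1)\max_{[0,1]\times[-1,1]}|f|
\le \eta \max_{[0,1]\times[-1,1]}|f|.
\]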
Then we have \begin{equation} \label{upper} \begin{aligned} m_{\varepsilon,\overline{r}_{\varepsilon},\tilde{r}_{\varepsilon}} &\le J_{\varepsilon}(\overline{u}_{\varepsilon}, B_{\tilde{r}_{\varepsilon}}(0)\backslash \overline{B_{\overline{r}_{\varepsilon}}(0)}) \\ &\le J_{\varepsilon}(\overline{u}_{\varepsilon}, B_{\overline{r}_{\varepsilon} +R\varepsilon}(0)\backslash \overline{B_{\overline{r}_{\varepsilon}}(0)}) +J_{\varepsilon}(\overline{u}_{\varepsilon}, B_{\tilde{r}_{\varepsilon}}(0) \backslash \overline{B_{\tilde{r}_{\varepsilon}-\varepsilon}(0)}) \\ &\quad +J_{\varepsilon}(\overline{u}_{\varepsilon}, B_{\tilde{r}_{\varepsilon} -\varepsilon}(0)\backslash \overline{B_{\overline{r}_{\varepsilon} +\varepsilon R}(0)}) \\ &\le \omega_{N-1}\overline{r}_{\varepsilon}^{N-1}(\beta_{h(\overline{r})} +O(\eta))\varepsilon+o(\varepsilon)+O(\varepsilon\eta)+o(\varepsilon) \\ &= \omega_{N-1}\overline{r}_{\varepsilon}^{N-1}\beta_{h(\overline{r})}\varepsilon +O(\eta\varepsilon)+o(\varepsilon). \end{aligned} \end{equation} By (\ref{lower}) and (\ref{upper}), we have \[ \omega_{N-1}(\overline{r}_{\varepsilon}^{N-1}\beta_{h(\overline{r})} +t_{\varepsilon}^{N-1}\beta_{h(\overline{t})})\varepsilon\le \omega_{N-1}\overline{r}_{\varepsilon}^{N-1}\beta_{h(\overline{r})} \varepsilon+O(\varepsilon\eta)+o(\varepsilon). \] Since $\tilde{t}_{\varepsilon}\ge r_1$ implies $\overline{t}\ge r_1>0$ and since $h>0$, the term $t_{\varepsilon}^{N-1}\beta_{h(\overline{t})}$ is bounded below by a positive constant independent of $\varepsilon$ and $\eta$. This is a contradiction for $\eta$ and $\varepsilon$ small. So we can conclude $\tilde{r}_{\varepsilon}=\overline{r}_{\varepsilon} +\varepsilon R_{\varepsilon}$. \subsection*{Part 3.} It remains to prove that if $\overline{r}_{\varepsilon_j}\to\overline{r}$ for some positive sequence $\{\varepsilon_j\}$ converging to zero as $j\to\infty$ then $\overline{r}$ satisfies \[ \overline{r}^{N-1}h(\overline{r})=\min_{s\in [r_1, r_2]}s^{N-1}h(s). \] \smallskip \noindent{\bf Step 3.1.} First we note that from Parts 1 and 2, the function $u_{\varepsilon}$ satisfies $-1\le u_{\varepsilon}\le -1+\eta$ for $r\in [\overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon},1]$ in this case. \smallskip \noindent{\bf Step 3.2.} Set $H(s)=s^{N-1}h(s)$. Assume that the result is not true. Then there exists a subsequence of $\{\overline{r}_{\varepsilon}\}$ (denoted by $\overline{r}_{\varepsilon}$) such that $\overline{r}_{\varepsilon}\to r'\in [r_1, r_2]$ and $H(r')>\min_{s\in [r_1, r_2]}H(s)$. Then we can find a point $\overline{t}\in (r_1, r_2)$ such that $H(r')>H(\overline{t})$. Now we give a lower estimate for $J_{\varepsilon}(u_{\varepsilon})$. We have \begin{equation} \label{lower21} J_{\varepsilon}(u_{\varepsilon}) =J_{\varepsilon}(u_{\varepsilon}, B_{\overline{r}_{\varepsilon}}(0))+J_{\varepsilon}(u_{\varepsilon}, B_{\overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon}}(0) \backslash \overline{B_{\overline{r}_{\varepsilon}}(0)}) +J_{\varepsilon}(u_{\varepsilon}, B_1(0)\backslash \overline{B_{\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon}(0)}). \end{equation} First we note that $1-\eta\le u_{\varepsilon}(r)\le 1$ for $r\le\overline{r}_{\varepsilon}$ and for sufficiently small $\eta>0$, $-F(r,u)\ge -F(r,1)$ ($u\in [1-\eta, 1]$). We also remark that since $a(r)<0$ for $r<r_1$ and $a(r)>0$ for $r>r_2$, we have $-F(r,1)<0$ for $r<r_1$ and $-F(r,1)>0$ for $r>r_2$.
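This sign property can be seen by a direct computation from the definition of $F$: since $\int_{-1}^{1}s(1-s^2)ds=0$ and $\int_{-1}^{1}(1-s^2)ds=4/3$,
\[
F(r,1)=\int_{-1}^{1}h(r)^2(s-a(r))(1-s^2)ds=-\frac{4}{3}a(r)h(r)^2,
\]
so that $-F(r,1)=\frac{4}{3}a(r)h(r)^2$ has the same sign as $a(r)$; in particular $-F(r,1)=0$ for $r\in[r_1,r_2]$.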
Hence we have $-\int_{r_1}^{\overline{r}_{\varepsilon}}r^{N-1}F(r,1)dr\ge 0$ and we obtain the estimate \begin{equation} \label{lower22} \begin{aligned} J_{\varepsilon}(u_{\varepsilon}, B_{\overline{r}_{\varepsilon}}(0)) &\ge -\omega_{N-1}\int_0^{\overline{r}_{\varepsilon}}r^{N-1}F(r,u_{\varepsilon})dr \\ &\ge -\omega_{N-1}\int_0^{\overline{r}_{\varepsilon}}r^{N-1}F(r,1)dr \\ &= -\omega_{N-1}\int_0^{r_1}r^{N-1}F(r,1)dr-\omega_{N-1}\int_{r_1}^{\overline{r}_{\varepsilon}}r^{N-1}F(r,1)dr \\ &\ge -\omega_{N-1}\int_0^{r_1}r^{N-1}F(r,1)dr=:A. \end{aligned} \end{equation} Using methods similar to those in the proof of (\ref{1lay}), we obtain \begin{equation}\label{lower23} J_{\varepsilon}(u_{\varepsilon}, B_{\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon}(0)\backslash \overline{B_{\overline{r}_{\varepsilon}}(0)})\ge \omega_{N-1}H(r')\beta_1\varepsilon+O(\eta\varepsilon)+o(\varepsilon). \end{equation} Since $-1\le u_{\varepsilon}(r)\le -1+\eta$ for $r\ge \overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon}$ and for sufficiently small $\eta>0$, $-F(r,u)\ge-F(r,-1)=0$ ($u\in [-1,-1+\eta]$), we obtain the estimate \begin{equation} \label{lower24} \begin{aligned} J_{\varepsilon}(u_{\varepsilon}, B_1(0)\backslash B_{\overline{r}_{\varepsilon}+R_{\varepsilon}\varepsilon}(0)) &\ge -\omega_{N-1}\int_{\overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon}}^1 r^{N-1}F(r,u_{\varepsilon})dr \\ &\ge -\omega_{N-1}\int_{\overline{r}_{\varepsilon}+\varepsilon R_{\varepsilon}}^1 r^{N-1}F(r,-1)dr=0. \end{aligned} \end{equation} Thus we obtain \begin{equation}\label{lower25} J_{\varepsilon}(u_{\varepsilon})\ge A+\omega_{N-1}H(r')\beta_1\varepsilon+O(\eta\varepsilon)+o(\varepsilon). \end{equation} Next we give an upper bound for $J_{\varepsilon}(u_{\varepsilon})$. Consider the function \[ \overline{w}_{\varepsilon}(r):=\begin{cases} 1 & r\in [0,\overline{t}-\varepsilon] \\ 1-\frac{\eta}{\varepsilon}(r-\overline{t}+\varepsilon) & r\in[\overline{t}-\varepsilon,\overline{t}] \\ V\big(h(\overline{t})\frac{r-\overline{t}}{\varepsilon}\big) & r\in [\overline{t}, \overline{t}+\varepsilon R'] \\ -1-\frac{\eta}{\varepsilon}(r-\overline{t}-\varepsilon R'-\varepsilon) & r\in [\overline{t}+\varepsilon R', \overline{t}+\varepsilon R' +\varepsilon ] \\ -1 & r\in [\overline{t}+\varepsilon R'+\varepsilon, 1], \end{cases} \] where $R'>0$ is the number satisfying $V(h(\overline{t})R')=-1+\eta$. Then \begin{equation}\label{upper21} J_{\varepsilon}(u_{\varepsilon})\le J_{\varepsilon} (\overline{w}_{\varepsilon})\le A+\omega_{N-1}H(\overline{t}) \beta_1\varepsilon+O(\eta\varepsilon)+o(\varepsilon). \end{equation} Since $H(r')>H(\overline{t})$, the estimates (\ref{lower25}) and (\ref{upper21}) give a contradiction for $\eta$ and $\varepsilon$ small. The proof of Theorem \ref{thm1.1} is complete. The more complicated case can be treated by a similar method (see the Remark below). \end{proof} \subsection*{Remark} We briefly discuss the more complicated case in which $a$ is a function as in Figure 2. More precisely, we set $I_1:=[r_1, r_2]$ and $I_2:=[r_3, r_4]$, and we assume that $a=0$ on $I_1\cup I_2$, $a>0$ on $[0, r_1)\cup(r_4, 1]$ and $a<0$ on $(r_2, r_3)$. \begin{figure}[ht] \begin{center} \includegraphics[width=0.8\textwidth]{fig2} \end{center} \caption{Special case of coefficient $a(t)$} \end{figure} Let $\eta>0$ and $\theta>0$ be small numbers.
As in Part 2, we can find pairs of numbers $(\overline{r}_{1, \varepsilon}, \overline{r}_{2,\varepsilon})$ and $(R_{1,\varepsilon}, R_{2,\varepsilon})$ satisfying $\overline{r}_{1,\varepsilon}\in(r_1-\theta, r_2+\theta)$, $\overline{r}_{2,\varepsilon}\in(r_3-\theta, r_4+\theta)$, $\sup_{\varepsilon}|R_{1,\varepsilon}|<\infty$, $\sup_{\varepsilon}|R_{2,\varepsilon}|<\infty$ and \begin{gather*} u_{\varepsilon}(r)<-1+\eta \quad\text{for } 0\le r<\overline{r}_{1,\varepsilon}, \\ u_{\varepsilon}(r)>1-\eta \quad\text{for } \overline{r}_{1,\varepsilon} +\varepsilon R_{1,\varepsilon}<r<\overline{r}_{2,\varepsilon}, \\ u_{\varepsilon}(r)<-1+\eta \quad\text{for } \overline{r}_{2,\varepsilon} +\varepsilon R_{2,\varepsilon}<r\le 1. \end{gather*} We may assume that $\overline{r}_{1,\varepsilon_j}\to\overline{r}_1$ and $\overline{r}_{2,\varepsilon_j}\to\overline{r}_2$ for some positive sequence $\{\varepsilon_j\}$ converging to zero. Suppose that $H(\overline{r}_1)>\min_{s\in I_1}H(s)$ or $H(\overline{r}_2)>\min_{s\in I_2}H(s)$ holds. We assume $H(\overline{r}_1)=\min_{s\in I_1}H(s)$ and $H(\overline{r}_2)>\min_{s\in I_2}H(s)$. We also assume $r_1=\overline{r}_1$. We note that if $H(\overline{r}_1)>\min_{s\in I_1}H(s)$ or $\overline{r}_1\in {\rm int}\,I_1$, the proof is easier. Set $B:=-\omega_{N-1}\int_{r_2}^{r_3}r^{N-1}F(r,1)dr$. Arguing as in Step 3.2, we obtain the lower bound \begin{equation}\label{sevest1} J_{\varepsilon}(u_{\varepsilon})\ge J_{\varepsilon}(u_{\varepsilon}, B_{r_2-\varepsilon}(0)) +\varepsilon\omega_{N-1}H(\overline{r}_2)\beta_1+B+O(\varepsilon\eta)+o(\varepsilon). \end{equation} We take $\tilde{r}_2\in {\rm int}\,I_2$ such that $H(\overline{r}_2)>H(\tilde{r}_2)>\min_{s\in I_2}H(s)$ and consider the function \[ \tilde{u}_{\varepsilon}(r):=\begin{cases} u_{\varepsilon}(r) &\text{on }[0, r_2-\varepsilon), \\ 1+\frac{\eta}{\varepsilon}(r-r_2) &\text{on }[r_2-\varepsilon, r_2],\\ 1 &\text{on }[r_2, \tilde{r}_2-\varepsilon], \\ 1-\frac{\eta}{\varepsilon}(r-\tilde{r}_2+\varepsilon) &\text{on }[\tilde{r}_2-\varepsilon, \tilde{r}_2], \\ V\big(h(\tilde{r}_2)\frac{r-\tilde{r}_2}{\varepsilon}\big) &\text{on }[\tilde{r}_2, \tilde{r}_2+\varepsilon R''], \\ -1-\frac{\eta}{\varepsilon}(r-\tilde{r}_2-\varepsilon R''-\varepsilon) &\text{on }[\tilde{r}_2+\varepsilon R'', \tilde{r}_2+\varepsilon R'' +\varepsilon], \\ -1 &\text{on }[\tilde{r}_2+\varepsilon R''+\varepsilon, 1], \end{cases} \] where $V$ is the unique solution of (\ref{seed}) and $R''$ is the unique value such that $V(h(\tilde{r}_2)R'')=-1+\eta$. Since $u_{\varepsilon}$ is a global minimizer, we can estimate the energy of $\tilde{u}_{\varepsilon}$ as follows: \begin{equation}\label{sevest2} J_{\varepsilon}(u_{\varepsilon})\le J_{\varepsilon}(\tilde{u}_{\varepsilon}) \le J_{\varepsilon}(u_{\varepsilon}, B_{r_2-\varepsilon}(0)) + \varepsilon\omega_{N-1}H(\tilde{r}_2)\beta_1+B+O(\varepsilon\eta) +o(\varepsilon). \end{equation} Since $H(\overline{r}_2)>H(\tilde{r}_2)$, we have a contradiction from (\ref{sevest1}) and (\ref{sevest2}) by taking $\varepsilon=\varepsilon_j$ with $j$ sufficiently large and $\eta$ sufficiently small. \subsection*{Acknowledgments} The author would like to thank Professor Kazuhiro Kurata for his valuable advice and help, and the anonymous referee for the numerous useful comments. \begin{thebibliography}{99} \bibitem{AMP} S. B. Angenent, J. Mallet-Paret, and L. A. Peletier, \emph{Stable transition layers in a semilinear boundary value problem}, J. Differential Equations, {\bf 67} (1987), 212-242. \bibitem{CP} Ph. Cl\'{e}ment and L. A. Peletier, \emph{On a nonlinear eigenvalue problem occurring in population genetics}, Proc. Royal Soc. Edinburgh, {\bf 100A} (1985), 85-101. \bibitem{CS} Ph. Cl\'{e}ment and G. Sweers, \emph{Existence and multiplicity results for a semilinear eigenvalue problem}, Ann. Scuola Norm. Sup. Pisa, {\bf 14} (1987), 97-121. \bibitem{DS} E. N. Dancer, S. Yan, \emph{Construction of various types of solutions for an elliptic problem}, Calculus of Variations and Partial Differential Equations, {\bf 20} (2004), 93-118. \bibitem{GT} D. Gilbarg and N. S. Trudinger, \emph{Elliptic partial differential equations of second order}, Springer-Verlag, Berlin, second edition, 1983. \bibitem{Ma} H.
Matsuzawa, \emph{Stable transition layers in a balanced bistable equation with degeneracy}, Nonlinear Analysis, {\bf 58} (2004), 45-67. \bibitem{NM} A. S. do Nascimento, \emph{Stable transition layers in a semilinear diffusion equation with spatial inhomogeneities in $N$-dimensional domains}, J. Differential Equations, {\bf 190} (2003), 16-38. \bibitem{N} K. Nakashima, \emph{Multi-layered stationary solutions for a spatially inhomogeneous Allen-Cahn equation}, J. Differential Equations, {\bf 191} (2003), 234-276. \bibitem{NT} K. Nakashima, K. Tanaka, \emph{Clustering layers and boundary layers in spatially inhomogeneous phase transition problems}, Ann. Inst. H. Poincar\'{e} Anal. Non Lin\'{e}aire, {\bf 20} (2003), 107-143. \end{thebibliography} \end{document}