\documentclass[reqno]{amsart} \usepackage{hyperref} \usepackage{amssymb,graphicx} \AtBeginDocument{{\noindent\small \emph{Electronic Journal of Differential Equations}, Vol. 2009(2009), No. 52, pp. 1--48.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2009 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2009/52\hfil Diophantine condition in global well posedness] {Diophantine conditions in global well-posedness for coupled KdV-type systems} \author[Tadahiro Oh\hfil EJDE-2009/52\hfilneg] {Tadahiro Oh} \address{Tadahiro Oh \newline Department of Mathematics, University of Toronto \\ 40 St. George St, Rm 6290 \newline Toronto, ON M5S 2E4, Canada} \email{oh@math.toronto.edu} \thanks{Submitted August 2, 2008. Published April 14, 2009.} \subjclass[2000]{35Q53} \keywords{KdV; global well-posedness; I-method; Diophantine condition} \begin{abstract} We consider the global well-posedness problem of a one-parameter family of coupled KdV-type systems both in the periodic and non-periodic setting. When the coupling parameter $\alpha = 1$, we prove the global well-posedness in $H^s(\mathbb{R}) $ for $s > 3/4$ and $H^s(\mathbb{T}) $ for $s \geq -1/2$ via the $I$-method developed by Colliander-Keel-Staffilani-Takaoka-Tao \cite{CKSTT4}. When $\alpha \ne 1$, as in the local theory \cite{OH1}, certain resonances occur, closely depending on the value of $\alpha$. We use the Diophantine conditions to characterize the resonances. Then, via the second iteration of the $I$-method, we establish a global well-posedness result in $H^s(\mathbb{T})$, $s \geq \widetilde{s}$, where $\widetilde{s}= \widetilde{s}(\alpha) \in (5/7, 1]$ is determined by the Diophantine characterization of certain constants derived from the coupling parameter $\alpha$. We also show that the third iteration of the $I$-method fails in this case. 
\end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem} [section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{remark}[theorem]{Remark} \newtheorem{definition}[theorem]{Definition} \newtheorem{corollary}[theorem]{Corollary} \allowdisplaybreaks \section{Introduction} In this article, we consider the global well-posedness (GWP) of coupled KdV systems of the form \begin{equation} \label{KDVsystem1} \begin{gathered} u_t + a_{11} u_{xxx} + a_{12} v_{xxx} + b_1 u u_x + b_2 u v_x + b_3 u_x v + b_4 v v_x = 0 \\ v_t + a_{21} u_{xxx} + a_{22} v_{xxx} + b_5 u u_x + b_6 u v_x + b_7 u_x v + b_8 v v_x = 0 \\ (u, v) \big|_{t = 0} = (u_0, v_0) \end{gathered} \end{equation} in both periodic and non-periodic settings, where $A = \bigl(\begin{smallmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{smallmatrix} \bigr)$ is self-adjoint and non-singular, and $u$ and $v$ are real-valued functions. There are several systems of this type: the Gear-Grimshaw system \cite{GG}, the Hirota-Satsuma system \cite{HS}, the Majda-Biello system \cite{MB}, etc. By applying the space-time scale changes along with the diagonalization of $A$, one can reduce \eqref{KDVsystem1} to \begin{equation} \label{KDVsystem2} \begin{gathered} u_t + u_{xxx} + \widetilde{b_1} u u_x + \widetilde{b_2} u v_x + \widetilde{b_3} u_x v + \widetilde{b_4} v v_x = 0 \\ v_t + \alpha v_{xxx} + \widetilde{b_5} u u_x + \widetilde{b_6} u v_x + \widetilde{b_7} u_x v + \widetilde{b_8} v v_x = 0 \\ (u, v) \big|_{t = 0} = (u_0, v_0), \end{gathered} \end{equation} where $\alpha \ne 0$, $(x, t) \in \mathbb{T}\times\mathbb{R}$ or $\mathbb{R} \times \mathbb{R}$ with $\mathbb{T} = [0, 2\pi)$. 
As a model example, we consider the Majda-Biello system: \begin{equation} \label{MB} \begin{gathered} u_t + u_{xxx} + vv_x = 0\\ v_t + \alpha v_{xxx} + (uv)_x = 0 \\ (u, v) \big|_{t = 0} = (u_0, v_0), \end{gathered} \end{equation} where $(x, t) \in \mathbb{T}\times\mathbb{R}$ or in $\mathbb{R} \times \mathbb{R}$, $0< \alpha \leq 4$, and $u$ and $v$ are real-valued functions. This system has been proposed by Majda and Biello \cite{MB} as a reduced asymptotic model to study the nonlinear resonant interactions of long wavelength equatorial Rossby waves and barotropic Rossby waves with a significant mid-latitude projection, in the presence of suitable horizontally and vertically sheared zonal mean flows. In \cite{MB}, the values of $\alpha $ are numerically determined and they are $0.899$, $0.960$, and $0.980$ for different equatorial Rossby waves. Of particular interest to us is the periodic case because of its challenging mathematical nature as well as its physical relevance of the proposed model (the spatial period for the system before scaling is set as $40, 000$ km in \cite{MB}.) Several conservation laws are known for the system: \begin{equation} \label{Wconserved} \begin{gathered} E_1 = \int u \, dx, \quad E_2 = \int v \, dx, \\ N(u, v) = \int u^2 + v^2 dx, \quad H(u, v) = \int u_x^2 + \alpha v_x^2 - u v^2 dx, \end{gathered} \end{equation} where $H(u, v)$ is the Hamiltonian of the system. There seems to be no other conservation law, suggesting that the Majda-Biello system may not be completely integrable. The system has scaling which is similar to that of KdV and the critical Sobolev index $s_c$ is $-\frac{3}{2}$ just like KdV. First, we review the local well-posedness results of \eqref{MB} from \cite{OH1}. Note that all the results are essentially sharp in the sense that the smoothness/uniform continuity of the solution map fails below the specified regularities. 
When $\alpha =1$, the local well-posedness (LWP) theory of KdV (Bourgain \cite{BO1}, Kenig-Ponce-Vega \cite{KPV4}) immediately implies that \eqref{MB} is locally well-posed (LWP) in $H^{-\frac{3}{4}+}(\mathbb{R}) \times H^{-\frac{3}{4}+}(\mathbb{R})$. In \cite{OH1}, we showed that \eqref{MB} is locally well-posed (LWP) in $H^{-1/2}(\mathbb{T}) \times H^{-1/2}(\mathbb{T})$ without the mean 0 condition on the initial data, by relying on the vector-valued variants of the Bourgain space $X^{s, b}$ \cite{BO1} and the bilinear estimates due to Kenig-Ponce-Vega \cite{KPV4}. Now, let's turn to the case $\alpha \in (0, 1) \cup (1, 4]$. In the following, we first consider the periodic setting. Since $\alpha \ne 1$, we have two distinct linear semigroups $S(t)= e^{ -t \partial_x^3}$ and $S_\alpha(t)= e^{ -\alpha t \partial_x^3}$ corresponding to the linear equations for $u$ and $v$, respectively. Thus, we need to define two distinct Bourgain spaces $X^{s, b}$ and $X_\alpha^{s, b}$ to encompass the situation. For $s, b \in \mathbb{R}$, let $X^{s, b}(\mathbb{T}\times\mathbb{R})$ and $X_\alpha^{s, b}(\mathbb{T}\times\mathbb{R})$ be the completion of the Schwartz class $\mathcal{S} (\mathbb{T} \times \mathbb{R})$ with respect to the norms \begin{gather} \label{WXsb1} \|u\|_{X^{s, b}(\mathbb{T} \times \mathbb{R})} = \big\| \langle\xi\rangle ^s \langle\tau -\xi^3\rangle ^b \widehat{u}(\xi, \tau) \big\|_{L^2_{\xi, \tau}(\mathbb{Z} \times \mathbb{R})} \\ \label{WXsb2} \|v\|_{X_{\alpha}^{s, b}(\mathbb{T} \times \mathbb{R})} = \big\| \langle\xi\rangle ^s \langle\tau - \alpha \xi^3\rangle ^b \widehat{v}(\xi, \tau) \big\|_{L^2_{\xi, \tau}(\mathbb{Z} \times \mathbb{R})} \end{gather} where $\langle \cdot\rangle = 1 + |\cdot|$. 
Then, two of the crucial bilinear estimates in establishing the LWP of \eqref{MB} are: \begin{gather} \label{bilinear1} \| \partial_x (v_1 v_2) \|_{X^{s, -\frac{1}{2}} (\mathbb{T} \times \mathbb{R} )} \lesssim \|v_1\|_{X_\alpha^{s, \frac{1}{2}}(\mathbb{T} \times \mathbb{R} )} \|v_2\|_{X_\alpha^{s, \frac{1}{2}}(\mathbb{T} \times \mathbb{R} )}, \\ \label{bilinear2} \| \partial_x (u v) \|_{X_\alpha^{s, -\frac{1}{2}} (\mathbb{T} \times \mathbb{R} )} \lesssim \|u\|_{X^{s, \frac{1}{2}}(\mathbb{T} \times \mathbb{R} )} \|v\|_{X_\alpha^{s, \frac{1}{2}}(\mathbb{T} \times \mathbb{R} )}. \end{gather} First, consider the first bilinear estimate \eqref{bilinear1}. As in the KdV case \cite{KPV4}, we define the bilinear operator $\mathcal{B}_{s, b} (\cdot, \cdot)$ by \begin{align*} &\mathcal{B}_{s, b} (f, g) (\xi, \tau)\\ &= \frac{\xi \langle\xi\rangle ^s }{\langle\tau - \xi^3\rangle^{\frac{1}{2}}}\frac{1}{2\pi} \sum_{\xi_1+ \xi_2 = \xi} \int_{\tau_1 + \tau_2 = \tau} \frac{f(\xi_1, \tau_1) g(\xi_2, \tau_2)}{\langle\xi_1\rangle^s \langle\xi_2\rangle^s \langle\tau_1 - \alpha \xi_1^3\rangle^{1/2} \langle\tau_2 - \alpha \xi_2^3\rangle^{1/2}} d\tau_1. \end{align*} Then \eqref{bilinear1} holds if and only if $\| \mathcal{B}_{s,b}(f, g)\|_{L^2_{\xi, \tau}} \lesssim \| f \|_{L^2_{\xi, \tau}} \| g \|_{L^2_{\xi, \tau}}$. As in the KdV case, $\partial_x$ appears on the left hand side of \eqref{bilinear1} and thus we need to make up for this loss of derivative from $\langle\tau - \xi^3\rangle^{\frac{1}{2}} \langle\tau_1 - \alpha \xi_1^3\rangle^{1/2} \langle\tau_2 - \alpha \xi_2^3\rangle^{1/2}$ in the denominator. Recall that we basically gain $3/2$ derivatives in the KdV/$\alpha = 1$ case (with $\xi, \xi_1, \xi_2 \ne 0$) thanks to the algebraic identity \begin{equation} \label{P1algebra} \xi^3 - \xi_1^3 - \xi_2^3 = 3 \xi \xi_1\xi_2 \end{equation} for $\xi = \xi_1 + \xi_2$. 
However, when $\alpha \ne 1$, we no longer have such an identity and we have \begin{equation} \begin{aligned} \label{resonance1} &\max \big( \langle\tau - \xi^3\rangle, \langle\tau_1 - \alpha \xi_1^3\rangle, \langle\tau_2 - \alpha \xi_2^3\rangle \big)\\ &\sim \langle\tau - \xi^3\rangle + \langle\tau_1 - \alpha \xi_1^3\rangle + \langle\tau_2 - \alpha \xi_2^3\rangle \\ & \gtrsim \big| (\tau - \xi^3) - (\tau_1 - \alpha \xi_1^3) - (\tau_2 - \alpha \xi_2^3) \big| = | \xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3 | , \end{aligned} \end{equation} where $\xi = \xi_1 + \xi_2$ and $\tau = \tau_1 + \tau_2$. Note that the last expression in \eqref{resonance1} can be 0 for infinitely many (nonzero) values of $\xi, \xi_1, \xi_2 \in \mathbb{Z}$, causing resonances. By solving the resonance equation: \begin{equation} \label{JJreseq1} \xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3 = 0 \quad \text{with } \xi = \xi_1 + \xi_2, \end{equation} we have $( \xi_1, \xi_2) = (c_1 \xi, c_2 \xi)$ or $(c_2 \xi, c_1 \xi)$, where \begin{equation} \label{c_1} c_1 = \tfrac{1}{2} + \tfrac{\sqrt{-3 + 12 \alpha^{-1}}}{6}, \quad c_2 = \tfrac{1}{2} - \tfrac{\sqrt{-3 + 12 \alpha^{-1}}}{6}. \end{equation} Note that $c_1 + c_2 = 1$ and that $c_1, c_2 \in \mathbb{R}$ if and only if $ 0 < \alpha \leq 4$. If $c_1 \in \mathbb{Q}$ (and thus $c_2 \in \mathbb{Q}$), then there are infinitely many values of $\xi \in \mathbb{Z}$ such that $c_1\xi, \: c_2 \xi \in \mathbb{Z}$. This causes resonances for infinitely many values of $\xi$, and thus we do not have any gain of derivative from $\langle\tau - \xi^3\rangle \langle\tau_1 - \alpha \xi_1^3\rangle \langle\tau_2 - \alpha \xi_2^3\rangle $ in this case. If $c_1 \in \mathbb{R} \setminus \mathbb{Q}$, then $c_1 \xi \notin \mathbb{Z}$ for any nonzero $\xi \in \mathbb{Z}$; i.e., $\xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3 \ne 0 $ for any nonzero $\xi, \xi_1, \xi_2 \in \mathbb{Z}$. 
However, generally speaking, $\xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3$ can be arbitrarily close to 0, since $c_1 \xi$ can be arbitrarily close to an integer. Therefore, we need to measure \emph{how ``close'' $c_1$ is to rational numbers}. In \cite{OH1}, we used the following definition regarding the Diophantine conditions commonly used in dynamical systems. \begin{definition}[Arnold \cite{AR}] \label{def1} \rm A real number $\rho$ is called of type $(K, \nu)$ (or simply of type ${\nu}$) if there exist positive $K$ and $\nu$ such that for all pairs of integers $(m, n)$ with $n \ne 0$, we have \begin{equation} \label{lowerbd} \left| \rho - \frac{m}{n} \right| \geq \frac{K}{ |n|^{2+\nu}} . \end{equation} \end{definition} Also, for our purpose, we defined \emph{the minimal type index} of a given real number $\rho$. \begin{definition} \label{def2} \rm Given a real number $\rho$, define the minimal type index ${\nu_{\rho}}$ of ${\rho}$ by \[ \nu_{\rho} = \begin{cases} \infty , &\text{if } \rho \in \mathbb{Q} \\ \inf \{ \nu > 0 : \rho \text{ is of type } \nu \}, & \text{if } \rho \notin \mathbb{Q} . \end{cases} \] \end{definition} \begin{remark} \label{JJDIO} \rm Then, by Dirichlet Theorem {\cite[p.112]{AR}} and {\cite[p.116, lemma 3]{AR}}, it follows that $\nu_\rho \geq 0$ for \emph{any} $\rho \in \mathbb{R}$ and $\nu_\rho = 0$ for \emph{almost every } $\rho \in \mathbb{R}$. \end{remark} Using the minimal type index $\nu_{c_1}$ of $c_1$, for any $\varepsilon >0$, we have \begin{equation}\label{lowerbdC} | \xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3 | \gtrsim |\xi|^{1-\nu_{c_1} -\varepsilon} \end{equation} for all sufficiently large $\xi \in \mathbb{Z}$, which provides a good lower bound on \eqref{resonance1}. With \eqref{lowerbdC}, we proved that \eqref{bilinear1} holds for $s > 1/2 + \frac{1}{2} \nu_{c_1}$. 
The resonance equation of the second bilinear estimate \eqref{bilinear2} is given by \begin{equation} \label{resonance2} \alpha \xi^3 - \xi_1^3 - \alpha \xi_2^3 = 0 \quad\text{with } \xi = \xi_1 + \xi_2. \end{equation} By solving \eqref{resonance2}, we obtain $(\xi_1, \xi_2) = \big(d_1 \xi, (1-d_1) \xi\big), \big(d_2 \xi, (1-d_2) \xi\big), (0, \xi)$, where \begin{equation} \label{d_1 and d_2} d_1 = \tfrac{- 3 \alpha + \sqrt{3 \alpha(4 - \alpha)}}{2 (1 - \alpha)} \quad \text{and} \quad d_2 = \tfrac{- 3 \alpha - \sqrt{3 \alpha(4 - \alpha)}}{2 (1 - \alpha)}. \end{equation} Note that $d_1, d_2 \in \mathbb{R}$ if and only if $\alpha \in [0, 1) \cup (1, 4]$. Then, for any $\varepsilon >0$, we have \begin{equation}\label{lowerbdD} | \alpha \xi^3 - \xi_1^3 - \alpha \xi_2^3 | \gtrsim |\xi|^{1-\max( \nu_{d_1}, \nu_{d_2}) -\varepsilon} \end{equation} for all sufficiently large $\xi \in \mathbb{Z}$ with $|\xi_1 - d_1 \xi| <1$ or $|\xi_1 - d_2 \xi| <1$. With \eqref{lowerbdD}, we proved that \eqref{bilinear2} holds for $s > 1/2 + \frac{1}{2} \max( \nu_{d_1}, \nu_{d_2})$ with the mean 0 condition on $u$. Note that the mean 0 condition on $u$ is needed since $\xi_1 = 0$ is a solution of \eqref{resonance2} for any $\xi \in \mathbb{Z}$. Also, we need both $\nu_{d_1}$ and $ \nu_{d_2}$ since $d_1 + d_2 \notin \mathbb{Q}$ in general. \begin{remark} \label{REM:bilinear} \rm It is shown in \cite{OH1} that the bilinear estimates \eqref{bilinear1} and \eqref{bilinear2} hold for $s \geq 0$ away from the resonance sets; i.e., \eqref{bilinear1} holds for $s \geq 0$ on $ \{ (\xi, \xi_1) : |\xi| \gtrsim 1, |\xi_1 - c_1 \xi| \geq 1$ and $ |\xi_1 - c_2 \xi| \geq 1\}$, and \eqref{bilinear2} holds for $s \geq 0$ on $ \{ (\xi, \xi_1) : |\xi| \gtrsim 1, |\xi_1 - d_1 \xi| \geq 1$ and $|\xi_1 - d_2 \xi| \geq 1\}$. \end{remark} Now, let \begin{equation} \label{s0} s_0 (\alpha) = \frac{1}{2} + \frac{1}{2} \max( \nu_{c_1}, \nu_{d_1}, \nu_{d_2}). 
\end{equation} Note that $s_0 = 1/2$ for almost every $\alpha \in (0, 4] \setminus \{1\}$ in view of Remark \ref{JJDIO}. In \cite{OH1}, we proved that, for $ \alpha \in ( 0 , 4] \setminus \{1\}$, the Majda-Biello system \eqref{MB} is locally well-posed in $H^s(\mathbb{T}) \times H^s(\mathbb{T})$ for $s \geq s^\ast(\alpha) := \min ( 1,s_0 + ) $, assuming the mean 0 condition on $u_0$. We would like to point out the following. On the one hand, we have $s^\ast(\alpha) = s_0 (\alpha) = 1/2+$ for almost every $\alpha \in (0, 4] \setminus \{1\}$. On the other hand, for any interval $I \subset (0, 4]$, there exists $\alpha \in I$ such that $s^\ast(\alpha) = 1$. This shows that the well-posedness (below $H^1$) of the periodic Majda-Biello system is very unstable under a slight perturbation of the parameter $\alpha$. Now, let us discuss the LWP of \eqref{MB} in the non-periodic setting for $\alpha \in (0, 4] \setminus \{1\}$. In this case, the LWP of \eqref{MB} follows once we prove the bilinear estimates: \begin{gather} \label{Rbilinear1} \| \partial_x (v_1 v_2) \|_{X^{s, b-1} ( \mathbb{R}^2 )} \lesssim \|v_1\|_{X_\alpha^{s, b}( \mathbb{R}^2 )}\|v_2\|_{X_\alpha^{s, b} ( \mathbb{R}^2 )} \\ \label{Rbilinear2} \| \partial_x (u v) \|_{X_\alpha^{s, b-1} ( \mathbb{R}^2 )} \lesssim \|u\|_{X^{s, b}( \mathbb{R}^2 )}\|v\|_{X_\alpha^{s, b}( \mathbb{R}^2 )} . \end{gather} As in the periodic case, we obtain two resonance equations $ \xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3 = 0 $ and $ \alpha \xi^3 - \xi_1^3 - \alpha \xi_2^3 = 0$ with $\xi = \xi_1 + \xi_2$, giving rise to $c_1$, $d_1$, and $d_2$. Since the spatial Fourier variables are not discrete in this case, the rational/irrational character of $c_1, d_1,$ and $d_2$ is irrelevant. In \cite{OH1}, we proved that \eqref{Rbilinear1} and \eqref{Rbilinear2} hold for $s \geq 0$ with some $b = 1/2+$. 
Using the $L^2$ conservation law $N(u, v) = \int u^2 + v^2$, we showed that \eqref{MB} is globally well-posed in $L^2(\mathbb{R}) \times L^2(\mathbb{R}) $. This result is sharp in view of the ill-posedness result \cite{OH1} in $H^s(\mathbb{R}) \times H^s(\mathbb{R})$ for $s < 0$. Note that although the rational/irrational character of $c_1, d_1,$ and $d_2$ is irrelevant in this case, the result for $ \alpha \in (0, 4] \setminus \{1\}$ is much worse than that for $\alpha = 1$, where the threshold for LWP is $s = -\frac{3}{4}+$. In this paper, we continue to study the well-posedness theory of \eqref{MB}, in particular, global in time well-posedness. In view of the $L^2$ conservation, we see that when $\alpha = 1$, \eqref{MB} is globally well-posed in $L^2 \times L^2$ in both periodic and non-periodic settings. When $\alpha \in (0, 4] \setminus \{1\}$, one can use the Hamiltonian $H(u, v)$ along with Sobolev embedding to obtain an a priori bound on the $H^1$ norm of the solutions $(u, v)$. This yields the GWP of \eqref{MB} in $H^1(\mathbb{T}) \times H^1(\mathbb{T})$. Note that this result is sharp when $s^\ast = 1$, i.e. when $\max( \nu_{c_1}, \nu_{d_1}, \nu_{d_2}) \geq 1$. In particular, this result is sharp for $\alpha = 4$ since $c_1 \in \mathbb{Q}$ for $\alpha = 4$. In order to fill the gap of the Sobolev indices between LWP and GWP (except for the non-periodic setting with $\alpha \in (0, 4] \setminus \{1\}$), we use the $I$-method developed by Colliander-Keel-Staffilani-Takaoka-Tao \cite{CKSTT4} to generate sequences of modified energies $E^{(j)}$ to gain a better control of the growth of the Sobolev norms of solutions. In the following section, we introduce the necessary notations to set up the modified energies. In this introduction, we simply state the results without any details. When $\alpha = 1$, we use the third modified energy $E^{(3)}$ constructed from $N(u, v) = \int u^2 + v^2$ as in the KdV case \cite{CKSTT4}. 
In the non-periodic case, the result follows from the argument in \cite{CKSTT4} once we prove certain pointwise cancellations in the quintilinear multiplier for $\frac{d}{dt}E^{(3)}$, which in turn controls the growth of the solutions. Thus, we obtain: \begin{theorem} \label{mainTHM:GWPR} When $\alpha = 1$, the Majda-Biello system \eqref{MB} is globally well-posed in $H^s(\mathbb{R}) \times H^s(\mathbb{R})$ for $s > -\frac{3}{4}$. \end{theorem} In the periodic case, in handling the situation without the mean 0 assumption on $u_0$ and $v_0$, we consider $u \to u - p$ and $ v \to v - q$, where $p$ and $q$ are the means of $u_0$ and $v_0$, along with the conservation of $E_1$ and $E_2$ (i.e. of the means of $u$ and $v$). This leads us to consider the following system: \begin{equation} \label{mean0MB'} \begin{gathered} u_t + u_{xxx} + q v_x + v v_x = 0 \\ v_t + v_{xxx} + q u_x + pv_x + (u v)_x = 0. \end{gathered} \end{equation} When $q \ne 0$, the linear part of \eqref{mean0MB'} is mixed, and thus we need to use the vector-valued Bourgain space $X^{s, b}_{p, q}$ as in the local theory \cite{OH1}. There are two difficulties in this case. As seen in \cite{OH1}, the eigenvalues $d_1(\xi)$ and $d_2(\xi)$ of the symbol $A(\xi) = \left(\begin{smallmatrix} \xi^3 & -q\xi \\ -q\xi & \xi^3 - p\xi\end{smallmatrix}\right)$ of the linear part are no longer $\xi^3$. Moreover, the presence of lower order linear terms in \eqref{mean0MB'} introduces extra terms in $\frac{d}{dt} E^{(3)}$. In dealing with the first difficulty, we need to refine the trilinear estimate by Colliander-Keel-Staffilani-Takaoka-Tao \cite[Theorem 3]{CKSTT5} using $d_1(\xi)$ and $d_2(\xi)$. This requires us to go through a more refined number-theoretic counting argument. See the Appendix. In the end, we obtain the following result. 
\begin{theorem} \label{mainTHM:GWPT} When $\alpha = 1$, the periodic Majda-Biello system \eqref{MB} (without the mean 0 assumption) is globally well-posed in $H^s(\mathbb{T}) \times H^s(\mathbb{T})$ for $s \geq -1/2$. \end{theorem} We point out that the algebraic identities \cite{BO1}: \begin{equation} \label{IDEN} \begin{gathered} \xi_1^3 +\xi_2^3 +\xi_3^3 = 3\xi_1\xi_2\xi_3, \quad \text{when } \xi_1 + \xi_2 + \xi_3 = 0 \\ \xi_1^3 +\xi_2^3 +\xi_3^3+ \xi_4^3 = 3(\xi_1 + \xi_2)(\xi_1 + \xi_3)(\xi_1 + \xi_4), \quad \text{when }\xi_1 + \xi_2 + \xi_3 + \xi_4= 0 \end{gathered} \end{equation} play a crucial role in the proof when $\alpha = 1$ as in the KdV case. When $\alpha \in (0, 4) \setminus \{1\}$, \eqref{MB} is locally well-posed in $H^s(\mathbb{T}) \times H^s(\mathbb{T})$ (with the mean 0 assumption on $u_0$) for $s \geq s^* :=\min( 1, s_0 +)$, where $s_0 = s_0 (\alpha)$ is given in \eqref{s0}. It is also globally well-posed in $H^1(\mathbb{T}) \times H^1(\mathbb{T})$. When $s_0 \geq 1$, this is sharp. Hence, we assume $s_0 < 1$ in the following. In this case, we construct the modified energies $E^{(j)}$ using the Hamiltonian $H(u, v)$ since $s^* \in (\frac{1}{2}, 1)$. The main difficulty when $\alpha \ne 1$ lies in the fact that we can not make use of the identities in \eqref{IDEN} and that the multipliers for $\frac{d}{dt} E^{(j)}$ no longer satisfy certain symmetries needed for reasonable pointwise cancellation. In this case we obtain the following positive and negative results, using the $I$-method with $E^{(j)},$ $ j = 1, 2, 3$. \begin{theorem} \label{mainTHM:YGWP1} Let $ \alpha \in (0, 4) \setminus \{1\}$ and $s_0 < 1$. Assume the mean 0 condition on $u_0$. Then, the I-method with the first modified energy $E^{(1)}$ establishes the global well-posedness of the Majda-Biello system \eqref{MB} in $H^{s}(\mathbb{T}) \times H^{s}(\mathbb{T})$ for $s \geq \frac{ 3 + 7 (s_0+) - 2(s_0+)^2}{8}. 
$ In particular, it is globally well-posed for $s > \frac{3}{4} $ for almost every $\alpha \in (0, 4)$. \end{theorem} \begin{theorem} \label{mainTHM:YGWP2} Let $ \alpha \in (0, 4) \setminus \{1\}$ and $s_0 < 1$. Assume the mean 0 condition on $u_0$. Then, the I-method with the second modified energy $E^{(2)}$ establishes the global well-posedness of the Majda-Biello system \eqref{MB} in $H^{s}(\mathbb{T}) \times H^{s}(\mathbb{T})$ for \[ s \geq \max \Big( \frac{6(s_0+) - 2 (s_0+)^2}{5 - (s_0+)}, \frac{2 (s_0+) + 9}{14} \Big). \] In particular, it is globally well-posed for $s > 5/7 $ for almost every $\alpha \in (0, 1)$. \end{theorem} \begin{remark} \label{RM:YGWP3} \rm We show that one of the quintilinear multipliers for the time derivative $\frac{d}{dt} E^{(3)}$ of the third modified energy is unbounded. Hence, the $I$-method fails after the second iteration, and Theorem \ref{mainTHM:YGWP2} is the best global well-posedness result we can obtain via the $I$-method. See Section \ref{CHAP:YGWP}. \end{remark} \begin{figure}[ht] \begin{center} \includegraphics[width=0.7\textwidth]{fig1} %\include[height = 5cm, keepaspectratio]{fig1} \end{center} \caption{GWP index $s$ against LWP index $s_0$} \label{indexgraph} \end{figure} Now, we compare Theorems \ref{mainTHM:YGWP1} and \ref{mainTHM:YGWP2}. In Figure \ref{indexgraph}, the curve ${\rm I}$ shows the Sobolev index $s$ for the GWP, obtained by the first modified energy $E^{(1)}$, against the LWP index $s_0$, and ${\rm II}$ shows the GWP index obtained by the second modified energy $E^{(2)}$. Since $\frac{3}{4} -\frac{5}{7} \simeq 0.75 - 0.714 = 0.036$, it may seem that Theorem \ref{mainTHM:YGWP2} does not provide much improvement. However, Figure \ref{indexgraph} shows that there's actually a significant gain in Theorem \ref{mainTHM:YGWP2}, at least for $s_0 \geq \frac{36}{52} \simeq 0.69$, i.e. until the curve ${\rm II}$ bends. 
When $s_0 = \frac{36}{52}$, Theorem \ref{mainTHM:YGWP1} gives the GWP index $s \simeq 0.861$ and Theorem \ref{mainTHM:YGWP2} gives the GWP index $s \simeq 0.741$ with their difference $0.12 \gg 0.036$. Lastly, we summarize the LWP and GWP results for KdV and the Majda-Biello system \eqref{MB} in the tables below. Note that Kappeler-Topalov \cite{KT} proved the global well-posedness of KdV in $H^{-1}(\mathbb{T})$, using the complete integrability of the equation. We did not include this result in the tables since their method can not be applied to the general coupled KdV system \eqref{KDVsystem2} which is not necessarily integrable. \begin{table}[ht] \begin{center} \begin{tabular}{|c|c|c|c|} \hline \vphantom{$\Big|$} & KdV & $\alpha = 1$ & $ 0 < \alpha < 1$ \\ \hline \vphantom{$\Big|$} $\mathbb{T}$ & $-\frac{1}{2}$ & $-\frac{1}{2}$ & $\frac{1}{2}+ \sim 1$, depending on $\alpha$ \\ \hline \vphantom{$\Big|$} $\mathbb{R}$ & $-\frac{3}{4} $ & $-\frac{3}{4} +$ & 0 \\ \hline \end{tabular} \end{center} \caption{Local Well-posedness Results} \label{TAB:LWP} \end{table} \begin{table}[ht] \begin{center} \begin{tabular}{|c|c|c|c|} \hline \vphantom{$\Big|$} & KdV & $\alpha = 1$ & $ 0 < \alpha < 1$ \\ \hline \vphantom{$\Big|$} $\mathbb{T}$ & $-\frac{1}{2}$ & $-\frac{1}{2}$ & $\frac{5}{7}+ \sim 1$, depending on $\alpha$ \\ \hline \vphantom{$\Big|$} $\mathbb{R}$ & $-\frac{3}{4}+ $ & $-\frac{3}{4} +$ & 0 \\ \hline \end{tabular} \end{center} \caption{Global Well-posedness Results} \label{TAB:GWP} \end{table} This work is a part of the author's Ph.D. thesis \cite{OHTHESIS}. In the forthcoming papers, we address the invariance of the Gibbs measures (i.e. the weighted Wiener measure for $\alpha \in (0, 4] $ in \cite{OH3} and the white noise for $\alpha = 1$ in \cite{OH4}) in the periodic setting and the global well-posedness almost surely on the statistical ensembles. This paper is organized as follows. 
In Section 2, we introduce some standard notations as well as the notations for describing certain multilinear forms. In Section 3, we introduce the modified energies for the non-periodic setting with $\alpha = 1$. Then, we focus on establishing a pointwise estimate for one of the quadrilinear multipliers which is not present in the KdV case \cite{CKSTT4}. In Section 4, we first go over the vector-valued function spaces to handle the well-posedness theory on $\mathbb{T}$ with $\alpha = 1$ without the mean 0 assumption. After introducing the modified energies in this setting, we establish tri-, quadri-, and quintilinear estimates. We conclude this section by proving Theorem \ref{mainTHM:GWPT} via the almost conservation law of the third modified energy. In Section 5, we present the proof of Theorem \ref{mainTHM:YGWP2}. We first introduce the modified energies constructed from the Hamiltonian and discuss the unboundedness of the growth of the third modified energy. Then, we establish the almost conservation law of the second modified energy by establishing the crucial quadrilinear estimates. In the Appendix, we present the proof of Lemma \ref{LEM:XXXTRILINEAR}, the crucial trilinear estimate for proving Theorem \ref{mainTHM:GWPT} in Section 4. \section{Notation} Let $\mathbb{T}_\lambda = [0, 2 \pi \lambda)$ for $\lambda \geq 1$. In the periodic setting on $\mathbb{T}_\lambda$, the spatial Fourier domain is $\mathbb{Z} / \lambda$. Let $d \xi^\lambda$ be the normalized counting measure on $\mathbb{Z} /\lambda$, and we say $f \in L^p(\mathbb{Z}/\lambda)$, $1 \leq p < \infty$ if \[ \| f \|_{L^p(\mathbb{Z}/\lambda)} = \Big( \int_{\mathbb{Z}/\lambda} |f(\xi)|^p d\xi^\lambda \Big)^{1/p} := \Big( \frac{1}{2\pi\lambda} \sum_{\xi \in \mathbb{Z}/\lambda} |f(\xi)|^p \Big)^{1/p} < \infty. \] If $ p = \infty$, we have the obvious definition involving the essential supremum. 
For $f \in \mathcal{S}(\mathbb{R})$, the Fourier transform of $f$ is defined as $ \widehat{f}(\xi) = \int_{\mathbb{R}} e^{-i x \xi} f(x) dx,$ and its inverse Fourier transform is defined as $\check{f}(\xi) = \frac{1}{2\pi}\widehat{f}(-\xi)$. If $f \in L^2(\mathbb{T}_\lambda)$, then the Fourier transform of $f$ is defined as \[ \widehat{f}(\xi) = \int_{0}^{2\pi\lambda} e^{-i x \xi} f(x) dx, \quad\text{where } \xi \in \mathbb{Z}/\lambda, \] and we have the Fourier inversion formula \[ f(x) = \int_{\mathbb{Z}/\lambda} e^{i x \xi} \widehat{f}(\xi) d\xi^\lambda = \frac{1}{2\pi\lambda} \sum_{\xi \in \mathbb{Z}/\lambda} e^{i x \xi} \widehat{f}(\xi). \] If the function depends on both $x$ and $t$, we use ${}^{\wedge_x}$ (and ${}^{\wedge_t}$) to denote the spatial (and temporal) Fourier transform, respectively. However, when there is no confusion, we simply use ${}^\wedge$ to denote the spatial Fourier transform, temporal Fourier transform, and the space-time Fourier transform, depending on the context. Let $\langle \cdot \rangle = 1 + | \cdot |$. For $Z= \mathbb{R} $ or $\mathbb{T}_\lambda$, we define $X^{s, b}(Z \times \mathbb{R})$ and $X^{s, b}_\alpha (Z \times \mathbb{R})$ by the norms \begin{gather} \|u \|_{X^{s, b}(Z \times \mathbb{R})} = \| \langle\xi\rangle ^s \langle\tau - \xi^3\rangle^b \widehat{u}(\xi, \tau) \|_{L^2_{\xi, \tau}(Z^\ast \times \mathbb{R})} \\ \|v \|_{X^{s, b}_\alpha(Z \times \mathbb{R})} = \| \langle\xi\rangle ^s \langle\tau - \alpha \xi^3\rangle^b \widehat{v}(\xi, \tau) \|_{L^2_{\xi, \tau}(Z^\ast \times \mathbb{R})}, \end{gather} where $Z^\ast = \mathbb{R}$ if $Z = \mathbb{R}$ and $Z^\ast = \mathbb{Z}/\lambda$ if $Z = \mathbb{T}_\lambda$. Given any time interval $I \subset \mathbb{R}$, we define the local in time $X^{s, b}(Z \times I )$ by \[ \|u\|_{X_I^{s, b}} = \|u \|_{X^{s, b}(Z \times I )} = \inf \big\{ \|\widetilde{u} \|_{X^{s, b}(Z \times \mathbb{R})}: {\widetilde{u}|_I = u}\big\}. 
\] We define the local in time $X^{s, b}_\alpha (Z \times I )$ analogously. In proving estimates, we often use the Littlewood-Paley decomposition implicitly. In such cases, we define $N_j$ to be a dyadic block for $\xi_j$, i.e. $|\xi_j | \sim N_j$. Also, in dealing with a product space of two copies of a Banach space $X$, we may use $X\times X$ and $X$ interchangeably. We use $c$, $ C$ to denote various constants, usually depending only on $s, b$, and $\alpha$. If a constant depends on other quantities, we make it explicit. We use $A\lesssim B$ to denote an estimate of the form $A\leq CB$. Similarly, we use $A\sim B$ to denote $A\lesssim B$ and $B\lesssim A$ and use $A\ll B$ when there is no general constant $C$ such that $B \leq CA$. We also use $a+$ (and $a-$) to denote $a + \varepsilon$ (and $a - \varepsilon$), respectively, for arbitrarily small $\varepsilon \ll 1$. Now, we introduce the notation for describing certain multilinear forms; see for example \cite{CKSTT4}, Tao \cite{TAO}. Let $X = \mathbb{R}$ or $\mathbb{T}_\lambda$ and $Y$ denote the corresponding Fourier space, i.e. $Y = \mathbb{R}$ if $X = \mathbb{R}$, and $Y = \mathbb{Z}/\lambda $ if $X = \mathbb{T}_\lambda$. For $n \geq 2$, define an $n$-multiplier $M_n(\xi_1, \dots,\xi_n) $ to be a function: $\Gamma_n(Y) \to \mathbb{C}$, where $\Gamma_n(Y)$ is the hyperplane in $Y^n$ given by $ \Gamma_n(Y) = \{(\xi_1, \dots,\xi_n) \in Y^n : \xi_1 + \dots + \xi_n = 0\}. $ We endow $\Gamma_n(Y)$ with the $n-1$ dimensional surface measure given by \[ \int_{\Gamma_n(Y)} f = \int_{Y^{n-1}} f(\xi_1, \dots, \xi_{n-1}, -\xi_1 - \dots - \xi_{n - 1}) d\xi_1 \dots d\xi_{n-1}. \] We use $d\xi_j^\lambda$ in place of $d\xi_j$ if $Y = \mathbb{Z}/\lambda$. Also, given an $n$-multiplier $M_n$, define an $n$-form $\Lambda_n( M ; f_1, \dots, f_n)$ acting on $n$ functions $f_1, \dots, f_n$ by \[ \Lambda_n( M_n ; f_1, \dots, f_n) = \int_{ \Gamma_n} M_n (\xi_1, \dots, \xi_n) \widehat{f_1}(\xi_1) \dots \widehat{f_n}(\xi_n). 
\] Given an $n$-multiplier $M_n$ on $\Gamma_n(Y)$, we define its norm $\|M_n\|_{[n;Y]}$ to be the smallest constant such that the inequality $ |\Lambda_n( M_n ; f_1, \dots, f_n)| \leq \|M_n\|_{[n;Y]} \prod_{j = 1}^n \|f_j\|_{L^2(Y)} $ holds for all test functions $f_j$ on $Y$. We extend this definition of the multiplier norm to the space-time $n$-multiplier $M_n$ by defining $\|M_n\|_{[n;Y \times \mathbb{R}]}$ to be the smallest constant such that the inequality \begin{equation} \label{XTAO1} \Big| \int\Lambda_n( M_n ; f_1, \dots, f_n)dt \Big| \leq \|M_n\|_{[n;Y\times \mathbb{R}]} \prod_{j = 1}^n \|f_j\|_{L^2(Y \times \mathbb{R})} \end{equation} holds for all test functions $f_j$ on $Y \times \mathbb{R}$. We define the symmetrization of an $n$-multiplier $M_n$ by \[ [M_n]_{\rm sym} (\xi_1, \dots,\xi_n) = \frac{1}{n !} \sum_{\sigma \in S_n} M_n\big(\sigma(\xi_1, \dots,\xi_n)\big), \] where $S_n$ is the symmetric group on $n$ elements. Unlike the KdV theory, we sometimes need to symmetrize only under certain indices, say $j$ and $k$. We define the symmetrization under $j$ and $k$ by \begin{align*} &[M_n]_{ j \leftrightarrow k} (\xi_1, \dots,\xi_n)\\ &= 1/2\Big(M_n\big((\xi_1, \dots, \xi_j, \dots, \xi_k, \dots, \xi_n)\big) + M_n\big((\xi_1, \dots,\xi_k,\dots,\xi_j, \dots, \xi_n)\big) \Big) . \end{align*} Similarly, for pairs of indices $(j_1, k_1)$ and $(j_2, k_2)$, we define $[M_n]_{ \substack{j_1 \leftrightarrow k_1\\ j_2 \leftrightarrow k_2}}$ to be the symmetrized average of $M_n$ under $ j_1 \leftrightarrow k_1$ and $j_2 \leftrightarrow k_2$. Lastly, let $\xi_{jk} = \xi_j + \xi_k$, $\xi_{jkl} = \xi_j + \xi_k + \xi_l$, and $Z_k = \xi_1^3 + \dots + \xi_k^3$. Note that we have nice algebraic identities for $Z_k$ when $k = 3, 4$: $Z_3 = 3\xi_1\xi_2\xi_3$ when $\xi_1 + \xi_2 + \xi_3 = 0$ and \begin{equation} \label{XALGEBRA2} \xi_1 + \xi_2 + \xi_3 + \xi_4= 0 \Longrightarrow Z_4 = \xi_1^3 +\xi_2^3 +\xi_3^3+ \xi_4^3 = 3\xi_{12}\xi_{13}\xi_{14}. 
\end{equation} \section{Global Well-Posedness on $\mathbb{R}$, ${\alpha = 1}$} \label{SEC:XGWPonR} In this section, we briefly discuss an application of the $I$-method on $\mathbb{R}$ for $\alpha = 1$ to obtain the global well-posedness in $H^{-\frac{3}{4}+}(\mathbb{R}) \times H^{-\frac{3}{4}+}(\mathbb{R})$, referring to the corresponding results in the KdV theory \cite{CKSTT4}. We mainly focus on setting up the modified energies, displaying certain asymmetry in the multipliers which was not present in the KdV theory \cite{CKSTT4}. Then, we concentrate on proving the pointwise estimate on the multiplier in the absence of full symmetry. The remaining argument is basically the same as in \cite{CKSTT4} and hence is omitted. For full details, see \cite{OHTHESIS}. \subsection{Modified Energies} For $s < 0$, define $m: \mathbb{R} \to \mathbb{R} $ to be the even, smooth, monotone Fourier multiplier given by \begin{equation} \label{XXM} m(\xi ) = \begin{cases} 1, & \text{for } |\xi | \leq N \\ \frac{N^{-s}}{|\xi|^{-s} }, & \text{for } |\xi| \geq 2N, \end{cases} \end{equation} for $N \gg1$ (to be determined later), and the operator $I$ to be the corresponding Fourier multiplier operator defined by $\widehat{I f}(\xi) = m(\xi)\widehat{f}(\xi)$. The operator $I$ is smoothing of order $-s$ (since $s <0$) : $H^s \to L^2$ and we have $ \|f \|_{X^{s_0, b_0}} \lesssim \|I f \|_{X^{s_0 - s, b_0}} \lesssim N^{-s} \|f \|_{X^{s_0, b_0}}$ for any $ s_0, b_0 \in \mathbb{R}$. Now, define the first modified energy $E^{(1)}(t)$ by \[ E^{(1)}(t) = N(Iu, Iv)(t) = \|(Iu, Iv)(t)\|_{L^2 \times L^2}^2. \] By Plancherel and the fact that $m$, $u$, and $v$ are real-valued, we have \[ E^{(1)}(t) =\Lambda_2(m(\xi_1)m( \xi_2); u, u) + \Lambda_2(m(\xi_1)m( \xi_2); v, v). 
\] Using \eqref{MB}, we have \begin{equation} \label{X1stMOD} \frac{d}{dt} E^{(1)}(t) = - 3i \Lambda_3\big([m(\xi_1)m(\xi_{23})\xi_{23}]_{\rm sym};u, v, v \big) = \Lambda_3(M_3;u, v, v), \end{equation} where $M_3 = 3i [\xi_1 m^2(\xi_1)]_{\rm sym}$. Now, we define the second modified energy $E^{(2)}(t)$ by \[ E^{(2)}(t) = E^{(1)}(t) + \Lambda_3(\sigma_3; u, v, v ), \] where the 3-multiplier $\sigma_3$ will be chosen to achieve a cancellation. Then, we have \begin{equation} \label{X2ndMOD} \frac{d}{dt} E^{(2)}(t) = \frac{d}{dt} E^{(1)}(t) + \frac{d}{dt} \Lambda_3(\sigma_3; u, v, v ). \end{equation} By \eqref{MB}, we have \begin{align*} \frac{d}{dt} \Lambda_3(\sigma_3; u, v, v ) = & \Lambda_3( i \sigma_3 Z_3 ; u, v, v) -\frac{i}{2} \Lambda_4( \xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3); v, v, v, v)\\ & -2i \Lambda_4( \xi_{23} \sigma_3(\xi_{1}, \xi_{23}, \xi_4); u, u, v, v). \end{align*} Then, by choosing $\sigma_3 = \frac{iM_3}{Z_3}$, we cancel the two trilinear terms in \eqref{X2ndMOD} and thus we obtain \[ \frac{d}{dt} E^{(2)}(t) = \Lambda_4({M_4}; v, v, v, v)+ \Lambda_4(\widetilde{M_4}; u, u, v, v), \] where $M_4 = -\tfrac{i}{2}[\xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3)]_{\rm sym} $ and $ \widetilde{M_4} = -2i [\xi_{23} \sigma_3(\xi_{1}, \xi_{23}, \xi_4)]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}}$. Now, we define the third modified energy $E^{(3)}(t)$ by \[ E^{(3)}(t) = E^{(2)}(t) + \Lambda_4(\sigma_4; v, v, v, v )+\Lambda_4(\widetilde{\sigma_4}; u,u, v, v ), \] with $\sigma_4 = \frac{iM_4}{Z_4}$ and $\widetilde{\sigma_4} = \frac{i\widetilde{M_4}}{Z_4}$. 
Then, using \eqref{MB}, full symmetry of $\sigma_4$, and symmetry of $\widetilde{\sigma_4}$ in $1 \leftrightarrow 2$ and $3 \leftrightarrow 4$, we have \[ \frac{d}{dt} E^{(3)}(t) = \Lambda_5({M_5};u, v, v, v, v) + \Lambda_5(\widetilde{M_5};u, u, u, v, v) + \Lambda_5(\widetilde{M_5}' ;u, v, v, v, v), \] where \begin{equation} \label{XM5} \begin{gathered} M_5 = -4i \xi_{15} \sigma_4(\xi_{15}, \xi_2, \xi_3, \xi_4), \quad \widetilde{M_5} = -2i \xi_{35} \widetilde{\sigma_4}( \xi_1, \xi_2, \xi_{35}, \xi_4), \\ \widetilde{M_5}' = -i \xi_{25} \widetilde{\sigma_4}(\xi_1, \xi_{25}, \xi_3, \xi_4). \end{gathered} \end{equation} \subsection{Pointwise Estimates of the Multipliers and Almost Conservation Law} Let $|\xi_j| \sim N_j$, $|\xi_{kl}|\sim N_{kl}$ for $N_j$, $N_{kl}$, dyadic. Then, we have the following pointwise estimate on $M_4$ and $\widetilde{M_4}$. Since $M_4$ is symmetric in $\{1, 2, 3, 4\}$, the proof for $M_4$ directly follows from Lemma 4.4 in \cite{CKSTT4}. However, since $\widetilde{M_4}$ is symmetric only in $1 \leftrightarrow 2$ and $3 \leftrightarrow 4$, we need to obtain new pointwise cancellations. The proof for $\widetilde{M_4}$ is presented at the end of this subsection. \begin{lemma} \label{LEM:XM4BOUND} \begin{equation} \label{XM4BOUND} |M(\xi_1, \xi_2, \xi_3, \xi_4) | \lesssim \frac{|Z_4| m^2(\min(N_j, N_{kl}))}{(N+N_1)(N+N_2)(N+N_3)(N+N_4)} \end{equation} where $M = M_4$ or $\widetilde{M_4}$. \end{lemma} Lemma \ref{LEM:XM4BOUND} shows that $M_4$ and $\widetilde{M_4}$ vanish whenever $Z_4$ vanishes. Moreover, we have the following pointwise estimates for $M_5$, $\widetilde{M_5}$, and $\widetilde{M_5}'$ as in the KdV case. \begin{corollary} \label{COR:XM5BOUND} \begin{equation} \label{XM5BOUND} |M_5(\xi_1, \xi_2, \xi_3, \xi_4, \xi_5) | \lesssim \frac{ m^2(N_{\ast15})N_{15}}{(N+N_{15})(N+N_2)(N+N_3)(N+N_{4})} \end{equation} where $N_{\ast15} = \min(N_2, N_3, N_4, N_{15}, N_{23}, N_{24}, N_{34})$. 
The same estimate holds for $\widetilde{M_5}$ and $\widetilde{M_5}'$ with the obvious change. i.e. we replace $N_{15}, N_{*15}$ by $N_{35}, N_{*35}$ for $\widetilde{M_5}$, and by $N_{25}, N_{*25}$ for $\widetilde{M_5}'$ along with the rest of variables adjusted accordingly. \end{corollary} Once we establish the pointwise estimates on the quintilinear multipliers in Corollary \ref{COR:XM5BOUND}, we need to control the growth of the third modified energy $E^{(3)}(t)$. Using Lemma 5.1 in \cite{CKSTT4}, we obtain the following lemma. (See Lemma 5.2 in \cite{CKSTT4}.) \begin{lemma} \label{LEM:XM5TIMEGROWTH} For $ s = -\frac{3}{4}+$, we have \begin{equation} \label{XM5TIMEGROWTH} \big| \int_0^1 \Lambda_5(M; f_1, \dots, f_5) dt \big| \lesssim N^{- \frac{15}{4}+} \prod_{j = 1}^5 \|f_j\|_{X^{{0}, \frac{1}{2}+}_{[0, 1]}}, \end{equation} where $M = M_5$, $\widetilde{M_5}$, or $\widetilde{M_5}'$. \end{lemma} We point out that the proof of Lemma 5.2 in \cite{CKSTT4} does not make use of symmetry of the multiplier $M_5$ after establishing Corollary \ref{COR:XM5BOUND}. Hence, the same result holds for $\widetilde{M_5}$ and $\widetilde{M_5}'$, thus providing a good estimate on the time growth of $E^{(3)} (t)$. We omit the remaining portion of the proof of Theorem \ref{mainTHM:GWPR} since it basically follows from the argument in \cite{CKSTT4}. \begin{proof}[Proof of Lemma \ref{LEM:XM4BOUND}] Since $\sigma_3 = \frac{iM_3}{Z_3} =\frac{-3[\xi_1m^2(\xi_1)]_{\rm sym}}{3 \xi_1\xi_2\xi_3}$, we have \begin{equation} \label{XM4TILDE} \widetilde{M_4} = -2i [\xi_{23} \sigma_3(\xi_{1}, \xi_{23}, \xi_4)]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}} =\Big[ \frac{2i}{3} \frac{\xi_{1}m^2(\xi_{1}) +\xi_{23}m^2(\xi_{23})+ \xi_{4}m^2(\xi_{4})}{\xi_1\xi_4} \Big]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}}. 
\end{equation} From \eqref{XALGEBRA2}, we have \begin{equation} \label{XALGEBRA3} Z_4 = 3\xi_{12}\xi_{13}\xi_{14} = 3(\xi_1\xi_2\xi_3 + \xi_1\xi_2\xi_4 + \xi_1\xi_3\xi_4 + \xi_2\xi_3\xi_4). \end{equation} Then, we have \begin{align*} \Big[ \frac{2i}{3} & \frac{\xi_{1} m^2(\xi_{1})}{\xi_1\xi_4} \frac{\xi_2\xi_3}{\xi_2\xi_3}\Big]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}} = \Big[\frac{i}{9}\frac{ m^2(\xi_1)Z_4}{\prod_{j = 1}^4 \xi_j} - \frac{i}{3}\Big(\frac{m^2(\xi_1)}{\xi_1} + \frac{m^2(\xi_1)}{\xi_2}\Big)\Big]_{\substack{1\leftrightarrow 2}}\\ &= \frac{i}{18}\frac{ (m^2(\xi_1)+m^2(\xi_2))Z_4}{\prod_{j = 1}^4 \xi_j} - \frac{i}{6}\Big(\frac{m^2(\xi_1)}{\xi_1} + \frac{m^2(\xi_1)}{\xi_2} + \frac{m^2(\xi_2)}{\xi_1} + \frac{m^2(\xi_2)}{\xi_2} \Big), \end{align*} where the symmetry $3 \leftrightarrow 4$ is used in the first equality and $1 \leftrightarrow 2$ is used in the last equality. A similar computation yields \begin{align*} \Big[ \frac{2i}{3} \frac{\xi_{4}m^2(\xi_{4})}{\xi_1\xi_4} \frac{\xi_2\xi_3}{\xi_2\xi_3}\Big]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}} = \frac{i}{18}\frac{ (m^2(\xi_3)+m^2(\xi_4))Z_4}{\prod_{j = 1}^4 \xi_j} - \frac{i}{6} \sum_{j, k = 3}^4 \frac{m^2(\xi_j)}{\xi_k}. \end{align*} Since $\xi_{23} = - \xi_{14}$ and $m$ is even, we have $m(\xi_{23}) = m(\xi_{14})$. Using this fact and \eqref{XALGEBRA3}, we have \begin{align*} &\Big[ \frac{2i}{3} \frac{\xi_{23}m^2(\xi_{23})}{\xi_1\xi_4} \Big]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}} = - \frac{i}{36} \frac{Z_4}{\prod_{j = 1}^4 \xi_j} \big(m^2(\xi_{13})+m^2(\xi_{14})+m^2(\xi_{23})+m^2(\xi_{24})\big). 
\end{align*} Hence, after symmetrization $1\leftrightarrow 2$ and $3\leftrightarrow 4$, we can write $\widetilde{M_4}$ as \begin{equation} \label{XM4TILDE2} \begin{aligned} \widetilde{M_4} = & \frac{i}{36 }\frac{ Z_4}{\prod_{j = 1}^4 \xi_j} \Big(2\sum_{j = 1}^4 m^2(\xi_j) -m^2(\xi_{13})-m^2(\xi_{14})-m^2(\xi_{23})-m^2(\xi_{24})\Big) \\ & - \frac{i}{6}\Big(\sum_{j, k = 1}^2 \frac{m^2(\xi_j)}{\xi_k} + \sum_{j, k = 3}^4 \frac{m^2(\xi_j)}{\xi_k} \Big) =: {\rm I} + {\rm II}. \end{aligned} \end{equation} Now, let $L_j$ denote the $j$th largest dyadic interval among $N_j,$ $j = 1, \dots, 4$. Since $\widetilde{M_4}$ is symmetric in $1\leftrightarrow 2$ and $3\leftrightarrow 4$, assume $N_1 \geq N_2$ and $N_4 \geq N_3$. We further assume $N_1 \geq N_4$, since $\widetilde{M_4}$ is symmetric under $1 \leftrightarrow 4$ in view of \eqref{XM4TILDE}. Also, a simple reasoning (as in \cite{CKSTT4}) shows that we may assume that $N_1 \gtrsim N$ and that at least one of $N_{12}, N_{13}, N_{14}$ is at least of size $\sim N_1$. Lastly, note that we have $ \text{RHS of } \eqref{XM4BOUND} \sim \frac{N_{12} N_{13} N_{14} m^2(\min(N_j, N_{kl}))}{N_1L_2(N+L_3)(N+L_4)}. $ \noindent $\bullet$ {\bf Case (1):} $L_4 \gtrsim N/2$. In this case, we have $N+L_j \sim L_j$ for all $j $. Since $m$ is decreasing, we have $ |{\rm I}| \lesssim \frac{|Z_4| m^2(\min(N_j, N_{kl}))}{\prod_{j = 1}^4 (N+N_j)}, $ which satisfies \eqref{XM4BOUND}. Now, write the term ${\rm II}$ in \eqref{XM4TILDE2} as \begin{align*} {\rm II} = - \frac{i}{6} \sum_{j = 1}^4 \frac{m^2(\xi_j)}{\xi_j} - \frac{i}{6}\Big(\frac{m^2(\xi_1)}{\xi_2}+ \frac{m^2(\xi_2)}{\xi_1}+ \frac{m^2(\xi_3)}{\xi_4}+ \frac{m^2(\xi_4)}{\xi_3}\Big) =: {\rm II}_1 + {\rm II}_2. \end{align*} Note that ${\rm II}_1$ appears in \cite{CKSTT4} and it can be estimated by Mean Value Theorem and Double Mean Value Theorem \cite[Lemmata 4.1 and 4.2]{CKSTT4}. Now, we estimate ${\rm II}_2$. \noindent $\circ$ Subcase (1.a): $N_{12}, N_{13}, N_{14} \gtrsim N_1$. 
\ In this case, we have \[ |{\rm II}_2|\lesssim \frac{m(L_4)}{L_4} \leq \frac{N_{12} N_{13} N_{14}m^2(L_4)}{N_1L_2L_3L_4} \sim \text{RHS of } \eqref{XM4BOUND}.\] \noindent $\circ$ Subcase (1.b): $N_{12}, N_{14} \gtrsim N_1$ and $N_{13} \ll N_1$. In this case, write two terms in ${\rm II}_2$ as \[ \Big|\frac{m^2(\xi_1)}{\xi_2}+ \frac{m^2(\xi_3)}{\xi_4} \Big| = \Big|- \frac{m^2(\xi_1)\xi_{13}}{\xi_2\xi_4} + \frac{m^2(\xi_3)-m^2(\xi_1)}{\xi_4}\Big|. \] The first term is of size $ \sim \frac{m^2(N_1)N_{13}}{N_2N_4} \lesssim \frac{N_{12} N_{13} N_{14}m^2(L_4)}{N_1^2N_2N_4} \lesssim \text{RHS of } \eqref{XM4BOUND}$. On the other hand, since $N_{13} \ll N_1$, we have $ |m^2(\xi_3)-m^2(\xi_1)| = |m^2(\xi_1 - \xi_{13})-m^2(\xi_1)| \sim \frac{N_{13} m^2(N_1)}{N_1}$. Then, the second term can be written as \[ \Big|\frac{m^2(\xi_1 - \xi_{13})-m^2(\xi_1)}{\xi_4}\Big| \sim \frac{N_{13} m^2(N_1)}{N_1N_4} \lesssim \frac{N_{12} N_{13} N_{14}m^2(L_4)}{N_1^3N_4} \lesssim \text{RHS of } \eqref{XM4BOUND}. \] Since $N_{24} = N_{13} \ll N_1$, a similar computation holds for $\big|\frac{m^2(\xi_2)}{\xi_1}+ \frac{m^2(\xi_4)}{\xi_3} \big|$. A similar computation holds if precisely one of $N_{12},N_{13},N_{14}$ is much smaller than $N_1$. \noindent $\circ$ Subcase (1.c): $N_{12}, N_{13} \ll N_1$ and $N_{14} \gtrsim N_1$. \ In this case, we have $N_1 \sim N_2 \sim N_3$. Write ${\rm II}_2$ as \begin{align*} {\rm II}_2 &= \frac{m^2(\xi_1)\xi_{24}\xi_1\xi_3+m^2(\xi_2)\xi_{13}\xi_2\xi_4}{\prod_{j = 1}^4\xi_j} +\frac{m^2(\xi_3) -m^2(\xi_1)}{\xi_4} +\frac{m^2(\xi_4) -m^2(\xi_2)}{\xi_3}\\ &=: \widetilde{{\rm II}}_1 + \widetilde{{\rm II}}_2 + \widetilde{{\rm II}}_3. 
\end{align*} Then, using \eqref{XALGEBRA3} and Mean Value Theorem on $m^2(\xi_2)-m^2(\xi_1) = m^2(\xi_1 - \xi_{12})-m^2(\xi_1 )$, we obtain \[ |\widetilde{{\rm II}}_1| = \Big| \frac{m^2(\xi_1)Z_4}{3 \prod_{j = 1}^4\xi_j} + \frac{m^2(\xi_2)-m^2(\xi_1)}{\xi_1\xi_3}\xi_{13}\Big| \lesssim \frac{m^2(N_1)Z_4}{3 \prod_{j = 1}^4N_j} + \frac{N_{12}N_{13}m^2(N_1)}{N_1^2N_3} \] which is $\lesssim$ than the right-hand side of \eqref{XM4BOUND}. Next, we apply Mean Value Theorem to estimate $\widetilde{{\rm II}}_2 + \widetilde{{\rm II}}_3$, using $N_{24} = N_{13} \ll N_1$. Then, for some $\xi_1^* \sim \xi_1$ and $\xi_2^* \sim \xi_2$, we have \begin{align*} &|\widetilde{{\rm II}}_2 + \widetilde{{\rm II}}_3|\\ &\sim \Big| \frac{m^2(\xi_1^*)\xi_{13}}{\xi_1^*\xi_4} + \frac{m^2(\xi_2^*)\xi_{24}}{\xi_2^*\xi_3}\Big| \\ &\leq \Big| \xi_{13} \Big( \frac{m^2(\xi_1^*)}{\xi_1^*\xi_4} - \frac{m^2(\xi_1^* - \xi_{12})}{(\xi_1^*- \xi_{12})(\xi_4 +\xi_{12} )} \Big) \Big| + \Big|\xi_{13} \Big( \frac{m^2(\xi_2^*)}{(\xi_2^*)\xi_3} + \frac{m^2(\xi_1^* - \xi_{12})}{(\xi_1^*- \xi_{12})\xi_3} \Big)\Big|\\ &\leq \Big| \frac{\xi_{13}}{\xi_4} \Big( \frac{m^2(\xi_1^*)}{\xi_1^*} - \frac{m^2(\xi_1^* - \xi_{12})}{(\xi_1^*- \xi_{12} )} \Big) \Big| + \Big| \frac{\xi_{12}\xi_{13}m^2(\xi_1^* - \xi_{12})}{(\xi_1^*- \xi_{12})(\xi_4 +\xi_{12} )\xi_4} \Big| \\ &\quad+ \Big|\xi_{13} \Big( \frac{m^2(-\xi_2^*)}{(-\xi_2^*)\xi_3} - \frac{m^2(\xi_1^* - \xi_{12})}{(\xi_1^*- \xi_{12})\xi_3} \Big)\Big|. \end{align*} Now we can apply Mean Value Theorem on the first and the third term, since $|\xi_{12}| \ll N_1$ and $|(\xi_1^*- \xi_{12}) - (-\xi_2)| \leq |\xi_1^*- \xi_1| + |\xi_2^*- \xi_2| \ll N_2$. Then, we have $|\widetilde{{\rm II}}_2 + \widetilde{{\rm II}}_3| \lesssim \frac{N_{12}N_{13}m^2(N_1)}{N_1^2N_4} \lesssim \text{RHS of } \eqref{XM4BOUND}$. We point out that, unlike \cite{CKSTT4}, we could not apply Double Mean Value Theorem. 
Note that the first application of Mean Value Theorem on $\widetilde{{\rm II}}_1$ is only on the numerators, and the second application of Mean Value Theorem on $\widetilde{{\rm II}}_2 +\widetilde{{\rm II}}_3$ is on the whole fractions. A similar computation holds if precisely two of $N_{12},N_{13},N_{14}$ are much smaller than $N_1$. \noindent $\bullet$ {\bf Case (2):} $L_4 \ll N/2$ In this case, we have $m^2(\min(N_j, N_{kl})) = 1$. Let $L_{1j} \sim |\xi_1 + \xi_k|$ where $|\xi_k |\sim L_j$. Also, $L_{14} \ll N_1\sim L_2$. Thus, it is sufficient to show $ |\widetilde{M_4}| \lesssim \frac{L_{12}L_{13}}{N_1(N+L_3)N} $. As in \cite{CKSTT4}, a simple reasoning shows that we have $\max(L_{12}, L_{13}) \sim N_1$ in this case. \noindent $\circ$ Subcase (2.a): $L_{13}\sim N_1$, $\frac N2 \lesssim L_{12} < \frac{N_1}{4}$. \ In this case, we have $L_3 \sim L_{12} \gtrsim N$. From this and $\frac{L_{13}}{N_1} \sim 1$, it is sufficient to show $|\widetilde{M_4}| \lesssim \frac{1}{ N}$. First, note that $ \frac{Z_4}{\prod_{j=1}^4\xi_j} = 3\sum_{j = 1}^4\frac{1}{\xi_j} $ by \eqref{XALGEBRA3}. Then, after rewriting \eqref{XM4TILDE2} using this identity, all the terms which don't have $\frac{1}{L_4}$ are $O(\frac{1}{N})$ since $L_2, L_3 \gtrsim N$. If $L_4 = N_3$, the remaining terms are $\sim \frac{1}{N_3} \big(\frac{m^2(N_{1})N_3}{N_1} + \frac{m^2(N_{2})N_3}{N_2} \big)\lesssim \frac{1}{N}$ by Mean Value Theorem. A computation for the case $L_4 = N_2$ is similar. \noindent $\circ$ Subcase (2.b): $L_{13}\sim N_1$, $L_{12} \ll \frac{N}{4}$. \ In this case, we have $L_3 \ll \frac{N}{2}$. Thus, it is sufficient to show $|\widetilde{M_4}| \lesssim \frac{L_{12}}{ N^2}$. First, suppose $L_2 = N_2$. Then, we have $m^2(\xi_3) = m^2(\xi_4) = 1$. 
Then, from \eqref{XM4TILDE2}, we have \begin{equation} \label{XM4CANCEL1} \widetilde{M_4} = \frac{i}{18 }\frac{ Z_4}{\prod_{j = 1}^4 \xi_j} \big( \sum_{j = 1}^2 \big(m^2(\xi_j) -m^2(\xi_{1\{j+2\}}) \big) + 2 \big) - \frac{i}{6}\Big(\sum_{j, k = 1}^2 \frac{m^2(\xi_j)}{\xi_k} \Big) - \frac{i}{3}\frac{\xi_3+\xi_4}{\xi_3\xi_4}. \end{equation} In principle, the last term could be large. However, using \eqref{XALGEBRA3}, we have $\frac{i}{9}\frac{ Z_4}{\prod_{j = 1}^4 \xi_j} = \frac{i}{3} \frac{\xi_3+\xi_4}{\xi_3\xi_4} - \frac{i}{3} \frac{\xi_3+\xi_4}{\xi_1\xi_2}$. The first term exactly cancels the last term in \eqref{XM4CANCEL1} and the second term is $O(\frac{N_{12}}{N_1^2}) \lesssim \frac{L_{12}}{N^2}$. Then, using \eqref{XALGEBRA3}, the remaining terms in \eqref{XM4CANCEL1} can be written as \begin{align*} &\frac{i}{18 } \frac{ Z_4}{\prod_{j = 1}^4 \xi_j} \sum_{j = 1}^2 \big(m^2(\xi_j) -m^2(\xi_{1\{j+2\}}) \big) - \frac{i}{6}\Big(\sum_{j, k = 1}^2 \frac{m^2(\xi_j)}{\xi_k} \Big) \\ &= \frac{i}{6}\frac{\xi_1\xi_2\xi_3 + \xi_1\xi_2\xi_4}{\prod_{j = 1}^4 \xi_j} \sum_{j = 1}^2 \big(m^2(\xi_j) -m^2(\xi_{1\{j+2\}}) \big)\\ &\quad - \frac{i}{6}\frac{\xi_1\xi_3\xi_4 + \xi_2\xi_3\xi_4}{\prod_{j = 1}^4 \xi_j}\big( m^2(\xi_{13})+m^2(\xi_{14})\big). \end{align*} The first term can be bounded by Double Mean Value Theorem as in \cite{CKSTT4}, and the second term can be estimated by $\frac{N_{12}}{N_1N_2} \lesssim \frac{N_{12}}{N^2}$. Next, suppose $L_2 = N_4$. Then, we have $m^2(\xi_2) = m^2(\xi_3) = 1$. In this case, we also have $m^2(\xi_{14}) = m^2(L_{12}) = 1$. 
Then, by repeating a similar computation, we have, from \eqref{XM4TILDE2}, \begin{equation} \label{XM4CANCEL2} \begin{aligned} \widetilde{M_4} = & \frac{i}{18 }\frac{ Z_4}{\prod_{j = 1}^4 \xi_j} \big( m^2(\xi_1) + m^2(\xi_4) + 1 -m^2(\xi_{13})\big) \\ & - \frac{i}{6}\Big( \frac{m^2(\xi_1)}{\xi_1} + \frac{m^2(\xi_1)}{\xi_2}+ \frac{m^2(\xi_4)}{\xi_3}+ \frac{m^2(\xi_4)}{\xi_4} \Big) - \frac{i}{6}\Big(\frac{\xi_1+\xi_2}{\xi_1\xi_2} + \frac{\xi_3+\xi_4}{\xi_3\xi_4} \Big). \end{aligned} \end{equation} Once again, the last term could be large in principle. However, using \eqref{XALGEBRA3}, we have $\frac{i}{18}\frac{ Z_4}{\prod_{j = 1}^4 \xi_j} = \frac{i}{6} \frac{\xi_1+\xi_2}{\xi_1\xi_2} + \frac{i}{6} \frac{\xi_3+\xi_4}{\xi_3\xi_4} $. This exactly cancels the last term in \eqref{XM4CANCEL2}. Using \eqref{XALGEBRA3} and Mean Value Theorem as before, the remaining terms in \eqref{XM4CANCEL2} can be bounded by $\frac{N_{14}}{N^2}$. \noindent $\circ$ Subcase (2.c): $L_{12}\sim N_1$, $\frac{ N}{2} \lesssim L_{13} < \frac{N_1}{4}$. \ This case basically follows from Subcase (2.a). \noindent $\circ$ Subcase (2.d): $L_{12}\sim N_1$, $L_{13} \ll \frac{N}{4}$. \ Then, $L_{24} = L_{13} \ll \frac{N}{2}$ and $L_4\ll\frac{N}{2}$ imply $L_2 \ll N$, which is impossible since $L_2 \sim N_1 \gtrsim N$. Hence, this case does not occur. \end{proof} \section{Global Well-Posedness on $\mathbb{T}$, $\alpha = 1$} \label{SEC:XGWPonT} \subsection{Vector-Valued Function Spaces} In this subsection, we recall the vector-valued function spaces introduced in \cite{OHTHESIS} and \cite{OH1}, which are needed to study the well-posedness issues of \eqref{MB} without the mean 0 condition. Assuming the mean 0 condition for $u$ and $v$, the bilinear estimate \eqref{bilinear1} (with $\alpha = 1$, adjusted to the period $2\pi \lambda$ \cite{CKSTT4}) yields the local well-posedness of \eqref{MB} in $H^s(\mathbb{T}_\lambda) \times H^s(\mathbb{T}_\lambda)$ for $s \geq -\frac{1}{2}$. 
Proceeding as in Section \ref{SEC:XGWPonR}, one can also establish the global well-posedness of \eqref{MB} for $s \geq -\frac{1}{2}$ via the $I$-method. It is known from \cite{KPV4} that the crucial bilinear estimate \eqref{bilinear1} (with $\alpha = 1$) fails for any $s, b \in \mathbb{R}$ unless the functions have the spatial mean 0 for all $t$. If the means of $u$ and $v$ are not 0, we consider $u \mapsto u - \frac{1}{2\pi} \int u_0(x) dx$ and $v \mapsto v - \frac{1}{2\pi} \int v_0(x) dx$ along with the conservation of $E_1$ and $E_2$, the means of $u$ and $v$. This modifies the Majda-Biello system \eqref{MB} into the mean 0 system: \begin{equation} \label{mean0MB} \begin{gathered} u_t + u_{xxx} + q v_x + v v_x = 0 \\ v_t + v_{xxx} + q u_x + pv_x + (u v)_x = 0, \end{gathered} \end{equation} where $ p $ and $q$ are the means of the original $u$ and $v$ in \eqref{MB}. Now, consider the linear part of \eqref{mean0MB}: \begin{equation} \label{mean0linear} \Big( \partial_t + \partial_x^3 + \begin{pmatrix} 0 & q \\ q& p \end{pmatrix} \Big) \begin{pmatrix} u \\ v \end{pmatrix} = 0. \end{equation} When $q \ne 0$, the linear terms are \emph{mixed}. In this case, it does not make sense to consider the solution space as the direct sum of the scalar $X^{s, b} $ spaces. By taking the space-time Fourier transform of \eqref{mean0linear}, we see that the Fourier transforms of free solutions are ``supported on'' $\tau I - A(\xi)$, where $I $ is the $2\times 2$ identity matrix and $A(\xi ) = \Big(\begin{smallmatrix} \xi^3 & -q \xi \\ -q \xi & \xi^3 - p \xi \end{smallmatrix} \Big)$. Since $A(\xi) $ is self-adjoint, it is diagonalizable via an orthogonal matrix $M(\xi)$ (with $M(0) := I$). 
i.e., we have $A(\xi) = M(\xi)D(\xi)M^{-1}(\xi) $, where $D(\xi) = \Big(\begin{smallmatrix} d_1(\xi) & 0 \\ 0 & d_2(\xi) \end{smallmatrix} \Big)$ and $ d_1(\xi) $, $d_2(\xi)$ are the eigenvalues of $A(\xi)$ given by \begin{equation} \label{XEIGENVALUE} d_j(\xi) = \xi^3 -\tfrac{p \xi}{2} + (-1)^j L\xi, \ j = 1, 2, \end{equation} with $L := L(p, q) = \tfrac{1}{2}\sqrt{ p^2 + 4q^2}$. Then, we can define the \emph{vector-valued} $X^{s, b}$ space as follows: \begin{definition} \label{def3} \rm Define $X^{s, b}_{p, q} (\mathbb{T} \times \mathbb{R}) = \big\{ (u, v) \in \mathcal{S}' : \|(u, v) \|_{X^{s, b}_{p, q}} < \infty \big\}$, via the norm \begin{align*} &\|(u, v) \|_{X^{s, b}_{p, q}(\mathbb{T} \times \mathbb{R})}\\ &= \Big( \frac{1}{2\pi} \int \sum_{\xi \in \mathbb{Z}} \Big[ \langle\xi\rangle ^{2s} \big( I + |\tau I - A(\xi)|\big)^{2b} \left( \begin{smallmatrix} \widehat{u}(\xi, \tau) \\ \widehat{v}(\xi, \tau) \end{smallmatrix} \right), \left( \begin{smallmatrix} \widehat{u}(\xi, \tau) \\ \widehat{v}(\xi, \tau) \end{smallmatrix} \right) \Big]_{\mathbb{C}^2} d \tau \Big)^{1/2}, \end{align*} where $ [\cdot, \cdot]_{\mathbb{C}^2}$ is the usual Euclidean inner product on $\mathbb{C}^2$. \end{definition} \begin{remark} \label{XREMARK1} \rm Since $\tau I - A(\xi)$ is self-adjoint, $\big(\tau I - A(\xi)\big)^2$ is a positive matrix with a unique positive square root. We define $|\tau I - A(\xi)|$ by such a unique square root. Then, $ I + |\tau I - A(\xi)| $ is also positive definite and we can define $\big(I + |\tau I - A(\xi)| \big)^{2b}$ by $M(\xi) \big(I + |\tau I - D(\xi)| \big)^{2b} M^{-1}(\xi) $. \end{remark} \begin{remark} \label{XREMARK2} \rm Note that the $X^{s, b}_{p, q}$ norm is \emph{not} defined as a weighted $L^2$ norm of $|\widehat{u}|$ and $|\widehat{v}|$, unlike the scalar $X^{s, b}$ norm. 
Now, let \begin{equation} \label{XDIAGONAL} \left( \begin{smallmatrix} \widehat{U}(\xi, \tau) \\ \widehat{V}(\xi, \tau) \end{smallmatrix} \right) = M^{-1}(\xi) \left( \begin{smallmatrix} \widehat{u}(\xi, \tau) \\ \widehat{v}(\xi, \tau) \end{smallmatrix} \right). \end{equation} Then, by the orthogonality of $M(\xi)$ for all $\xi \in \mathbb{Z}$, we have \begin{align*} \|(u, v) \|_{X^{s, b}_{p, q}(\mathbb{T} \times \mathbb{R})} &=\Big( \iint \langle\xi\rangle ^{2s} \Big| \langle\tau I - A(\xi) \rangle^b \left( \begin{smallmatrix} \widehat{u}(\xi, \tau) \\ \widehat{v}(\xi, \tau) \end{smallmatrix} \right) \Big|^2_{\mathbb{C}^2} d\xi d\tau\Big)^{1/2} \\ & =\Big( \iint \langle\xi\rangle ^{2s} \Big| \langle\tau I - D(\xi)\rangle ^b \left( \begin{smallmatrix} \widehat{U}(\xi, \tau) \\ \widehat{V}(\xi, \tau) \end{smallmatrix} \right) \Big|^2_{\mathbb{C}^2} d\xi d\tau\Big)^{1/2}\\ &= \big( \| U \|^2_{X^{s, b}_1} + \| V\|^2_{X^{s, b}_2} \big)^{1/2}, \end{align*} where \begin{equation}\label{XXSB} \| f \|_{X^{s, b}_j} = \big\| \langle\xi\rangle ^s \langle\tau - d_j (\xi) \rangle^b \widehat{f}(\xi, \tau)\big \|_{L^2_{\xi, \tau}}, \ j = 1, 2. \end{equation} Hence, $X^{s, b}_{p, q}$ is defined as a weighted $L^2$ norm of the \emph{diagonal} terms $|\widehat{U}|$ and $|\widehat{V}|$. Thus, we can prove all the estimates in terms of these diagonal terms with their accompanied $X^{s, b}_j$ norms, assuming that $\widehat{U}$ and $\widehat{V}$ are nonnegative. \end{remark} As in the scalar case, we use $ b = 1/2$ in the periodic setting. Since the $X^{s, \frac{1}{2}}_{p, q}$ norm barely fails to control the $C_t H^s_x$ norm, we define a smaller space $Y^s_{p,q} $ via the norm \[ \|(u, v) \|_{Y^s_{p,q}} = \| (u, v) \|_{X^{s,\frac{1}{2}}_{p, q}} + \| \langle\xi\rangle ^{s}(\widehat{u}, \widehat{v}) (\xi, \tau)\|_{L^2(d\xi, L^1_{\tau})} \] Then, we have $Y^s_{p,q} \subset C_t H^s_x $. 
We also define $Y_j^s$ via the norm \[ \| f \|_{Y_j^s} = \|f \|_{X^{s, \frac{1}{2}}_j} + \| \langle\xi\rangle ^{s}\widehat{f}(\xi, \tau) \|_{L^2(d\xi, L^1_{\tau})} , \; j = 1, 2. \] In proving estimates, we repeatedly use the orthogonality of $M(\xi)$ for all $\xi$. Thus, if $\widehat{F}$ and $\widehat{G}$ are the diagonal terms of $\widehat{f}$ and $\widehat{g}$ given by $\left(\begin{smallmatrix} \widehat{F}(\xi) \\ \widehat{G}(\xi)\end{smallmatrix}\right) = M^{-1}(\xi) \left(\begin{smallmatrix} \widehat{f}(\xi) \\ \widehat{g}(\xi)\end{smallmatrix}\right)$, then $|\widehat{F}(\xi)|^2 + |\widehat{G}(\xi)|^2 = |\widehat{f}(\xi)|^2 + |\widehat{g}(\xi)|^2$ for all $\xi$. In particular, $\| (F, G) \|_{L^2_x} = \|(f, g) \|_{L^2_x}$. Now, we would like to discuss the scaling property on \eqref{mean0MB} on $[0, 2 \pi \lambda) \times \mathbb{R}$. \eqref{mean0MB} was obtained from \eqref{MB} via $u \to u - p$ and $v \to v - q$, where $p$ and $q$ are the means of the original $u$ and $v$ in \eqref{MB}, respectively. Now, consider the scaling $\mathbb{T} = [0, 2\pi) \mapsto \mathbb{T}_{\lambda} = [0, 2\pi \lambda)$ on \eqref{MB} given by \begin{gather*} u^\lambda(x, t) = \frac{1}{\lambda^2}u(\frac{x}{\lambda}, \frac{t}{\lambda^3}) \\ v^\lambda(x, t) = \frac{1}{\lambda^2}v(\frac{x}{\lambda}, \frac{t}{\lambda^3}) \end{gather*} and \begin{gather*} u^\lambda_0(x) = \frac{1}{\lambda^2}u_0( \frac{x}{\lambda}) \\ v^\lambda_0(x) = \frac{1}{\lambda^2} v_0( \frac{x}{\lambda}). \end{gather*} Note that the scaling does \emph{not} preserve the means of $u$ and $v$. Rather, we have $p^\lambda =$ the mean of $u^\lambda = p / \lambda^2$, and $q^\lambda =$ the mean of $v^\lambda = q / \lambda^2. 
$ Then, after scaling, we need to consider the following equation rather than \eqref{mean0MB}: \begin{gather*} u^\lambda_t + u^\lambda_{xxx} + q^\lambda v^\lambda_x + v^\lambda v^\lambda_x = 0\\ v^\lambda_t + v^\lambda_{xxx} + q^\lambda u^\lambda_x + p^\lambda v^\lambda_x + (u^\lambda v^\lambda)_x = 0 \end{gather*} on $[0, 2 \pi \lambda) \times \mathbb{R}$, where $p^\lambda = p / \lambda^2$ and $q^\lambda = q / \lambda^2$. Hence, $p$ and $q$ in the definition of $X^{s, b}_{p, q}$ and $Y^s_{p, q}$ need to be modified accordingly when we apply scaling. i.e., we need to consider $X^{s, b}_{p^\lambda, q^\lambda}([0, 2\pi \lambda) \times \mathbb{R})$ and so on. The same modification is needed for $X_j^{s, b}$ and $Y_j^s$ as well since their definition depends on the eigenvalues $d_1(\xi)$ and $ d_2(\xi)$ in \eqref{XEIGENVALUE}, which in turn depend on the spatial averages $p$ and $q$. In the following, we drop the subscripts $p$ and $q$ in $X^{s, b}_{p, q}$ and $Y^s_{p, q}$ when there is no confusion. Before concluding this subsection, we verify that $ E(u, v) = \int_{\mathbb{T}} u^2 + v^2 dx = \Lambda_2(1; u, u) + \Lambda_2(1; v, v)$ is conserved for the mean 0 system \eqref{mean0MB}, using the multilinear notation. From \eqref{mean0MB}, we have \begin{equation} \label{XXDIFF} \begin{gathered} \partial_t \widehat{u}(\xi) = i \xi^3 \widehat{u}(\xi) - i q\xi \widehat{v}- \frac{i}{2} \xi \widehat{v}\ast\widehat{v}(\xi) \\ \partial_t \widehat{v}(\xi) = i \xi^3 \widehat{v}(\xi) - i q\xi \widehat{u}- i p \xi \widehat{v}- i \xi \widehat{u}\ast\widehat{v}(\xi). 
\end{gathered} \end{equation} Using \eqref{XXDIFF} and the fact that $\xi_1^3 + \xi_2^3 = 0 $ when $\xi_1 + \xi_2 = 0$, we have \begin{align*} &\frac{d}{dt}E(u, v)(t)\\ & = i \Lambda_2(\xi_1^3 + \xi_2^3; u, u) - 2iq \Lambda_2({\xi_{1}};u, v) - {i} \Lambda_3(\xi_{23}; u, v, v)\\ &\quad + i \Lambda_2(\xi_1^3 + \xi_2^3; v, v) - 2iq \Lambda_2({\xi_{1}};v, u) - ip \Lambda_2({\xi_{12}};v, v) - {i} \Lambda_3(\xi_{12} + \xi_{13}; u, v, v) \\ &= -2iq \Lambda_2({\xi_{1} + \xi_2};u, v) - 2{i} \Lambda_3(\xi_{1}+ \xi_2 + \xi_{3}; u, v, v) = 0. \end{align*} Hence, $E(u, v)(t) = \|(u, v)\|_{L^2\times L^2}^2$ is conserved. As in Section \ref{SEC:XGWPonR}, we use $E(u, v)(t)$ to generate a sequence of modified energies and apply the $I$-method to prove the following theorem. \begin{theorem} \label{THM:XGWP} Let $p, q \in \mathbb{R}$. The Cauchy problem \eqref{mean0MB} with mean 0 initial data $(u_0, v_0)$ is globally well-posed in $H^s(\mathbb{T}) \times H^s(\mathbb{T})$ for $s \geq -1/2$. \end{theorem} As a corollary, we obtain Theorem \ref{mainTHM:GWPT}. Before proceeding with the $I$-method, recall that we have $p = \frac{p_1}{\lambda^2}$ and $q = \frac{q_1}{\lambda^2}$ on $\mathbb{T}_\lambda$, where $p_1$ and $q_1$ are the means of $u_0$ and $v_0$ of \eqref{MB} on $\mathbb{T}$. In the following, we fix the initial condition $(u_0, v_0)$ of \eqref{MB} on $\mathbb{T}$. Thus, $p_1$ and $q_1$ are fixed. We often hide $p_1$ and $q_1$ under implicit constants in proving estimates. Also, for our purpose, we set $\lambda \geq 1$ for the remainder of this section. \subsection{Modified Energies} As in Section \ref{SEC:XGWPonR}, for $ -\frac{1}{2} \leq s \leq 0$, define the first modified energy $E^{(1)}(t) = \|(Iu, Iv)(t)\|_{L^2\times L^2}^2$, where $I$ is the Fourier multiplier operator with the symbol $m$ defined by \eqref{XXM}. 
Using \eqref{XXDIFF}, we have \begin{equation} \label{XX1stMOD} \frac{d}{dt} E^{(1)}(t) = \Lambda_3(M_3;u, v, v) - ip \Lambda_2(\xi_{12}; v, v ) - iq \Lambda_2\big(\xi_{12}(m^2(\xi_1)+ m^2(\xi_2));u, v\big) \end{equation} where $M_3 = 3i [\xi_1 m^2(\xi_1)]_{\rm sym}$. Now, we define the second modified energy $E^{(2)}(t)$ by \begin{equation} \label{XXX2ndMOD} E^{(2)}(t) = E^{(1)}(t) + \Lambda_3(\sigma_3; u, v, v ), \end{equation} where $\sigma_3 = \frac{iM_3}{Z_3}$ to cancel the trilinear term $\Lambda_3(M_3;u, v, v)$ in \eqref{XX1stMOD}. By \eqref{XXDIFF}, we have \begin{align*} \frac{d}{dt} \Lambda_3(\sigma_3; u, v, v ) =& \Lambda_3( i \sigma_3 Z_3 ; u, v, v) -\frac{i}{2} \Lambda_4( \xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3); v, v, v, v)\\ &-2i \Lambda_4( \xi_{23} \sigma_3(\xi_{1}, \xi_{23}, \xi_4); u, u, v, v) - 2i q \Lambda_3(\xi_ {1} \sigma_3 ; u, u, v)\\ &- 2i p \Lambda_3(\xi_{3} \sigma_3; u, v, v) -iq\Lambda_3(\xi_3 \sigma_3 ; v, v, v). \end{align*} By symmetry in $\xi_1, \xi_2, \xi_3$, we have $ -iq\Lambda_3(\xi_3 \sigma_3 ; v, v, v) = -\tfrac{iq}{3}\Lambda_3((\xi_1+\xi_2+\xi_3 )\sigma_3 ; v, v, v) = 0$. Thus, we have \[ \frac{d}{dt} E^{(2)}(t) = \Lambda_4({M_4}; v, v, v, v) + \Lambda_4(\widetilde{M_4}; u, u, v, v) + \Lambda_3(M_3'; u, u, v) + \Lambda_3(M_3''; u, v, v), \] where $ M_4 = -\tfrac{i}{2}[\xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3)]_{\rm sym} $, $ \widetilde{M_4} = -2i [\xi_{23} \sigma_3(\xi_{1}, \xi_{23}, \xi_4) ]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}}$, and \begin{equation} \label{XXM3} \begin{gathered} M_3' = [- 2i q \xi_{1} \sigma_3(\xi_1, \xi_2, \xi_3)]_{1 \leftrightarrow 2} = -iq \xi_{12} \sigma_3(\xi_1, \xi_2, \xi_3) \\ M_3'' = [- 2i p \xi_{3} \sigma_3(\xi_1, \xi_2, \xi_3)]_{2 \leftrightarrow 3} = -ip \xi_{23} \sigma_3(\xi_1, \xi_2, \xi_3) . \end{gathered} \end{equation} Note that $M_4$ and $\widetilde{M_4}$ are exactly the same as Section \ref{SEC:XGWPonR}.
Now, define the third modified energy $E^{(3)}(t)$ by \begin{equation} \label{XXX3rdMOD} E^{(3)}(t) = E^{(2)}(t) + \Lambda_4(\sigma_4; v, v, v, v )+\Lambda_4(\widetilde{\sigma_4}; u,u, v, v ). \end{equation} As before, by choosing $\sigma_4 = \frac{iM_4}{Z_4}$ and $\widetilde{\sigma_4} = \frac{i\widetilde{M_4}}{Z_4}$, we can cancel the quadrilinear terms in $\frac{d}{dt}E^{(2)}(t)$. However, there are several lower order multilinear terms after differentiation. Indeed, using \eqref{XXDIFF}, full symmetry of $\sigma_4$, and symmetry of $\widetilde{\sigma_4}$ in $1 \leftrightarrow 2$ and $3 \leftrightarrow 4$, we have \begin{equation} \label{XX3rdMOD} \begin{aligned} \frac{d}{dt} E^{(3)}(t) = & \Lambda_5({M_5};u, v, v, v, v)+ \Lambda_5(\widetilde{M_5};u, u, u, v, v) + \Lambda_5(\widetilde{M_5}' ;u, v, v, v, v) \\ & +\Lambda_4({M_4'}; u, v, v, v)+ \Lambda_4(\widetilde{M_4}'; u, v, v, v)+ \Lambda_4(\widetilde{M_4}''; u, u, v, v) \\ &+ \Lambda_4(\widetilde{M_4}'''; u, u, u, v) + \Lambda_3(M_3'; u, u, v) + \Lambda_3(M_3''; u, v, v), \end{aligned} \end{equation} where $M_5$, $\widetilde{M_5}$, and $\widetilde{M_5}'$ are as in \eqref{XM5}, $M_3'$ and $M_3''$ are as in \eqref{XXM3}, and \begin{equation} \label{XXM4} \begin{gathered} M_4' = -4iq \xi_1 \sigma_4(\xi_1, \xi_2, \xi_3, \xi_4), \quad \widetilde{M_4}' = -2iq \xi_2 \widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4), \\ \widetilde{M_4}'' = -ip\xi_{34} \widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4) , \quad \widetilde{M_4}''' = -2iq\xi_3 \widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4). \end{gathered} \end{equation} Hence, we need to estimate all the trilinear, quadrilinear, and quintilinear terms in order to establish a good control on the growth of $E^{(3)}$. \subsection{Trilinear and Quadrilinear Estimates} \label{SEC:XXTRILINIEAR} In this subsection we establish estimates on the trilinear and quadrilinear terms in \eqref{XX3rdMOD}. 
First, we state the dual form of the bilinear estimate in \cite[Proposition 7.9]{OH1}. Let $\vec{u}$, $\vec{v}$, $\vec{w}$ be functions on $\mathbb{T}_\lambda \times \mathbb{R}$ with the spatial mean 0 for all $t\in \mathbb{R}$. Let $\vec{U}$, $\vec{V}$, $\vec{W}$ be the diagonal terms of $\vec{u}$, $\vec{v}$, $\vec{w}$; i.e., $ \left(\begin{smallmatrix}\widehat{U_1}(\xi, \tau) \\ \widehat{U_2}(\xi, \tau) \end{smallmatrix}\right) = M^{-1}(\xi) \left(\begin{smallmatrix}\widehat{u_1}(\xi, \tau)\\ \widehat{u_2}(\xi, \tau) \end{smallmatrix}\right)$, etc. \begin{lemma} \label{COR:XXBILINEAR} Let $\vec{u}, \vec{v}, \vec{w}$ be as described above. Furthermore, assume the spatial Fourier transforms of $U_j, V_k, W_l$ are supported in dyadic blocks; i.e., $\mathop{\rm supp}_\xi \widehat{U}(\xi, t) \subset \{ |\xi|\sim 2^J\}$ for some $J$ for all $t$, etc. Then, for $s\geq - \frac{1}{2}$, we have, for $j, k, l \in \{1, 2\}$, \begin{equation} \label{XXBILINEAR} \begin{aligned} \Big|\int_0^1 \Lambda_3(1; U_j, V_k, W_l) dt \Big| & \lesssim \lambda^{0+} \|U_j\|_{X_{j }^{s, \frac{1}{2}}[0, 1]}\|V_k\|_{X_k^{s, \frac{1}{2}}[0, 1]}\|W_l\|_{X_l^{s, \frac{1}{2}}[0, 1]} \\ & \lesssim \lambda^{0+} \| \vec{u} \|_{X_{[0, 1]}^{s, \frac{1}{2}}}\| \vec{v} \|_{X_{[0, 1]}^{s, \frac{1}{2}}} \| \vec{w} \|_{X_{[0, 1]}^{s, \frac{1}{2}}}. \end{aligned} \end{equation} \end{lemma} Using this lemma, we have the following growth estimate on the trilinear terms. \begin{lemma} \label{LEM:XXTRILINEAR} Let ${u}$, ${v}$ be functions on $\mathbb{T}_\lambda \times \mathbb{R}$ with the spatial mean 0 for all $t \in \mathbb{R}$. Then, for $s \geq -1/2$, we have \begin{equation} \label{XXTRILINEAR} \Big| \int_0^1 \Lambda_3(M_3'; u, u, v) + \Lambda_3(M_3''; u, v, v) dt \Big| \lesssim \lambda^{-2+}N^{2s-1+} \|(Iu, Iv)\|^3_{X_{[0, 1]}^{0, \frac{1}{2}} }. \end{equation} \end{lemma} \begin{proof} We prove the estimate only for $M_3'$ as the proof for $M_3''$ is basically the same. 
First note that \eqref{XXTRILINEAR} is equivalent to \begin{equation} \label{XXM3DUAL} \Big| \int_0^1 \Lambda_3(\widetilde{M_3} ; u, u, v ) dt \Big| \lesssim \lambda^{-2+}N^{2s-1+} \|(u, v)\|^3_{X_{[0, 1]}^{s, \frac{1}{2}}} , \end{equation} where $ \widetilde{M_3} = \frac{M_3'\prod_{j = 1}^3 \langle\xi_j\rangle^{s}}{\prod_{j = 1}^3 m(\xi_j)}$. Let $T$ be the trilinear Fourier multiplier operator defined by $M_3'$ and let $\vec{U} = (U_1, U_2)$ be the diagonal term of $\vec{w} = (u, v)$ given by $\left(\begin{smallmatrix}\widehat{U_1}(\xi, \tau) \\ \widehat{U_2}(\xi, \tau)\end{smallmatrix}\right) = M^{-1}(\xi) \left(\begin{smallmatrix}\widehat{u}(\xi, \tau) \\ \widehat{v}(\xi, \tau) \end{smallmatrix}\right)$. In view of Remark \ref{XREMARK2}, assume $\widehat{U_1}$ and $\widehat{U_2}$ are nonnegative. Then, upon defining the bilinear operator $B$ by $B(\vec{f}, \vec{g}) = \left(\begin{smallmatrix}0 \\ f_1 g_1\end{smallmatrix}\right)$, we have \[ \text{LHS of } \eqref{XXM3DUAL} = \Big| \int_0^1 \int_{\mathbb{T}_\lambda} T \Big[ \big( B( \vec{w}, \vec{w}) \big) (x, t), \vec{w}(x, t) \Big]_{\mathbb{R}^2} dx dt \Big| . \] After dyadic decomposition $|\xi_j| \sim N_j$, $j = 1, 2, 3$, we can replace the sharp cutoff $\chi_{[0, 1]}(t)$ by a smoothed one (c.f. \cite{CKSTT2}.) Then, by proceeding as in the proof of \cite[Proposition 7.9]{OH1}, the left-hand side of \eqref{XXM3DUAL} is \[ \sim \Big| \iint_{\substack{ \tau_1 + \tau_2 + \tau_3 = 0 \\ \xi_1 + \xi_2 + \xi_3 = 0 }} \sum_{ j, k, l = 1}^2 \widetilde{M_3} C_{ j, k, l } \widehat{U_j }(\xi_1, \tau_1) \widehat{U_k}(\xi_2, \tau_2) \widehat{U_l}(\xi_3, \tau_3) d\xi_2^\lambda d\xi_3^\lambda d\tau_2 d\tau_3 \Big|, \] where $C_{j, k, l}$ is defined in terms of the entries of the orthogonal matrix $M(\xi)$, and thus we have $|C_{ j, k, l }(\xi_1, \xi_2, \xi_3)| \lesssim 1$ for all $\xi_1, \xi_2, \xi_3 \in \mathbb{Z}/\lambda$ and $j, k, l = 1, 2$. See the proof of \cite[Proposition 7.9]{OH1}. 
Then, by Lemma \ref{COR:XXBILINEAR}, on each dyadic domain $A = \{|\xi_j|\sim N_j, \ j = 1, 2, 3\}$, we have $\text{LHS of } \eqref{XXM3DUAL} \lesssim \lambda^{0+} \sup_{A} |\widetilde{M_3}|\| \vec{w} \|^3_{X_{[0, 1]}^{s, \frac{1}{2}}}$. Hence, it only remains to estimate $\sup_{A} |\widetilde{M_3}|$ on each dyadic domain and sum up in the dyadic variables $N_1$, $N_2$, and $N_3$. Recall that $M_3' =-iq \xi_{12} \sigma_3(\xi_1, \xi_2, \xi_3) $. Also, on $\mathbb{T}_\lambda$, we have $q = \frac{q_1}{\lambda^2}$. Since $\sigma_3 = \frac{-3[\xi_1m^2(\xi_1)]_{\rm sym}}{3\xi_1\xi_2\xi_3}$, we have $M_3' = \frac{iq_1}{\lambda^2} \frac{\sum_{j = 1}^3\xi_jm^2(\xi_j)}{3\xi_1\xi_2}. $ In the following, we assume $\max N_j \gtrsim N$ since $\widetilde{M_3} = M_3' = 0$ when $N_1, N_2, N_3 \ll N$. Now, let $L_j $ denote the $j$th largest dyadic block among $N_1$, $N_2$, and $N_3$. Then, we have $L_2 \sim L_1 \gtrsim N$ since $\xi_{123} = 0$. From Lemma 4.3 in \cite{CKSTT4}, we have $\big|\sum_{j = 1}^3\xi_jm^2(\xi_j)\big| \lesssim L_3 m^2(L_3) $. Thus, we have $ |M_3'| \lesssim \frac{L_3 m^2(L_3)}{\lambda^2N_1N_2}$. By symmetry, assume $N_1 \leq N_2$. If $L_3 = N_1$, then $N_2 \sim N_3 \gtrsim N$ and thus $m^2(N_j) \sim \frac{N^{-2s}}{\langle N_j\rangle^{-2s}}$ for $j = 2, 3 $. Also, by definition, $m(N_1) \leq 1$. Thus, we have \[ |\widetilde{M_3}| \lesssim \lambda^{-2}\frac{\langle N_1\rangle^{s}N_2^{2s} }{ N_2m^2(N_2)} \sim \lambda^{-2}\frac{N^{2s}}{\langle N_1\rangle^{-s}N_2 } \lesssim \lambda^{-2}\prod_{j = 1}^3 N_j^{0-} N^{2s-1+}. \] If $L_3 = N_3$, then $N_1 \sim N_2 \gtrsim N$. Then, we have \[ |\widetilde{M_3}| \lesssim \lambda^{-2}\frac{N_1^{2s} \langle N_3\rangle ^{s} N_3}{ N_1^2m^2(N_1)} \sim \lambda^{-2}\frac{N^{2s}}{N_1^{1-s}}\frac{\langle N_3\rangle ^{1+s}}{N_1^{1+s} } \lesssim \lambda^{-2}\prod_{j = 1}^3 N_j^{0-} N^{3s-1+}. \] Now, we can finish the proof by summing up over $N_1$, $N_2$, and $N_3$ to obtain \eqref{XXM3DUAL}.
\end{proof} Before proving the estimates on the quadrilinear terms, we state a useful bilinear estimate. \begin{lemma} \label{LEM:XXXBILINEAR} Let $\vec{u} $ and $\vec{v}$ be as described at the beginning of this subsection. Then, for $s \geq \frac{1}{2}$, we have, for all $j, k, l \in \{1, 2\}$, \begin{equation} \label{XXXBILINEAR} \| U_{j} V_{k}\|_{X_l^{s-1, \frac{1}{2}}} \lesssim \|U_j\|_{Y_j^s} \|V_k\|_{Y_k^s} \lesssim \| \vec{u} \|_{Y^s} \| \vec{v} \|_{Y^s}. \end{equation} \end{lemma} This lemma follows easily from Sobolev embedding or Lemma \ref{COR:XXBILINEAR} by considering different cases, and thus we omit its proof. See \cite{OHTHESIS} for details. Also, see Lemmata \ref{LEM:XXXTRILINEAR} and \ref{LEM:XXXTRIX} for the trilinear version of this estimate, for which we present the full proof. Let $|\xi_j |\sim N_j$, $|\xi_{kl} |\sim N_{kl}$, dyadic. From Lemma \ref{LEM:XM4BOUND}, we have $|\sigma_4(\xi_1, \xi_2, \xi_3, \xi_4 )|,$ $ |\widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4 )| \lesssim \frac{m^2(\min( N_j, N_{kl}))}{\prod_{j = 1}^4 (N+N_j)}$. Now, we apply symmetrization to $M_4'$, $\widetilde{M_4}'$ and $\widetilde{M_4}'''$ in \eqref{XXM4}. In view of $\Lambda_4({M_4'}; u, v, v, v)$, we can redefine $M_4'$ under the symmetrization in $\xi_2,\xi_3, \xi_4$ i.e. among the variables of $\widehat{v}$. Then, we have \[ M_4' = [-4iq \xi_1 \sigma_4(\xi_1, \xi_2, \xi_3, \xi_4) ]_{2 \leftrightarrow 3 \leftrightarrow 4} = 6iq \xi_{34} \sigma_4(\xi_1, \xi_2, \xi_3, \xi_4). \] Similarly, we have $\widetilde{M_4}' = -iq \xi_{34} \widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4)$ and $ \widetilde{M_4}''' = -iq \xi_{12} \widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4)$. Note that the symmetrization was used here to have a convenient form in terms of estimates, not to have the totally symmetrized form. 
Using $q = \frac{q_1}{\lambda^2}$, we have \begin{equation}\label{XXM4POINTWISE} |M_4'(\xi_1, \xi_2, \xi_3, \xi_4) | \lesssim \frac{N_{34}m^2(\min( N_j, N_{kl}))}{\lambda^2 \prod_{j = 1}^4 (N+N_j)}. \end{equation} Similar pointwise estimates hold for $\widetilde{M_4}', \widetilde{M_4}''$ and $\widetilde{M_4}'''$. Then, using Lemmata \ref{COR:XXBILINEAR} and \ref{LEM:XXXBILINEAR}, we have the following growth estimate on the quadrilinear terms. \begin{lemma} \label{LEM:XXQUADRILINEAR} Let ${u}$, ${v}$ be functions on $\mathbb{T}_\lambda \times \mathbb{R}$ with the spatial mean 0 for all $t \in \mathbb{R}$. Then, for $s \in [-\frac{1}{2}, 0)$, we have \begin{equation} \label{XXQUADRILINEAR} \Big| \int_0^1 \Lambda_4(M_4'; u, v, v, v) dt \Big| \lesssim \lambda^{-2+}N^{2s-1+} \|(Iu, Iv)\|^4_{Y_{[0, 1]}^{0} }. \end{equation} The same estimate holds for the expressions $\Lambda_4(\widetilde{M_4}'; u, v, v, v)$, $ \Lambda_4(\widetilde{M_4}''; u, u, v, v)$, and $\Lambda_4(\widetilde{M_4}'''; u, u, u, v),$ appearing in \eqref{XX3rdMOD}. \end{lemma} \begin{proof} We prove the estimate only for $M_4'$ as the proof for the other quadrilinear terms is basically the same. Since all the function spaces are on the time interval $[0, 1]$, we will drop $[0, 1]$ in $X^{s, b}_{[0, 1]}$ and $Y^s_{[0, 1]}$. First, note that \eqref{XXQUADRILINEAR} is equivalent to \begin{equation} \label{XXM4DUAL} \Big| \int_0^1 \Lambda_4( M ; u, v, v, v ) dt \Big| \lesssim \lambda^{-2+}N^{2s-1+} \|(u, v)\|^4_{Y^{s}} , \end{equation} where $ M = \frac{M_4'\prod_{j = 1}^4 \langle\xi_j\rangle^{s}}{\prod_{j = 1}^4 m(\xi_j)}$. Let $\vec{U} = (U_1, U_2)$ be the diagonal term of $\vec{w} = (u, v)$ given by $ \left(\begin{smallmatrix}\widehat{U_1}(\xi, \tau) \\ \widehat{U_2}(\xi, \tau)\end{smallmatrix}\right) = M^{-1}(\xi) \left(\begin{smallmatrix}\widehat{u}(\xi, \tau) \\ \widehat{v}(\xi, \tau)\end{smallmatrix}\right)$.
After dyadic decomposition $|\xi_j| \sim N_j$, $j = 1,\dots, 4$, assume $\widehat{U_j}$ is nonnegative and also drop the cutoff $\chi_{[0, 1]}(t)$ as in the proof of Lemma \ref{LEM:XXTRILINEAR}. Then, it suffices to show \begin{equation} \label{XXM4DUAL1} \Big| \iint_{\substack{ \tau_1 + \tau_2 + \tau_3 + \tau_4 = 0 \\ \xi_1 + \xi_2 + \xi_3 + \xi_4= 0 }} |M(\xi_1, \xi_2, \xi_3, \xi_4)| \prod_{k = 1}^4\widehat{U_{j_k}}(\xi_k, \tau_k) \Big| \lesssim\lambda^{-2+}N^{2s-1+} \|\vec{w}\|^4_{Y^{s}} \end{equation} for all $j_1, j_2, j_3, j_4 \in \{1, 2\}$. By Lemmata \ref{COR:XXBILINEAR} and \ref{LEM:XXXBILINEAR}, on each dyadic domain $A = \{|\xi_j|\sim N_j, \ j = 1, \dots, 4\}$, we have \begin{align*} \text{LHS of } \eqref{XXM4DUAL1} & \lesssim \lambda^{0+} \sup_{A} |M| \| U_{j_1} \|_{X_{j_1}^{s, \frac{1}{2}}}\| U_{j_2} \|_{X_{j_2}^{s, \frac{1}{2}}}\| U_{j_3} U_{j_4} \|_{X_{1}^{s, \frac{1}{2}}} \\ & \lesssim \lambda^{0+} \sup_{A} |M| \| U_{j_1} \|_{X_{j_1}^{s, \frac{1}{2}}}\| U_{j_2} \|_{X_{j_2}^{s, \frac{1}{2}}} \| U_{j_3} \|_{Y_{j_3}^{s+1}}\| U_{j_4} \|_{Y_{j_4}^{s+1}}\\ & \lesssim \lambda^{0+} \sup_{A} |M| N_3 N_4 \| \vec{w} \|^4_{Y^{s}}. \end{align*} Note that Lemma \ref{COR:XXBILINEAR} was applicable thanks to the pointwise estimate \eqref{XXM4POINTWISE} of $M_4'$; i.e., we could assume the product $U_{j_3}U_{j_4}$ has the spatial mean 0 since $M_4' = 0$ (and hence $M = 0$) when $\xi_{34} = 0$. Hence, it remains to show \begin{equation} \label{XXM4DUAL2} |M| N_3N_4 = \frac{|M_4'|N_3N_4 \prod_{j = 1}^4 \langle N_j\rangle^{s}}{\prod_{j = 1}^4 m(N_j)} \lesssim \lambda^{-2}N^{2s-1+} \prod_{j = 1}^4 N_j^{0-}. \end{equation} From the proof of Lemma \ref{LEM:XM4BOUND}, we have $M_4' = 0$ if $N_j \ll N$ for all $j$. Thus, we assume the largest and the second largest of $N_1, N_2, N_3, N_4$ are $\gtrsim N$ in view of $\xi_{1234} = 0$. 
From \eqref{XXM4POINTWISE}, % the left-hand side of \eqref{XXM4DUAL2} is
\begin{equation} \label{XXM4DUAL3} |M| N_3N_4 \lesssim \lambda^{-2} \frac{N_{34}N_3N_4 m^2(\min(N_j, N_{kl}))\prod_{j = 1}^4 \langle N_j\rangle^{s}} {\prod_{j = 1}^4 [m(N_j)(N+N_j)]}. \end{equation} From the symmetry in \eqref{XXM4DUAL3}, assume $N_1 \geq N_2$ and $N_3 \geq N_4$. Note that $N_{34} = N_{12}$ and $m(N_j) \langle N_j\rangle^{-s} (N+N_j) \sim N_j N^{-s} \gtrsim N^{1-s} $ if $ N_j \gtrsim N $ and $ \sim N N_j^{-s} \ll N^{1-s} $ if $ N_j \ll N$. (Recall $s <0$.) Therefore, the worst case occurs when $N_3, N_4 \gtrsim N \gg N_1, N_2$. Then, we have \[ \text{RHS of } \eqref{XXM4DUAL3} \lesssim N^{2s-2} \langle N_1\rangle^s\langle N_2\rangle ^s N_{12} m^2(N_3) \leq N^{2s-1+}\prod_{j = 1}^4 N_j^{0-}. \] Now, we can finish the proof by summing up over $N_j$, $j = 1, \dots, 4$. \end{proof} \subsection{Quintilinear Estimate} In this subsection, we present an estimate for the quintilinear terms in \eqref{XX3rdMOD}. \begin{lemma} \label{LEM:XXQUINTILINEAR} Let ${u}$, ${v}$ be functions on $\mathbb{T}_\lambda \times \mathbb{R}$ with the spatial mean 0 for all $t \in \mathbb{R}$. Then, we have, for $s \geq -\frac{1}{2}$, \begin{equation} \label{XXQUINTILINEAR} \Big| \int_0^1 \Lambda_5(M_5; u, v, v, v, v) dt \Big| \lesssim \lambda^{0+}N^{4s+} \|(Iu, Iv)\|^5_{Y_{[0, 1]}^{0} }. \end{equation} The same estimate holds for $\Lambda_5(\widetilde{M_5}; u, u, u, v, v)$ and $ \Lambda_5(\widetilde{M_5}'; u, v, v, v, v)$, appearing in \eqref{XX3rdMOD}. \end{lemma} Note that the quintilinear multipliers $M_5$, $\widetilde{M_5}$, and $\widetilde{M_5}'$ are as in the non-periodic case; i.e., they satisfy the same pointwise estimates presented in Corollary \ref{COR:XM5BOUND}.
We omit the proof of Lemma \ref{LEM:XXQUINTILINEAR}, since the lemma follows once we prove the following estimate as in the KdV case \cite{CKSTT4}: \begin{equation} \label{QQUINTILINEAR} \Big| \int_0^1 \int_{\mathbb{T}_\lambda} \mathbb{P}(f_1f_2f_3) f_4 f_5 dx dt \Big| \lesssim \lambda^{0+} \prod_{k = 1}^3 \| f_k \|_{Y^s_{j_k}} \|f_4\|_{X^{s,\frac{1}{2}}_{j_4}} \|f_5\|_{X^{s,\frac{1}{2}}_{j_5}} \end{equation} for all $j_k \in \{1, 2\}$, where $f_k$, $k = 1, \dots, 5$, are functions on $\mathbb{T}_\lambda \times \mathbb{R}$ with the spatial mean 0 and $\mathbb{P}$ is the orthogonal projection onto the nonzero (spatial) Fourier modes. As in \cite[Lemma 8.1]{CKSTT4}, \eqref{QQUINTILINEAR} follows once we prove the following trilinear estimate: \begin{equation} \label{YXXXTRILINEAR} \| f_1 f_2 f_3 \|_{X_{j_4}^{s-1, \frac{1}{2}}(\mathbb{T}_\lambda \times \mathbb{R})} \lesssim \lambda^{0+} \prod_{k = 1}^3 \|f_k\|_{Y_{j_k}^s(\mathbb{T}_\lambda \times \mathbb{R})} \end{equation} for $s \geq \frac{1}{2}$ and all $j_k \in \{1, 2\}$, $k = 1, \dots, 4$. When $s > 1/2$, a slight adjustment of the argument in \cite[Lemma 8.1]{CKSTT4} and Theorem 3 in \cite{CKSTT5} provides the proof of \eqref{YXXXTRILINEAR}. However, when $s = 1/2$, we need to refine the proof of \cite[Theorem 3]{CKSTT5} since $d_j(\xi) \ne \xi^3$ in our case. By repeating the computation in \cite{CKSTT5}, we see that the proof of \eqref{YXXXTRILINEAR} for $\lambda = 1$ and $s = 1/2$ is reduced to proving (with the notation introduced in \eqref{XTAO1}) \begin{equation} \label{XTAO2} \Big\| \frac{1}{\langle\xi_3\rangle ^{\frac{1}{2}}\prod_{n = 1}^3 \langle\tau_n - d_{j_n}(\xi_n)\rangle ^{1/2} } \Big\|_{[4; \mathbb{Z} \times \mathbb{R}]} \lesssim 1, \end{equation} and \begin{equation} \label{XTAO3} \Big\| \frac{1} {\langle\xi_4\rangle ^{1/2}\prod_{n = 1}^3 \langle\tau_n - d_{j_n}(\xi_n)\rangle ^{1/2} } \Big\|_{[4; \mathbb{Z} \times \mathbb{R}]} \lesssim 1. 
\end{equation} In the appendix we prove \eqref{XTAO2} and \eqref{XTAO3}. The argument is based on an extension of the proof of \cite[Theorem 3]{CKSTT5}. \begin{lemma} \label{LEM:XXXTRILINEAR} Let $\lambda = 1$. Then, \eqref{YXXXTRILINEAR} holds for $s > 1/2$. Moreover, if $L_1 = 1/2 \sqrt{p^2 + 4q^2} \in \mathbb{Q}$, then \eqref{YXXXTRILINEAR} also holds for $s = 1/2$. Also, if the spatial Fourier supports of $f_k$ are dyadic, then \eqref{YXXXTRILINEAR} holds for $s = 1/2$ without any condition on $L_1$. \end{lemma} The general result for $ \lambda \geq 1$ can be obtained in the same manner as in \cite{CKSTT5}. Before stating the lemma, recall how the scaling works. Recall that $p$ and $q$ are the means of the $u$ and $v$ of the unmodified system \eqref{MB} on $\mathbb{T}$. When we apply the scaling, the means $p^\lambda$ and $q^\lambda$ of $u^\lambda$ and $v^\lambda$ on $\mathbb{T}_\lambda$ are given by $p^\lambda = \frac{p}{\lambda^2}$ and $q^\lambda = \frac{q}{\lambda^2}$. Then, we have $L_\lambda = \tfrac{1}{2}\sqrt{(p^\lambda)^2 + 4 (q^\lambda)^2} = \tfrac{1}{2\lambda^2} \sqrt{p^2 + 4q^2} = \tfrac{L_1}{\lambda^2}$. \begin{lemma} \label{LEM:XXXTRIX} Let $\lambda \geq 1$. Then, \eqref{YXXXTRILINEAR} holds for $s > 1/2$. Moreover, if $L_\lambda = \frac{L_1}{\lambda^2} \in \mathbb{Q}$, then \eqref{YXXXTRILINEAR} also holds for $s = 1/2$. Also, if the spatial Fourier supports of $f_k$ are dyadic, then \eqref{YXXXTRILINEAR} holds for $s = 1/2$ without any condition on $L_\lambda$. \end{lemma} \begin{remark} \label{rmk4.11} \rm The extra condition $L_\lambda = \frac{L_1}{\lambda^2} \in \mathbb{Q}$ is not really a restriction since we can always choose the scaling parameter $\lambda$ such that $L_\lambda \in \mathbb{Q}$. Moreover, in proving Lemma \ref{LEM:XXQUINTILINEAR}, \eqref{YXXXTRILINEAR} is used after the dyadic decomposition. Hence, the issue of $L_\lambda$ being rational/irrational becomes irrelevant for our purpose. 
\end{remark} \subsection{Almost Conservation Law} Now, we are ready to establish the global well-posedness for the mean 0 system \eqref{mean0MB} in $H^{-1/2}(\mathbb{T}) \times H^{-1/2}(\mathbb{T})$, which, in turn, implies the global well-posedness of the original Majda-Biello system \eqref{MB} in $H^{-1/2}(\mathbb{T}) \times H^{-1/2}(\mathbb{T})$. Applying the $I$ operator to \eqref{mean0MB}, we obtain the $I$-system. \begin{equation} \label{XXISYSTEM} \begin{gathered} \partial_t I u + \partial_x^3 Iu + q \partial_x I v + \frac{1}{2} \partial_x I( v^2) = 0 \\ \partial_t I v + \partial_x^3 Iv + q \partial_x I u +p \partial_x I v + \partial_x I(uv) = 0 \\ \big(Iu, Iv\big) (x, 0) = (Iu_0, Iv_0) \in L^2 \times L^2. \end{gathered} \end{equation} Then, \eqref{MB} is well-posed on $[0, T]$ in $ H^s \times H^s$ if and only if \eqref{XXISYSTEM} is well-posed on $[0, T]$ in $ L^2 \times L^2$. The local well-posedness of the $I$-system \eqref{XXISYSTEM} in $L^2 \times L^2$ for small initial data follows from the vector-valued bilinear estimate for \eqref{mean0MB} \cite[Proposition 7.9]{OH1} and the interpolation lemma \cite[Lemma 12.1]{CKSTT5}. Fix $T > 0$ and a mean 0 initial condition $(u_0, v_0)$ for \eqref{mean0MB}. By the scaling $\big(u^\lambda , v^\lambda \big)(x, t) = \frac{1}{\lambda^2} \big(u, v\big)(\frac{x}{\lambda}, \frac{t}{\lambda^3})$ and by choosing $\lambda \sim N^{-\frac{2s}{3+2s}}$, we have \[ \|(Iu^\lambda_0, Iv^\lambda_0)\|_{L^2\times L^2} \ll1. \] In the following, we work only on the $\lambda$-scaled $I$-system, and thus we drop the superscript $\lambda$. Our goal is to show that the $\lambda$-scaled $I$-system is well-posed on the time interval $[0, \lambda^3T]$. For simplicity, assume $s = -\frac{1}{2}$ in the following. Note that $\lambda \sim N^{1/2}$ when $s = -1/2$. Let $\vec{w} = (u, v)$ and $\vec{w_0} = (u_0, v_0)$. 
Recall that the first modified energy $E^{(1)}(t) = \|I\vec{w}(t)\|_{L^2}^2$, and that Lemmata \ref{LEM:XXTRILINEAR}, \ref{LEM:XXQUADRILINEAR}, and \ref{LEM:XXQUINTILINEAR} control the time growth of the third modified energy $E^{(3)}(t)$. Thus, we need to show that $E^{(3)}(t)$ and $E^{(1)}(t)$ are comparable. We state the following lemma whose proof is presented at the end of this section. \begin{lemma} \label{LEM:XXGAP} Let $s = -1/2$. Then, we have \begin{equation} \label{XXGAP} \big|E^{(3)}(t) - E^{(1)}(t)\big| \lesssim \|I \vec{w}(t) \|^3_{L^2} + \|I \vec{w}(t) \|^4_{L^2}. \end{equation} \end{lemma} First, from the local theory, if the $I$-system is locally well-posed on $[t, t+1]$, then we have \begin{equation} \label{XXLOCALGROWTH} \sup_{t'\in [t, t+1]} \|I\vec{w}(t') \|_{L^2} \leq \| I\vec{w}\|_{Y^{0}_{[t, t+1]}} \leq 2 \|I\vec{w}(t) \|_{L^2}. \end{equation} Now, choose $\varepsilon_0 = \|I\vec{w_0}\|_{L^2} $ sufficiently small such that: (a) $\| I\vec{w}(t) \|_{L^2} < 2\varepsilon_0$ guarantees the local well-posedness of the $\lambda$-scaled $I$-system on $[t, t+1]$. (b) $\| I\vec{w}(t) \|_{L^2} < 2\varepsilon_0$ together with \eqref{XXLOCALGROWTH} makes $\| I\vec{w}(t+1) \|_{L^2}$ sufficiently small so that \[ C_1\big(\| I\vec{w}(t+1) \|^3_{L^2}+\| I\vec{w}(t+1) \|^4_{L^2}\big) \leq \tfrac{1}{2} \| I\vec{w}(t+1) \|^2_{L^2}, \] where $C_1$ is the constant from \eqref{XXGAP}. If (b) is satisfied, then from \eqref{XXGAP} we have $ \| I\vec{w}(t+1) \|^2_{L^2} \leq 2 |E^{(3)}(t+1)|$. Since $\| I\vec{w_0}\|_{L^2} = \varepsilon_0 < 2\varepsilon_0$, the solution exists on $[0, 1]$. Moreover, by Lemma \ref{LEM:XXGAP}, we have $|E^{(3)}(0)| \leq \varepsilon_0^2+2C_1 \varepsilon_0^3$. Then, from Lemmata \ref{LEM:XXTRILINEAR}, \ref{LEM:XXQUADRILINEAR}, and \ref{LEM:XXQUINTILINEAR}, we have $ |E^{(3)}(t)| \leq |E^{(3)}(0)| + C_2N^{-2+}\varepsilon_0^3$ for all $t \in [0, 1]$.
Putting everything together, we have \[ \| I\vec{w}(1) \|^2_{L^2} \leq 2 |E^{(3)}(1)| \leq 2( \varepsilon_0^2+2C_1 \varepsilon_0^3 + C_2 N^{-2+} \varepsilon_0^3) < 4\varepsilon_0^2 \] as long as $2C_2 N^{-2+}\varepsilon_0 < 1$. Then, by condition (a), the solution is guaranteed to exist on $[1, 2]$. In general, after $K$ iterations, we have \[ \| I\vec{w}(K) \|^2_{L^2} \leq 2( \varepsilon_0^2+2C_1 \varepsilon_0^3 + 2K C_2 N^{-2+}\varepsilon_0^3) < 4\varepsilon_0^2 \] as long as $K C_2 N^{-2+}\varepsilon_0 < 1$, guaranteeing the existence on $[0, K+1]$. Hence, this procedure establishes the well-posedness on the time interval $[ 0, N^{2-}] $. Recall that our goal is to show the well-posedness of the $\lambda$-scaled $I$-system on $[0, \lambda^3T]$. Therefore, by choosing $N = N(T)$ large such that $N^{2-} > \lambda^3 T \sim N^{-\frac{6s}{3+2s}} T \sim N^\frac{3}{2} T$, we establish the well-posedness of the $\lambda$-scaled $I$-system on $[0, \lambda^3T]$. \section{Global Well-Posedness on $\mathbb{T}$, $0< \alpha< 1$} \label{CHAP:YGWP} In \cite{OH1}, assuming the mean 0 condition on $u_0$, we established the local well-posedness of the Majda-Biello system \eqref{MB} with $\alpha \in (0, 4] \setminus \{1\}$ in $H^s(\mathbb{T}) \times H^s(\mathbb{T}) $ for $s \geq \min( 1, s_0+ )$, where \begin{equation} \label{YS0} s_0 = s_0(\alpha) = \frac12 + \frac{1}{2} \max( \nu_{c_1}, \nu_{d_1}, \nu_{d_2}). \end{equation} Since the Hamiltonian $H(u, v) $ controls the $H^1 $ norm of the solution $(u, v)$, \eqref{MB} is globally well-posed in $H^1(\mathbb{T}) \times H^1(\mathbb{T})$. For the values of $\alpha$ such that $s_0 \geq 1$, this result is sharp. However, we have $s_0 = 1/2$ for almost every $\alpha \in(0, 4] \setminus \{1\}$. Thus, the global well-posedness in $H^1 \times H^1$ is far from being optimal for almost every $\alpha$. Now, fix $\alpha \in(0, 4] \setminus \{1\}$ such that $s_0 < 1$, i.e.
\eqref{MB} is locally well-posed in $H^{s_0+} \times H^{s_0+}$. In this section, we establish global well-posedness results below the energy space $H^1 \times H^1$, using the $I$-method. \subsection{Modified Energies} For $s \in (s_0, 1)$, define $m\colon \mathbb{R} \to \mathbb{R} $ to be the even, smooth, monotone Fourier multiplier given by \[ m(\xi ) = \begin{cases} 1, & \text{for } |\xi | \leq N \\ \frac{N^{1-s}}{|\xi|^{1-s} }, & \text{for } |\xi| \geq 2N, \end{cases} \] for $N \gg 1$ (to be determined later), and the operator $I$ to be the corresponding Fourier multiplier operator defined by $\widehat{I f}(\xi) = m(\xi)\widehat{f}(\xi)$. The operator $I$ is smoothing of order $1-s$: it maps $H^s$ into $H^1$, and we have $ \|f \|_{X^{s', b'}} \lesssim \|I f \|_{X^{s' +1- s, b'}} \lesssim N^{1-s} \|f \|_{X^{s', b'}}$ for any $s', b' \in \mathbb{R}$. Now, define the first modified energy $E^{(1)}(t)$ by $E^{(1)}(t) = H(Iu, Iv)(t) = \int (Iu)_x^2 + \alpha (I v)_x^2 - I u (I v)^2 \, dx$. By Plancherel and the fact that $m$, $u$, and $v$ are real-valued, we have \begin{align*} E^{(1)}(t) = & - \Lambda_2(\xi_1\xi_2m(\xi_1)m( \xi_2); u, u) -\alpha \Lambda_2(\xi_1\xi_2m(\xi_1)m( \xi_2); v, v)\\ &- \Lambda_3(m(\xi_1)m( \xi_2)m(\xi_3); u, v, v). \end{align*} Using \eqref{MB}, we have \begin{equation} \label{Y1stMOD} \frac{d}{dt} E^{(1)} (u, v) = \Lambda_3(M_3;u, v, v ) + \Lambda_4(M_4;u, u, v, v ) + \Lambda_4(\widetilde{M_4};v, v, v, v ), \end{equation} where \begin{equation} \label{YM3} M_3 = i (\xi_1^3 m^2(\xi_1) + \alpha \xi_2^3 m^2(\xi_2) + \alpha \xi_3^3 m^2(\xi_3)) - i (\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3)m(\xi_1)m(\xi_2)m(\xi_3), \end{equation} and \begin{equation} \label{YM4} M_4 = 2i[\xi_{23} m(\xi_1)m(\xi_{23})m(\xi_4) ]_{\substack{1 \leftrightarrow 2 \\ 3 \leftrightarrow 4}}, \quad \widetilde{M_4} = \tfrac{i}{2}[\xi_{14} m(\xi_{14})m(\xi_{2})m(\xi_3)]_{\rm sym}.
\end{equation} We would like to point out the presence of \emph{the resonance equation}: \begin{equation} \label{YRESEQ} \xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3 = 0 \quad\text{with } \xi_1 + \xi_2 + \xi_3 = 0 \end{equation} in the second term of $M_3$. (Since $\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3$ does not appear in the denominator, it does not really cause a resonance in this case. Nonetheless, we refer to $\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3 \sim 0$ as the resonant case.) Note that $M_3$ is not symmetric in $\xi_1, \xi_2$, and $\xi_3$, since $\alpha \ne 1$. To prove Theorem \ref{mainTHM:YGWP1}, we need to estimate the time growth of $E^{(1)}(t)$ using multilinear analysis. However, this asymmetry prohibits certain cancellations which would be present if $\alpha = 1$. It turns out that the argument for the growth bound on $M_3$ requires separate treatments for the resonant and non-resonant cases as in the local theory \cite{OH1}, and that the trilinear term with $M_3$ has the worst decay in \eqref{Y1stMOD}. See \cite{OHTHESIS} for the proof of Theorem \ref{mainTHM:YGWP1}. Now, we define the second modified energy $E^{(2)}(t)$ by \begin{equation} \label{Z2ndMOD} E^{(2)}(t) = E^{(1)}(t) + \Lambda_3(\sigma_3; u, v, v ), \end{equation} where the 3-multiplier $\sigma_3$ is to be chosen later. Using \eqref{MB}, we have \begin{align*} \frac{d}{dt} \Lambda_3(\sigma_3; u, v, v ) &= i \Lambda_3\big((\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3)\sigma_3(\xi_1, \xi_2, \xi_3); u, v, v \big) \\ &\quad -2i \Lambda_4(\xi_{23} \sigma_3 (\xi_1, \xi_{23}, \xi_4); u, u, v, v)\\ &\quad -\tfrac{i}{2}\Lambda_4( \xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3) ; v, v, v, v). \end{align*} By choosing \begin{equation} \label{YSIGMA3} \sigma_3 = \frac{iM_3}{\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3}, \end{equation} we cancel the trilinear term in \eqref{Y1stMOD}.
Then, we have \begin{equation} \label{Y2ndMODDD} \begin{aligned} \frac{d}{dt} E^{(2)} (u, v) = & \ \Lambda_4(M_4;u, u, v, v ) -2i \Lambda_4(\xi_{23} \sigma_3 (\xi_1, \xi_{23}, \xi_4); u, u, v, v) \\ &+ \Lambda_4(\widetilde{M_4};v, v, v, v ) -\tfrac{i}{2}\Lambda_4( \xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3) ; v, v, v, v). \end{aligned} \end{equation} \begin{remark} \label{REM:ZE2} \rm $E^{(1)}$ is real-valued, since $m(\cdot)$ is real-valued and even. By definition, $\sigma_3$ is real-valued. Moreover, $\sigma_3(-\xi_1,-\xi_2,-\xi_3) = \sigma_3(\xi_1,\xi_2,\xi_3)$. Hence, for real-valued $u$ and $v$, $E^{(2)}(t)$ defined in \eqref{Z2ndMOD} is real-valued for all $t$. \end{remark} From \eqref{YM3} and \eqref{YSIGMA3}, we have \begin{align*} - 2i \xi_{23} \sigma_3 (\xi_1, \xi_{23}, \xi_4) = & 2 i \xi_{23} \frac{\xi_1^3 m^2(\xi_1) + \alpha \xi_{23}^3 m^2(\xi_{23}) + \alpha \xi_4^3 m^2(\xi_4)}{\xi_1^3 + \alpha \xi_{23}^3 + \alpha \xi_4^3} \\ & -2i\xi_{23} m(\xi_1)m(\xi_{23})m(\xi_4). \end{align*} Note that the second term above exactly cancels the term with $M_4$ in \eqref{Y2ndMODDD}. A similar cancellation occurs between $\widetilde{M_4}$ and $-\tfrac{i}{2} \xi_{14} \sigma_3(\xi_{14}, \xi_2, \xi_3)$ in \eqref{Y2ndMODDD}. Hence, we have \begin{equation}\label{Y2ndMOD} \frac{d}{dt} E^{(2)} (u, v) = \Lambda_4(M'_4;u, u, v, v ) + \Lambda_4(\widetilde{M_4}';v, v, v, v ), \end{equation} where \begin{gather} \label{YM'_4} M'_4 = \Big[2 i \xi_{23} \frac{\xi_1^3 m^2(\xi_1) + \alpha \xi_{23}^3 m^2(\xi_{23}) + \alpha \xi_4^3 m^2(\xi_4)} {\xi_1^3 + \alpha \xi_{23}^3 + \alpha \xi_4^3} \Big]_{\substack{1\leftrightarrow2 \\3\leftrightarrow4}} \\ \label{YWTM'_4} \widetilde{M_4}' = \Big[\frac{ i}{2} \xi_{14} \frac{\xi_{14}^3 m^2(\xi_{14}) + \alpha \xi_{2}^3 m^2(\xi_{2}) + \alpha \xi_3^3 m^2(\xi_3)}{\xi_{14}^3 + \alpha \xi_{2}^3 + \alpha \xi_3^3}\Big]_{\rm sym}. 
\end{gather} Remark \ref{REM:ZE2} tells us that we need to consider only the real parts of the quadrilinear expressions in \eqref{Y2ndMOD}, i.e. $\text{Re}\Lambda_4(M'_4;u, u, v, v )$ and $\text{Re}\Lambda_4(\widetilde{M_4}';v, v, v, v )$. Also, note that the resonance equation \eqref{YRESEQ} appears in the denominators of $M'_4$ and $\widetilde{M_4}'$. Since we assume $s_0 <1$, we know that the denominator cannot be exactly 0. However, it can be very small, causing the small denominator problem. Thus, we need to proceed using the Diophantine conditions as in the local theory. When $\alpha = 1$, one of the quadrilinear multipliers, $\widetilde{M_4}$, was given by \begin{equation*} \widetilde{M_4} =\Big[ 2i\xi_{23} \frac{\xi_{1}m^2(\xi_{1}) +\xi_{23}m^2(\xi_{23})+ \xi_{4}m^2(\xi_{4})}{\xi_1^3 + \xi_{23}^3+\xi_4^3} \Big]_{\substack{1\leftrightarrow 2\\ 3\leftrightarrow 4}}, \end{equation*} which could be singular. In the proof of Lemma \ref{LEM:XM4BOUND}, we first used the algebraic identity $\xi_1^3 + \xi_{23}^3+\xi_4^3 = 3 \xi_1\xi_{23}\xi_4$ to cancel $\xi_{23}$ in the numerator and denominator. Then, we obtained pointwise cancellations, using the other algebraic identity \eqref{XALGEBRA2} and the symmetry of $\widetilde{M_4}$. However, when $\alpha \ne 1$, we do not have such nice algebraic identities or symmetry of the multipliers. Therefore, we cannot hope to have reasonable pointwise estimates on $M_4'$ and $\widetilde{M_4}'$. It turns out that we have a different kind of cancellation in this case. In estimating $\text{Re}\Lambda_4(M'_4;u, u, v, v )$, we can use the symmetry $1 \leftrightarrow 2$ and $3 \leftrightarrow 4$. As we will see in Subsection \ref{SEC:Y2ndMOD}, the contribution of $\text{Re}\Lambda_4(M'_4;u, u, v, v )$ near one resonance set is exactly cancelled with that of the permuted expression near its corresponding resonance set. This cancellation takes place at the level of whole sums rather than pointwise. 
In the following subsections, we discuss the proof of Theorem \ref{mainTHM:YGWP2}. Lastly, consider the third modified energy $E^{(3)}$ given by \[ E^{(3)}(t) = E^{(2)}(t) + \Lambda_4(\sigma_4; u, u, v, v ) + \Lambda_4(\widetilde{\sigma_4}; v, v, v, v ), \] where $\sigma_4$ and $\widetilde{\sigma_4}$ are chosen later to cancel the quadrilinear terms in \eqref{Y2ndMOD}. Using \eqref{MB}, we have \begin{align*} \frac{d}{dt} \Lambda_4(\sigma_4; u,u, v, v ) = & i \Lambda_4\big((\xi_1^3 + \xi_2^3 + \alpha \xi_3^3 + \alpha \xi_4^3)\sigma_4(\xi_1, \xi_2, \xi_3, \xi_4); u, u, v, v \big) \\ & + \Lambda_5(M_5; u, u, u, v, v) + \Lambda_5(M_5'; u, v, v, v, v), \end{align*} where $M_5 = 2i \xi_{34} \sigma_4(\xi_1, \xi_{2}, \xi_{34}, \xi_5)$ and $ M_5' = i \xi_{23} \sigma_4(\xi_1, \xi_{23}, \xi_4, \xi_5)$, and \begin{align*} \frac{d}{dt} \Lambda_4(\widetilde{\sigma_4}; v,v, v, v ) = & \, i \alpha \Lambda_4\big(( \xi_1^3 + \xi_2^3 + \xi_3^3 + \xi_4^3)\widetilde{\sigma_4}(\xi_1, \xi_2, \xi_3, \xi_4); v, v, v, v \big) \\ & + \Lambda_5(\widetilde{M_5}; u, v, v, v, v), \end{align*} where $\widetilde{M_5} = 4 i \xi_{12} \widetilde{\sigma_4}(\xi_{12}, \xi_{3}, \xi_4, \xi_5)$. Then, by choosing \begin{equation} \label{YSIGMA4} \sigma_4 = \frac{i M_4'}{\xi_1^3 + \xi_2^3 + \alpha \xi_3^3 + \alpha \xi_4^3}, \quad \widetilde{\sigma_4} = \frac{i \widetilde{M_4}'}{\alpha \xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3 + \alpha \xi_4^3}, \end{equation} we have \begin{equation} \label{Y3rdMOD} \frac{d}{dt} E^{(3)} (u, v) = \Lambda_5(M_5;u, u,u, v, v ) + \Lambda_5(M_5';u, v, v, v, v ) + \Lambda_5(\widetilde{M_5};u, v, v, v, v ). \end{equation} Note that $\sum_{j = 1}^4 \xi_j^3$ appearing in the denominator of $\widetilde{\sigma_4}$ allows us to use the algebraic identity \eqref{XALGEBRA2} to establish a reasonable bound on $\widetilde{M_5}$. However, there is a problem with $\sigma_4$. 
We have \[ M_5 = -4 i \xi_{34} \xi_{234} \times \frac{\xi_1^3 m^2(\xi_1) + \alpha \xi_{234}^3 m^2(\xi_{234}) + \alpha \xi_5^3 m^2(\xi_5)}{\xi_1^3 + \alpha \xi_{234}^3 + \alpha \xi_5^3} \times \frac{1}{\xi_1^3 + \xi_2^3 + \alpha \xi_{34}^3 + \alpha \xi_5^3}. \] The denominator of the third factor is 0 when $\xi_2 = - \xi_1$ and $\xi_5 = - \xi_{34}$, while the second factor is equal to 1 for $|\xi_j| \ll N , j = 1, \dots, 5$. This shows that $M_5$ is unbounded over \[ \big\{ (\xi_1, \dots, \xi_5) \in (\mathbb{Z}/\lambda)^5 : \xi_{12} = \xi_{345} = 0, \xi_{34} \ne 0 \text{ or } -\xi_2, \text{ and } |\xi_j| \ll N , j = 1, \dots, 5 \big\}. \] Since $\Lambda_5(M_5;u, u,u, v, v )$ is the only term with 3 $u$'s and 2 $v$'s in \eqref{Y3rdMOD}, we can not use the other two quintilinear expressions to gain any cancellation. Therefore, there is no control on the growth of the third modified energy $E^{(3)}$ and thus Theorem \ref{mainTHM:YGWP2} with the second modified energy $E^{(2)}$ is the best global well-posedness result we can obtain via the I-method. \subsection{Scaling and Local Well-Posedness of the $I$-System} \label{SEC:YISYSTEM} Our goal is to establish the well-posedness of the Majda-Biello system \eqref{MB} with the initial condition $(u_0, v_0) \in H^s(\mathbb{T}) \times H^s(\mathbb{T})$ on an arbitrary time interval $[0, T]$ for all $ s \geq S$, where $S = S(s_0)$ is a function depending on $s_0$. 
First, apply the $\lambda$-scaling $\big(u^\lambda , v^\lambda \big)(x, t) = \tfrac{1}{\lambda^2} \big(u, v\big)(\tfrac{x}{\lambda}, \tfrac{t}{\lambda^3})$ and $ \big(u_0^\lambda , v_0^\lambda \big)(x) = \tfrac{1}{\lambda^2} \big(u_0, v_0\big)(\tfrac{x}{\lambda}) $ to the Majda-Biello system \eqref{MB}, and consider the following Cauchy problem on $\mathbb{T}_\lambda$: \begin{equation} \label{YSCALEDIVP} \begin{gathered} u_t^\lambda + u^\lambda_{xxx} + v^\lambda v^\lambda_x = 0\\ v^\lambda_t + \alpha v^\lambda_{xxx} + (u^\lambda v^\lambda)_x = 0 \\ \big(u^\lambda , v^\lambda \big)(x, 0) = \big(u^\lambda_0, v^\lambda_0 \big)(x) \in H^s(\mathbb{T}_\lambda) \times H^s(\mathbb{T}_\lambda). \end{gathered} \end{equation} Then \eqref{MB} is well-posed on the time interval $[0, T]$ if and only if \eqref{YSCALEDIVP} is well-posed on $[0, \lambda^3T]$. Applying the $I$ operator to \eqref{YSCALEDIVP}, we obtain the $\lambda$-scaled $I$-system. \begin{equation} \label{YISYSTEM} \begin{gathered} \partial_t I u^\lambda + \partial_x^3 Iu^\lambda + \frac{1}{2} \partial_x I(v^\lambda)^2 = 0 \\ \partial_t I v^\lambda + \alpha \partial_x^3 Iv^\lambda + \partial_x I(u^\lambda v^\lambda ) = 0 \\ \big(Iu^\lambda, Iv^\lambda\big) (x, 0) = (Iu_0^\lambda, Iv_0^\lambda) \in H^1(\mathbb{T}_\lambda) \times H^1(\mathbb{T}_\lambda). \end{gathered} \end{equation} Then \eqref{MB} is well-posed on $[0, T]$ with the initial condition $(u_0, v_0) \in H^s (\mathbb{T})\times H^s(\mathbb{T})$ if and only if the $\lambda$-scaled $I$-system \eqref{YISYSTEM} is well-posed on $[0, \lambda^3 T]$ with the initial condition $(Iu_0,I v_0) \in H^1(\mathbb{T}) \times H^1(\mathbb{T})$. 
From the local theory in \cite{OH1} and the interpolation lemma \cite[Lemma 12.1]{CKSTT5}, it follows that \eqref{YISYSTEM} is locally well-posed on the time interval $[0, 1]$ for small initial data in $H^1(\mathbb{T}_\lambda) \times H^1(\mathbb{T}_\lambda)$ satisfying \begin{equation} \label{YSMALLNESSY} \lambda^{s_0+} \|(Iu^\lambda_0, Iv^\lambda_0)\|_{H^1(\mathbb{T}_\lambda) \times H^1(\mathbb{T}_\lambda) } \leq \varepsilon_0 \ll 1, \end{equation} for some small $\varepsilon_0 > 0$. We point out that the scaling constant $\lambda^{s_0+}$ appears in \eqref{YSMALLNESSY} due to the fact that the crucial bilinear estimate \eqref{bilinear1} (and \eqref{bilinear2}) holds on $\mathbb{T}_\lambda \times \mathbb{R}$ with a constant $\sim \lambda^{\frac{1}{2} + \frac{1}{2} \nu_{c_1} +}$ (and $\lambda^{\frac{1}{2} + \frac{1}{2} \max( \nu_{d_1}, \nu_{d_2})+}$), respectively; i.e., the contraction argument scales like $\sim \lambda^{\frac{1}{2} + \frac{1}{2} \max( \nu_{c_1}, \nu_{d_1}, \nu_{d_2})+} = \lambda^{s_0+}$. When $\alpha = 1$ (and for KdV \cite{CKSTT4}), the scaling constant $\lambda^{0+}$ did not play an important role. However, when $\alpha \ne 1$, it is important to keep this constant $\lambda^{s_0+}$. A direct calculation shows \begin{equation} \label{YISCALING} \|(Iu^\lambda_0, Iv^\lambda_0)\|_{H^1(\mathbb{T}_\lambda) \times H^1(\mathbb{T}_\lambda) } \lesssim \lambda^{-\frac{3}{2} - s} N^{1-s} \|(u_0, v_0)\|_{H^s(\mathbb{T})\times H^s(\mathbb{T})}. \end{equation} Then, from \eqref{YSMALLNESSY} and \eqref{YISCALING}, we choose $\lambda$ so that the $\lambda$-scaled initial condition $(Iu^\lambda_0, Iv^\lambda_0)$ is sufficiently small; i.e., \begin{equation} \label{YLD} \lambda^{s_0+} \lambda^{-\frac{3}{2} - s} N^{1-s} \|(u_0, v_0)\|_{H^s\times H^s} = \varepsilon_0 \ll 1\Longrightarrow \lambda \sim N^{\frac{2-2s}{3+2s -2s_0-}}. 
\end{equation} Note that we have $\| (u_0^\lambda, v_0^\lambda) \|_{L^2 \times L^2} = \lambda^{-\frac{3}{2}}\|(u_0, v_0)\|_{L^2 \times L^2}$. By choosing $\lambda$ as in \eqref{YLD}, we see that $\lambda$ is a positive power of $N$ for $s, s_0 <1$. Thus, for fixed $(u_0, v_0)$ on $\mathbb{T}$, we can make the $\lambda$-scaled initial condition $\| (u_0^\lambda, v_0^\lambda) \|_{L^2 \times L^2} $ arbitrarily small. Also, from the $L^2$ conservation of the $\lambda$-scaled system \eqref{YSCALEDIVP} and the fact that $m (\cdot) \leq 1$, we have \begin{equation} \label{YL2CONTROL} \|(Iu^\lambda, Iv^\lambda) (t)\|_{L^2 \times L^2} \leq \|(u^\lambda, v^\lambda)(t)\|_{L^2 \times L^2} = \|(u^\lambda_0, v^\lambda_0)\|_{L^2 \times L^2} \end{equation} as long as the solution exists. In the following, we work only on the $\lambda$-scaled $I$-system \eqref{YISYSTEM}, and thus we drop the superscript $\lambda$. Our goal is to show that \eqref{YISYSTEM} is well-posed on the time interval $[0, \lambda^3T]$. \subsection{On the Hamiltonian} \label{SEC:YHAMILTONIAN} In this subsection, we discuss some properties of the Hamiltonian $H(u, v) = \int u_x^2 + \alpha v_x^2 - u v^2 dx$. Sobolev embedding and the conservation of the $L^2$ norm and the Hamiltonian yield the following a priori bound on the $H^1$ norm of the solution. (c.f. \cite{BS}.) \begin{lemma} \label{LEM:YHAMILH1} Let $(u, v)$ be a smooth solution of \eqref{MB} with the initial condition $(u_0, v_0)$. Then, we have \[ \|(u(t), v(t))\|_{H^1 \times H^1} \lesssim \big(1 + \|(u_0, v_0)\|_{L^2 \times L^2} \big) \|(u_0, v_0)\|_{H^1 \times H^1}, \] where the implicit constant is independent of $\lambda \geq 1$. \end{lemma} Now, we'd like to use $ H(f, g) $ to control $\|(f, g)\|_{ H^1(\mathbb{T}_\lambda)}$ when $\|(f, g)\|_{ H^1(\mathbb{T}_\lambda)} \ll 1$. In this case, we have $\|(f, g)\|_{\dot{H}^1}^{1/2} \gg \|(f, g)\|_{\dot{H}^1}^{2} \sim \int f_x^2 + \alpha g_x^2$. 
For our purpose, it turns out that $H(f, g)$ is not sufficient to control $\|(f, g)\|_{ H^1(\mathbb{T}_\lambda)}$. Instead, we use $\| (f, g) \|^2_{L^2} + H(f, g)$ to control $\|(f, g)\|_{ H^1(\mathbb{T}_\lambda)}$. \begin{lemma} \label{LEM:YH1CONTROL} Assume that $\|(f, g)\|_{L^2 \times L^2} \leq \varepsilon_0 \ll 1$. Then, we have \[ \|(f, g)\|^2_{H^1\times H^1} \lesssim \| (f, g) \|^2_{L^2} + H(f, g). \] In particular, if $\|(Iu, Iv)(t)\|_{L^2 \times L^2} \leq \varepsilon_0 \ll 1$, then we have \begin{equation} \label{YH1CONTROL} \|(Iu, Iv)(t)\|^2_{H^1\times H^1} \lesssim \| (Iu, Iv)(t) \|^2_{L^2} + E^{(1)}(t). \end{equation} \end{lemma} \begin{proof} Let $C$ be the constant such that $\|f\|_{L^\infty} \leq C \|f\|_{H^1}$ and let $\varepsilon_0$ be sufficiently small such that $C \varepsilon_0 \leq 1$. Then, we have \[ \Big| \int fg^2 dx\Big| \leq \|f\|_{L^\infty} \|g\|_{L^2}^2 \leq \tfrac{1}{2} \|f\|_{H^1}^2 + \tfrac{C^2}{2}\|g\|_{L^2}^4 \leq \tfrac{1}{2} \|f\|_{H^1}^2 +\tfrac{1}{2} \|g\|_{L^2}^2. \] Hence, we have \[ \| (f, g) \|^2_{L^2} + H(f, g) \geq \tfrac{1}{2} \|f\|_{H^1}^2 +\tfrac{1}{2} \|g\|_{L^2}^2 + \alpha \|g\|^2_{\dot{H}^1} \geq \min(\tfrac{1}{2}, \alpha) \|(f, g)\|^2_{H^1\times H^1}. \] \end{proof} As a corollary of the proof, we obtain a control on $E^{(1)}(t)$. \begin{corollary} \label{COR:YH1CONTROL} If $\|(Iu, Iv)(t)\|_{L^2 \times L^2} \leq \varepsilon_0 \ll 1$, then we have \[ |E^{(1)}(t)| \leq \max(\tfrac{3}{2}, \alpha) \|(Iu, Iv)(t)\|^2_{H^1 \times H^1}. \] \end{corollary} \subsection{Useful Estimates} \label{SEC:YYESTIMATE} By solving the resonance equation $ \xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3 = 0 $ with $\xi_1+\xi_2+\xi_3= 0$, we obtain $(\xi_2, \xi_3) = (-c_1\xi_1, -c_2\xi_1)$ or $(-c_2\xi_1, -c_1\xi_1)$, where $c_1$ and $c_2$ are as in \eqref{c_1}. 
Now, define two non-resonance sets $A$ and $B$ by \begin{gather*} A = \{ (\xi_1, \xi_2, \xi_3) : \xi_1+\xi_2+\xi_3= 0, |\xi_2 + c_1 \xi_1| \geq 1 \text{ and } |\xi_2 + c_2 \xi_1| \geq 1 \}\\ B = \{ (\xi_1, \xi_2, \xi_3) : \xi_1+\xi_2+\xi_3= 0, |\xi_2 + c_1 \xi_1| \geq \tfrac{1}{\lambda} \text{ and } |\xi_2 + c_2 \xi_1| \geq \tfrac{1}{\lambda} \}. \end{gather*} Recall that the bilinear estimates \eqref{bilinear1} and \eqref{bilinear2} on $\mathbb{T}_\lambda \times \mathbb{R}$ hold for $s\geq 0$ as long as we are away from the resonance sets. See Remark \ref{REM:bilinear}. Then, as a direct corollary of (the proof of) \cite[Propositions 3.7 and 3.8]{OH1}, we have the following lemma. \begin{lemma} \label{COR:YDUALBILINEAR1} Let $f, g, h$ have the spatial mean 0 on $\mathbb{T}_\lambda$. Then, we have \begin{itemize} \item[(a)] on $A$, i.e. away from the resonance set, \begin{equation} \label{YDUALBILINEAR1} \Big| \int_0^1 \Lambda_3(\chi_A; f, g, h) dt \Big| \lesssim \begin{cases} \lambda^{0+}\|f\|_{X^{-1, \frac{1}{2}} [0, 1]}\|g\|_{X_\alpha^{0, \frac{1}{2}} [0, 1]}\|h\|_{X_\alpha^{0, \frac{1}{2}} [0, 1]} \\ \lambda^{0+}\|f\|_{X^{0, \frac{1}{2}} [0, 1]} \|g\|_{X_\alpha^{-1, \frac{1}{2}}[0, 1]}\|h\|_{X_\alpha^{0, \frac{1}{2}} [0, 1]}. \end{cases} \end{equation} \item[(b)] on $B$, i.e. away from the resonance set, \begin{equation} \label{YDUALBILINEAR2} \Big| \int_0^1 \Lambda_3(\chi_B; f, g, h) dt \Big| \lesssim \begin{cases} \lambda^{1/2} \|f\|_{X^{-1, \frac{1}{2}} [0, 1]}\|g\|_{X_\alpha^{0, \frac{1}{2}} [0, 1]}\|h\|_{X_\alpha^{0, \frac{1}{2}} [0, 1]} \\ \lambda^{1/2} \|f\|_{X^{0, \frac{1}{2}} [0, 1]}\|g\|_{X_\alpha^{-1, \frac{1}{2}}[0, 1]} \|h\|_{X_\alpha^{0, \frac{1}{2}}[0, 1]}. \end{cases} \end{equation} \item[(c)] on $B^c$, i.e. 
near the resonance set, \begin{equation} \label{YDUALBILINEAR3} \Big| \int_0^1 \Lambda_3(\chi_{B^c}; f, g, h) dt \Big| \lesssim \begin{cases} \lambda^{s_0+}\|f\|_{X^{-1- s_0-, \frac{1}{2}} [0, 1]} \|g\|_{X_\alpha^{s_0+, \frac{1}{2}} [0, 1]} \|h\|_{X_\alpha^{s_0+, \frac{1}{2}} [0, 1]}\\ \lambda^{s_0+}\|f\|_{X^{s_0+, \frac{1}{2}} [0, 1]} \|g\|_{X_\alpha^{-1-s_0-, \frac{1}{2}} [0, 1]} \|h\|_{X_\alpha^{s_0+, \frac{1}{2}} [0, 1]}. \end{cases} \end{equation} \end{itemize} \end{lemma} Next, we present bilinear estimates analogous to Lemma \ref{LEM:XXXBILINEAR}. In the next subsection, we use these estimates to establish a control on the quadrilinear terms in \eqref{Y2ndMOD}. \begin{lemma} \label{LEM:YPRODESTIMATE} \textup{(a)} Let $s' \geq - 1$ and $ s \geq s' + \frac{3}{2}$. Then \begin{equation} \label{YPRODESTIMATE1} \|v_1 v_2 \|_{X^{s', \frac{1}{2}}} \lesssim \prod_{j = 1}^2 \| v_j \|_{Y^s_\alpha}. \end{equation} \textup{(b)} Let $s' \geq -\frac{3}{4}$ and $s \geq s'+\frac{3}{2} $. Then \begin{equation} \label{YPRODESTIMATE2} \| u v \|_{X_\alpha^{s', \frac{1}{2}}} \lesssim \| u \|_{Y^s} \| v\|_{Y^s_\alpha}. \end{equation} \end{lemma} \begin{proof} $\circ$ Proof of \eqref{YPRODESTIMATE1}: By symmetry, assume $\langle\tau_1 - \alpha\xi_1^3\rangle \geq \langle\tau_2 - \alpha\xi_2^3\rangle$. If $\langle\tau - \xi^3\rangle \lesssim \langle\tau_1 - \alpha\xi_1^3\rangle$, then the proof basically follows from Sobolev and H\"older inequalities as in case (4.2) in Theorem 3 in \cite{CKSTT5}, since $s -s' \geq \frac{3}{2} $ and $s >0$. Hence, we assume $\langle\tau_1 - \alpha \xi_1^3\rangle, \langle\tau_2 - \alpha \xi_2^3\rangle \ll \langle\tau - \xi^3\rangle $. In this case, we have $ 1\ll \langle\tau - \xi^3\rangle \sim | \xi^3-\alpha \xi_1^3-\alpha \xi_2^3|$. 
Thus, it suffices to show \begin{equation}\label{YBILINEAR2} \Big\| \int_{\substack{\xi = \xi_1 + \xi_2 \\ \tau = \tau_1 + \tau_2}} \frac{ | \xi^3-\alpha \xi_1^3-\alpha \xi_2^3|^{1/2} f(\xi_1, \tau_1)g(\xi_2, \tau_2)} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s} \langle\tau_1 - \alpha \xi_1^3\rangle^{1/2} \langle\tau_2 - \alpha \xi_2^3\rangle^{1/2}} \Big\|_{L^2_{\xi, \tau}} \lesssim \|f\|_{L^2_{\xi, \tau}} \|g\|_{L^2_{\xi, \tau}}. \end{equation} Now, let $\Gamma_{\xi}(\xi_1) = \xi^3 - \alpha \xi_1^3 - \alpha \xi_2^3$. When $|\xi_1|\sim |\xi|$, we have $|\Gamma_{\xi}(\xi_1)| \lesssim |\xi|^3$. Then, we have \[ \frac{|\Gamma_{\xi}(\xi_1)|^{1/2}} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s}} \lesssim \frac{|\xi|^\frac{3}{2}} {\langle\xi\rangle ^{-s'+s}} \lesssim 1, \] since $s - s' \geq \frac{3}{2}$. By symmetry, the same conclusion holds when $|\xi_2|\sim |\xi|$. When $|\xi_1| \gg |\xi|$, we have $|\Gamma_{\xi}(\xi_1)| \lesssim |\xi \xi_1^2|$ and $|\xi_2|\sim |\xi_1|$. Then, we have \begin{align*} \frac{|\Gamma_{\xi}(\xi_1)|^{1/2}} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s}} \lesssim \frac{|\xi|^{1/2} |\xi_1|} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{2s}} \lesssim 1, \end{align*} since $2s \geq 1 $ if $s' \leq -\tfrac{1}{2}$ and $2s -s' \geq s-s' \geq \frac{3}{2}$ if $s' \geq -1/2$. Then, $L^4_{x, t}, L^4_{x, t}$ H\"older inequality and Bourgain's $L^4_{x, t}$ Strichartz estimate \cite{BO1} establish \eqref{YBILINEAR2}. \noindent $\circ$ Proof of \eqref{YPRODESTIMATE2}: If $\langle\tau - \alpha \xi^3\rangle \lesssim \langle\tau_1 - \xi_1^3\rangle$ or $\langle\tau - \alpha\xi^3\rangle \lesssim \langle\tau_2 - \alpha\xi_2^3\rangle$, then the proof again follows from Sobolev and H\"older inequalities as in case (4.2) in Theorem 3 in \cite{CKSTT5}. Hence, we assume $\langle\tau_1 - \xi_1^3\rangle, \langle\tau_2 - \alpha \xi_2^3\rangle \ll \langle\tau - \alpha \xi^3\rangle $. 
In this case, we have $ 1\ll \langle\tau - \alpha \xi^3\rangle \sim | \alpha \xi^3- \xi_1^3-\alpha \xi_2^3|$. Thus, it suffices to show \begin{equation}\label{YBILINEAR22} \Big\| \int_{\substack{\xi = \xi_1 + \xi_2 \\ \tau = \tau_1 + \tau_2}} \frac{ | \alpha \xi^3- \xi_1^3-\alpha \xi_2^3|^{1/2} f(\xi_1, \tau_1)g(\xi_2, \tau_2)} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s} \langle\tau_1 - \xi_1^3\rangle^{1/2} \langle\tau_2 - \alpha \xi_2^3\rangle^{1/2}} \Big\|_{L^2_{\xi, \tau}} \lesssim \|f\|_{L^2_{\xi, \tau}} \|g\|_{L^2_{\xi, \tau}}. \end{equation} Now, let $\widetilde{\Gamma}_{\xi}(\xi_1) = \alpha \xi^3 - \xi_1^3 - \alpha \xi_2^3$. When $|\xi_1| \sim |\xi|$, we have $|\widetilde{\Gamma}_{\xi}(\xi_1)| \sim |\xi|^3$. Then \begin{align*} \frac{|\widetilde{\Gamma}_{\xi}(\xi_1)|^{1/2}} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s}} \sim \frac{|\xi|^\frac{3}{2} } {\langle\xi\rangle ^{-s'+s}} \lesssim 1, \end{align*} since $s - s' \geq \frac{3}{2}$. When $|\xi_1|\gg |\xi|$, we have $|\widetilde{\Gamma}_{\xi}(\xi_1)| \sim |\xi_1|^3$ and $|\xi_2|\sim|\xi_1|$. Then \begin{align*} \frac{|\widetilde{\Gamma}_{\xi}(\xi_1)|^{1/2}} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s}} \sim \frac{|\xi_1|^\frac{3}{2}} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{2s}} \lesssim 1, \end{align*} since $2s \geq 3/2$ if $s' \leq 0$ and $2s -s' \geq s-s' \geq \frac{3}{2}$ if $s' \geq 0$. When $|\xi_1|\ll |\xi|$, we have $|\widetilde{\Gamma}_{\xi}(\xi_1)| \lesssim |\xi^2\xi_1|$ and $|\xi_2|\sim|\xi|$. Then \[ \frac{|\widetilde{\Gamma}_{\xi}(\xi_1)|^{1/2}} {\langle\xi\rangle ^{-s'}\langle\xi_1\rangle^{s}\langle\xi_2\rangle^{s}} \lesssim \frac{|\xi||\xi_1|^{1/2}} {\langle\xi\rangle ^{-s'+ s}} \lesssim 1, \] since $ s - s' \geq 3/2$. Then, $L^4_{x, t}, L^4_{x, t}$ H\"older inequality and Bourgain's $L^4_{x, t}$ Strichartz estimate \cite{BO1} establish \eqref{YBILINEAR22}. 
\end{proof} \subsection{Quadrilinear Estimates and Almost Conservation Law} \label{SEC:Y2ndMOD} First, we state the growth estimates on the quadrilinear terms appearing in \eqref{Y2ndMOD}. This provides a control on the growth of the second modified energy $E^{(2)} (t)$. \begin{lemma} \label{LEM:YM4GROWTH3} Let $s > 5/8$. Let $M_4'$ be as in \eqref{YM'_4}. Assume the mean 0 condition on $u$. Then \begin{equation} \label{YM4GROWTH3} \begin{aligned} &\Big|\int_0^1 \mathop{\rm Re} \Lambda_4(M_4'; u, u, v, v) dt \Big|\\ &\lesssim \max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \|Iu\|^2_{Y^1[0, 1]}\|Iv\|^2_{Y_\alpha^1[0, 1]}. \end{aligned} \end{equation} \end{lemma} \begin{lemma} \label{LEM:YM4GROWTH4} Let $s > 5/8$. Let $\widetilde{M_4}'$ be as in \eqref{YWTM'_4}. Then \begin{equation} \label{YM4GROWTH4} \Big|\int_0^1 \mathop{\rm Re} \Lambda_4(\widetilde{M_4}'; v, v, v, v) dt \Big| \lesssim \max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \|Iv\|^4_{Y_\alpha^1[0, 1]}. \end{equation} \end{lemma} The condition $s > 5/8$ is not really a restriction to us since we are proving the global well-posedness only for $s > 5/7$. Lemma \ref{LEM:YH1CONTROL} says that $ E^{(1)}(t)$ basically controls $\| (Iu, Iv)(t)\|_{H^1 \times H^1}$. Now, we need to control $E^{(1)}(t)$ by $E^{(2)}(t)$. \begin{lemma} \label{LEM:Y1st2ndMOD} Let $\sigma_3$ be as in \eqref{YSIGMA3}. Let $\lambda$ be as in \eqref{YLD}. Then \begin{equation} \label{Y1st2ndMOD} \big| E^{(2)}(t) - E^{(1)}(t) \big| = \big| \Lambda_3(\sigma_3; u, v, v) \big| \lesssim \|Iu(t)\|_{H^1} \|Iv(t)\|_{H^1}^2. \end{equation} \end{lemma} The proofs of these lemmata are presented at the end of this section. We first prove Theorem \ref{mainTHM:YGWP2}, assuming these lemmata. Let $\vec{w} = (u, v)$ and $\vec{w_0} = (u_0, v_0)$, and fix $T$. 
From the local theory, if \eqref{YISYSTEM} is locally well-posed on $[t, t+1]$, then we have \begin{equation} \label{YYLOCALGROWTH} \sup_{t'\in [t, t+1]} \|I\vec{w}(t') \|_{H^1 } \leq \| I\vec{w}\|_{Y^1 \times Y^1_\alpha{[t, t+1]}} \leq 2 \|I\vec{w}(t) \|_{H^1 }. \end{equation} Let $C_\alpha = \min(\frac{1}{2}, \alpha)^{-1/2}$. Now, choose $\varepsilon_0 = \max( \|I\vec{w_0}\|_{H^1 }, \|\vec{w_0}\|_{L^2 })$ sufficiently small such that \begin{itemize} \item[(a)] $\| I\vec{w}(t) \|_{H^1 } <4 C_\alpha \varepsilon_0$ guarantees the local well-posedness of \eqref{YISYSTEM} for $[t, t+1]$. \item[(b)] $\| I\vec{w}(t) \|_{L^2 } \leq \varepsilon_0$ guarantees that \eqref{YH1CONTROL} holds. (Note that $\|\vec{w_0}\|_{L^2 } \leq \varepsilon_0$ implies $\| I\vec{w}(t) \|_{L^2 } \leq \varepsilon_0$ by \eqref{YL2CONTROL} as long as the solution exists.) \item[(c)] $\|I\vec{w}(t) \|_{H^1 } < 4 C_\alpha \varepsilon_0$ together with \eqref{YYLOCALGROWTH} makes $\|I\vec{w}(t+1)\|_{H^1 }$ sufficiently small so that \[ C_\alpha^2 C_1\|I\vec{w}(t+1)\|^3_{H^1 } \leq \tfrac{1}{2}\|I\vec{w}(t+1)\|^2_{H^1 }, \] where $C_1 $ is the constant from \eqref{Y1st2ndMOD}. \end{itemize} Now, we proceed with the iteration. Since $ \|I\vec{w_0}\|_{H^1 \times H^1} \leq \varepsilon_0 \leq 4 C_\alpha \varepsilon_0$, the solution exists on $[0, 1]$. By Lemmata \ref{LEM:YH1CONTROL} and \ref{LEM:Y1st2ndMOD}, we have \[ \| I\vec{w}(1) \|^2_{H^1} \leq C_\alpha^2 \big(\| I\vec{w}(1) \|^2_{L^2} + |E^{(2)}(1)| \big) + C_\alpha^2 C_1 \| I\vec{w}(1) \|^3_{H^1}. \] Then, by the condition (c), we have \begin{equation} \label{YUYU} \| I\vec{w}(1) \|^2_{H^1} \leq 2C_\alpha^2 \big(\| I\vec{w}(1) \|^2_{L^2} + |E^{(2)}(1)| \big) . \end{equation} By Lemma \ref{LEM:Y1st2ndMOD}, Corollary \ref{COR:YH1CONTROL}, and the condition (c), we have \begin{equation} \label{YYIMETHOD2} |E^{(2)}(0)| \leq |E^{(1)}(0)| + C_1 \varepsilon_0^3 \leq \max (2, \alpha+\tfrac{1}{2}) \varepsilon_0^2\leq 4.5 \varepsilon_0^2. 
\end{equation} From Lemmata \ref{LEM:YM4GROWTH3}, \ref{LEM:YM4GROWTH4}, and \eqref{YYLOCALGROWTH}, we have \begin{equation} \label{YYIMETHOD3} |E^{(2)}(t)| \leq |E^{(2)}(0)| + C_2 \max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \varepsilon_0^4 \end{equation} for all $t \in [0, 1]$. Then, by \eqref{YUYU}, \eqref{YYIMETHOD2}, and \eqref{YYIMETHOD3}, we have \begin{align*} \| I\vec{w}(1) \|^2_{H^1} & \leq 2C_\alpha^2\big(\| I\vec{w}(1) \|^2_{L^2} + |E^{(2)}(1)| \big) \\ & \leq 2C_\alpha^2\varepsilon_0^2 + 9C_\alpha^2\varepsilon_0^2 + 2C_\alpha^2 C_2 \max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \varepsilon_0^4\\ &< 13C_\alpha^2\varepsilon_0^2 \end{align*} as long as $\max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \ll 1$. Then, by condition (a), the solution is guaranteed to exist on $[1, 2]$. In general, after $K$ iterations, we have \begin{align*} \| I\vec{w}(K) \|^2_{H^1} & \leq 2C_\alpha^2\varepsilon_0^2 + 9C_\alpha^2\varepsilon_0^2 +2K C_\alpha^2 C_2 \max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \varepsilon_0^4 \\ &< 13C_\alpha^2\varepsilon_0^2 \end{align*} as long as $K \max(\lambda^{0+} N^{-\frac{1}{2}+}, \lambda^{s_0+} N^{-2+2s_0+}) \ll 1$, guaranteeing the well-posedness on $[0, K+1]$. Hence, this procedure establishes the well-posedness on the time interval of size $\sim [ 0, \min(\lambda^{0-} N^{\frac{1}{2}-}, \lambda^{-s_0-} N^{2-2s_0-})] $. Now, using \eqref{YLD}, we choose $N = N(T)$ such that \begin{equation} \label{YCHOOSEN2} \begin{gathered} \lambda^{0-} N^{\frac{1}{2}-} > \lambda^3 T \Longleftrightarrow N^{\frac{1}{2}-} \gtrsim \lambda^{3+} \sim N^{(3+)\frac{2-2s}{3+2s -2s_0-}}\\ \lambda^{-s_0-} N^{2-2s_0-} > \lambda^3 T \Longleftrightarrow N^{2-2s_0-} \gtrsim \lambda^{3+s_0+} \sim N^{(3+s_0+)\frac{2-2s}{3+2s -2s_0-}}. 
\end{gathered} \end{equation} By solving the inequalities in the exponents, we obtain \begin{equation} \label{YSRANGE2} s \geq \max \left( \frac{6(s_0+) - 2 (s_0+)^2}{5 - (s_0+)}, \frac{2 (s_0+) + 9}{14} \right). \end{equation} Then \eqref{YCHOOSEN2} can be satisfied by choosing large $N$ if $s$ satisfies \eqref{YSRANGE2}. This establishes the well-posedness of the $\lambda$-scaled $I$-system \eqref{YISYSTEM} on $[0, \lambda^3T]$ in $H^1(\mathbb{T}_\lambda) \times H^1(\mathbb{T}_\lambda)$. \smallskip The rest of this section is devoted to the proofs of Lemmata \ref{LEM:YM4GROWTH3}, \ref{LEM:YM4GROWTH4}, and \ref{LEM:Y1st2ndMOD}. Recall that $s_0 = s_0(\alpha)$ in \eqref{YS0} is fixed such that $s_0 \in [\frac{1}{2}, 1)$. \begin{proof}[Proof of Lemma \ref{LEM:YM4GROWTH3}] From \eqref{YM'_4}, we have \begin{equation} \label{PM4} \begin{aligned} M'_4 &= i \Big[ \xi_{23} \frac{\xi_1^3 m^2(\xi_1) + \alpha \xi_{23}^3 m^2(\xi_{23}) + \alpha \xi_4^3 m^2(\xi_4)} {\xi_1^3 + \alpha \xi_{23}^3 + \alpha \xi_4^3} \\ &\quad+ \xi_{14} \frac{\xi_2^3 m^2(\xi_2) + \alpha \xi_{14}^3 m^2(\xi_{14}) + \alpha \xi_3^3 m^2(\xi_3)} {\xi_2^3 + \alpha \xi_{14}^3 + \alpha \xi_3^3} \Big]_{\substack{1\leftrightarrow2 \\3\leftrightarrow4}} =:i [ {\rm I} + {\rm II}]_{\substack{1\leftrightarrow2 \\3\leftrightarrow4}} . \end{aligned} \end{equation} We prove the estimate for ${\rm I}$ (which holds for ${\rm II}$ after switching the indices $1\leftrightarrow2$ and $3\leftrightarrow4$), but we use $M_4' = i {\rm I} + i {\rm II}$ when we need cancellation between the contributions of ${\rm I}$ and ${\rm II}$. \noindent $\bullet$ {\bf Case (1):} $N_j \ll N$, $ j = 1, 2, 3, 4$. We have $m(\xi_j) = 1$ and $m(\xi_{23}) = m(\xi_{14}) = 1$. Then, from \eqref{PM4}, we have $M_4' = i \xi_{1234} = 0$. Thus, we assume $\max N_j \gtrsim N$. This implies that $\mathop{\rm med} N_j :=$ the second largest of $N_j \gtrsim N$ since $\xi_{1234} = 0$. 
In this setting, the resonance equation for ${\rm I}$ is given by $\Gamma_{\xi_1}(\xi_{23}) = \xi_1^3 + \alpha \xi_{23}^3 + \alpha \xi_4^3 = 0$. From Lemmata \ref{COR:YDUALBILINEAR1} (c) and \ref{LEM:YPRODESTIMATE} (b), we have \begin{align*} &\Big| \int_0^1 \Lambda_4( {\rm III} ; u, u, v, v) dt \Big|\\ & \lesssim \lambda^{s_0+} \|u\|_{X^{0, \frac{1}{2}}[0, 1]} \|\langle\partial_x\rangle ^{-\frac{3}{4}}u\langle\partial_x\rangle ^{-\frac{3}{4}}v\|_{X_\alpha^{-\frac{3}{4}, 1/2}[0, 1]} \|v\|_{X_\alpha ^{0, 1/2}[0, 1]} \\ & \lesssim \lambda^{s_0+} \|u\|_{Y^0[0, 1]}^2\|v\|_{Y_\alpha^0[0, 1]}^2, \end{align*} where $ {\rm III} = \frac{\xi_{23} \langle\xi_{23}\rangle ^{s_0+}} {\langle\xi_1\rangle^{s_0+}\langle\xi_4\rangle ^{s_0+} \langle\xi_2\rangle^\frac{3}{4}\langle\xi_3\rangle ^\frac{3}{4}\langle\xi_{23}\rangle ^\frac{3}{4}} $. Note that $\xi_{23}$ in the numerator of ${\rm III}$ allowed us to use Lemma \ref{COR:YDUALBILINEAR1} (c). Then, it suffices to prove \begin{equation} \label{PM41} P_1 : = \Big|\frac{{\rm I}\cdot {\rm III}^{-1}}{\prod_{j = 1}^4 m(\xi_j) \langle\xi_j\rangle} \Big| \lesssim N^{-2+2s_0+} \prod_{j = 1}^4 N_j^{0-}, \end{equation} If there is no resonance, i.e. on $A = \{ |\xi_{23} + c_1 \xi_1| \geq 1$ and $ |\xi_{23} + c_2\xi_1| \geq 1\},$ then, from Lemmata \ref{COR:YDUALBILINEAR1} (a) and \ref{LEM:YPRODESTIMATE} (b), we have \begin{equation} \begin{aligned} \label{PM42} \Big| \int_0^1 \Lambda_4( \text{IV}; u, u, v, v) dt \Big| & \lesssim \lambda^{0+} \|u\|_{X^{0, \frac{1}{2}}[0, 1]} \|\langle\partial_x\rangle ^{-\frac{3}{4}}u\langle\partial_x\rangle ^{-\frac{3}{4}}v\|_{X_\alpha^{-\frac{3}{4}, \frac{1}{2}}[0, 1]} \|v\|_{X_\alpha ^{0, \frac{1}{2}}[0, 1]} \\ & \lesssim \lambda^{0+} \|u\|_{Y^0[0, 1]}^2\|v\|_{Y_\alpha^0[0, 1]}^2, \end{aligned} \end{equation} where $ \text{IV} = \frac{\xi_{23} }{\langle\xi_2\rangle^{3/4}\langle\xi_3\rangle ^{3/4}\langle\xi_{23}\rangle ^{\frac{3}{4}}} $. 
Then, it suffices to prove \begin{equation} \label{PM43} P_2 : = \Big|\frac{{\rm I}\cdot \text{IV}^{-1}}{\prod_{j = 1}^4 m(\xi_j) \langle\xi_j\rangle} \Big| \lesssim N^{-\frac{1}{2}+} \prod_{j = 1}^4 N_j^{0-}, \end{equation} Note that we can switch the indices $1\leftrightarrow 2$ and $3\leftrightarrow 4$ in ${\rm III}$ and \text{IV} in establishing \eqref{PM41} and \eqref{PM43}. \noindent $\bullet$ {\bf Case (2):} $N_{23} \ll N$ $\circ$ Subcase (2.a): $N_1 \ll N $ $\Longrightarrow N_4 \ll N$. In this case, we have $N_2 \sim N_3 \gtrsim N$ since $\max N_j, \mathop{\rm med} N_j \gtrsim N$. Also, we have $m(\xi_1) = m(\xi_4) = m(\xi_{23}) = 1$. Then, ${\rm I} = \xi_{23}$ and thus we have \[P_1 = \frac{N_{23}}{m(N_2)m(N_3)\prod_{j = 1}^4N_j} \frac{N_2^{s_0+}N_3^{s_0+} N_1^\frac{3}{4}N_4^\frac{3}{4}N_{14}^\frac{3}{4}}{N_{14}N_{14}^{s_0+}} \lesssim N^{-2 + s_0+} \prod_{j = 1}^4N_j^{0-}.\] \noindent $\circ$ Subcase (2.b): $N_1 \gtrsim N $ $\Longrightarrow N_4 \sim N_1 \gtrsim N$. In this case, there is no resonance since $N_{23} \ll N \lesssim N_1 \sim N_4$, i.e. we are on $A$ and thus $|\Gamma_{\xi_1}(\xi_{23})| \sim N_1^3$. Suppose $N_2 \ll N$. Then, we have $N_3 \ll N$, and \[ P_2 \sim \frac{N_{23}N_{1}^3m^2(N_1)}{|\Gamma_{\xi_1}(\xi_{23})|m(N_1)m(N_4)\prod_{j = 1}^4 N_j} \frac{N_2^\frac{3}{4}N_3^\frac{3}{4}N_{23}^\frac{3}{4}}{N_{23}} \sim \frac{N_{23}^\frac{3}{4}}{N_1^2 N_2^\frac{1}{4}N_3^\frac{1}{4}} \lesssim N^{-\frac{3}{2}+} \prod_{j = 1}^4N_j^{0-}. \] Now, suppose $N_2 \gtrsim N$. Then, we have $N_3 \sim N_2 \gtrsim N$ since $N_{23} \ll N$. If $N_1 \gtrsim N_2$, then \[ P_2 \sim \frac{N_{23}N_{1}^3m^2(N_1)}{|\Gamma_{\xi_1}(\xi_{23})|\prod_{j = 1}^4 m(N_j) N_j} \frac{N_2^\frac{3}{4}N_3^\frac{3}{4}N_{23}^\frac{3}{4}}{N_{23}} \lesssim \frac{1}{N^{\frac{5}{4}-2s} N_1^{\frac{1}{2} + 2s} } \lesssim N^{-\frac{7}{4}+} \prod_{j = 1}^4N_j^{0-}. 
\] If $N_2 \gtrsim N_1$, then we exactly obtain the above computation (with $1\leftrightarrow 2$ and $3\leftrightarrow 4$), by using $\text{IV}$ after switching the indices $1\leftrightarrow 2$ and $3\leftrightarrow 4$. \noindent $\bullet$ {\bf Case (3):} $N_{23} \gtrsim N$ and $N_1 \gtrsim N \gg N_4$. In this case, there is no resonance, thus we have $|\Gamma_{\xi_1}(\xi_{23})| \sim N_1^3$. Note that $\max(N_2, N_3) \gtrsim N_{23} \gtrsim N$. Also, we have $N_{23} \sim N_1$ since $N_{23} = N_{14} \gg N_4$. Then, we have $N_1^3 m^2(N_1) \sim N_{23}^3 m^2(N_{23}) \gg N_4^3 m^2(N_4)$, since $N_1^3 m^2(N_1) \sim N_1^{1+2s}N^{2-2s} \gtrsim N^3 \gg N_4^3 m^2(N_4)$. \noindent $\circ$ Subcase (3.a): $N_2, N_3 \gtrsim N$. After switching the indices $1\leftrightarrow 2$ and $3\leftrightarrow 4$ in $\text{IV}$, we have \begin{align*} P_2 &\sim \frac{N_{23}N_{1}^3m^2(N_1)}{|\Gamma_{\xi_1}(\xi_{23})| \prod_{j = 1}^3 m(N_j)\prod_{j = 1}^4 N_j} \frac{N_1^\frac{3}{4}N_4^\frac{3}{4}N_{14}^\frac{3}{4}}{N_{14}}\\ &\sim \frac{1}{N^{1-s} N_1^{\frac{1}{2} -s} N_2^s N_3^s} \lesssim N^{-\frac{3}{2}+} \prod_{j = 1}^4N_j^{0-}. \end{align*} \noindent $\circ$ Subcase (3.b): $N_2 \gtrsim N \gg N_3$ (Symmetry takes care of the case $N_3 \gtrsim N \gg N_2$). In this case, we have $N_2 \sim N_{23} \sim N_1$ and $m(\xi_3) = 1$. After switching the indices $1\leftrightarrow 2$ and $3\leftrightarrow 4$ in $\text{IV}$, we have \begin{align*} P_2 \sim \frac{N_{23}N_{1}^3m^2(N_1)}{|\Gamma_{\xi_1}(\xi_{23})|m(N_1)m(N_2)\prod_{j = 1}^4 N_j} \frac{N_1^\frac{3}{4}N_4^\frac{3}{4}N_{14}^\frac{3}{4}}{N_{14}} \sim \frac{1}{ N_1^{1/2}} \lesssim N^{-\frac{1}{2}+} \prod_{j = 1}^4N_j^{0-}. \end{align*} \noindent $\bullet$ {\bf Case (4):} $N_{23} \gtrsim N$ and $N_4 \gtrsim N \gg N_1$. In this case, there is no resonance at $\xi_{23} = -c_1 \xi_1$ or $\xi_{23} = -c_2 \xi_1$ since $N_{23} \sim N_4 \gg N_1$. However, recall that $\Gamma_{\xi_1} (\xi_{23}) \sim 0$ when $\xi_1 \sim 0$. 
Thus, we only have $|\Gamma_{\xi_1} (\xi_{23})| \gtrsim N_1 N_{23}^2$. Note that we have $|\xi_{23}^3m^2(\xi_{23})| = N^{2-2s} \xi_{23}^{1+2s}$. By Mean Value Theorem, \begin{equation} \label{PM48} \begin{aligned} |\alpha \xi_{23}^3m^2(\xi_{23}) + \alpha \xi_4^3m^2(\xi_{4})| &= |\alpha \xi_{23}^3m^2(\xi_{23}) - \alpha (\xi_{23} + \xi_1) ^3m^2(\xi_{23} + \xi_1) | \\ & \sim N^{2-2s}N_{23}^{2s} N_1 \gg N_1^3 \sim |\xi_1^3m^2(\xi_1)| . \end{aligned} \end{equation} In this case, we have $\max(N_2, N_3) \sim N_4 \gtrsim N$. Without loss of generality, assume $N_2 \gtrsim N$. Then \begin{equation} \label{PM47} m(N_2) m(N_3) N_2 N_3 \gtrsim N^{1-s} N_2^s \min(N^{1-s}N_3^s, N_3) \gtrsim N^{1-s}N_2^s. \end{equation} Note that $\min(N^{1-s}N_3^s, N_3) $ equals $N^{1-s}N_3^s$ if $N_3 \gtrsim N$, and equals $N_3$ if $N_3 \ll N$. This point is not used here but will be used in Case (5). After switching the indices $1\leftrightarrow 2$ and $3\leftrightarrow 4$ in $\text{IV}$, we have \begin{align*} P_2 &\sim \frac{N_{23}N^{2-2s}N_{23}^{2s} N_1}{|\Gamma_{\xi_1}(\xi_{23}) |m(N_2)m(N_3)m(N_4)\prod_{j = 1}^4 N_j} \frac{N_1^\frac{3}{4} N_4^\frac{3}{4}N_{14}^\frac{3}{4}}{N_{14}}\\ &\sim \frac{1}{ N_4^{1/2}} \lesssim N^{-\frac{1}{2}+} \prod_{j = 1}^4N_j^{0-}. \end{align*} \noindent $\bullet$ {\bf Case (5):} $N_{23}, N_1, N_4 \gtrsim N$. In this case, we can have resonances (for ${\rm I}$); i.e., $\xi_1^3 + \alpha \xi_{23}^3 + \alpha \xi_4^3 \sim 0$ over $B = \{ |\xi_{23} + c_1\xi_1| \leq 1 \} $ and $B' = \{ |\xi_{23} + c_2\xi_1| \leq 1 \}$. On $B$, we have $ \xi_{23} = -c_1 \xi_1 + \delta $ and $ \xi_{4} = -c_2 \xi_1 - \delta $ where $ |\delta| \leq 1$ such that $\xi_{23}, \xi_4 \in \mathbb{Z}/\lambda$. For fixed $\xi_{23}$, this is equivalent to \begin{equation} \label{PM44} \xi_1 (\delta) = -\tfrac{1}{c_1} (\xi_{23} - \delta) = : \zeta_1(\delta) \text{ and } \xi_4(\delta) = \tfrac{c_2}{c_1}(\xi_{23}-\delta) -\delta =: \zeta_2(\delta). 
\end{equation} Then, we have $\sum_{\xi_3} \sum_{\xi_1}\sum_{\xi_4} \chi_B = \sum_{\xi_3}\sum_{\xi_{23}}\sum_{|\delta| \leq 1}$. When we repeat the proof for ${\rm II}$, this case corresponds to when $N_{14}, N_2, N_3 \gtrsim N$, and the resonances for ${\rm II}$ occur over $C = \{ |\xi_{14} + c_1\xi_2| \leq 1 \} $ and $ C' = \{ |\xi_{14} + c_2\xi_2| \leq 1 \}. $ On $C$, we have $ \xi_{14} = -c_1 \xi_2 - \delta $ and $ \xi_{3} = -c_2 \xi_2 + \delta $ where $ |\delta| \leq 1$ such that $\xi_{14}, \xi_3 \in \mathbb{Z}/\lambda$. For fixed $\xi_{14} = -\xi_{23}$, this is equivalent to \begin{gather} \label{PM45} \xi_2 (\delta) = -\tfrac{1}{c_1} (\xi_{14} + \delta) = \tfrac{1}{c_1} (\xi_{23} - \delta) = -\zeta_1(\delta) ,\\ \label{PM46} \xi_3(\delta) = \tfrac{c_2}{c_1}(\xi_{14} + \delta) +\delta = - \Big(\tfrac{c_2}{c_1}(\xi_{23}-\delta) -\delta\Big) = -\zeta_2(\delta), \end{gather} where $\zeta_1(\delta)$ and $\zeta_2(\delta)$ are defined in \eqref{PM44}. As before, we have $\sum_{\xi_4} \sum_{\xi_2}\sum_{\xi_3} \chi_C = \sum_{\xi_4}\sum_{\xi_{14}}\sum_{|\delta| \leq 1}$. 
By putting the contributions of ${\rm I}$ over $B$ and of ${\rm II}$ over $C$, we have \begin{align*} & \Lambda_4({\rm I} \cdot \chi_B + {\rm II}\cdot\chi_C; u, u, v, v ) \\ & = \Big(\frac{1}{2\pi\lambda} \Big)^3 \sum_{\xi_3}\sum_{\xi_{23}}\sum_{|\delta| \leq 1} \xi_{23} \frac{\xi_1^3 m^2(\xi_1) + \alpha \xi_{23}^3 m^2(\xi_{23}) + \alpha \xi_4^3 m^2(\xi_4)} {\xi_1^3 + \alpha \xi_{23}^3 + \alpha \xi_4^3} \\ &\quad\times \widehat{u}(\xi_1) \widehat{u} (\xi_2) \widehat{v} (\xi_3) \widehat{v} (\xi_4)\\ &\quad + \Big(\frac{1}{2\pi\lambda} \Big)^3 \sum_{\xi_4}\sum_{\xi_{14}}\sum_{|\delta| \leq 1}\xi_{14} \frac{\xi_2^3 m^2(\xi_2) + \alpha \xi_{14}^3 m^2(\xi_{14}) + \alpha \xi_3^3 m^2(\xi_3)} {\xi_2^3 + \alpha \xi_{14}^3 + \alpha \xi_3^3} \\ &\quad\times \widehat{u} (\xi_1) \widehat{u} (\xi_2) \widehat{v}(\xi_3) \widehat{v} (\xi_4). \end{align*} Using \eqref{PM44}, \eqref{PM45}, \eqref{PM46}, and $\xi_{14} = -\xi_{23}$, the above expression equals \begin{align*} &\Big(\frac{1}{2\pi\lambda} \Big)^3 \sum_{\xi_3}\sum_{\xi_{23}}\sum_{|\delta| \leq 1} \xi_{23} \frac{\zeta_1^3 m^2(\zeta_1) + \alpha \xi_{23}^3 m^2(\xi_{23}) + \alpha \zeta_2^3 m^2(\zeta_2)} {\zeta_1^3 + \alpha \xi_{23}^3 + \alpha \zeta_2^3} \\ &\times \widehat{u}(\zeta_1) \widehat{u} (\xi_{23} -\xi_3) \widehat{v} (\xi_3) \widehat{v} (\zeta_2)\\ & - \Big(\frac{1}{2\pi\lambda} \Big)^3 \sum_{\xi_4}\sum_{\xi_{23}}\sum_{|\delta| \leq 1}\xi_{23} \frac{- \zeta_1^3 m^2(\zeta_1) - \alpha \xi_{23}^3 m^2(\xi_{23}) - \alpha \zeta_2^3 m^2(\zeta_2)} {-\zeta_1^3 - \alpha \xi_{23}^3 - \alpha \zeta_2^3} \\ & \times \widehat{u} (\xi_{14}- \xi_4) \widehat{u} (-\zeta_1) \widehat{v}(-\zeta_2) \widehat{v} (\xi_4). \end{align*} Now, note that $u$ and $v$ are real-valued, i.e. $\widehat{u}(-\xi) = \overline{\widehat{u}}(\xi)$ and $\widehat{v}(-\xi) = \overline{\widehat{v}}(\xi)$. In particular, $\widehat{u} (\xi_{14}- \xi_4) = \overline{\widehat{u}} (\xi_{23}+ \xi_4)$. 
Then, letting $\theta = \xi_3$ in the first term and $\theta = -\xi_4$ in the second term, we have \begin{align*} & \Big(\frac{1}{2\pi\lambda} \Big)^3 \sum_{\theta}\sum_{\xi_{23}}\sum_{|\delta| \leq 1} \xi_{23} \frac{\zeta_1^3 m^2(\zeta_1) + \alpha \xi_{23}^3 m^2(\xi_{23}) + \alpha \zeta_2^3 m^2(\zeta_2)} {\zeta_1^3 + \alpha \xi_{23}^3 + \alpha \zeta_2^3} \\ & \times \big(\widehat{u}(\zeta_1) \widehat{u} (\xi_{23} -\theta) \widehat{v} (\theta) \widehat{v} (\zeta_2) - \overline{\widehat{u} (\xi_{23} - \theta) \widehat{u} (\zeta_1) \widehat{v}(\zeta_2) \widehat{v} (\theta)}\big). \end{align*} i.e., the contribution in this case is purely imaginary. Hence, $\text{Re} \Lambda_4({\rm I} \cdot \chi_B + {\rm II}\cdot\chi_C; u, u, v, v ) = 0$. The (real) contribution of ${\rm I}$ over its resonant set $B$ exactly cancelled the (real) contribution of ${\rm II}$ over its resonant set $C$ as the whole sum. A similar computation shows $\text{Re}\Lambda_4({\rm I} \cdot \chi_{B'} + {\rm II}\cdot\chi_{C'}; u, u, v, v ) = 0$. Now, we focus only on ${\rm I}$ again. From the above argument, we need to work only on the non-resonant set $ A = \{ |\xi_{23} + c_1\xi_1| \geq 1 $ and $ |\xi_{23} + c_2\xi_1| \geq 1 \}$. Without loss of generality, assume $N_2 \geq N_3$. Then, $N_2 \gtrsim N_{23}$. Also, note that $\max(N_1, N_4) \gtrsim N_{23} $. \noindent $\circ$ Subcase (5.a): $N_1 \sim N_{23} \gg N_4 \gtrsim N$. In this case, we have $|\Gamma_{\xi_1}(\xi_{23}) | \sim N_1^3$. 
If $N_2 \sim N_{23}$, then we have, using \eqref{PM47}, \begin{align*} P_2 & \sim \frac{N_{23}N_{23}^3 m^2(N_{23})}{|\Gamma_{\xi_1}(\xi_{23})| \prod_{j = 1}^4 m(N_j) N_j} \frac{N_2^\frac{3}{4}N_3^\frac{3}{4} N_{23}^\frac{3}{4}}{N_{23}}\\ &\lesssim \frac{ N_{23}^{-\frac{5}{4} + 2s}}{N_1^s N_4^s N^{1-s} N_2^{s-\frac{3}{4}} \min(N^{1-s} N_3^{s-\frac{3}{4}}, N_3^\frac{1}{4}) } \\ &\lesssim \max \Big(\frac{1}{N^{\frac{7}{4}-s+} N_4^s N_{23}^{0+}}, \frac{1}{N^{1-s} N_4^s N_{23}^{1/2}} \Big) \lesssim N^{-\frac{3}{2}+} \prod_{j = 1}^4N_j^{0-} . \end{align*} Now, suppose $N_2 \sim N_3 \gg N_{23}$. Then, after switching the indices in $\text{IV}$, we have \begin{align*} P_2 & \sim \frac{N_{23}N_{23}^3 m^2(N_{23})}{|\Gamma_{\xi_1}(\xi_{23})| \prod_{j = 1}^4 m(N_j) N_j} \frac{N_1^\frac{3}{4}N_4^\frac{3}{4}N_{14}^\frac{3}{4}}{N_{14}}\\ &\lesssim \frac{1}{N^{2-2s} N_{1}^{1/2} N_2^sN_4^{s - \frac{3}{4}}} \lesssim N^{-\frac{7}{4}+} \prod_{j = 1}^4N_j^{0-} . \end{align*} \noindent $\circ$ Subcase (5.b): $N_4 \sim N_{23} \gg N_1 \gtrsim N$. In this case, we have $|\Gamma_{\xi_1}(\xi_{23}) | \sim N_1 N_{23}^2$. Also, we can apply Mean Value Theorem as in Case (4). If $N_2 \sim N_{23} $, then we have, using \eqref{PM48} and \eqref{PM47}, \begin{align*} P_2 & \sim \frac{N_{23}N^{2-2s}N_{23}^{2s}N_1}{|\Gamma_{\xi_1}(\xi_{23}) |\prod_{j = 1}^4 m(N_j) N_j} \frac{N_2^\frac{3}{4} N_3^\frac{3}{4}N_{23}^\frac{3}{4}}{N_{23}}\\ &\lesssim \frac{ N_{23}^{-\frac{5}{4} + 2s}}{N_1^{s} N_4^s N^{1-s} N_2^{s-\frac{3}{4}} \min(N^{1-s} N_3^{s-\frac{3}{4}}, N_3^\frac{1}{4}) } \\ &\lesssim \max \Big(\frac{1}{N^{\frac{7}{4}-s+} N_1^s N_{23}^{0+}}, \frac{1}{N^{1-s} N_1^s N_{23}^{1/2}} \Big) \lesssim N^{-\frac{3}{2}+} \prod_{j = 1}^4N_j^{0-} . \end{align*} Now, suppose $N_2 \sim N_3 \gg N_{23}$. Then, after switching the indices in $\text{IV}$ and using \eqref{PM48}, we have $ P_2 \lesssim N^{2s-2} N_1^{ \frac{3}{4} -s} N_2^{-s} N_{4}^{-1/2} \lesssim N^{-\frac{7}{4}+} \prod_{j = 1}^4N_j^{0-} $. 
\noindent $\circ$ Subcase (5.c): $N_1\sim N_4 \gg N_{23} \gtrsim N$. In this case, we have $|\Gamma_{\xi_1}(\xi_{23}) | \sim N_1^3$. If $N_1 \gtrsim N_2 $, then we have, using \eqref{PM47}, \begin{align*} P_2 & \sim \frac{N_{23}N_1^3 m^2(N_1)}{|\Gamma_{\xi_1}(\xi_{23})| \prod_{j = 1}^4 m(N_j) N_j} \frac{N_2^\frac{3}{4} N_3^\frac{3}{4}N_{23}^\frac{3}{4}}{N_{23}} \\ &\lesssim \frac{ N_1^{-2+2s} N_{23}^{3/4}}{N_1^{s} N_4^s N^{1-s} N_2^{s-\frac{3}{4}} \min(N^{1-s} N_3^{s-\frac{3}{4}}, N_3^\frac{1}{4}) } \\ &\lesssim \max \Big(\frac{1}{N^{\frac{7}{4}-s+} N_{1}^{0+}N_2^s }, \frac{1}{N^{1-s} N_{1}^{1/2} N_2^s} \Big) \lesssim N^{-\frac{3}{2}+} \prod_{j = 1}^4N_j^{0-} . \end{align*} If $N_2 \gtrsim N_1$, then we have $N_2 \sim N_3 \gg N_{23}$. After switching the indices of $\text{IV}$, we have $ P_2 \lesssim N^{2s-2} N_{2}^{\frac{1}{4} -2s} \lesssim N^{-\frac{7}{4}+} \prod_{j = 1}^4N_j^{0-} $. \noindent $\circ$ Subcase (5.d): $N_1\sim N_4 \sim N_{23} \gtrsim N$. In this case, we may be ``close'' to the resonant sets. However, we have $|\Gamma_{\xi_1}(\xi_{23}) | \gtrsim N_1^2 $ on $A$. If $N_2 \sim N_{23} $, then we have, using \eqref{PM47}, \begin{align*} P_2 & \sim \frac{N_{23}N_1^3 m^2(N_1)}{|\Gamma_{\xi_1}(\xi_{23})| \prod_{j = 1}^4 m(N_j) N_j} \frac{N_2^\frac{3}{4} N_3^\frac{3}{4}N_{23}^\frac{3}{4}}{N_{23}} \\ &\lesssim \frac{ N_1^{-1+2s} N_{23}^{3/4}}{N_1^{s} N_4^s N^{1-s} N_2^{s-\frac{3}{4}} \min(N^{1-s} N_3^{s-\frac{3}{4}}, N_3^\frac{1}{4}) } \\ &\lesssim \max \Big( \frac{1}{N^{2-2s} N_1^{0+} N_3^{2s-\frac{5}{4}-}}, \frac{1}{N^{1-s} N_{1}^{s- \frac{1}{2}} } \Big) \lesssim N^{-\frac{1}{2}+} \prod_{j = 1}^4N_j^{0-}, \end{align*} as long as $s > 5/8$. Now, suppose $N_2 \sim N_3 \gg N_{23}$. After switching the indices of $\text{IV}$, we have $ P_2 \lesssim N^{2s-2} N_1^\frac{5}{4} N_{2}^{-2s} \lesssim N^{-\frac{3}{4}+} \prod_{j = 1}^4N_j^{0-}, $ as long as $s > \frac{5}{8}$. 
\end{proof} \begin{proof}[Proof of Lemma \ref{LEM:YM4GROWTH4}] From \eqref{YWTM'_4}, we have \begin{equation} \label{QM4} \begin{aligned} \widetilde{M_4}' & = \Big[\frac{ i}{4} \xi_{23} \frac{\xi_{23}^3 m^2(\xi_{23}) + \alpha \xi_{1}^3 m^2(\xi_{1}) + \alpha \xi_4^3 m^2(\xi_4)} {\xi_{23}^3 + \alpha \xi_{1}^3 + \alpha \xi_4^3} \\ &\quad + \frac{ i}{4} \xi_{14} \frac{\xi_{14}^3 m^2(\xi_{14}) + \alpha \xi_{2}^3 m^2(\xi_{2}) + \alpha \xi_3^3 m^2(\xi_3)} {\xi_{14}^3 + \alpha \xi_{2}^3 + \alpha \xi_3^3} \Big]_{\rm sym}\\ &= \frac{i}{4} [ {\rm I} + {\rm II}]_{\rm sym}. \end{aligned} \end{equation} The proof is very similar to that of Lemma \ref{LEM:YM4GROWTH3}. In the following, we point out a few key points. \noindent $\bullet$ {\bf Case (1):} $N_j \ll N$, $ j = 1, 2, 3, 4$. From \eqref{QM4}, we have $\widetilde{M_4}' = \frac{i}{4} \xi_{1234} = 0$, since $m(\xi_j) = 1$ and $m(\xi_{23}) = m(\xi_{14}) = 1$. \noindent $\bullet$ {\bf Case (2):} $\max N_j \gtrsim N$. In this setting, the resonance equation for ${\rm I}$ is given by $\Gamma_{\xi_{23}}(\xi_{1}) = \xi_{23}^3 + \alpha \xi_{1}^3 + \alpha \xi_4^3 = 0$. As before, it follows from Lemmata \ref{COR:YDUALBILINEAR1} (c) and \ref{LEM:YPRODESTIMATE} (b) that it suffices to prove \eqref{PM41}. If there is no resonance, i.e. on $A = \{ |\xi_{1} + c_1 \xi_{23}| \geq 1 $ and $ |\xi_{1} + c_2\xi_{23}| \geq 1\},$ then, from Lemmata \ref{COR:YDUALBILINEAR1} (a) and \ref{LEM:YPRODESTIMATE} (a), we have \begin{equation} \label{QM42} \begin{aligned} \Big| \int_0^1 \Lambda_4( \text{IV}; v, v, v, v) dt \Big| & \lesssim \lambda^{0+} \|\langle\partial_x\rangle ^{-\frac{3}{4}}v\langle\partial_x\rangle ^{-\frac{3}{4}}v\|_{X^{-\frac{3}{4}, \frac{1}{2}}[0, 1]} \|v\|_{X_\alpha ^{0, \frac{1}{2}}[0, 1]}^2 \\ & \lesssim \lambda^{0+} \|v\|_{Y_\alpha^0[0, 1]}^4, \end{aligned} \end{equation} where $ \text{IV} = \frac{\xi_{23} }{\langle\xi_2\rangle^{3/4}\langle\xi_3\rangle ^{3/4}\langle\xi_{23}\rangle ^{\frac{3}{4}}} $. 
Then, it suffices to prove \eqref{PM43}. As before, we can switch the indices $1\leftrightarrow 2$ and $3\leftrightarrow 4$ in ${\rm III}$ and \text{IV} in establishing \eqref{PM41} and \eqref{PM43}. Note that we have $|\Gamma_{\xi_{23}}(\xi_{1})| \sim \max(N_1^3, N_{23}^3, N_4^3)$ away from the resonant set, i.e. on $A \cap \{ |\xi_{23}| \not\sim 0\}$. We have $|\Gamma_{\xi_{23}}(\xi_{1})| \gtrsim N_{23} N_1^2$ on $\{ |\xi_{23}| \sim 0\}$, and $|\Gamma_{\xi_{23}}(\xi_{1})| \gtrsim N_{23}^2$ on $A^c$. This is all we needed from the denominator of ${\rm I}$ in the proof of Lemma \ref{LEM:YM4GROWTH3}. Also, note that we can estimate the numerator of ${\rm I}$ by either \[ \max(\xi_{23}^3 m^2(\xi_{23}), \xi_{1}^3 m^2(\xi_{1}), \xi_4^3 m^2(\xi_4)) \] or \[\xi_{1}^3 m^2(\xi_{1}) + \xi_4^3 m^2(\xi_4) = \xi_{1}^3 m^2(\xi_{1}) - (\xi_1 + \xi_{23})^3 m^2(\xi_1 + \xi_{23}) \] along with Mean Value Theorem when $N_1 \sim N_4 \gg N_{23}$. Lastly, note that $\xi_{23}$ in ${\rm I}$ exactly cancels with $\xi_{23}$ in the numerators of ${\rm III}$ and $\text{IV}$. Hence, the proof basically follows from the proof of Lemma \ref{LEM:YM4GROWTH3} after replacing $(\xi_1, \xi_{23}, \xi_4)$ by $(\xi_{23}, \xi_{1}, \xi_4)$. \end{proof} \begin{proof}[Proof of Lemma \ref{LEM:Y1st2ndMOD}] From \eqref{YM3} and \eqref{YSIGMA3}, we have \[ \sigma_3 (\xi_1, \xi_2, \xi_3) = m(\xi_1)m(\xi_2)m(\xi_3) - \frac{\xi_1^3m^2(\xi_1) + \alpha \xi_2^3m^2(\xi_2) + \alpha \xi_3^3m^2(\xi_3)}{\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3} =: {\rm I} - {\rm II} . \] Since $t$ is fixed, we drop $t$-dependence in writing $u(t)$ and $v(t)$. Note that \eqref{Y1st2ndMOD} is equivalent to \begin{equation} \label{Y1st2ndMOD1} \Big| \Lambda_3\Big( \frac{\sigma_3}{\prod_{j = 1}^3 \langle\xi_j\rangle m(\xi_j) }; u, v, v \Big) \Big| \lesssim \|u\|_{L^2_x} \|v\|_{L^2_x}^2. 
\end{equation} By H\"older inequality and Sobolev embedding, we have \begin{equation} \label{Y1st2ndMOD2} \begin{aligned} &\Big| \Lambda_3 \Big( \frac{1}{\prod_{j = 1}^3 \langle\xi_j\rangle^{s_j} }; u, v, v \Big) \Big| \\ &= \Big|\int \langle\partial_x\rangle ^{-s_1} u \langle\partial_x\rangle ^{-s_2}v \langle\partial_x\rangle ^{-s_3}v dx\Big| \\ & \leq \|\langle\partial_x\rangle ^{-s_1} u\|_{L^3_x} \|\langle\partial_x\rangle ^{-s_2} v\|_{L^3_x} \|\langle\partial_x\rangle ^{-s_3} v\|_{L^3_x} \lesssim \|u\|_{L^2_x} \|v\|_{L^2_x}^2, \end{aligned} \end{equation} as long as $s_j > 1/6$. First, we estimate the contribution of ${\rm I}$. We have \[ \frac{{\rm I}}{\prod_{j = 1}^3 \langle\xi_j\rangle m(\xi_j) } = \frac{1}{\prod_{j = 1}^3 \langle\xi_j\rangle}. \] Hence, \eqref{Y1st2ndMOD1} for ${\rm I}$ follows from \eqref{Y1st2ndMOD2}. Now, we will consider the contribution of ${\rm II}$. Let \begin{equation} \label{Y1st2ndMOD3} M := \frac{{\rm II}}{\prod_{j = 1}^3 \langle\xi_j\rangle m(\xi_j) } = \frac{\xi_1^3m^2(\xi_1) + \alpha \xi_2^3m^2(\xi_2) + \alpha \xi_3^3m^2(\xi_3)}{\prod_{j = 1}^3 \langle\xi_j\rangle m(\xi_j)(\xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3)}. \end{equation} Without loss of generality, assume $N_2 \geq N_3$. \noindent $\bullet$ {\bf Case (1):} $N_j \ll N$, $j = 1, 2, 3$ $\Longrightarrow m(\xi_j) = 1$. In this case, we have $M = \langle N_1\rangle^{-1}\langle N_2\rangle ^{-1}\langle N_3\rangle ^{-1}$. Then, \eqref{Y1st2ndMOD1} follows from \eqref{Y1st2ndMOD2}. For the following argument, recall that the resonance $\Gamma_{\xi_1}(\xi_2) = \xi_1^3 + \alpha \xi_2^3 + \alpha \xi_3^3 \sim 0$ occurs only if $ N_1 \sim N_2 \sim N_3$. Also, note that $s \in (s_0, 1)$ with $s_0 \geq \frac{1}{2}$. In the following, we present the proof only for $\alpha \in (0, 1)$. From \eqref{c_1}, we have $c_1 > 1$ when $\alpha \in (0, 1)$, thus ruling out the possibility of the resonance in Case (2) below. When $\alpha \in (1, 4)$, one can basically repeat the same proof. 
(In this case, we have $c_1 \in (\frac{1}{2}, 1)$, which can be used to rule out the possibility of the resonance in Case (3).) \noindent $\bullet$ {\bf Case (2):} $N_1 = \max N_j \gtrsim N$ $\Longrightarrow N_2 \sim N_1 \gtrsim N$.\\ $\circ$ Subcase (2.a): $N_3 \ll N$ $\Longrightarrow m(\xi_3) = 1$. In this case we have no resonance and thus we have $|\Gamma_{\xi_1}(\xi_2)| \sim N_1^3$. Note that $ N_3^3 m^2(N_3) \ll N^3 \lesssim N_1^{1+2s}N^{2-2s} = N_1^3 m^2(N_1). $ Then, we have $ |M| \lesssim \frac{1}{N_1N_2N_3}$. Hence, \eqref{Y1st2ndMOD1} follows from \eqref{Y1st2ndMOD2}. \noindent $\circ$ Subcase (2.b): $N_3 \gtrsim N$. The resonance occurs only at $\xi_2 = -c_1 \xi_1$ or $\xi_3 = -c_1 \xi_1$. Since $c_1 > 1$, this implies $ |\xi_2| > |\xi_1|$ or $ |\xi_3| > |\xi_1|$. This can not happen in this case since $N_1 = \max N_j$. Thus, we have $|\Gamma_{\xi_1}(\xi_2)| \sim N_1^3$, and \[ |M| \lesssim \frac{N_1^3 m^2(N_1)}{N_1N_2N_3 m(N_1)m(N_2)m(N_3) |\Gamma_{\xi_1}(\xi_2)|} \sim \frac{1}{N^{1-s} N_1N_2N_3^s } \leq \frac{1}{N_1N_2N_3^s }. \] Then, \eqref{Y1st2ndMOD1} follows from \eqref{Y1st2ndMOD2} since $s > 1/2$. \noindent $\bullet$ {\bf Case (3):} $N_2 = \max N_j$.\\ $\circ$ Subcase (3.a): $N_2 \sim N_1 \geq N_3 $. If $N_3 \ll N$, then we have no resonance and the proof follows from Subcase (2.a). Now, assume $N_3 \gtrsim N$. In this case, the resonance occurs over the set $B = \{ |\xi_2 + c_1 \xi_1| \leq \frac{1}{\lambda}\}$. First, we consider the contribution of ${\rm II}$ on $B^c$. In this case, we have $|\Gamma_{\xi_1}(\xi_2)| \gtrsim \frac{N_1^2}{\lambda} $ on $B^c$, since $\Gamma_{\xi_1}(\xi_2)$ is a parabola (in $\xi_2$) such that $|\Gamma_{\xi_1}(c_1\xi_1)| = 0$ and $|\frac{d}{d \xi_2} \Gamma_{\xi_1}(c_1\xi_1)| \sim \xi_1^2$. 
Note that for $\lambda \sim N^{(1-s)\theta(s, s_0)} $ given in \eqref{YLD}, we have \begin{equation} \label{Y1st2ndMOD6} \frac{\lambda}{N^{1-s}} = N^{(1-s) (\theta(s, s_0) - 1)}\lesssim 1, \end{equation} since $ \theta(s, s_0) - 1 = \frac{-1 -2s +2s_0+}{3 + 2s - 2s_0 - } < 0$ for $ s > s_0$ and $s < 1$. Then \[ |M\chi_{B^c} | \lesssim \frac{N_1^3 m^2(N_1)\chi_{B^c} }{N_1N_2N_3 m(N_1)m(N_2)m(N_3) \Gamma_{\xi_1}(\xi_2)} \sim \frac{\lambda}{N^{1-s} N_1N_3^s } \sim \frac{1}{N_1^{1/2}N_2^{1/2}N_3^s}, \] and \eqref{Y1st2ndMOD1} follows from \eqref{Y1st2ndMOD2} since $s > 1/2$. Now, we consider the contribution of ${\rm II}$ on $B$. After adjusting \eqref{lowerbdC} and \eqref{lowerbdD} to the period $2\pi \lambda$, we have $|\Gamma_{\xi_1}(\xi_2)| \gtrsim \frac{N_1^{2-2s_0-} }{\lambda^{1+ 2s_0+}}$ on $B$. Also, on $B = \{ |\xi_2 + c_1 \xi_1| \leq \frac{1}{\lambda}\}$, there are only 2 values of $\xi_2$ and $\xi_3$ for each $\xi_1$, which we can write as $ \xi_2 = -c_1 \xi_1 + \delta_{\xi_1} \in \mathbb{Z}/\lambda$ and $ \xi_3 = -c_2 \xi_1 - \delta_{\xi_1} \in \mathbb{Z}/\lambda$ with $ |\delta_{\xi_1}| < \tfrac{1}{\lambda}$. Hence, there are only two terms in $\sum_{|\delta_{\xi_1}| \leq \frac{1}{\lambda}}$, and we have \begin{equation} \label{Y1st2ndMOD4} \begin{aligned} |\Lambda_3( \chi_B; u, v, v)| & = \Big| \Big(\frac{1}{2 \pi \lambda} \Big)^2\sum_{\xi_1 \in \mathbb{Z}/\lambda} \sum_{|\delta_{\xi_1}| \leq \frac{1}{\lambda}} \widehat{u}(\xi_1) \widehat{v}(-c_1 \xi_1 + \delta_{\xi_1}) \widehat{v}(-c_2 \xi_1 - \delta_{\xi_1}) \Big| \\ & \lesssim \lambda^{-1} \| \widehat{u} \|_{L^\infty_\xi} \| \widehat{v} \|_{L^2_\xi}^2 \leq \lambda^{-1} \| u \|_{L^1_x} \| v \|_{L^2_x}^2 \lesssim \lambda^{-1/2} \| u \|_{L^2_x} \| v \|_{L^2_x}^2, \end{aligned} \end{equation} where we used H\"older inequality (in $x$) in the last step. Using \eqref{Y1st2ndMOD4}, it suffices to prove $|M| \lesssim \lambda^{\frac{1}{2}}$. 
Since $N_1 \sim N_2 \sim N_3$ in this case, the numerator of $|M|$ is $ \sim N_1^3m^2(N_1)$. Then, we have \[ |M\chi_{B} | \lesssim \frac{N_1^3 m^2(N_1)\chi_{B} }{N_1^3 m^3(N_1) \Gamma_{\xi_1}(\xi_2)} \sim \frac{N_1^{1-s}\lambda^{1+2s_0+}}{N^{1-s} N_1^{2-2s_0-} } \lesssim \frac{\lambda^{1+2s_0+}}{ N^{2-2s_0-} }, \] since $1+s - 2s_0- > 0$ and $N_1 \gtrsim N$. Then, we have $|M\chi_B| \lesssim \lambda^{1/2}$ once we show \begin{equation} \label{Y1st2ndMOD5} \lambda^{\frac{1}{2}+ 2s_0+} \lesssim N^{2-2s_0-}. \end{equation} Since $s > s_0+$, we have $\text{RHS of } \eqref{Y1st2ndMOD5} > N^{2-2s}$. From \eqref{YLD}, we have \[ \text{LHS of } \eqref{Y1st2ndMOD5} \sim N^{(\frac{1}{2}+ 2s_0+) \frac{2-2s}{3+2s-2s_0-}}. \] Thus, \eqref{Y1st2ndMOD5} follows once we show $\frac{1}{2} + 2s_0+ \leq 3+2s-2s_0-$, which follows from $s_0+ < s < 1$. \noindent $\circ$ Subcase (3.b): $N_2 \sim N_3 \geq N_1 $. If $N_1 \ll N$, then we have $m(\xi_1) = 1$. Also, we have $|\Gamma_{\xi_1}(\xi_2)| \gtrsim N_1 N_2^2$ in this case. Then, we have $|M| \lesssim \frac{1}{N_1^2 N_2^{1/2} N_3^{1/2}}. $ Hence, \eqref{Y1st2ndMOD1} follows from \eqref{Y1st2ndMOD2}. Now, assume $N_1 \gtrsim N$. In view of Subcase (3.a), we need to consider this case only over the nonresonant set $B^c$. As before, we have $|\Gamma_{\xi_1}(\xi_2)| \gtrsim \frac{N_2^2}{\lambda} $ on $B^c$. Then, using \eqref{Y1st2ndMOD6}, we have \[ |M\chi_{B^c} | \lesssim \frac{N_2^3 m^2(N_2)\chi_{B^c} }{N_1N_2N_3 m(N_1)m(N_2)m(N_3) \Gamma_{\xi_1}(\xi_2)} \sim \frac{\lambda}{N^{1-s} N_1^s N_3 } \sim \frac{1}{N_1^s N_2^{1/2}N_3^{1/2}}, \] and \eqref{Y1st2ndMOD1} follows from \eqref{Y1st2ndMOD2} since $s > 1/2$. \end{proof} \section{Appendix: Proof of Lemma \ref{LEM:XXXTRILINEAR}} In this appendix, we present the proof of \eqref{XTAO2} and \eqref{XTAO3}, from which one can deduce Lemma \ref{LEM:XXXTRILINEAR}. Before going to the proof, we first list the linear estimates for $X^{s,b}_{j}$, $j = 1, 2$ defined in \eqref{XXSB}. c.f. 
\cite{CKSTT5}. For $s \geq s'$ and $b \geq b'$, $\|f\|_{X^{s',b'}_j} \leq \|f\|_{X^{s,b}_j}$. From spatial Sobolev inequality, we have \begin{equation} \label{AESTIMATE2} \|f\|_{L^2_t L^p_x} \lesssim \|f\|_{L^2_t H^s_x}= \|f\|_{X^{s,0}_j} \end{equation} for $0 \leq s < \frac{1}{2}$ and $2 \leq p \leq \frac{2}{1-2s}$ or for $s > 1/2 $ and $2 \leq p \leq \infty$. From spatial Sobolev inequality and Sobolev embedding in time, we have \begin{equation} \label{AESTIMATE3} \|f\|_{L^\infty_t L^p_x} \lesssim \|f\|_{L^\infty_t H^s_x} \lesssim \|f\|_{X^{s,\frac{1}{2}+}_j} \end{equation} for the same range of $s$ and $p$. In particular, we have $\|f\|_{L^\infty_{x, t}} \lesssim \|f\|_{X^{\frac{1}{2}+, \frac{1}{2}+}_j}$. By interpolating with the previous estimates, we have $\|f\|_{L^q_tL^r_x } \lesssim \|f\|_{X^{\frac{1}{2}+, \frac{1}{2}+}_j}$ for all $2 \leq q, r \leq \infty$. Interpolating this with \eqref{AESTIMATE2} for $s = 0$ and $p = 2$, we have \begin{equation} \label{AESTIMATE6} \|f\|_{L^q_tL^r_x } \lesssim \|f\|_{X^{\frac{1}{2} -\delta, \frac{1}{2}-\delta}_j} \end{equation} for all $0 < \delta < \frac{1}{2} $ and $ 2 \leq q, r <\frac{1}{\delta}$. From Bourgain's $L^6_{x, t}$ Strichartz estimate \cite{BO1}, we have \begin{equation} \label{AESTIMATE7} \|f\|_{L^6_{x, t}} \lesssim \|f\|_{X^{0+, \frac{1}{2}+}_j}. \end{equation} Interpolating this with \eqref{AESTIMATE6}, we obtain $\|f\|_{L^q_{x, t}} \lesssim \|f\|_{X^{\delta, \frac{1}{2}}_j}$ for all $0 < \delta < \frac{1}{2}$ and $2\leq q < \frac{6}{1-2\delta}$. In particular when $ q = 6$, we obtain \begin{equation} \label{COR:XL6} \|f\|_{L^6_{x, t}} \lesssim \|f\|_{X^{0+, \frac{1}{2}}_j} \end{equation} If we interpolate \eqref{AESTIMATE7} with Bourgain's $L^4_{x, t}$ Strichartz estimate \cite{BO1} \begin{equation} \label{AESTIMATE9} \|f\|_{L^4_{x, t}} \lesssim \|f\|_{X^{0, \frac{1}{3}}_j}, \end{equation} then we have, for $4 0$. Thus, we have $|l| \lesssim |\xi_4|^3$. 
Then, we have $|\lambda| \lesssim |\xi_4|^3$ from \eqref{X1P7} and $M_{\rm max} \ll |\xi_4|^3$. Also, we have $N_3 \ll |\xi_4|^\frac{1}{6}$ in this case since $N_3^{50} \ll \xi_1, \xi_2 \lesssim |\xi_4|$. Hence, by Lemma \ref{LEM:XDIVISORS}, we have \[ \# \{ (n, l) \in \mathbb{Z}^2: |l - \lambda | \lesssim M_{\rm max}, |n + \xi_4 | \lesssim N_3, n| l \} \lesssim \min (N_3, M_{\rm max}) \leq N_3^{1/2} M_{\rm max}^{1/2}. \] i.e., there are at most $\sim N_3^{1/2} M_{\rm max}^{1/2}$ values of $n = \xi_1 + \xi_2$ which can contribute to \eqref{X1P5}. Note that for each fixed value of $n = \xi_1 + \xi_2$, we have \[ \# \{ l \in \mathbb{Z} : |l - \lambda | \lesssim M_{\rm max}, \, n|l\} \leq 1. \] Otherwise, if $n|l$ and $n|l'$ with $l, l' = \lambda + O(M_{\rm max})$, then $l = q n$ and $l' = q'n$ for some $q, q' \in \mathbb{Z}$. Then, we have $|l-l'| = |q - q'|\, |n| \gtrsim |\xi_4| \gg M_{\rm max}^{50}$ unless $q = q'$. Thus, if $n|l$ and $|l - \lambda| \lesssim M_{\rm max}$, then $l$ is uniquely determined. On the other hand, we have \[ l = -3\xi_{12}\xi_{14}\xi_{24} = -3 n (\xi_1\xi_2+ n\xi_4 + \xi_4^2). \] i.e., the value of $\xi_1\xi_2$ is uniquely determined as well. Hence, $\xi_1$ and $\xi_2$ are uniquely determined (up to permutation). Therefore, the contribution to \eqref{X1P5} is at most $\sim N_3^{1/2} M_{\rm max}^{1/2}$ in this case. Now, we consider the case when $j_1, j_2, j_3$ are not all equal. \noindent $\circ$ Subcase (2.b): $j_1 = 2$, $j_2 = j_3= 1$ (The proof for the cases $j_1 \ne j_2 = j_3$ or $j_2 \ne j_1 = j_3$ is similar.) In this case, we have \begin{equation} \label{X1Q3} \sum_{k =1}^3 (-1)^{j_k}\xi_k = \xi_1 - \xi_2 - \xi_3 = 2\xi_1 + \xi_4 = - 2\xi_{23} - \xi_4. \end{equation} Thus \begin{equation} \label{X1Q4} \tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) = \tau_4 - \xi_4^3 + \big(\tfrac{p}{2} + L_1 \big)\xi_4 + 3 \xi_{12}\xi_{13}\xi_{14} + 2L_1 \xi_1. 
\end{equation} Let $ l = 3 \xi_{12} \xi_{13} \xi_{14}$ and $\lambda = - \tau_4 + \xi_4^3 - \big(\frac{p}{2} + L_1 \big)\xi_4$. Note that $\lambda$ is fixed as before. Then \begin{equation*} \big| \tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) \big| \lesssim M_{\rm max} \Longleftrightarrow | l - (\lambda - 2L_1\xi_1) | \lesssim M_{\rm max}. \end{equation*} Fix $\xi_1$. Then, by Subcase (2.a) or \eqref{XDIVISORS1}, for each fixed $n = \xi_1 + \xi_2 \sim -\xi_4$, we have \begin{equation} \label{X1Q1} \# \{ l \in \mathbb{Z} : |l - (\lambda- 2L_1\xi_1) | \lesssim M_{\rm max}, \, n|l\} \leq 1. \end{equation} Moreover, if there exists $l = 3 \xi_{12} \xi_{13} \xi_{14}$ such that $n := \xi_{12} | l$, then $\xi_1$ and $\xi_2$ are uniquely determined (up to permutation) as before. The issue here is that there may be $\xi_1' \ne \xi_1, \xi_2$ such that $n = \xi_1 + \xi_2 = \xi_1' + \xi_2' $ and $n |l' \ne l$ where $l ' = 3\xi_{12}'\xi_{13}' \xi_{14}'$ and $|l' - (\lambda- 2L_1\xi_1') | \lesssim M_{\rm max}$. We need to show that this cannot happen for fixed $\xi_4$. If there exists such $\xi_1'$ and $l'$, then, on the one hand, we have \begin{equation} \label{X1Q2} |l - l' + 2L_1(\xi_1 - \xi_1') | \leq |l - (\lambda- 2L_1\xi_1) | + |l' - (\lambda- 2L_1\xi_1') | \lesssim M_{\rm max}. \end{equation} On the other hand, \begin{align*} l - l' + 2L_1(\xi_1 - \xi_1') &= - 3 n (\xi_1+\xi_4)(\xi_2 + \xi_4) + 3 n (\xi_1'+\xi_4)(\xi_2' + \xi_4) + 2L_1(\xi_1 - \xi_1') \\ &= 3n(\xi_1'\xi_2' - \xi_1\xi_2) + 2L_1(\xi_1 - \xi_1')\\ &= (3n(\xi_1 + \xi_1' - n) + 2L_1)(\xi_1 - \xi_1'), \end{align*} since $n = \xi_{12} = \xi_{12}'$ and $\xi_4$ is fixed. Clearly, the second factor is not $0$. Also, $\xi_1 + \xi_1' - n \ne 0$ since $\xi_1' \ne \xi_2$. Then, it follows that $|l - l' + 2L_1(\xi_1 - \xi_1') | \gg M_{\rm max}^{50} $, since $n = \xi_{12} \gg M_{\rm max}^{50}$ and $L_1 = \frac{1}{2}\sqrt{ p_1^2 + 4q_1^2}$ is a fixed number. This clearly contradicts \eqref{X1Q2}. 
Hence, for fixed $n = \xi_{12}$, there exists at most one value of $\xi_1$ contributing \eqref{X1Q1}. Therefore, the contribution to \eqref{X1P5} is at most $O(N_3)$ in this case since $|n + \xi_4 | \lesssim N_3$. Next, we show that the contribution to \eqref{X1P5} is at most $O(M_{\rm max})$. Then, we can conclude that it is at most $O(N_3^{1/2} M_{\rm max}^{1/2})$. In this case, we need to assume $L_1 \in \mathbb{Q}$. i.e., $L_1 = \frac{a}{b}$ for some $a, b \in \mathbb{Z}$. Then, from \eqref{X1Q3} and \eqref{X1Q4}, we have \begin{equation} \label{X1Q7} \big| \tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) \big| \lesssim M_{\rm max} \Longleftrightarrow | \widetilde{l} - \widetilde{\lambda} | \lesssim M_{\rm max}, \end{equation} where \begin{equation} \label{X1Q9} \widetilde{l} = \xi_{23}(3b \xi_{12}\xi_{13} + 2a) \quad \text{and} \quad \widetilde{\lambda} = - b \lambda - 2 a \xi_4. \end{equation} Note that $\widetilde{\lambda}$ depends only on $\xi_4$ and $\tau_4$, and thus it is fixed. Also, note that $|\widetilde{l}| \sim \xi_1^2\xi_2$, since $\xi_1 \geq \xi_2 \gg \xi_3^{50}$. We consider the following two cases: $\xi_1 \gg \xi_2^2$ and $\xi_1 \lesssim \xi_2^2$. \noindent $\circ$ Subsubcase (2.b.i): $\xi_1 \gg \xi_2^2$. Let $n = 3 b \xi_{12}\xi_{13} + 2a$ and $\zeta = 3 b \xi_4^2 + 2a$. Then, we have $ |n - \zeta | = 3b | \xi_{4}( \xi_2 + \xi_{3}) + \xi_{2}\xi_3| \lesssim \xi_1 \xi_2$. Now, we claim that, for fixed $|\widetilde{l}| \sim \xi_1^2 \xi_2$, we have \begin{equation} \label{X1Q5} \# \{ n \in \mathbb{Z} : |n - \zeta| \lesssim \xi_1 \xi_2, \ n | \widetilde{l} \} \leq 1. \end{equation} Suppose not. i.e., there exist two integers $n_1, n_2$ in the above set. Then, by Lemma \ref{LEM:XDIVISION}, we would have \begin{equation} \label{X1Q6} \prod_{j = 1}^2 n_j \big| \, \widetilde{l} \gcd (n_1, n_2). \end{equation} Now, left-hand side of \eqref{X1Q6} is $O(\zeta^2) = O(\xi_4^4) = O(\xi_1^4)$. From \eqref{X1Q5}, we have $\gcd (n_1, n_2) \lesssim \xi_1 \xi_2$. 
Then the right-hand side of \eqref{X1Q6} $\lesssim\widetilde{l} \, \xi_1\xi_2 \sim \xi_1^3 \xi_2^2 \ll \xi_1^4 \sim$ LHS of \eqref{X1Q6}, which is a contradiction. Hence, for fixed $\widetilde{l}$, there's at most one value for $n = 3b \xi_{12}\xi_{13} + 2a$. Once $\widetilde{l}$ and $n$ are fixed, then $\xi_{23}$ is also determined since $\widetilde{l} = \xi_{23} n$. Then, $\xi_1 = -\xi_{23} - \xi_4$ is determined as well since $\xi_4$ is fixed. Now, note that \[ \widetilde{l} = \xi_{23}(3b \xi_{34}\xi_{24} + 2a) = 3b\xi_{23}(\xi_2 \xi_3 + \xi_{23}\xi_4 + \xi_4^2) + 2a \xi_{23}. \] This implies that $\xi_2 \xi_3$ is also determined, and thus $\xi_2$ and $\xi_3$ are uniquely determined since $\xi_2 \gg \xi_3^{50}$. Hence, $\xi_1, \xi_2, \xi_3$ are determined for fixed $\widetilde{l}$. In view of \eqref{X1Q7}, there are $O(M_{\rm max})$ many possible values for $\widetilde{l}$. Therefore, the contribution to \eqref{X1P5} is at most $O(M_{\rm max})$. \noindent $\circ$ Subsubcase (2.b.ii): $\xi_1 \lesssim \xi_2^2$. Recall that we'd like to prove \begin{equation} \label{X1Q8} \| \chi_A \|_{[4; \mathbb{Z}\times \mathbb{R}]} \lesssim (N_1M_1M_2M_3)^{1- \frac{2}{100}-}, \end{equation} where $A$ is as in \eqref{X1P9}. Note that \[ \chi_{\xi_{23} = \xi_2 + O(N_3)} \leq \sum_{K \in c \mathbb{Z}} \chi_{\xi_{23} = K + O(N_3)} \chi_{\xi_{2} = K + O(N_3)} \] for some $c \sim O(N_3)$. Thus, %LHS of \eqref{X1Q8} \[ \| \chi_A \|_{[4; \mathbb{Z}\times \mathbb{R}]} \leq \sum_{K \in c \mathbb{Z}} \chi_A \chi_{\xi_{23} = K + O(N_3)} \chi_{\xi_{2} = K + O(N_3)}. \] Without loss of generality, assume $|K| \gg (N_3 M_1M_2M_3)^{50}$. Let \begin{align*} m_K (\xi_{23}, \xi_{2}, \tau_{23}, \tau_2) & = \chi_A \chi_{\xi_{23} = K + O(N_3)} \chi_{\xi_{2} = K + O(N_3)} \\ & = \chi_A\chi_ {\{K + O(N_3)\}}(\xi_{23})\chi_ {\{K + O(N_3)\}}(\xi_{2}). 
\end{align*} Then, by letting $J_1 = \{23\}$ and $J_2 = \{2\}$, we have \[ \# \{ K \in c \mathbb{Z}: (\xi_{23}, \xi_{2}, \tau_{23}, \tau_2) \in \mathop{\rm supp}_{J_k} (m_K) \} \lesssim 1,\ k = 1, 2 \] since $c \sim O(N_3)$. Hence, by Schur's test \cite[Lemma 3.11]{TAO}, it is sufficient to show \begin{equation*} \sup_{K}\|\chi_A \chi_{\xi_{23} = K + O(N_3)} \chi_{\xi_{2} = K + O(N_3)} \|_{[4; \mathbb{Z}\times \mathbb{R}]} \lesssim (N_1M_1M_2M_3)^{1- \frac{2}{100}-}. \end{equation*} Using the notation in \eqref{X1Q9}, we have $\widetilde{l} = O(\xi_1^2\xi_2)$ with $|\widetilde{l} - \widetilde{\lambda}| \lesssim M_{\rm max}$. Suppose that, for fixed $\widetilde{l}$, there exist 7 values of $n = \xi_{23}$ with $n | \widetilde{l}$. i.e., $n_j $ is of the form $n_j = \xi_{23}$ for some $\xi_2$ and $ \xi_3$, $j = 1, \dots, 7$. Then, by the constraint $|\xi_{23} - K| \lesssim N_3$, we have $ \gcd(n_j, n_k) \leq | \xi_{23} - \xi_{23}'| \lesssim N_3$. Then, by Lemma \ref{LEM:XDIVISION}, we have \begin{equation} \label{X1Q10} \prod_{j = 1}^7 n_j \big| \, \widetilde{l} \prod_{1\leq j < k \leq 7} \gcd (n_j, n_k). \end{equation} On the one hand, we have the LHS of \eqref{X1Q10} $\sim \xi_2^7 \gtrsim \xi_1^{3.5}$. On the other hand, we have RHS of \eqref{X1Q10} $\sim \xi_1^2 \xi_2 N_3^{21} \leq \xi_1^3 N_3^{21} \ll \xi_1^{3.5},$ which is a contradiction to \eqref{X1Q10}. Therefore, for each fixed $\widetilde{l}$, there can be at most 6 such $n = \xi_{23}$ (which determines $\xi_1, \xi_2, \xi_3$ uniquely as before.) Since there are at most $O(M_{\rm max})$ many possible values for $\widetilde{l}$, we conclude that the contribution to \eqref{X1P5} is at most $O(M_{\rm max})$. \noindent$\circ$ Subcase (2.c): $j_1 = j_2 = 1,$ $ j_3 = 2$ (The proof for the case $j_1 = j_2 = 2,$ $ j_3 = 1$ is similar). In this case, we have $ \sum_{k =1}^3 (-1)^{j_k}\xi_k = -\xi_1 - \xi_2 + \xi_3 = -2\xi_{12} - \xi_4$. 
Then, with $L_1 = \frac{a}{b}$, we have $\tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) = \tfrac{1}{b}(l - \lambda)$, where \begin{equation} \label{X1R1} l = \xi_{12}(3b\xi_{13}\xi_{14} - 2a) \quad \text{and} \quad \lambda = -b\tau_4 + b\xi_4^3 - \big( \tfrac{pb}{2} + a\big)\xi_4. \end{equation} Thus \begin{equation} \label{X1R2} \big| \tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) \big| \lesssim M_{\rm max} \Longleftrightarrow | l - \lambda | \lesssim M_{\rm max}. \end{equation} Note that $l \sim \xi_1^2\xi_2 \ll M_{\rm max}^{50}$. Thus, without loss of generality, we may assume $\lambda \sim \xi_1^2\xi_2 \leq \xi_1^3$. Let $n = \xi_{12}$. Then, $n| l $ and $| n + \xi_4 | \lesssim N_3 \ll \xi_4^{1/50}$. Then, by Lemma \ref{LEM:XDIVISORS}, we have \begin{equation} \label{X1R3} \# \{ l \in \mathbb{Z} : |l - \lambda | \lesssim M_{\rm max}, \, n = \xi_{12} | l\} \leq 1 \end{equation} for each fixed $n = \xi_{12}$, and \begin{equation} \label{X1R4} \# \{ n \in \mathbb{Z} : |n + \xi_4 | \lesssim N_3, \, n|l\} \leq 3, \end{equation} for each fixed $l$ satisfying \eqref{X1R1} and \eqref{X1R2}. Hence, it follows that the contribution to \eqref{X1P5} is at most $O(N_3)$ from \eqref{X1R3} and that it is at most $O(M_{\rm max})$ from \eqref{X1R4}. \begin{remark} \label{rmk6.3} \rm We would like to point out the issue when $L_1 \notin \mathbb{Q}$. For example, in the second half of Subcase (2.b), we need to count the number of $(\xi_1, \xi_2) \in \mathbb{Z}^2$ satisfying $|l + 2L_1\xi_1- \lambda | \lesssim M_{\rm max}$. Let $\eta = l + 2L_1\xi_1$. We can show that there is exactly one value of $l$ for each fixed $\eta$. (Suppose $\eta = l + 2L_1\xi_1 = l' + 2L_1 \xi_1'$. Then, we have $l - l' = 2L_1(\xi_1' - \xi_1)$. If $L_1 \notin \mathbb{Q}$, then we must have $l = l'$ and $\xi_1 = \xi_1'$ since $l - l' \in \mathbb{Z}$ and $2L_1(\xi_1' - \xi_1) \notin \mathbb{Q} \setminus \{0\}$.)
From \eqref{XDIVISORS2}, we have $ \# \{ (\xi_1, \xi_2) \in \mathbb{Z}^2 : |\xi_{12} + \xi_4 | \lesssim N_3, \xi_{12} | l \} \lesssim 3$ for each $l$ with $|l| \lesssim |\xi_4|^3$. Hence, if we can express the number of possible values for $\eta$ with $| \eta - \lambda | \lesssim M_{\rm max}$ in terms of some power of $M_{\rm max}$, we can conclude the proof for $L_1 \notin \mathbb{Q}$ as well. However, $\eta$ is in $\mathbb{Z} + L_1 \mathbb{Z}$, which is dense in $\lambda + O(M_{\rm max})$, and we do not know how to count. As we have seen, our counting argument strongly depends on the divisibility relations among the integers. \end{remark} \noindent $\bullet$ {\bf Part 2:} Proof of \eqref{XTAO3}. The proof of \eqref{XTAO3} basically follows from \cite{CKSTT5}. We include the argument for the sake of completeness. Note that \eqref{XTAO3} is equivalent to showing \begin{equation} \label{X1N1} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim \|u_1\|_{X_{j_1}^{0,\frac{1}{2}}} \|u_2\|_{X_{j_2}^{0,\frac{1}{2}}}\|u_3\|_{X_{j_3}^{0,\frac{1}{2}}}\|u_4\|_{X_{j_4}^{\frac{1}{2}, 0}} \end{equation} for $j_1, j_2, j_3, j_4 \in \{1, 2\}$. First, note that by $L^4_{x, t}, L^4_{x, t}, L^\infty_t L^2_x, L^2_t L^\infty_x$ H\"older inequality along with \eqref{AESTIMATE9}, \eqref{AESTIMATE3}, and \eqref{AESTIMATE2}, we have \begin{equation} \label{X1N2} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim \|u_1\|_{X_{j_1}^{0,\frac{1}{3}}} \|u_2\|_{X_{j_2}^{0, \frac{1}{3}}}\|u_3\|_{X_{j_3}^{0,\frac{1}{2}+}}\|u_4\|_{X_{j_4}^{\frac{1}{2}+, 0}}. \end{equation} Then, it is sufficient to show that \begin{equation} \label{X1N3} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim \|u_1\|_{X_{j_1}^{0,\frac{1}{2}+}} \|u_2\|_{X_{j_2}^{0,\frac{1}{2}+}}\|u_3\|_{X_{j_3}^{0,\frac{1}{2}+}} \|u_4\|_{X_{j_4}^{\frac{1}{2}-\frac{1}{100}, 0}}.
\end{equation} Once we prove \eqref{X1N3}, then by interpolating \eqref{X1N2} and \eqref{X1N3}, we have \begin{equation} \label{X1N4} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim \|u_1\|_{X_{j_1}^{0,\frac{1}{2}-}} \|u_2\|_{X_{j_2}^{0,\frac{1}{2}-}}\|u_3\|_{X_{j_3}^{0,\frac{1}{2}+}}\|u_4\|_{X_{j_4}^{\frac{1}{2}-, 0}}. \end{equation} Then, interpolating \eqref{X1N4} with \eqref{X1N2} after switching $u_1$ and $u_3$, we obtain \begin{equation*} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim \|u_1\|_{X_{j_1}^{0,\frac{1}{2}-}} \|u_2\|_{X_{j_2}^{0,\frac{1}{2}-}}\|u_3\|_{X_{j_3}^{0,\frac{1}{2}-}} \|u_4\|_{X_{j_4}^{\frac{1}{2}-, 0}}, \end{equation*} which is sufficient to prove \eqref{X1N1}. Hence, we shall focus on proving \eqref{X1N3}. Note that \eqref{X1N3} follows once we prove \begin{equation} \label{X1N5} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim N_4^{\frac{1}{2}-\frac{1}{100}-} \|u_1\|_{X_{j_1}^{0, \frac{1}{2}+}} \|u_2\|_{X_{j_2}^{0,\frac{1}{2}+}} \|u_3\|_{X_{j_3}^{0,\frac{1}{2}+}}\|u_4\|_{L^2_{x, t}}. \end{equation} Note that among $j_1, j_2, j_3 \in \{1, 2\}$, either exactly two of them take the same value or all three of them take the same value. Thus, without loss of generality, assume $j_1 = j_2$. As in Part 1, assume $\xi_1$ and $\xi_2$ have the same sign. Moreover, assume $\xi_1 \geq \xi_2 \geq 0$. \noindent $\bullet$ {\bf Case (1):} $\langle\xi_3\rangle \lesssim N_4^{10}$ (The same proof works when $\langle\xi_1\rangle \lesssim N_4^{10}$ or $\langle\xi_2\rangle \lesssim N_4^{10}$). In this case, it is sufficient to show that \begin{equation*} \Big| \int u_1u_2u_3u_4 dx dt \Big| \lesssim \|u_1\|_{X_{j_1}^{0,\frac{1}{2}+}} \|u_2\|_{X_{j_2}^{0,\frac{1}{2}+}}\|u_3\|_{X_{j_3}^{0+, \frac{1}{2}+}}\|u_4\|_{X_{j_4}^{\frac{1}{2}-\frac{1}{100}-, 0}}. \end{equation*} This follows from $L^4_{x, t}, L^\infty_t L^2_x, L^6_{x, t}, L^\frac{12}{7}_t L^{12}_x$ H\"older inequality along with \eqref{AESTIMATE9}, \eqref{AESTIMATE3}, \eqref{COR:XL6}, and \eqref{AESTIMATE2}.
\noindent $\bullet$ {\bf Case (2):} $\langle\xi_1\rangle, \langle\xi_2\rangle, \langle\xi_3\rangle \gg N_4^{10}$. By averaging arguments \cite[Proposition 5.1 with $b_2 = 0$]{TAO}, we can assume $\lambda_k = |\tau_k - d_{j_k}(\xi_k)| \sim 1$, $k = 1, 2, 3$. That is, we can assume the Fourier transforms of $u_1, u_2, u_3$ are supported in $ \Omega_j = \{ (\xi, \tau)\in \mathbb{Z}\times \mathbb{R} : |\xi| \gg N_4^{10}, \tau = d_{j}(\xi) + O(1) \}$, $ j = 1$ or 2. Then, it is sufficient to show \begin{equation} \label{X1N6} \big\| \prod_{k = 1}^3 \chi_{\Omega_{j_k}} (\xi_k, \tau_k) \chi_{|\xi_4|\sim N_4} \big\|_{[4; \mathbb{Z} \times \mathbb{R}]} \lesssim N_4^{\frac{1}{2}-\frac{1}{100}-}. \end{equation} Let \[ \widehat{F}(\xi_{12}, \tau_{12}) = \int_{\substack{\xi_{12} = \xi_1+\xi_2\\ \tau_{12} = \tau_1 + \tau_2}} \chi_{\Omega_{j_1}}(\xi_1, \tau_1) \chi_{\Omega_{j_2}}(\xi_2, \tau_2) \widehat{u_1}(\xi_1,\tau_1)\widehat{u_2}(\xi_2,\tau_2). \] Then $F = \big( \mathbb{P}_{\Omega_{j_1}}u_1\big) \big( \mathbb{P}_{\Omega_{j_2}}u_2\big)$ and \begin{equation} \label{X1N7} \begin{aligned} \|F\|_{L^2_{x, t}} &\leq \| \mathbb{P}_{\Omega_{j_1}}u_1\|_{L^4_{x, t}} \| \mathbb{P}_{\Omega_{j_2}}u_2\|_{L^4_{x, t}} \lesssim \| \mathbb{P}_{\Omega_{j_1}}u_1\|_{X_{j_1}^{0, \frac{1}{3}}} \| \mathbb{P}_{\Omega_{j_2}}u_2\|_{X_{j_2}^{0, \frac{1}{3}}} \\ &\lesssim \|u_1\|_{L^2_{x, t}}\|u_2\|_{L^2_{x, t}} \end{aligned} \end{equation} from H\"older inequality and \eqref{AESTIMATE9}.
From \eqref{X1N6} and \eqref{X1N7}, it is sufficient to show \begin{align*} &\int_{\substack{\xi_{12}+\xi_3+\xi_4 = 0\\ \tau_{12}+\tau_3+ \tau_4 = 0}} \widehat{F}(\xi_{12}, \tau_{12}) \widehat{u_3}(\xi_3, \tau_3) \widehat{u_4}(\xi_4, \tau_4) \chi_{|\xi_4| \sim N_4} \chi_{\Omega_{j_3}}(\xi_3, \tau_3)\\ &\lesssim N_4^{\frac{1}{2}-\frac{1}{100}-} \prod_{k = 1}^4 \|u_k\|_{L^2_{x, t}}, \end{align*} or equivalently \begin{equation} \label{X1N8} \big\| \chi_{\Omega} (\xi_{12}, \tau_{12}) \chi_{\Omega_{j_3}} (\xi_3, \tau_3) \chi_{|\xi_4|\sim N_4} \big\|_{[4; \mathbb{Z} \times \mathbb{R}]} \lesssim N_4^{\frac{1}{2}-\frac{1}{100}-}, \end{equation} where $ \Omega = \{(\xi_{12}, \tau_{12}): (\xi_k, \tau_k) \in \Omega_{j_k}, \ k = 1, 2 \}$. Note that we have $\xi_{12} = -\xi_3 + O(N_4)$, and \[ \chi_{\xi_{12} = -\xi_3 + O(N_4)} \leq \sum_{K \in c \mathbb{Z} } \chi_{\xi_{12} = -K + O(N_4)}\chi_{ \xi_3= K + O(N_4)} \] for some $c \sim O(N_4)$. Without loss of generality, assume $|K| \gg N_4^{10}$. Then, we have \[ \text{LHS of } \eqref{X1N8} \leq \sum_{K \in c \mathbb{Z} } m_K(\xi_{12}, \xi_3, \xi_4, \tau_{12}, \tau_3, \tau_4), \] where $m_K(\xi_{12}, \xi_3, \xi_4, \tau_{12}, \tau_3, \tau_4) $ is equal to \[ \chi_{\Omega} (\xi_{12}, \tau_{12}) \chi_{\Omega_{j_3}} (\xi_3, \tau_3) \chi_{|\xi_4|\sim N_4} \chi_{\xi_{12} = -K + O(N_4)}\chi_{ \xi_3= K + O(N_4)}. \] Then, by letting $J_1 = \{12\}$ and $J_2 = \{3\}$, we have \[ \# \{ K \in c \mathbb{Z}: (\xi_{12}, \xi_{3},\xi_4, \tau_{12}, \tau_3, \tau_4) \in \mathop{\rm supp}_{J_k} (m_K) \} \lesssim 1, \ k = 1, 2 \] since $c \sim O(N_4)$. Hence, by Schur's test \cite[Lemma 3.11]{TAO}, it is sufficient to show that \begin{align*} &\sup_{K}\|\chi_{\Omega} (\xi_{12}, \tau_{12}) \chi_{\Omega_{j_3}} (\xi_3, \tau_3) \chi_{|\xi_4|\sim N_4} \chi_{\xi_{12} = -K + O(N_4)}\chi_{ \xi_3= K + O(N_4)} \|_{[4; \mathbb{Z}\times \mathbb{R}]} \\ &\lesssim N_4^{\frac{1}{2} -\frac{1}{100}-}.
\end{align*} As in Part 1, by Cauchy-Schwarz, it suffices to show that \begin{align*} &\int_{\substack{\xi_{12} + \xi_3 + \xi_4 = 0\\ \tau_{12} + \tau_3 + \tau_4 = 0}} \chi_{\Omega} (\xi_{12}, \tau_{12}) \chi_{\Omega_{j_3}} (\xi_3, \tau_3) \chi_{\xi_{12} = -K + O(N_4)}\chi_{ \xi_3= K + O(N_4)} d \xi_{12} d\xi_3 d\tau_{12} d\tau_3\\ & \lesssim N_4^{1-\frac{2}{100}-} \end{align*} for all $(\xi_4, \tau_4) \in \mathbb{Z} \times \mathbb{R}$ with $|\xi_4|\sim N_4$. Integrating in $\tau_{12}, \tau_3$, we reduce to showing \begin{equation} \label{X1N9} \begin{gathered} \#\big\{ (\xi_1, \xi_2, \xi_3) \in \mathbb{Z}^3: \xi_{12} + \xi_3 + \xi_4 = 0, \xi_{12} = -K +O(N_4), \\ \xi_3 = K + O(N_4), \Big| \tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) \Big| \sim 1 \big\} \lesssim N_4^{1-\frac{2}{100}-}. \end{gathered} \end{equation} \noindent $\circ$ Subcase (2.a): $j_1 = j_2 = j_3$. Let $ l = 3 \xi_{12} \xi_{13} \xi_{14} $ and $\lambda = - \tau_4 + \xi_4^3 - \big(\frac{p}{2} + (-1)^{j_k+1}L_1 \big)\xi_4$. Then, we have $\tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) = l -\lambda$ in view of \eqref{X1P6}. Note that $\lambda$ is fixed. In this case, we have $|l| = 3|\xi_{12} \xi_{24} \xi_{14}| \lesssim |\xi_1|^3 \lesssim |K|^3$ since $\xi_{12} = -K + O(N_4)$ with $\xi_1 \geq \xi_2 \geq 0$ and $\xi_1 \gg N_4^{10}$. Then, $|l - \lambda| \sim 1 $ implies $|\lambda | \lesssim |K|^3$. Also, $|\xi_{12} + K| \lesssim N_4 \ll A^\frac{1}{10}$. Hence, by Lemma \ref{LEM:XDIVISORS}, it follows that the contribution to \eqref{X1N9} is at most $O(1)$. \noindent $\circ$ Subcase (2.b): $j_1 = j_2 = 1, j_3 = 2$ (The proof for $j_1 = j_2 = 2, j_3 = 1$ is similar). Let $L_1 = \frac{a}{b}$ for some $a, b \in \mathbb{Z}$, and let $l$ and $\lambda$ be defined as in \eqref{X1R1}. As in Subcase (2.a), we have $|l| \lesssim |K|^3$ and $|\xi_{12} + K| \lesssim N_4 \ll A^\frac{1}{10}$. Then, we have \[ \Big| \tau_4 + \sum_{k = 1}^3 d_{j_k}(\xi_k) \Big| \sim 1 \Longleftrightarrow |l - \lambda |\sim 1.
\] Thus $|\lambda | \lesssim |K|^3$. Hence, by Lemma \ref{LEM:XDIVISORS}, it follows that the contribution to \eqref{X1N9} is at most $O(1)$. This completes the proof of Lemma \ref{LEM:XXXTRILINEAR}. \end{proof} \subsection*{Acknowledgements} The author would like to express his sincere gratitude to his Ph.D. advisor, Prof. Andrea R. Nahmod. The author is also grateful for the NSF summer support in 2005-2006 under Prof. Nahmod's grant DMS 0503542. \begin{thebibliography}{99} \bibitem{AR} V. Arnold, \emph{Geometrical Methods in the Theory of Ordinary Differential Equations}, 2nd ed., Springer-Verlag, New York, 1988. \bibitem{BS} J. Bona, R. Scott, \emph{Solutions of the Korteweg-de Vries equation in fractional order Sobolev spaces,} Duke Math. J. 43 (1976) no. 1, 87--99. \bibitem{BO1} J. Bourgain, \emph{Fourier transform restriction phenomena for certain lattice subsets and applications to nonlinear evolution equations II}, Geom. Funct. Anal., 3 (1993), 209--262. \bibitem{CKSTT2} J. Colliander, M. Keel, G. Staffilani, H. Takaoka, T. Tao, \emph{Global well-posedness for Schr\"odinger equations with derivative,} SIAM J. Math. Anal. 33 (2001) no. 3, 649--669. \bibitem{CKSTT4}J. Colliander, M. Keel, G. Staffilani, H. Takaoka, T. Tao, \emph{Sharp Global Well-Posedness for KdV and Modified KdV on $\mathbb{R}$ and $\mathbb{T}$,} J. Amer. Math. Soc. 16 (2003), no. 3, 705--749. \bibitem{CKSTT5} J. Colliander, M. Keel, G. Staffilani, H. Takaoka, T. Tao, \emph{Multilinear Estimates for Periodic KdV Equations, and applications,} J. Funct. Anal. 211 (2004), no. 1, 173--218. \bibitem{GG} J. A. Gear, R. Grimshaw, \emph{Weak and Strong interactions between internal solitary waves, } Stud. Appl. Math. 70 (1984), no. 3, 235--258. \bibitem{HW} G. H. Hardy, E. M. Wright, \emph{An introduction to the theory of numbers,} 5th ed., Oxford Univ. Press, New York, 1979. \bibitem{HS} R. Hirota, J. Satsuma, \emph{Soliton solutions of a coupled Korteweg-de Vries equation,} Partial Diff. Eq.
2 (1981), 408--409. \bibitem{KT} T. Kappeler and P. Topalov, \emph{Global well-posedness of KdV in $H\sp {-1}(\mathbb T,\mathbb R)$,} Duke Math. J. 135 (2006), no. 2, 327--360. \bibitem{KPV4} C. Kenig, G. Ponce, and L. Vega, \emph{A bilinear estimate with applications to the KdV equation,} J. Amer. Math. Soc. 9 (1996), no. 2 573--603. \bibitem{MB} A. Majda, J. Biello, \emph{The nonlinear interaction of barotropic and equatorial baroclinic Rossby waves,} J. Atmospheric Sci. 60(2003), no. 15, 1809 --1821. \bibitem{OHTHESIS} C. (T.) Oh, \emph{Well-posedness theory of a one parameter family of coupled KdV-type systems and their invariant measures,} Ph.D. Thesis, University of Massachusetts Amherst (2007). \bibitem{OH1} T. Oh, \emph{Diophantine Conditions in Well-Posedness Theory of Coupled KdV-Type Systems: Local Theory,} preprint. \bibitem{OH3} T. Oh, \emph{Invariant Gibbs measures and a.s. global well-posedness for coupled KdV systems,} to appear in Diff. Integ. Equations. \bibitem{OH4} T. Oh, \emph{Invariance of the white noise for KdV,} to appear in Comm. Math. Phys. \bibitem{TAO} T. Tao, \emph{Multilinear weighted convolution of $L\sp2$-functions, and applications to nonlinear dispersive equations,} Amer. J. Math. 123 (2001), no. 5, 839--908. \end{thebibliography} \end{document}