\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{longtable}
\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2015 (2015), No. 212, pp. 1--16.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu}
\thanks{\copyright 2015 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document}
\title[\hfilneg EJDE-2015/212\hfil
Frobenius power series solutions]
{A matrix formulation of Frobenius power series solutions using products
of $4\times 4$ matrices}
\author[J. Mandelkern \hfil EJDE-2015/212\hfilneg]
{Jeremy Mandelkern}
\address{Jeremy Mandelkern \newline
Department of Mathematics,
Eastern Florida State College,
Melbourne, FL 32935, USA}
\email{mandelkernj@easternflorida.edu}
\thanks{Submitted January 12, 2015. Published August 17, 2015.}
\subjclass[2010]{34B30, 33C10, 68W30, 34-03, 01A55}
\keywords{Matrix power series; Frobenius theory; Bessel equation}
\begin{abstract}
In Coddington and Levinson \cite[p.~119, Thm.~4.1]{coddington55} and
Balser \cite[pp.~18--19, Thm.~5]{balser00}, matrix formulations of Frobenius
theory, near a regular singular point, are given using $2\times 2$ matrix
recurrence relations yielding fundamental matrices consisting of two
linearly independent solutions together with their quasi-derivatives.
In this article we apply a reformulation of these matrix methods to the
Bessel equation of nonintegral order. The reformulated approach of this
article differs from \cite{coddington55} and \cite{balser00} by its
implementation of a new ``vectorization'' procedure that yields recurrence
relations of an altogether different form: namely, it replaces the implicit
$2\times 2$ matrix recurrence relations of both \cite{coddington55} and
\cite{balser00} by explicit $4 \times 4$ matrix recurrence relations
that are implemented by means only of $4 \times 4$ matrix products.
This new idea of using a vectorization procedure may further enable the
development of symbolic manipulator programs for matrix forms of the
Frobenius theory.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\allowdisplaybreaks
\section{Introduction}
The first appearance of a Bessel function was in a 1738 memoir by Daniel
Bernoulli \cite{bernoulli38}, \cite[p.~356]{whittaker02}, and since then,
numerous books and papers have been devoted to the properties of these
functions
(\cite[ch.~XVII]{whittaker02}, \cite{watson22},
\cite[vol.~1, 2, \& 3, ch.~VI, VII, \& XIX]{erdelyi55}, \cite{euler64},
\cite[ch.~1, 4]{schlesinger95}, \cite[ch.~9--11]{abramowitz64},
\cite{frobenius73}, \cite{fuchs66}, \cite{fuchs68}, \cite{bessel24},
\cite{courant24}, \cite{gray22}, \cite{gray95}, \cite{hille69},
\cite{lommel68}, \cite{neumann67}, \cite{olver10},
\cite{schlesinger95}--\cite{schlesinger22}).
In this article, we give a matrix recursion involving products of
$4 \times 4$ matrices by which the power series representations of two
Bessel functions and their quasi-derivatives emerge. This approach appears
to be new: see, for example, the historical observations in
Section~\ref{sec:history} below.
\section{Matrix forms of Frobenius theory for second order equations}
\label{sec:matforms-frobenius}
Consider the first order system with $A(x)$ being a $2 \times 2$ matrix,
\begin{equation}
X'=\frac{1}{x}A(x)X,\text{ where } A(x)=\sum_{n=0}^{\infty}
A_nx^{n}\text{ is convergent for } |x|<a,\ a>0.\label{1.1}
\end{equation}
This system is said to have a regular singular point (RSP) at $x=0$ and
will therefore have two linearly independent solutions defined for
$x \in (-a,0)\cup(0,a)$. We now give the matrix formulation of Frobenius
theory from \cite[p.~119, Thm.~4.1, Eq.~4.4]{coddington55} and
\cite[p.~20, Ex.~2]{balser00}, stated as Theorem \ref{thm:1}.
\begin{theorem}\label{thm:1}
Assume $A_0$ has two distinct real eigenvalues $r_1$ and $r_2$, $r_1>r_2$,
$r_1-r_2\neq n$, $n=1,2,3,\ldots$. Let $P$ be the invertible change of
basis matrix for which
\begin{equation}
B_0=P^{-1}\cdot A_0\cdot P=
\begin{bmatrix}
r_1 & 0\\
0 & r_2
\end{bmatrix}.\label{1.2}
\end{equation}
Under the above assumptions, every system \eqref{1.1} has a unique fundamental
matrix solution of the form
\begin{equation}
Y(x)=T(x)x^{B_0},\quad
T(x)=\sum_{n=0}^{\infty}T_nx^{n},\quad
T_0=P,\quad 0<|x|<a,\label{1.3}
\end{equation}
where the coefficient matrices $T_n$ satisfy, for $n=1,2,3,\ldots$, the
recurrence relation
\begin{equation}
T_n\left( B_0+nI\right) -A_0\cdot T_n=\sum_{m=0}^{n-1}A_{n-m}\cdot
T_m.\label{1.4}
\end{equation}
\end{theorem}

We shall also use the companion formulation of
Balser \cite[pp.~18--19, Thm.~5]{balser00}, which requires no
diagonalization of $A_0$.

\begin{theorem}\label{thm:2}
Assume $A_0$ has a good spectrum; that is, no two eigenvalues of $A_0$
differ by a nonzero integer. Then every system \eqref{1.1} has a unique
fundamental matrix solution of the form
\begin{equation}
X(x)=S(x)x^{A_0},\quad
S(x)=\sum_{n=0}^{\infty}S_nx^{n},\quad
S_0=I,\quad 0<|x|<a,\label{1.5}
\end{equation}
where the coefficient matrices $S_n$ satisfy, for $n=1,2,3,\ldots$, the
recurrence relation
\begin{equation}
S_n\left( A_0+nI\right) -A_0\cdot S_n=\sum_{m=0}^{n-1}A_{n-m}\cdot
S_m.\label{1.6}
\end{equation}
\end{theorem}

\section{An example of the vectorization procedure using
Theorem \ref{thm:1}: Bessel equation}\label{sec:example1}
Beginning with the Bessel equation containing the eigenparameter $\lambda$,
\begin{equation}
x^2y''+xy'+(\lambda x^2-\nu^2)y=0,\quad \nu>0,\label{2.1}
\end{equation}
we obtain the 1\textsuperscript{st} order system \eqref{1.1} as demonstrated
by Baker in \cite{baker02}, where $\lambda=1$. Thus we set
\begin{equation}
X=\begin{pmatrix}
y\\
xy'
\end{pmatrix}\label{2.2}
\end{equation}
to yield
\begin{equation}
X'=\frac{1}{x}A(x)X,\label{2.3}
\end{equation}
where
\begin{equation}
A(x)=A_0+A_1x+A_2x^2 \label{2.4}
\end{equation}
with
\[
A_0= \begin{bmatrix}
0 & 1\\
\nu^2 & 0
\end{bmatrix},\quad
A_1=\underset{2 \times 2}{O},\quad
A_2= \begin{bmatrix}
0 & 0\\
-\lambda & 0
\end{bmatrix}.
\]
Before we apply Theorem \ref{thm:1} and later Theorem \ref{thm:2} to
system \eqref{2.3}--\eqref{2.4}, we first observe that the assumptions
$r_1-r_2\neq n$, $n=1,2,3,\ldots$, from Theorem \ref{thm:1} and that of
a good spectrum from Theorem \ref{thm:2} prohibit both the nonzero integer
and the nonzero half-integer values of the indicial roots of \eqref{2.1},
$r_{1,2} = \pm \nu$. However, for system \eqref{2.3}--\eqref{2.4}, the
assumptions of Theorems \ref{thm:1} and \ref{thm:2} may be slightly
relaxed to admit the nonzero half-integer values of $\nu$.
This analysis follows from a theorem of
Kaplan \cite[p.~369, Thm.~5, Eq.~9--40]{kaplan58}.
Recall that in the scalar Frobenius theory, with the solution ansatz
\[
y(x) = \sum_{n=0}^{\infty}c_nx^{n+r},
\]
\eqref{2.1} gives rise to the two-term recurrence relation
\[
c_n=-\frac{\lambda}{(n+r+\nu)(n+r-\nu)}c_{n-2},
\]
hence the theorem of Kaplan applies, ensuring that two linearly
independent non-logarithmic power series solutions exist whenever
the indicial roots $r_{1,2}$ of \eqref{2.1} satisfy $(r_1-r_2)/k\neq n$,
$n=1,2,3,\ldots$.
For the Bessel equation \eqref{2.1}, Kaplan's parameter $k$ is 2, so
it follows that the fundamental matrix solutions to \eqref{2.3}--\eqref{2.4},
given by both Theorems \ref{thm:1} and \ref{thm:2}, are valid so long as
$\nu \neq 1,2,3,\ldots$.
Summarizing, the assumptions on the indicial roots $r_{1,2}$, in
Theorems \ref{thm:1} and \ref{thm:2}, are sufficient (but not necessary)
to exclude all the logarithmic cases in the Frobenius theory as required
for Theorems \ref{thm:1} and \ref{thm:2} to apply. Our objective now is
to apply the matrix form of Frobenius theory, as stated in Theorem \ref{thm:1},
to obtain a fundamental matrix solution $Y(x,\lambda)$ to the system form of
the Bessel equation \eqref{2.3}--\eqref{2.4}. To this end, we compute
the monodromy matrix $B_0$ in \eqref{1.2} by a diagonalization of $A_0$
whose eigenvalues are $r_{1,2}=\pm \nu$. We find
\begin{equation}
B_0=P^{-1}\cdot A_0\cdot P=
\begin{bmatrix}
\nu & 0\\
0 & -\nu
\end{bmatrix},\label{2.5}%
\end{equation}
where the matrices
\[
P^{-1}=\begin{bmatrix} \frac{1}{2} & \frac{1}{2\nu}\\
\frac{1}{2} & -\frac{1}{2\nu}\end{bmatrix},\quad
A_0=\begin{bmatrix}0 & 1 \\ \nu^2 & 0\end{bmatrix},\quad
P=\begin{bmatrix}1 & 1 \\ \nu & -\nu\end{bmatrix}.
\]
Next we compute the matrix exponential $x^{B_0}$ as
\begin{align}
x^{B_0} & =\exp\Big(
\begin{bmatrix}
\nu & 0\\
0 & -\nu
\end{bmatrix}
\ln x\Big)
=\sum_{n=0}^{\infty}\Big( \frac{1}{n!}
\begin{bmatrix}
\nu & 0\\
0 & -\nu
\end{bmatrix}^{n}\ln^{n}x\Big) \label{2.6}\\
& =
\begin{bmatrix}
\sum_{n=0}^{\infty}\frac{1}{n!}\nu^{n}\ln^{n}x & 0\\
& \\
0 & \sum_{n=0}^{\infty}\frac{1}{n!}(-\nu)^{n}\ln^{n}x
\end{bmatrix} =
\begin{bmatrix}
x^{\nu} & 0\\
0 & x^{-\nu}
\end{bmatrix}\label{2.6a}.
\end{align}
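(This matrix exponential is also easily checked symbolically; the
following minimal sympy sketch is our own illustration and not part of
the original development.)
\begin{verbatim}
# Sketch: verify (2.6)-(2.6a) symbolically with sympy.
import sympy as sp

nu = sp.Symbol('nu', positive=True)
x = sp.Symbol('x', positive=True)
B0 = sp.Matrix([[nu, 0], [0, -nu]])
print(sp.simplify((B0 * sp.log(x)).exp()))  # diag(x**nu, x**(-nu))
\end{verbatim}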
Observe now that the recurrence relation \eqref{1.4} for the Bessel
equation \eqref{2.3}--\eqref{2.4} becomes for $n=2,3,4,\ldots$,
\begin{equation}
T_n
\begin{bmatrix}
\nu+n & 0\\
0 & -\nu+n
\end{bmatrix} -
\begin{bmatrix}
0 & 1\\
\nu^2 & 0
\end{bmatrix}
T_n =
\begin{bmatrix}
0 & 0\\
-\lambda & 0
\end{bmatrix} T_{n-2},\label{2.7}%
\end{equation}
with the initialization given by \eqref{1.3}:
\[
T_0=P=\begin{bmatrix}1 & 1 \\ \nu & -\nu\end{bmatrix}.
\]
It follows from $A_1=\underset{2 \times 2}{O}$ that
$T_1=\underset{2 \times 2}{O}$, and then from \eqref{2.7} that
\begin{equation}
T_{2n-1}=\underset{2 \times 2}{O},\quad n=1,2,3,\ldots.\label{2.8}
\end{equation}
Now to get $T_2$ from $T_0$, we observe that for $n=2$, \eqref{2.7} becomes
\begin{equation}
\begin{bmatrix}
a_2 & b_2\\
c_2 & d_2
\end{bmatrix}
\begin{bmatrix}
\nu+2 & 0\\
0 & -\nu+2
\end{bmatrix} -
\begin{bmatrix}
0 & 1\\
\nu^2 & 0
\end{bmatrix}
\begin{bmatrix}
a_2 & b_2\\
c_2 & d_2
\end{bmatrix} =
\begin{bmatrix}
0 & 0\\
-\lambda & -\lambda
\end{bmatrix},\label{2.9}
\end{equation}
where we have set
\[
T_2=\begin{bmatrix}
a_2 & b_2\\c_2 & d_2
\end{bmatrix}.
\]
Here we can now observe the main obstacle in solving \eqref{2.9} for $T_2$:
namely, the matrix $T_2$ appears in a ``commutator'' form, multiplying
a $2\times 2$ matrix once on the left and once on the right, thus prohibiting
the explicit solution for $T_2$ in the matrix form of the recurrence
relation \eqref{2.9}. However, $T_2$ is uniquely determined by matrix
theory from Gantmacher \cite[p.~225, Eq.~32]{gantmacher59} or the restatement
as a lemma in Balser \cite[p.~212, Eq.~A.1]{balser00}, which asserts that
the general commutator matrix equation for square matrices $A$, $B$, $C$, and $X$:
\begin{equation}
A\cdot X-X\cdot B=C\label{2.10}
\end{equation}
has a unique solution $X$ if $A$ and $B$ have no common eigenvalues.
This criterion holds in \eqref{2.9} with
\begin{equation}
A=-\begin{bmatrix}
\text{0} & 1\\
\nu^2 & 0
\end{bmatrix},\quad
B=-\begin{bmatrix}
\nu+2 & 0\\
0 & -\nu+2
\end{bmatrix},\quad \nu\neq 1,2,3,\ldots,\label{2.11}
\end{equation}
so the existence of a unique solution of \eqref{2.9} for $X=T_2$
follows directly from this matrix theory.
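As a quick illustration of this lemma (a sketch of ours, not part of the
original development), the unique solution of \eqref{2.9} can be computed
numerically with SciPy, whose \texttt{solve\_sylvester} routine solves
$AX+XB=Q$; we therefore pass $-B$. The sample values $\nu=1/2$, $\lambda=1$
are assumptions made for illustration only.
\begin{verbatim}
# Sketch: solve the commutator equation A*X - X*B = C of (2.10)
# for X = T_2, using scipy.linalg.solve_sylvester (which solves
# A*X + X*B = Q, so we negate B). nu = 1/2 and lam = 1 are
# assumed sample values.
import numpy as np
from scipy.linalg import solve_sylvester

nu, lam = 0.5, 1.0
A = -np.array([[0.0, 1.0], [nu**2, 0.0]])      # A of (2.11)
B = -np.array([[nu + 2, 0.0], [0.0, 2 - nu]])  # B of (2.11)
C = np.array([[0.0, 0.0], [-lam, -lam]])       # right side of (2.9)

T2 = solve_sylvester(A, -B, C)                 # A*X - X*B = C
print(T2)                                      # compare with (2.17)
\end{verbatim}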
\section{A vectorization procedure}\label{sec:vectorization-procedure}
To overcome the commutator equation \eqref{2.9}, so that a solution
procedure for $T_2$ and more generally $T_n$ emerges, we now introduce
a rather simple approach that seems to have eluded others:
namely, we will first write the four scalar equations for the four unknowns
$a_2$, $b_2$, $c_2$ and $d_2$ (well-known) and then generate, by means
of products of suitable $4\times 4$ matrices (the new idea), an explicit
solution formula. This first yields, by computation of the matrix products
in \eqref{2.9}, the equivalent $4\times 4$ linear system
\begin{equation}
\begin{gathered}
(\nu+2)a_2-c_2=0\\
(-\nu+2)b_2-d_2=0\\
(\nu+2)c_2-\nu^2a_2=-\lambda\\
(-\nu+2)d_2-\nu^2b_2=-\lambda.
\end{gathered} \label{2.12}
\end{equation}
Now we introduce a ``vectorization'' of the matrices $T_0$, $T_2$, $T_{4}$,
$\ldots$ as follows:
\begin{equation}
\overset{\rightharpoonup}{T}_0:=
\begin{pmatrix}
a_0\\
b_0\\
c_0\\
d_0
\end{pmatrix}
=
\begin{pmatrix}
1\\
1\\
\nu\\
-\nu
\end{pmatrix},\quad
\text{where } T_0=P=
\begin{bmatrix}
a_0 & b_0\\
c_0 & d_0
\end{bmatrix}
=
\begin{bmatrix}
1 & 1\\
\nu & -\nu
\end{bmatrix}
,\label{2.13}
\end{equation}
and for $n=1,2,3,\ldots$,
\begin{equation}
\overset{\rightharpoonup}{T}_{2n}:=
\begin{pmatrix}
a_{2n}\\
b_{2n}\\
c_{2n}\\
d_{2n}
\end{pmatrix}
,~\text{where}~T_{2n}=
\begin{bmatrix}
a_{2n} & b_{2n}\\
c_{2n} & d_{2n}
\end{bmatrix}.\label{2.14}
\end{equation}
Thus the rows of the $2\times 2$ matrices $T_{2n}$ are ``stacked'' on top
of one another to form the $4\times 1$ vectors $\overset{\rightharpoonup}{T}_{2n}$.
Now in the second stage of our vectorization procedure, the $4\times 4$ linear
system \eqref{2.12} is written as
\begin{align}
M_2\overset{\rightharpoonup}{T}_2 & =\Lambda\overset{\rightharpoonup
}{T}_0,\quad M_2=
\begin{bmatrix}
(\nu+2) & 0 & -1 & 0\\
0 & (-\nu+2) & 0 & -1\\
-\nu^2 & 0 & (\nu+2) & 0\\
0 & -\nu^2 & 0 & (-\nu+2)
\end{bmatrix}
,\label{2.15}\\
\Lambda & :=
\begin{bmatrix}
0 & 0 & 0 & 0\\
0 & 0 & 0 & 0\\
-\lambda & 0 & 0 & 0\\
0 & -\lambda & 0 & 0
\end{bmatrix}.
\end{align}
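In passing, we note that this row-stacking is precisely the row-major
``vec'' operation of matrix calculus, under which \eqref{2.9} passes to
\eqref{2.15} via Kronecker products, $M_2=I\otimes(B_0+2I)-A_0\otimes I$;
this observation is ours and is not needed in the sequel. A minimal NumPy
sketch, with the sample value $\nu=1/2$ assumed:
\begin{verbatim}
# Sketch: under the row-major "vec" map, T*(B_0+2I) - A_0*T = C
# becomes (I kron (B_0+2I) - A_0 kron I) vec(T) = vec(C).
# We rebuild M_2 of (2.15) this way; nu = 1/2 is an assumed value.
import numpy as np

nu = 0.5
A0 = np.array([[0.0, 1.0], [nu**2, 0.0]])
D = np.diag([nu + 2, 2 - nu])          # B_0 + 2I
I2 = np.eye(2)

M2 = np.kron(I2, D) - np.kron(A0, I2)  # matches (2.15) entrywise
print(M2)
\end{verbatim}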
By the assumption that $\nu \neq \pm 1$, $M_2$ is invertible and hence:
\begin{align}
\overset{\rightharpoonup}{T}_2
& =M_2^{-1}\cdot\Lambda\cdot
\overset{\rightharpoonup}{T}_0 \nonumber\\
&=
\begin{bmatrix}
\frac{2+\nu}{4(1+\nu)}
& 0 & \frac{1}{4(1+\nu)} & 0\\
0
& \frac{2-\nu}{4(1-\nu)} & 0 & \frac{1}{4(1-\nu)}\\%
\frac{\nu^2}{4(1+\nu)}
& 0 & \frac{2+\nu}{4(1+\nu)} & 0\\
0 & \frac{\nu^2}{4(1-\nu)} & 0 & \frac{2-\nu}{4(1-\nu)}%
\end{bmatrix}
\begin{bmatrix}
0 & 0 & 0 & 0\\
0 & 0 & 0 & 0\\
-\lambda & 0 & 0 & 0\\
0 & -\lambda & 0 & 0
\end{bmatrix}
\begin{pmatrix}
1\\
1\\
\nu\\
-\nu
\end{pmatrix} \label{2.16}\\
& =
\begin{pmatrix}
\frac{-\lambda}{2^2(\nu+1)}\\[4pt]
\frac{-\lambda}{2^2(1-\nu)}\\[4pt]
\frac{-\lambda(2+\nu)}{2^2(\nu+1)}\\[4pt]
\frac{-\lambda(2-\nu)}{2^2(1-\nu)}
\end{pmatrix}. \nonumber
\end{align}
Finally, we revert from the vector form of the solution to obtain the
$2\times 2$ matrix solution to \eqref{2.9} as
\begin{equation}
T_2=
\begin{bmatrix}
a_2 & b_2\\
c_2 & d_2
\end{bmatrix}
=
\begin{bmatrix}
\frac{-\lambda}{2^2(\nu+1)} & \frac{-\lambda}{2^2(1-\nu)}\\[4pt]
\frac{-\lambda(2+\nu)}{2^2(\nu+1)} & \frac{-\lambda(2-\nu)}{2^2(1-\nu)}
\end{bmatrix}.
\label{2.17}
\end{equation}
Clearly, the $4\times 4$ matrix equation \eqref{2.15} is preferable to
the $2\times 2$ commutator matrix equation \eqref{2.9}. Accordingly, we can
now proceed to generate a ``closed form'' solution of the matrix recurrence
relation \eqref{2.7} for the Bessel equation by employing the above
vectorization procedure in the general case. Thus, putting $n=2j$
in \eqref{2.7} and taking
\[
T_{2j}=
\begin{bmatrix}
a_{2j} & b_{2j}\\
c_{2j} & d_{2j}
\end{bmatrix},\quad
\overset{\rightharpoonup}{T}_{2j}=
\begin{pmatrix}
a_{2j}\\
b_{2j}\\
c_{2j}\\
d_{2j}
\end{pmatrix},
\]
then the recurrence relation \eqref{2.7} becomes
\begin{equation}
M_{2j}\overset{\rightharpoonup}{T}_{2j}=\Lambda\overset{\rightharpoonup
}{T}_{2j-2},\quad \text{for }j=1,2,3,\ldots,\label{2.18}
\end{equation}
where
\[
M_{2j}=
\begin{bmatrix}
(\nu+2j) & 0 & -1 & 0\\
0 & (-\nu+2j) & 0 & -1\\
-\nu^2 & 0 & (\nu+2j) & 0\\
0 & -\nu^2 & 0 & (-\nu+2j)
\end{bmatrix}.
\]
By the assumption that $\nu \neq \pm 1, \pm 2, \pm 3,\ldots$, $M_{2j}$
is invertible and hence:
\begin{equation}
\overset{\rightharpoonup}{T}_{2j}=M_{2j}^{-1}\cdot\Lambda\cdot
\overset{\rightharpoonup}{T}_{2j-2},\label{2.19}
\end{equation}
where
\[
M_{2j}^{-1}=
\begin{bmatrix}
\frac{2j+\nu}{4j(j+\nu)} & 0 & \frac{1}{4j(j+\nu)} & 0\\
0 & \frac{2j-\nu}{4j(j-\nu)} & 0 & \frac{1}{4j(j-\nu)}\\
\frac{\nu^2}{4j(j+\nu)} & 0 & \frac{2j+\nu}{4j(j+\nu)} & 0\\
0 & \frac{\nu^2}{4j(j-\nu)} & 0 & \frac{2j-\nu}{4j(j-\nu)}%
\end{bmatrix}.
\]
Now proceeding to implement the recursion for $n=2j-2, 2j-4, \ldots, 2$ gives
\begin{equation}
\overset{\rightharpoonup}{T}_{2j}=(M_{2j}^{-1}\Lambda)\overset{\rightharpoonup
}{T}_{2j-2}=(M_{2j}^{-1}\Lambda)(M_{2j-2}^{-1}\Lambda)(M_{2j-4}^{-1}
\Lambda)\cdots(M_2^{-1}\Lambda)\overset{\rightharpoonup}{T}_0,
\label{2.20}
\end{equation}
for $j=1,2,3,\ldots$.
So, by our vectorization procedure, it follows that \eqref{2.7} becomes
\begin{equation}
\overset{\rightharpoonup}{T}_{2j}
=\Big( \prod^{j-1}_{N=0} (M_{2j-2N}^{-1}\Lambda)\Big)
\overset{\rightharpoonup}{T}_0,\quad j=1,2,3,\ldots.\label{2.21}
\end{equation}
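The product \eqref{2.21} is directly implementable in a symbolic
manipulator. The following is a minimal sympy sketch (an illustration of
ours, with the function names our own):
\begin{verbatim}
# Sketch: implement the 4x4 matrix product (2.21) symbolically,
# producing vec(T_{2j}) as a function of nu and lam.
import sympy as sp

nu, lam = sp.symbols('nu lamda')

def M(j):
    # the matrix M_{2j} of (2.18)
    return sp.Matrix([[nu + 2*j, 0, -1, 0],
                      [0, -nu + 2*j, 0, -1],
                      [-nu**2, 0, nu + 2*j, 0],
                      [0, -nu**2, 0, -nu + 2*j]])

Lam = sp.Matrix([[0, 0, 0, 0], [0, 0, 0, 0],
                 [-lam, 0, 0, 0], [0, -lam, 0, 0]])
T0 = sp.Matrix([1, 1, nu, -nu])

def Tvec(j):
    v = T0
    for k in range(1, j + 1):   # apply M_2^{-1}L, ..., M_{2j}^{-1}L
        v = M(k).inv() * Lam * v
    return sp.simplify(v)

print(Tvec(2))                  # compare with (2.22) at j = 2
\end{verbatim}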
As all matrices in \eqref{2.21} are known, we can compute the product, obtaining
\begin{equation}
\overset{\rightharpoonup}{T}_{2j}=
\begin{pmatrix}
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j}\cdot j!\cdot(\nu+1)_{j}}\\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j}\cdot j!\cdot(1-\nu)_{j}}\\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}\cdot(2j+\nu)}{2^{2j}\cdot j!\cdot(\nu+1)_{j}}\\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}\cdot(2j-\nu)}{2^{2j}\cdot j!\cdot(1-\nu)_{j}}
\end{pmatrix}, \label{2.22}
\end{equation}
where $(\alpha)_{j}=\alpha(\alpha+1)\cdots(\alpha+j-1)$ denotes the
Pochhammer symbol.
Finally, we revert from vector form to yield the closed form $2\times 2$
matrix solution of \eqref{2.7},
\begin{equation}
T_{2j}=
\begin{bmatrix}
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j}\cdot j!\cdot(\nu+1)_{j}} &
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j}\cdot j!\cdot(1-\nu)_{j}}\\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}\cdot(2j+\nu)}{2^{2j}\cdot j!\cdot(\nu+1)_{j}} &
\frac{(-1)^{j}\cdot\lambda^{j}\cdot(2j-\nu)}{2^{2j}\cdot j!\cdot(1-\nu)_{j}}
\end{bmatrix} \label{2.23}.
\end{equation}
From \eqref{1.3}, \eqref{2.6a}, and \eqref{2.23}, the fundamental solution
to \eqref{2.3}--\eqref{2.4} given by Theorem \ref{thm:1} is then
\begin{equation} \label{2.25}
\begin{aligned}
Y(x,\lambda) & =
\begin{bmatrix}
y_1(x,\lambda) & y_2(x,\lambda)\\
xy_1'(x,\lambda) & xy_2'(x,\lambda)
\end{bmatrix} \\
&=T(x)x^{B_0}=
\Big( \sum_{j=0}^{\infty}
T_{2j}x^{2j}\Big) \cdot
\begin{bmatrix}
x^{\nu} & 0\\
0 & x^{-\nu}
\end{bmatrix}
\\
& =\sum_{j=0}^\infty \Big( T_{2j}\cdot
\begin{bmatrix}
x^{\nu} & 0\\
0 & x^{-\nu}
\end{bmatrix}
\Big) x^{2j}\\
&=
\begin{bmatrix}
\sum_{j=0}^\infty \frac{(-1)^{j}\cdot\lambda^{j}\cdot
x^{2j+\nu}}{2^{2j}\cdot j!\cdot(\nu+1)_{j}}
&
\sum_{j=0}^\infty \frac{(-1)^{j}\cdot\lambda^{j}\cdot
x^{2j-\nu}}{2^{2j}\cdot j!\cdot(1-\nu)_{j}}\\[4pt]
\sum_{j=0}^\infty \frac{(-1)^{j}\cdot\lambda^{j}
\cdot(2j+\nu)\cdot x^{2j+\nu}}{2^{2j}\cdot j!\cdot(\nu+1)_{j}} &
\sum_{j=0}^\infty \frac{(-1)^{j}\cdot\lambda^{j}
\cdot(2j-\nu)\cdot x^{2j-\nu}}{2^{2j}\cdot j!\cdot(1-\nu)_{j}}
\end{bmatrix}. %\label{2.25}
\end{aligned}
\end{equation}
As system \eqref{2.3}--\eqref{2.4} is the matrix formulation of the Bessel
equation \eqref{2.1}, we may expect the fundamental matrix \eqref{2.25}
to contain Bessel functions. This connection is made in the next section.
\section{Connection to Bessel functions}\label{sec:connection}
In \eqref{2.25}, the Bessel functions and the modified Bessel functions
are not yet apparent. This is because these special functions receive
their definitions from the standard Frobenius theory, which uses a scalar
recurrence relation for the equations:
\begin{gather}
x^2u''+xu'+(x^2-\nu^2)u=0\label{3.1}\quad \text{Bessel equation}\\
x^2u''+xu'-(x^2+\nu^2)u=0\label{3.2}\quad \text{modified Bessel equation}
\end{gather}
where no eigenparameter $\lambda$ appears. The Bessel functions then receive
their definitions by making special choices of constants multiplying the
Frobenius solutions $u_{i}(x)$, $i=1,2,3,4$, to \eqref{3.1} and \eqref{3.2},
where $u_1(x)=y_1(x,1)$, $u_2(x)=y_2(x,1)$, $u_{3}(x)=y_1(x,-1)$, and
$u_{4}(x)=y_2(x,-1)$, so that
\begin{gather}
J_{\nu}(x)=\frac{1}{2^{\nu}\Gamma(\nu+1)}u_1(x)
=\sum_{j=0}^{\infty}\Big( \frac{(-1)^{j}}{j!\Gamma(\nu+1+j)}\Big)
\left( \frac{x}{2}\right) ^{2j+\nu},\label{3.3}
\\
J_{-\nu}(x)=\frac{1}{2^{-\nu}\Gamma(1-\nu)}u_2(x)
=\sum_{j=0}^{\infty}\Big( \frac{(-1)^{j}}{j!\Gamma(1-\nu+j)}\Big)
\left( \frac{x}{2}\right) ^{2j-\nu},\label{3.4}
\\
I_{\nu}(x)=\frac{1}{2^{\nu}\Gamma(\nu+1)}u_{3}(x)
=\sum_{j=0}^{\infty}\Big( \frac{1}{j!\Gamma(\nu+1+j)}\Big)
\left(\frac{x}{2}\right) ^{2j+\nu},\label{3.5}
\\
I_{-\nu}(x)=\frac{1}{2^{-\nu}\Gamma(1-\nu)}u_{4}(x)
=\sum_{j=0}^{\infty}\Big( \frac{1}{j!\Gamma(1-\nu+j)}\Big)
\left(\frac{x}{2}\right) ^{2j-\nu}.\label{3.6}
\end{gather}
We now wish to identify which Bessel functions are generated in the
fundamental matrix solution $Y(x,\lambda)$ in \eqref{2.25}.
Comparison between \eqref{2.25} and \eqref{3.3}--\eqref{3.6} reveals that
the entries of $Y(x,\lambda)$ contain weighted Bessel functions of the
arguments $\sqrt{\lambda} x$ and $\sqrt{|\lambda|}x$, or more specifically,
\begin{gather}
Y(x,\lambda)=
\begin{bmatrix}
2^{\nu}\Gamma(\nu+1)\lambda^{-\nu/2}J_{\nu}(\sqrt{\lambda}x) &
2^{-\nu}\Gamma(1-\nu)\lambda^{\nu/2}J_{-\nu}(\sqrt{\lambda}x)\\[4pt]
2^{\nu}\Gamma(\nu+1)\lambda^{-\nu/2}\sqrt{\lambda}\,x J_{\nu}'(\sqrt{\lambda}x) &
2^{-\nu}\Gamma(1-\nu)\lambda^{\nu/2}\sqrt{\lambda}\,x J_{-\nu}'(\sqrt{\lambda}x)
\end{bmatrix}
\intertext{for $\lambda\geq 0$, and}
Y(x,\lambda)=\begin{bmatrix}
2^{\nu}\Gamma(\nu+1)| \lambda| ^{-\nu/2}I_{\nu
}(\sqrt{| \lambda| }x) & 2^{-\nu}\Gamma(1-\nu)|
\lambda| ^{\nu/2}I_{-\nu}(\sqrt{| \lambda| }x)
\\[4pt]
2^{\nu}\Gamma(\nu+1)| \lambda| ^{-\nu/2}\sqrt{| \lambda| }\,x
I_{\nu}'(\sqrt{| \lambda| }x) & 2^{-\nu}\Gamma
(1-\nu)| \lambda| ^{\nu/2}\sqrt{| \lambda| }\,xI_{-\nu}'
(\sqrt{| \lambda| }x)
\end{bmatrix}
\label{3.7}
\end{gather}
for $\lambda<0$.
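As a numerical sanity check of \eqref{3.7} (ours, with sample values
$\nu=0.3$, $\lambda=2$, $x=1.5$ assumed), the truncated series in the
$(1,1)$ entry of \eqref{2.25} can be compared against the weighted Bessel
function using SciPy:
\begin{verbatim}
# Sketch: compare the (1,1) entry of (2.25), summed to 30 terms,
# with the weighted Bessel form in (3.7). Sample values assumed.
import numpy as np
from scipy.special import jv, gamma, poch   # poch = Pochhammer

nu, lam, x = 0.3, 2.0, 1.5

series = sum((-1)**j * lam**j * x**(2*j + nu)
             / (4**j * gamma(j + 1) * poch(nu + 1, j))
             for j in range(30))

closed = 2**nu * gamma(nu + 1) * lam**(-nu/2) * jv(nu, np.sqrt(lam)*x)
print(series, closed)    # agree to machine precision
\end{verbatim}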
\begin{remark}\label{rmk:5} \rm
(1) One consequence of Theorem \ref{thm:1}, due to the monodromy matrix $B_0$
and the initialization $T_0=P$ in the recurrence relation \eqref{1.4},
is that the fundamental matrix solution \eqref{1.3} to \eqref{2.3}--\eqref{2.4}
captures the Frobenius solutions to the scalar Bessel equation \eqref{2.1}
in the first row of \eqref{2.25} and their quasi-derivatives in the second row.
(2) Evidently the $4\times 4$ matrix recurrence relation \eqref{2.21} has enabled
two Bessel functions and their quasi-derivatives to emerge by a novel means:
namely, as products of $4\times 4$ matrices.
\end{remark}
We now demonstrate that our vectorization procedure applies equally well
in implementing Theorem \ref{thm:2}.
\section{An example of the vectorization procedure using Theorem \ref{thm:2}:
Bessel equation}\label{sec:example2}
In this section we take note of the fact that the vectorization procedure,
used to solve the matrix recurrence relation \eqref{1.4} in Theorem \ref{thm:1},
applies equally well to solve the recurrence relation \eqref{1.6} in
Theorem \ref{thm:2}. As it turns out, Theorem \ref{thm:2} yields, in the first
row of its fundamental matrix $X(x,\lambda)$, linear combinations of the
two Frobenius solutions $y_1(x,\lambda)$ and $y_2(x,\lambda)$, obtained earlier
by application of Theorem \ref{thm:1}.
We find $S_{2n-1}=\underset{2\times 2}{O}$, $n=1,2,3,\ldots$, while for $n=2$,
the recurrence relation \eqref{1.6} yields the system
\begin{equation}
\begin{gathered}
2a_2+\nu^2b_2-c_2=0\\
a_2+2b_2-d_2=0\\
-\nu^2a_2+2c_2+\nu^2d_2=-\lambda\\
-\nu^2b_2+c_2+2d_2=0
\end{gathered} \label{4.1}
\end{equation}
where $S_2=\begin{bmatrix}
a_2 & b_2\\
c_2 & d_2
\end{bmatrix}$.
In analogy with \eqref{2.16}, employing the vectorization procedure
we find that
\begin{align*} %\label{4.2}
\overset{\rightharpoonup}{S}_2
&:=\begin{pmatrix}
a_2\\
b_2\\
c_2\\
d_2
\end{pmatrix}
=(M_2^{-1}\Lambda)\overset{\rightharpoonup}{S}_0
\\
& =\underbrace{\frac{1}{4(\nu^2-1)}\begin{bmatrix}
\nu^2-2 & \nu^2 & -1 & \nu^2\\
1 & \nu^2-2 & 1 & -1\\
-\nu^2 & \nu^{4} & \nu^2-2 & \nu^2\\
\nu^2 & -\nu^2 & 1 & \nu^2-2
\end{bmatrix}}_{M_2^{-1}}
\underbrace{ \begin{bmatrix}
0 & 0 & 0 & 0\\
0 & 0 & 0 & 0\\
-\lambda & 0 & 0 & 0\\
0 & -\lambda & 0 & 0
\end{bmatrix}}_{\Lambda}
\underbrace{ \begin{pmatrix}1\\
0\\
0\\
1
\end{pmatrix}}_{\overset{\rightharpoonup}{S_0}}
\\
& =\begin{pmatrix}
\frac{-\lambda}{2^{3}}\left\{ \frac{1}{\nu+1}+\frac{1}{1-\nu}\right\} \\[4pt]
\frac{-\lambda}{2^{3}}\left\{ \frac{1}{\nu}\left[ \frac{1}{\nu+1}-\frac{1}{1-\nu}\right] \right\} \\[4pt]
\frac{-\lambda}{2^{3}}\left\{ \frac{2+\nu}{\nu+1}+\frac{2-\nu}{1-\nu}\right\}\\[4pt]
\frac{-\lambda}{2^{3}}\left\{ \frac{1}{\nu}\left[ \frac{2+\nu}{\nu+1}-\frac{2-\nu}{1-\nu}\right] \right\}
\end{pmatrix}.
\end{align*}
More generally,
\begin{equation} \label{4.3}
\begin{aligned}
\overset{\rightharpoonup}{S}_{2j} & =(M_{2j}^{-1}\Lambda
)\overset{\rightharpoonup}{S}_{2j-2}=(M_{2j}^{-1}\Lambda)(M_{2j-2}^{-1}%
\Lambda)(M_{2j-4}^{-1}\Lambda)\cdots(M_2^{-1}\Lambda
)\overset{\rightharpoonup}{S}_0\\
& =\Big( \prod_{N=0}^{j-1}(M_{2j-2N}^{-1}\Lambda)\Big)
\overset{\rightharpoonup}{S}_0 \\
& =
\begin{pmatrix}
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j+1}\cdot j!}\left\{ \frac{1}{(\nu
+1)_{j}}+\frac{1}{(1-\nu)_{j}}\right\} \\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j+1}\cdot j!}\left\{ \frac{1}{\nu}\left[
\frac{1}{(\nu+1)_{j}}-\frac{1}{(1-\nu)_{j}}\right] \right\} \\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j+1}\cdot j!}\left\{ \frac{2j+\nu}
{(\nu+1)_{j}}+\frac{2j-\nu}{(1-\nu)_{j}}\right\} \\[4pt]
\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j+1}\cdot j!}\left\{ \frac{1}{\nu}\left[
\frac{2j+\nu}{(\nu+1)_{j}}-\frac{2j-\nu}{(1-\nu)_{j}}\right] \right\}
\end{pmatrix}
,\quad j=1,2,3,\ldots,
\end{aligned}
\end{equation}
where
\begin{equation}
M_{2j}^{-1}=\frac{1}{4(\nu^2-j^2)}\begin{bmatrix}
\frac{\nu^2-2j^2}{j} & \nu^2 & -1 & \frac{\nu^2}{j}\\
1 & \frac{\nu^2-2j^2}{j} & \frac{1}{j} & -1\\
-\nu^2 & \frac{\nu^{4}}{j} & \frac{\nu^2-2j^2}{j} & \nu^2\\
\frac{\nu^2}{j} & -\nu^2 & 1 & \frac{\nu^2-2j^2}{j}
\end{bmatrix}.
\label{4.4}
\end{equation}
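Since \eqref{4.4} is easy to mistype, a symbolic verification may be
reassuring; the following sympy sketch (ours) checks that \eqref{4.4} is
indeed the inverse of the matrix $M_{2j}$ of the Theorem \ref{thm:2}
recurrence, that is, of the system \eqref{4.1} with $2$ replaced by $2j$:
\begin{verbatim}
# Sketch: verify (4.4) symbolically. M2j encodes the linear
# system (4.1) with 2 replaced by 2j.
import sympy as sp

nu, j = sp.symbols('nu j')
M2j = sp.Matrix([[2*j, nu**2, -1, 0],
                 [1, 2*j, 0, -1],
                 [-nu**2, 0, 2*j, nu**2],
                 [0, -nu**2, 1, 2*j]])

inv = (sp.Integer(1) / (4*(nu**2 - j**2))) * sp.Matrix(
    [[(nu**2 - 2*j**2)/j, nu**2, -1, nu**2/j],
     [1, (nu**2 - 2*j**2)/j, 1/j, -1],
     [-nu**2, nu**4/j, (nu**2 - 2*j**2)/j, nu**2],
     [nu**2/j, -nu**2, 1, (nu**2 - 2*j**2)/j]])

print(sp.simplify(M2j * inv))   # prints the 4x4 identity matrix
\end{verbatim}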
Accordingly, the fundamental solution matrix \eqref{1.5} is readily calculated as
\begin{equation} \label{4.5}
\begin{aligned}
X(x,\lambda)
&= \begin{bmatrix} y_{3}(x,\lambda) & y_{4}(x,\lambda)\\
xy_{3}'(x,\lambda) & xy_{4}'(x,\lambda)
\end{bmatrix}
=S(x)x^{A_0}\\
&=\sum_{j=0}^\infty \Big\{
S_{2j}\cdot\begin{bmatrix}
\frac{x^{\nu}+x^{-\nu}}{2} & \frac{x^{\nu}-x^{-\nu}}{2\nu}\\[2pt]
\frac{\nu\left( x^{\nu}-x^{-\nu}\right) }{2} & \frac{x^{\nu}+x^{-\nu}}{2}
\end{bmatrix} \Big\} x^{2j}.
\end{aligned}
\end{equation}
The above expression is a matrix whose first row is
\[
\frac{1}{2}\sum_{j=0}^\infty A_{2j}x^{2j+\nu}+\frac{1}%
{2}\sum_{j=0}^\infty B_{2j}x^{2j-\nu}, \quad
\frac{1}{2\nu}\sum_{j=0}^\infty A_{2j}x^{2j+\nu}-\frac
{1}{2\nu}\sum_{j=0}^\infty B_{2j}x^{2j-\nu},
\]
and its second row is
\begin{gather*}
\frac{1}{2}\sum_{j=0}^\infty (2j+\nu)A_{2j}x^{2j+\nu
}+\frac{1}{2}\sum_{j=0}^\infty (2j-\nu)B_{2j}x^{2j-\nu},
\\
\frac{1}{2\nu}\sum_{j=0}^\infty (2j+\nu)A_{2j}x^{2j+\nu
}-\frac{1}{2\nu}\sum_{j=0}^\infty (2j-\nu)B_{2j}x^{2j-\nu}\,.
\end{gather*}
Here $A_{2j}$ and $B_{2j}$ are given by
\begin{equation}
A_{2j}=\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j}\cdot j!\cdot(\nu+1)_{j}},\quad
B_{2j}=\frac{(-1)^{j}\cdot\lambda^{j}}{2^{2j}\cdot j!\cdot(1-\nu)_{j}
},\label{4.6}
\end{equation}
and we have used
\begin{equation}
\begin{aligned}
x^{A_0}&=\exp\Big( \begin{bmatrix}
0 & 1\\
\nu^2 & 0
\end{bmatrix} \cdot\ln x\Big)
=\sum_{n=0}^{\infty}\frac{1}{n!}
\Bigg( \underbrace{P\cdot\begin{bmatrix}
\nu & 0\\
0 & -\nu
\end{bmatrix} \cdot P^{-1}}_{A_0}\Bigg) ^{n}\cdot\ln^{n}x\\
&=
\begin{bmatrix}
\frac{x^{\nu}+x^{-\nu}}{2} & \frac{x^{\nu}-x^{-\nu}}{2\nu}\\[4pt]
\frac{\nu\left( x^{\nu}-x^{-\nu}\right) }{2} & \frac{x^{\nu}+x^{-\nu}}{2}
\end{bmatrix}.
\end{aligned}\label{4.7}
\end{equation}
Comparison between \eqref{4.5}--\eqref{4.6} and \eqref{2.25} now reveals:
\begin{equation} \label{4.8}
\begin{aligned}
X(x,\lambda) & =\begin{bmatrix}
y_{3}(x,\lambda) & y_{4}(x,\lambda)\\[4pt]
xy_{3}'(x,\lambda) & xy_{4}'(x,\lambda)
\end{bmatrix} \\
& =\begin{bmatrix}
\frac{1}{2}y_1(x,\lambda)+\frac{1}{2}y_2(x,\lambda) &
\frac{1}{2\nu}y_1(x,\lambda)-\frac{1}{2\nu}y_2(x,\lambda)\\[4pt]
x\left( \frac{1}{2}y_1'(x,\lambda)+\frac{1}{2}y_2'(x,\lambda)\right)
& x\left( \frac{1}{2\nu}y_1'(x,\lambda
)-\frac{1}{2\nu}y_2'(x,\lambda)\right)
\end{bmatrix} ,
\end{aligned}
\end{equation}
where the first row of $X(x,\lambda)$ may be represented in terms of Bessel
functions as
\begin{equation}
\begin{aligned}
&y_{3}(x,\lambda) \\
&=\begin{cases}
\frac{1}{2}\lambda^{-\nu/2}\cdot2^{\nu}\Gamma(\nu+1)
J_{\nu}(\sqrt{\lambda}x)
+\frac{1}{2}\lambda^{\nu/2}\cdot 2^{-\nu}\Gamma(1-\nu)
J_{-\nu}(\sqrt{\lambda}x), &\lambda\geq0
\\
\frac{1}{2}| \lambda| ^{-\nu/2}\cdot 2^{\nu}\Gamma
(\nu+1) I_{\nu}(\sqrt{| \lambda| }x)+\frac{1}%
{2}| \lambda| ^{\nu/2}\cdot 2^{-\nu}\Gamma(1-\nu)
I_{-\nu}(\sqrt{| \lambda| }x), &\lambda<0
\end{cases}
\end{aligned} \label{4.9}
\end{equation}
and
\begin{equation}
\begin{aligned}
&y_{4}(x,\lambda)\\
&=\begin{cases}
\frac{1}{2\nu}\lambda^{-\nu/2}\cdot2^{\nu}\Gamma(\nu+1) J_{\nu}
(\sqrt{\lambda}x)-\frac{1}{2\nu}\lambda^{\nu/2}\cdot 2^{-\nu}\Gamma(1-\nu)
J_{-\nu}(\sqrt{\lambda}x), &\lambda\geq 0
\\
\frac{1}{2\nu}| \lambda| ^{-\nu/2}\cdot 2^{\nu}\Gamma
(\nu+1) I_{\nu}(\sqrt{| \lambda| }x)-\frac{1}
{2\nu}| \lambda| ^{\nu/2}\cdot2^{-\nu}\Gamma(1-\nu)
I_{-\nu}(\sqrt{| \lambda| }x), &\lambda<0
\end{cases}
\end{aligned} \label{4.10}
\end{equation}
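Again as a numerical check (ours, with the same assumed sample values as
before), the representation \eqref{4.9} agrees with the combination
$y_3=\tfrac{1}{2}y_1+\tfrac{1}{2}y_2$ of \eqref{4.8}:
\begin{verbatim}
# Sketch: check y3 = (y1 + y2)/2 of (4.8)-(4.9) for lambda > 0.
# Sample values nu = 0.3, lam = 2, x = 1.5 are assumed.
import numpy as np
from scipy.special import jv, gamma, poch

nu, lam, x = 0.3, 2.0, 1.5

def A(j): return (-1)**j*lam**j/(4**j*gamma(j+1)*poch(nu+1, j))
def B(j): return (-1)**j*lam**j/(4**j*gamma(j+1)*poch(1-nu, j))

series = 0.5*sum(A(j)*x**(2*j+nu) + B(j)*x**(2*j-nu)
                 for j in range(30))

y1 = 2**nu * gamma(nu+1) * lam**(-nu/2) * jv(nu, np.sqrt(lam)*x)
y2 = 2**-nu * gamma(1-nu) * lam**(nu/2) * jv(-nu, np.sqrt(lam)*x)
print(series, (y1 + y2)/2)   # agree to machine precision
\end{verbatim}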
\begin{remark}\label{rmk:6} \rm
One consequence of Theorem \ref{thm:2}, due to the monodromy matrix $A_0$ and
the initialization $S_0=I$ in recurrence relation \eqref{1.6}, is that the
fundamental matrix solution \eqref{1.5} to \eqref{2.3}--\eqref{2.4} captures
linear combinations of Bessel functions in the first row of \eqref{4.5}
with their corresponding quasi-derivatives in the second row.
\end{remark}
Lastly we sketch the vectorization procedure as it may be applied to
a general Frobenius-type problem of the non-logarithmic cases.
\section{A vectorization procedure for the general problem}\label{sec:vectorization}
For the general problem with $A(x)$ being a $2\times 2$ matrix,
\begin{equation}
X'=\frac{1}{x}A(x)X=\frac{1}{x}\left[ A_0+A_1x+A_2x^2+\cdots\right] X,\label{5.1}
\end{equation}
recurrence relation \eqref{1.4} of Theorem \ref{thm:1} is
\begin{equation}
T_n\left( B_0+nI\right) -A_0\cdot T_n=\sum_{m=0}^{n-1}A_{n-m}\cdot
T_m=A_nT_0+A_{n-1}T_1+\cdots+A_1T_{n-1},\quad n \geq 1,\label{5.2}%
\end{equation}
where, as in Theorem \ref{thm:1},
\begin{equation}
B_0=P^{-1}\cdot A_0\cdot P=\begin{bmatrix}
r_1 & 0\\
0 & r_2
\end{bmatrix},\quad T_0=P.\label{5.3}
\end{equation}
If $r_1$ and $r_2$ are real with $r_1>r_2$, then the assumption that \eqref{5.1}
has a good spectrum is
\begin{equation}
r_1-r_2\neq n,\quad n=0,1,2,3,\ldots ,\label{5.4}
\end{equation}
which rules out the logarithmic cases of Frobenius theory as required and
hence the vectorization procedure introduced in Section
\ref{sec:vectorization-procedure} is applicable to solve \eqref{5.2}.
This fact follows immediately from the lemma of Gantmacher and Balser
in \eqref{2.10} as applied to \eqref{5.2} since, for $n=1,2,3,\ldots$,
the eigenvalues $r_1+n$ and $r_2+n$ of $B_0+n I$ differ from the eigenvalues
$r_1$ and $r_2$ of $A_0$ because of assumption \eqref{5.4}.
Thus a unique solution of \eqref{5.2} for the $2\times 2$ matrix $T_n$
must exist and since the right hand side of \eqref{2.10} is immaterial
for the application of this lemma, successive solution for unique
$T_1,T_2,\ldots,T_n$ is possible. To illustrate the vectorization procedure
in this general case, consider the first step for $n=1$:
\begin{equation}
T_1\left( B_0+I\right) -A_0T_1=A_1T_0.\label{5.5}
\end{equation}
Letting
\[
T_0=
\begin{bmatrix}
a_0 & b_0\\
c_0 & d_0%
\end{bmatrix} ,~T_1=\begin{bmatrix}
a_1 & b_1\\
c_1 & d_1%
\end{bmatrix},\quad A_n=\begin{bmatrix}
D_n & E_n\\
F_n & G_n%
\end{bmatrix},~n=0,1,2,3,\ldots,
\]
then the matrix equation \eqref{5.5} corresponds to the four scalar equations:
\begin{gather*}
a_1(r_1+1)-D_0a_1-E_0c_1=D_1a_0+E_1c_0\\
b_1(r_2+1)-D_0b_1-E_0d_1=D_1b_0+E_1d_0\\
c_1(r_1+1)-F_0a_1-G_0c_1=F_1a_0+G_1c_0\\
d_1(r_2+1)-F_0b_1-G_0d_1=F_1b_0+G_1d_0.
\end{gather*}
Writing these in the form of a $4\times 4$ matrix equation we thus see that
the analog of equation \eqref{2.15} is
\begin{equation}
M_1\overset{\rightharpoonup}{T}_1=\Lambda_1\overset{\rightharpoonup
}{T}_0,\quad \text{where again } \overset{\rightharpoonup}{T}_0=
\begin{pmatrix}
a_0\\
b_0\\
c_0\\
d_0%
\end{pmatrix},\quad
\overset{\rightharpoonup}{T}_1=\begin{pmatrix}
a_1\\
b_1\\
c_1\\
d_1%
\end{pmatrix},\label{5.6}
\end{equation}
and in this general case we find:
\begin{gather}
M_1 =\begin{bmatrix}
r_1+1-D_0 & 0 & -E_0 & 0\\
0 & r_2+1-D_0 & 0 & -E_0\\
-F_0 & 0 & r_1+1-G_0 & 0\\
0 & -F_0 & 0 & r_2+1-G_0
\end{bmatrix}, \label{5.7}
\\
\Lambda_1 =\begin{bmatrix}
D_1 & 0 & E_1 & 0\\
0 & D_1 & 0 & E_1\\
F_1 & 0 & G_1 & 0\\
0 & F_1 & 0 & G_1
\end{bmatrix}.
\end{gather}
Hence,
\begin{equation}
\overset{\rightharpoonup}{T}_1=(M_1^{-1}\cdot\Lambda_1)\cdot
\overset{\rightharpoonup}{T}_0.\label{5.8}%
\end{equation}
The fact that $M_1$ is invertible follows (under the assumption of good
spectrum) directly from the existence of a unique solution for $T_1$ of
equation \eqref{5.5}. Likewise in the $n$\textsuperscript{th} step,
recurrence relation \eqref{5.2} converts by our vectorization procedure to
\begin{equation}
M_n\overset{\rightharpoonup}{T}_n=\sum_{m=0}^{n-1}
\left( \Lambda_{n-m}\cdot\overset{\rightharpoonup}{T}_m\right) ,\quad
n=1,2,3,\ldots ,\label{5.9}
\end{equation}
where
\[
\overset{\rightharpoonup}{T}_n=
\begin{pmatrix}
a_n\\
b_n\\
c_n\\
d_n
\end{pmatrix},\quad
M_n=\begin{bmatrix}
r_1+n-D_0 & 0 & -E_0 & 0\\
0 & r_2+n-D_0 & 0 & -E_0\\
-F_0 & 0 & r_1+n-G_0 & 0\\
0 & -F_0 & 0 & r_2+n-G_0
\end{bmatrix},
\]
and where
\[
\Lambda_{n-m}=\begin{bmatrix}
D_{n-m} & 0 & E_{n-m} & 0\\
0 & D_{n-m} & 0 & E_{n-m}\\
F_{n-m} & 0 & G_{n-m} & 0\\
0 & F_{n-m} & 0 & G_{n-m}
\end{bmatrix}.
\]
A closed form representation for $\overset{\rightharpoonup}{T}_n$ is then
\begin{equation}
\overset{\rightharpoonup}{T}_n=M_n^{-1}\cdot\Big(
\sum_{m=0}^{n-1}\big( \Lambda_{n-m}\cdot
\overset{\rightharpoonup}{T}_m\big) \Big)
=\sum_{m=0}^{n-1}\Big( \big( M_n^{-1}\cdot
\Lambda_{n-m}\big) \cdot\overset{\rightharpoonup}{T}_m\Big) ,
\label{5.10}
\end{equation}
for $n=1,2,3,\ldots$,
which formulates the $4\times 1$ vector $\overset{\rightharpoonup}{T}_n$
explicitly as a sum of products of $4\times 4$ matrices with $4\times 1$
vectors whose representations are all readily obtained from this section.
Therefore, under the assumption of good spectrum of system \eqref{5.1}, our
vectorization procedure indeed applies in the general case.
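The general step \eqref{5.9}--\eqref{5.10} is equally mechanical to
program. A minimal sympy sketch (ours; the function names are our own,
and the inputs are assumed to satisfy the good spectrum hypothesis
\eqref{5.4}):
\begin{verbatim}
# Sketch: the vectorized recursion (5.9)-(5.10). 'A' maps n to the
# 2x2 coefficient matrix A_n of (5.1); r1, r2 are the eigenvalues
# of A_0; T0vec = vec(T_0) = vec(P).
import sympy as sp

def Mn(A0, r1, r2, n):
    D0, E0, F0, G0 = A0[0, 0], A0[0, 1], A0[1, 0], A0[1, 1]
    return sp.Matrix([[r1 + n - D0, 0, -E0, 0],
                      [0, r2 + n - D0, 0, -E0],
                      [-F0, 0, r1 + n - G0, 0],
                      [0, -F0, 0, r2 + n - G0]])

def Lam(An):
    D, E, F, G = An[0, 0], An[0, 1], An[1, 0], An[1, 1]
    return sp.Matrix([[D, 0, E, 0], [0, D, 0, E],
                      [F, 0, G, 0], [0, F, 0, G]])

def Tvecs(A, r1, r2, T0vec, n):
    # returns [vec(T_0), ..., vec(T_n)] via (5.10)
    Ts = [T0vec]
    for k in range(1, n + 1):
        rhs = sum((Lam(A(k - m)) * Ts[m] for m in range(k)),
                  sp.zeros(4, 1))
        Ts.append(sp.simplify(Mn(A(0), r1, r2, k).inv() * rhs))
    return Ts
\end{verbatim}
For the Bessel system \eqref{2.3}--\eqref{2.4} one would take
$A(0)=A_0$, $A(2)=A_2$, all other $A(n)=O$, $r_{1,2}=\pm\nu$, and
$\overset{\rightharpoonup}{T}_0=(1,1,\nu,-\nu)^{T}$, recovering \eqref{2.22}.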
In the following section, we give a brief history of the Bessel equation
and the power series method, and summarize, in chronological order, the
work of prior authors on the Bessel equation.
\section{Some history on the Bessel equation and the power series method}\label{sec:history}
The Bessel equation $x^2w''+xw'+(x^2-a^2)w=0$ takes its name from the German
astronomer F. W. Bessel who in 1824, while studying the perturbations of the
planets, began the development of the now extensive theory of its solutions
(see \cite{bessel24}). In Bessel's treatment, he assumed the parameter $a$
to be an integer and managed to obtain integral definitions of the Bessel
functions (Schl\"omilch and Lipschitz later named these functions after
Bessel \cite[p.~319]{hille69}). Interestingly, the now standard power
series representations \eqref{3.3}--\eqref{3.4} for the Bessel functions
do not appear in Bessel's 1824 seminal paper, and they are also missing
from the original papers of Frobenius and Fuchs
(see \cite{fuchs66}--\cite{frobenius73}).
Given that the power series solution ansatz was originated by
L. Euler \cite[p.~204]{agarwal14} and thus it predates the work of
Bessel, Frobenius, and Fuchs, these omissions are notable. In fact, the power
series formulas for the Bessel functions arose first not from the
Euler/Frobenius/Fuchsian theory but from Bessel's integral representations
and thus it is these integral representations that appear in the early
books (compare E.C.J. von Lommel \cite{lommel68}, C.G. Neumann \cite{neumann67},
and G. N. Watson \cite[ch.~2]{watson22}).
Some special instances of series expansions of Bessel functions were,
however, generated before Bessel's 1824 paper through investigations of
such varied topics as the oscillations of heavy chains and circular
membranes, elliptic motion, and heat conduction in cylinders (see
\cite{bernoulli38}, for instance, where D. Bernoulli, in 1738, obtained an
open form series expansion of a Bessel function of order 0, or
\cite{euler64}, where L. Euler, in 1764, generated a Bessel series while
investigating vibrations of a stretched circular membrane). Below is a
summary of the extent of prior authors' treatments of the Bessel equation
using the Euler/Frobenius/Fuchsian power series method and its matrix
forms (references and pages indicated).
Key for the following table:
\begin{itemize}
\item[I] Solutions of a scalar Bessel equation are obtained as a power series near
$x = 0$ using the power series method.
\item[II] A $2\times 2$ matrix formulation of the power series method for
2\textsuperscript{nd} order equations is given.
\item[III] A $2\times 2$ matrix form of the Bessel equation is given.
\end{itemize}
{\scriptsize \renewcommand{\arraystretch}{1.4}
\begin{longtable}{llll} \hline
Author(s); Year; Reference & I & II & III \\ \hline
F. W. Bessel; 1824; \cite{bessel24} &No &No &No\\ \hline
L. Fuchs; 1866, 1868; \cite{fuchs66}, \cite{fuchs68} &No &No &No\\\hline
C. G. Neumann; 1867; \cite{neumann67} &No &No &No\\\hline
E. C. J. von Lommel; 1868; \cite{lommel68} &No &No &No\\\hline
G. Frobenius; 1873; \cite{frobenius73} &No &No &No\\\hline
A. Gray, G. B. Matthews;
1895; \cite{gray22} &Yes, p.~7--12 &No &No\\\hline
L. Schlesinger; 1897; \cite{schlesinger97} &No &No &No\\\hline
L. Schlesinger; 1900, 1904, 1922; \cite{schlesinger22}
&Yes, p.~274--279 &Yes, p.~132--142 &No\\\hline
A. R. Forsythe; 1902; \cite{forsythe02} &Yes, p.~100--102 &No &No\\\hline
H. F. Baker; 1902; \cite{baker02} &No &Yes, p.~335 &Yes, p.~348\\\hline
\parbox{4cm}{Whittaker, Watson; 1902, 1915, 1920, 1944; \cite{whittaker02}}
&Yes, p.~197--202 &No &No\\\hline
A. R. Forsythe; 1903, 3\textsuperscript{rd} ed.; \cite{forsythe85}
&Yes, p.~176--186, 6\textsuperscript{th} ed. &No &No\\\hline
N. Nielsen; 1904; \cite{nielsen04} &Yes, p.~4--5 &No &No\\\hline
G. N. Watson; 1922, 1944; \cite{watson22} &Yes, p.~38--42, 57--63 &No &No\\\hline
\parbox{4cm}{R. Courant, D. Hilbert;
1924, 1931; \cite{courant24} }&Yes, p.~260, 418 &No &No\\\hline
E. L. Ince; 1926; \cite{ince26} &Yes, p.~403--404 &Yes, p.~408--415 &Yes, p.~415\\ \hline
Lappo-Danilevsky; 1936, 1953; \cite{lappo-danilevsky36} &No &Yes, p.~143 &No\\\hline
A. Erdelyi; 1953; \cite{erdelyi55} &Yes, p.~4--5 &No &No\\\hline
Coddington, Levinson; 1955; \cite{coddington55} &No &Yes, ch.~4, p.~119 &No\\\hline
Abramowitz, Stegun; 1964; \cite{abramowitz64} &Yes, p.~360, 437 &No &No\\\hline
E. Hille; 1969; \cite{hille69} &Yes, p.~319--320 &Yes, p.~188--191, 239--250 &No\\ \hline
W. Balser; 2000; \cite{balser00} &Yes, p.~23 &Yes, ch.~2, p.~19 &No\\\hline
Olver et al.; 2010; \cite{olver10} &Yes, p.~217 &No &No\\ \hline
\end{longtable}
}
So it appears that the power series method was not applied to the Bessel
equation until some years after the original papers of Frobenius and Fuchs
(see \cite{frobenius73}--\cite{fuchs68}). As indicated in the table,
the earliest application of the Euler/Frobenius/Fuchsian power series method
to the Bessel equation that I could find was in the 1895 book of Gray
and Matthews \cite{gray22}. Also, as revealed by the above table, only two
authors appear to have written the Bessel equation in a matrix form,
first Baker in \cite{baker02} and then later Ince in \cite{ince26}.
Note though that neither of these authors attempted to apply their $2\times 2$
matrix forms of the power series method to the matrix form of the Bessel equation
so as to obtain a $2\times 2$ fundamental matrix as given in this paper
in \eqref{2.25}. This fact appears to stem from the computational difficulty
of generating the solution matrix $X$ of \eqref{2.10} for use in implementing the
matrix recurrence relations \eqref{1.4} or \eqref{1.6}, a matter not addressed
by Schlesinger \cite{schlesinger22}, Baker \cite{baker02},
Ince \cite{ince26}, Lappo-Danilevsky \cite{lappo-danilevsky36}, Coddington
and Levinson \cite[ch.~4]{coddington55}, Hille \cite{hille69}, or
Balser \cite[ch.~2]{balser00}, all of whom gave a $2\times 2$ matrix power
series formulation of the Euler/Frobenius/Fuchsian theory. At least, it appears
that none of these authors provide a nontrivial example of a matrix equation
for which the recurrence relations \eqref{1.4} or \eqref{1.6}, in
Theorems \ref{thm:1} and \ref{thm:2}, were successfully solved to yield an
explicit representation of the fundamental matrix solution in \eqref{1.3}
or \eqref{1.5}.
A search of the literature leads this author to believe that the development
of a general method to properly resolve the computational difficulty in \eqref{1.4}
and \eqref{1.6}, associated with the $2\times 2$ matrix power series method, has
indeed been left undone. This is further supported by a remark of
Ince \cite[p.~415]{ince26}, where in specific reference to the matrix form
of the Bessel equation \eqref{2.3}--\eqref{2.4}, with $\nu=1$, he states,
``The scope of the matrix method is very wide, but its successful application
demands a knowledge of theorems in the calculus of matrices which cannot be
given here.'' Likewise, after Hille furnishes his solution ansatz to the matrix
equation $w'(z)=F(z)\cdot w(z)$ involving a regular singular point at the origin,
and defines his ``resolvent of the commutator'', he states,
``Thus the formal solution is well defined, if not explicitly known'',
\cite[p.~233]{hille69}.
It should be noted, though, that two authors did come close to obtaining
an explicit solution of the $2\times 2$ matrix recurrence relation.
Both Lappo-Danilevsky in \cite{lappo-danilevsky36} and later Hille
in \cite{hille69} proposed highly theoretical solutions to the $2\times 2$
matrix recurrence relation using $2\times 2$ matrix theory.
Lappo-Danilevsky's theory appears to be invoked in Hille's resolvent of the
commutator \cite[p.~233]{hille69}, which, in principle, is a matrix inverse
operator that, if obtained explicitly, would yield the general solution to
the $2\times 2$ matrix recurrence relation in \eqref{1.4}, \eqref{1.6},
or \eqref{5.2}; in particular, the solution \eqref{2.25} obtained here
would then be expressible as a product of suitable $2\times 2$ matrices.
The absence of any implementation of the Lappo-Danilevsky or Hille theory
in specific examples, whether in their own works or in those of the later
authors who also gave a $2\times 2$ matrix formulation of the Frobenius
theory, indicates that an explicit solution to the $2\times 2$ matrix
recurrence relation, one that can be implemented in the general case as
shown in Section \ref{sec:vectorization}, has heretofore eluded previous
authors.
\section{Conclusion} %\label{sec:conclusion}
In the literature, prior matrix treatments of Frobenius theory,
such as Theorems \ref{thm:1} and \ref{thm:2}, make use of coupled matrix
recurrence relations that pose considerable difficulties in their implementation.
The new idea outlined in this paper, our vectorization procedure, appears
to provide a rather elegant means to reformulate the matrix recurrence relations
of both Theorems \ref{thm:1} and \ref{thm:2} into equivalent recurrence relations
that require only computation of well-defined matrix products and sums.
It can also be noted that our vectorization procedure seems to generalize
and apply to the third order, fourth order, $\ldots$, $n$\textsuperscript{th}
order Frobenius-type problems of the non-logarithmic cases and near regular
singular points by generating equivalent recurrence relations that require
only matrix multiplications and sums involving $9\times 9$ matrices,
$16\times 16$ matrices, $\ldots$, $n^2 \times n^2$ matrices, respectively.
\subsection*{Acknowledgements}\label{sec:acknowledgements}
Special thanks to Dr. Charles Fulton of the Florida Institute of Technology
for his many comments and suggestions that have greatly improved this
paper's exposition. In particular, in Section \ref{sec:connection}:
Connection to Bessel Functions and Section \ref{sec:history}:
Some History on the Bessel Equation and the Power Series Method, his expertise
and depth of knowledge were invaluable. Deep gratitude is also
reserved for Jonathan Goldfarb for his judicious editing of a rather
raw \LaTeX\ manuscript to work it into its present form.
\begin{thebibliography}{10}
\bibitem{abramowitz64} M. Abramowitz, I. A. Stegun;
\emph{{Handbook of Mathematical Functions}}, National
Bureau of Standards, 1964.
\bibitem{agarwal14} Ravi P. Agarwal, Syamal K. Sen;
\emph{Creators of mathematical and
computational sciences}, Springer, 2014.
\bibitem{baker02} H. F. Baker;
\emph{On the Integration of Linear Differential Equations}, Proc.
London Math. Soc. \textbf{s1-35} (1902), no.~1, 333--384.
\bibitem{balser00} W. Balser;
\emph{Formal Power Series and Linear Systems of Meromorphic
Ordinary Differential Equations}, Springer, 2000.
\bibitem{bernoulli38} D.~Bernoulli;
\emph{Theoremata De Oscillationibus corporum filo flexile
connexorum et catenae verticaliter suspensae}, Comm. Acad. Sci. Imp. Petrop.
VI. (1732--33), 108--122, [Published 1738].
\bibitem{bessel24} F. W. Bessel;
\emph{Untersuchung des Theils der planetarischen St{\"o}rungen
welcher aus der Bewegung der Sonne entsteht}, Berliner Abhandlungen (1824),
1--52, [Published 1826].
\bibitem{coddington55} E. A. Coddington, N. Levinson;
\emph{Theory of Ordinary Differential Equations},
McGraw-Hill, N.Y., 1955.
\bibitem{courant24} R.~Courant, D. Hilbert;
\emph{Methoden der Mathematischen Physik, Erster Band},
Springer Verlag, Berlin, 1924 (first edition) 1931 (second edition).
\bibitem{erdelyi55} A. Erdelyi et al.;
\emph{Higher Transcendental Functions}, vol.~II, McGraw-Hill, 1955.
\bibitem{euler64} Leonhard Euler;
\emph{De motu vibratorio tympanorum}, Novi Commentarii
academiae scientiarum Petropolitanae (1764).
\bibitem{forsythe85} A. R. Forsythe;
\emph{A Treatise on Differential Equations}, first ed.,
Macmillan and Co. limited, London, 1885, 1888 (second edition), 1903 (third
edition), 1914 (fourth edition), 1921 (fifth edition), 1929 (sixth edition),
Reprinted in 1933, 1943, and 1948.
\bibitem{forsythe02} A. R. Forsythe;
\emph{{Theory of Differential Equations, Part III}}, vol.~IV,
Cambridge University Press, 1902, (Reprinted by Dover, 1959).
\bibitem{frobenius73} G. Frobenius;
\emph{\"Uber die Integration der linearen
Differentialgleichungen durch Reihen}, Jour. f\"ur die reine und
angewandte Mathematik \textbf{76} (1873), 214--235.
\bibitem{fuchs66} L. Fuchs;
\emph{{Zur Theorie der linearen Differentialgleichungen mit
ver{\"a}nderlichen Koeffizienten}}, Jour. f{\"u}r die reine und angewandte
Mathematik \textbf{66} (1866), 121--160.
\bibitem{fuchs68} L. Fuchs;
\emph{Zur Theorie der linearen Differentialgleichungen mit
ver{\"a}nderlichen Koeffizienten}, Jour. f{\"u}r die reine und angewandte
Mathematik \textbf{68} (1868), 354--385.
\bibitem{gantmacher59} F. R. Gantmacher;
\emph{{Matrix Theory}}, Chelsea Publishing Company, N.Y., 1959.
\bibitem{gray95} A. Gray, G.~B. Matthews;
\emph{A Treatise on Bessel Functions}, first ed.,
MacMillan and Co., London, 1895.
\bibitem{gray22} A. Gray, T. M. McRobert;
\emph{{A Treatise on Bessel Functions and their
Applications to Physics}}, second ed., MacMillan and Co., London, 1922.
\bibitem{hille69} E. Hille;
\emph{{Lectures on Ordinary Differential Equations}}, Addison-Wesley,
Reading, Mass, 1969.
\bibitem{ince26} E. L. Ince;
\emph{{Ordinary Differential Equations}}, Longmans, Green, and Co.,
London, 1926, (Reprinted by Dover 1956).
\bibitem{kaplan58} W. Kaplan;
\emph{{Ordinary Differential Equations}}, Addison-Wesley Pub. Co.,
Reading, Mass, 1958.
\bibitem{lappo-danilevsky36} J. A. Lappo-Danilevsky;
\emph{Th\'eorie des syst\`emes des \'equations diff\'erentielles lin\'eaires},
Chelsea Publishing Company, N.Y., 1936,
(Reprinted 1953).
\bibitem{lommel68} E. C. J. Lommel;
\emph{{Studien {\"u}ber die Bessel'schen Funktionen}}, Druck
und Verlag von B.G. Teubuer, Leipzig, 1868.
\bibitem{neumann67} C. G. Neumann;
\emph{{Theorie der Besselschen Funktionen: Ein Analogon zur
Theorie der Kugelfunktionen}}, B. G. Teubner, Leipzig, 1867.
\bibitem{nielsen04} N. Nielsen;
\emph{Handbuch der Theorie der Zylinderfunktionen}, B. G. Teubner, Leipzig, 1904.
\bibitem{olver10}
F. W. J. Olver et al.;
\emph{NIST Handbook of Mathematical Functions},
National Institute of Standards and Technology (US Department of Commerce),
and Cambridge University Press, 2010.
\bibitem{schlesinger95} L. Schlesinger;
\emph{{Handbuch der linearen Differentialgleichungen}},
Leipzig, 1895--1898.
\bibitem{schlesinger97} L. Schlesinger;
\emph{Theorie der linearen Differentialgleichungen II}, vol.~1, 1897.
\bibitem{schlesinger22} L. Schlesinger;
\emph{{Einf{\"u}hrung in die Theorie der gew{\"o}hnlichen
Differentialgleichungen auf Funktionentheoretischer Grundlage}}, Dritte
Auflage, Berlin and Leipzig, G. J. G{\"o}schensche Verlagshandlung, 1922,
(first ed. 1900; second ed. 1904).
\bibitem{watson22} G. N. Watson;
\emph{{A Treatise on the Theory of Bessel Functions}}, second
ed., Cambridge University Press, 1922, (second ed. 1944).
\bibitem{whittaker02} E. T. Whittaker, G. N. Watson;
\emph{A Course of Modern Analysis}, Cambridge University Press, 1902,
(second ed. 1915, third ed. 1920, fourth ed. 1927).
\end{thebibliography}
\section{Addendum posted on May 11, 2017}
The author wants to correct the two $4\times 4$ matrices
$M_1$ and $M_n$ appearing in equations \eqref{5.7} and \eqref{5.9},
respectively.
They were printed with typos in the 4th row, 4th column.
These matrices should read:
\begin{gather}
M_1 =\begin{bmatrix}
r_1+1-D_0 & 0 & -E_0 & 0\\
0 & r_2+1-D_0 & 0 & -E_0\\
-F_0 & 0 & r_1+1-G_0 & 0\\
0 & -F_0 & 0 & r_2+1-G_0
\end{bmatrix},
\\
M_n=\begin{bmatrix}
r_1+n-D_0 & 0 & -E_0 & 0\\
0 & r_2+n-D_0 & 0 & -E_0\\
-F_0 & 0 & r_1+n-G_0 & 0\\
0 & -F_0 & 0 & r_2+n-G_0
\end{bmatrix}.
\end{gather}
End of addendum
\end{document}