\documentclass[reqno]{amsart} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2001(2001), No. 28, pp. 1--19.\newline ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu \newline ftp ejde.math.swt.edu (login: ftp)} \thanks{\copyright 2001 Southwest Texas State University.} \vspace{1cm}} \begin{document} \title[\hfilneg EJDE--2001/28\hfil Heat polynomial analogs ] {Heat polynomial analogs for higher order evolution equations} \author[G. N. Hile \& A. Stanoyevitch\hfil EJDE--2001/28\hfilneg] {G. N. Hile \& Alexander Stanoyevitch} \address{Gerald N. Hile \hfill\break Department of Mathematics, University of Hawaii, Honolulu, HI 96822, USA} \email{hile@hawaii.edu} \address{Alexander Stanoyevitch \hfill\break Department of Mathematics, University of Guam, UOG Station, Mangilao, GU 96923, USA} \email{alex@math.hawaii.edu} \date{} \thanks{Submitted February 20, 2001. Published May 2, 2001.} \subjclass[2000]{35C10, 35K25, 35C05, 35K30} \keywords{heat polynomials, polynomial solutions, evolution equations} \begin{abstract} Polynomial solutions analogous to the heat polynomials are demonstrated for higher order linear homogeneous evolution equations with coefficients depending on the time variable. Further parallels with the heat polynomials are established when the equation is parabolic with constant coefficients and only highest order terms. \end{abstract} \maketitle \newtheorem{theorem}{Theorem} \newtheorem{corollary}{Corollary} \newtheorem{example}{Example} \newtheorem{proposition}{Proposition} \newtheorem{remark}{Remark} \numberwithin{equation}{section} \section{Introduction} We consider a linear differential operator $\mathcal{L}$, represented by \begin{equation} \mathcal{L}u\left( x,t\right) =\partial_{t}u\left( x,t\right) -\sum_{\alpha}a_{\alpha}(t)\partial_{x}^{\alpha}u\left( x,t\right) \,. \label{intop1} \end{equation} Here $x\in\mathbb{R}^{n}$, $t\in\mathbb{R}$, and the coefficients $\left\{ a_{\alpha}\right\} $ are real valued continuous functions of $t$ on an interval $\mathbb{I}$ containing the origin. We develop explicit formulas for real valued solutions $\{ p_\beta \}$ of the initial value problems \begin{equation} \mathcal{L}p_{\beta}(x,t)=0\,,\quad p_{\beta}(x,0)=x^{\beta }\,. \label{intcond} \end{equation} Each $p_{\beta}$ is for fixed $t$ a polynomial in $x$ of degree $\left| \beta\right|$, of the form \[ p_{\beta}(x,t)=\sum_{\nu\leq\beta}c_{\nu}(t)x^{\nu}\,, \] where each coefficient $c_{\nu}$ is a real valued function in $C^{1}\left( \mathbb{I}\right) $. Moreover, when the coefficients $\left\{ a_{\alpha }\right\} $ are constant and $\mathcal{L}$ has no zero order term, $p_{\beta }$ is a polynomial in both $x$ and $t$. Our polynomials $\{ p_\beta \}$ are direct analogs of the classical \textit{heat polynomials}, as introduced by Rosenbloom and Widder \cite{RW1}, and described further by Widder in \cite{WID1,WID2,WID3}. Indeed, when $\mathcal{L}$ is the heat operator, $\mathcal{H}u=u_{t}-\Delta u$, our polynomials become these heat polynomials. As Rosenbloom and Widder demonstrated, heat polynomials can serve as a basis for expansion of other solutions of the heat equation. The initial condition $p_{\beta}(x,0)=x^{\beta}$ is particularly convenient for approximating the initial data of a solution by the initial data of a linear combination of heat polynomials.
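For instance, when $\mathcal{L}$ is the one-dimensional heat operator $\partial_{t}-\partial_{x}^{2}$, the solutions of (\ref{intcond}) of lowest degree are the familiar heat polynomials
\[
p_{0}(x,t)=1\,,\quad p_{1}(x,t)=x\,,\quad p_{2}(x,t)=x^{2}+2t\,,\quad
p_{3}(x,t)=x^{3}+6xt\,,
\]
each reducing to $x^{\beta}$ when $t=0$.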
We introduce the polynomials $\{p_\beta (x,t)\}$ as the coefficients in the expansion in powers of $z$ of a generating function $G(x,t,z)$, and we use this expansion to develop a few of their properties. The generating function has an explicit representation in terms of the coefficients of the operator $\mathcal{L}$. We establish also a general recursion formula for the polynomials, and we show that every polynomial solves a prescribed partial differential equation involving only space derivatives. These developments, direct generalizations of results of Rosenbloom and Widder, require no assumption of parabolicity on the operator $\mathcal{L}$. Rosenbloom and Widder introduced also so-called \textit{associate functions} of heat polynomials, comprising a \textit{biorthogonal family} to the heat polynomials. These associate functions are used to determine the coefficients when a heat function is expanded, in an infinite strip, in a series of heat polynomials. We have analogs also of these associate functions, in the special case that $\mathcal{L}$ is a parabolic operator with constant coefficients and only highest order terms. The associate functions are defined in terms of the derivatives of the fundamental solution of $\mathcal{L}$, and the fundamental solution can be expanded in a series of these functions. We confirm a biorthogonality relation between the polynomials $\{p_\beta\}$ and their associates, thereby laying the groundwork for further study concerning expansions of solutions of higher order parabolic equations in terms of the polynomials $\{p_\beta \}$. As also demonstrated by Rosenbloom and Widder, the heat polynomials in one space dimension can be described in terms of Hermite polynomials. In the last section we introduce a family of polynomials of a variable $x$ in $\mathbb{R}^{n}$, having a relation to our polynomials $\{ p_\beta\}$ analogous to that between Hermite polynomials and heat polynomials. There have been a number of generalizations of heat polynomials for operators besides the heat operator. For example, Kemnitz \cite{KEM1} presented such polynomials for the equation in one space dimension, \[ \frac{\partial u}{\partial t}=\frac{\partial^{r}u}{\partial x^{r} }\quad \text{(}r\geq2\text{)\ \ ,} \] and discussed series expansions in terms of these polynomials. Our polynomials $\{ p_\beta \}$ specialize to those of Kemnitz, as we point out in a later example. Haimo and Markett \cite{HM1,HM2} studied a closely related equation, the so-called \textit{higher order heat equation} (again in one space variable $x$), \[ \frac{\partial}{\partial t}u(x,t)=\left( -1\right) ^{q+1}\frac{\partial ^{2q}}{\partial x^{2q}}u(x,t)\,. \] They introduced polynomial solutions of this equation, as well as associate functions, confirmed a biorthogonality relation, and in a meticulous study established conditions under which a solution of the higher order heat equation can be expanded in an infinite strip in a series of higher order heat polynomials. They also suggested a generalization of Hermite polynomials to their higher order situation. When our operator $\mathcal{L}$ is specialized to the operator of Haimo and Markett, our polynomials $\{p_\beta\}$ become their polynomials $\{p_{n,q}\}$. In this paper we borrow liberally from the methods of Rosenbloom and Widder, as well as those of Kemnitz and of Haimo and Markett.
The \textit{generalized heat equation,} or \textit{radial heat equation}, \begin{equation} \frac{\partial u(r,t)}{\partial t}=\frac{\partial^{2}u(r,t)}{\partial r^{2} }+\frac{2\nu}{r}\frac{\partial u}{\partial r}\quad \text{,} \label{genheateqn} \end{equation} has been investigated by Bragg \cite{BRA1} and more extensively by Haimo \cite{HAI0,HAI1,HAI2,HAI4,HAI5,HAI6}. (In the case $2\nu=n-1$, the right side of the equation is the $n$-dimensional Laplace operator in radial coordinates.) These authors defined generalized heat polynomials for this equation, along with associated Appell transforms of the polynomials, and studied series representations of solutions of the equation in terms of these specialized solutions. Cholewinski and Haimo \cite{CH1} developed a parallel theory for the equation \[ \frac{\partial u(x,t)}{\partial t}=xu_{xx}(x,t)+\left( \alpha+1-x\right) u_{x}(x,t)\,, \] and in \cite{HAI3} Haimo extended some aspects of the theory to an analogous equation in $n$ space variables, \[ \frac{\partial u(x,t)}{\partial t}=\Delta_{n}u(x,t)+\sum_{i=1}^{n}\frac{2\nu }{x_{i}}\frac{\partial u(x,t)}{\partial x_{i}}\,. \] Fitouhi \cite{FIT1} extended the theory of heat polynomials to a slightly more general version of (\ref{genheateqn}), \[ u_{t}(x,t)=u_{xx}(x,t)+\left( \frac{2\nu}{x}+\frac{B^{\prime}(x)} {B(x)}\right) u_{x}(x,t)\,, \] with $B$ a suitable analytic function. As the coefficients of these ``generalized heat equations'' depend on the space variable $x$, our theory does not directly apply to them. Lo \cite{LO1} found analogs of the heat polynomials, called \textit{generalized Helmholtz polynomials}, for a perturbed version of the heat equation in one space variable, \begin{equation} \frac{\partial u}{\partial t}=\frac{\partial^{2}u}{\partial x^{2}} +\varepsilon^{2}\frac{\partial^{2}u}{\partial t^{2}}\,. \label{loeqn} \end{equation} Lo's polynomials have properties much like those of the ordinary heat polynomials, and when $\varepsilon=0$ they reduce to the heat polynomials. Lo gave some conditions under which solutions of (\ref{loeqn}) in the upper half plane can be expanded in a series of generalized Helmholtz polynomials. As (\ref{loeqn}) does not quite fit into our format, likewise our theory does not apply to this equation. Finally, we want to mention our forthcoming paper \cite{HS} in which we continue development of the theory of generalized heat polynomials. In this work we derive pointwise upper bounds on the polynomials, and use these to expand solutions of Cauchy problems for higher order evolution equations in series of polynomial solutions. \section{Polynomial Solutions} We consider a linear differential operator $\mathcal{L}$, defined by \begin{align} \mathcal{L}u\left( x,t\right) & =u_{t}\left( x,t\right) -\sum_{\alpha }a_{\alpha}(t)\partial_{x}^{\alpha}u\left( x,t\right) \label{oper1}\\ & =\frac{\partial u\left( x,t\right) }{\partial t}-Q\left( t,\partial _{x}\right) u\left( x,t\right) \,,\nonumber \end{align} where $Q(t,z)$ is the function \begin{equation} Q(t,z)=\sum_{\alpha}a_{\alpha}(t)\;z^{\alpha}\,. \label{defq} \end{equation} All functions are real valued, and $x\in\mathbb{R}^{n}$ ($n\geq1$), $t\in\mathbb{R}$. 
Also, $z\in\mathbb{R}^{n}$ and, given a multi-index $\alpha$ in $\mathbb{R}^{n}$, the derivative $\partial_{x}^{\alpha}$ and power $z^{\alpha}$ have the usual definitions \[ \partial_{x}^{\alpha}:=\frac{\partial^{\left| \alpha\right| }}{\partial x_{1}^{\alpha_{1}}\partial x_{2}^{\alpha_{2}}\cdots\partial x_{n}^{\alpha_{n} }}\,,\quad z^{\alpha}:=z_{1}^{\alpha_{1}}z_{2}^{\alpha_{2}}\cdots z_{n}^{\alpha_{n}}\,. \] The summation over $\alpha$ is assumed finite -- that is, $a_{\alpha}\equiv0$ except for a finite number of multi-indices $\alpha$. Each coefficient $a_{\alpha}$ is a continuous function of $t$ on an interval $\mathbb{I}$ containing the origin in $\mathbb{R}$. Initially we impose no further condition on $\mathcal{L}$. As the statement of our main result requires some preliminary notation, and because this notation is motivated only as the proof proceeds, we reverse the usual procedure and prove our result before stating it. We introduce a generating function $G$ to produce functions $p_{\beta }=p_{\beta}(x,t)$, indexed by multi-indices $\beta\in\mathbb{R}^{n}$ and defined for all $x\in\mathbb{R}^{n}$ and $t\in\mathbb{I}$, solving the initial value problem \begin{equation} \mathcal{L}p_{\beta}=0\,,\quad p_{\beta}(x,0)=x^{\beta}\,. \label{IVP} \end{equation} First we define antiderivatives $b_{\alpha}$ of the coefficients $a_{\alpha}$ according to \begin{equation} b_{\alpha}(t):=\int_{0}^{t}a_{\alpha}(s)\,ds\,,\;t\in\mathbb{I}\,, \label{antideriv} \end{equation} and define $R=R(t,z)$, for $t\in\mathbb{I}$ and $z\in\mathbb{R}^{n}$, as \begin{equation} R(t,z)=\sum_{\alpha}b_{\alpha}(t)\;z^{\alpha}=\int_{0}^{t}Q(s,z)\,ds\,. \label{Rdef} \end{equation} Note that $b_{\alpha}^{\prime}(t)=a_{\alpha}(t)$, $b_{\alpha}(0)=0$, and \[ \frac{\partial}{\partial t}R(t,z)=Q(t,z)\,,\quad R(0,z)=0\,. \] Our generating function $G$ is then defined as \begin{equation} G(x,t,z):=e^{x\cdot z}e^{R(t,z)}\,, \label{gendef} \end{equation} where $x\cdot z$ is the usual dot product of vectors in $\mathbb{R}^{n}$. Observe that, for each fixed $z$, the function $G(\cdot,\cdot,z)$ is a solution in $\mathbb{R}^{n}\times\mathbb{I}$ of $\mathcal{L}G=0$; indeed, we have the simple calculation \begin{align*} Q\left( t,\partial_{x}\right) G(x,t,z) & =\sum_{\alpha}a_{\alpha }(t)\partial_{x}^{\alpha}G(x,t,z)=\sum_{\alpha}a_{\alpha}(t)z^{\alpha }G(x,t,z)\\ & =Q(t,z)G(x,t,z)=\frac{\partial}{\partial t}G(x,t,z). \end{align*} We will expand $G$ in a convergent power series of the form \begin{equation} G(x,t,z)=\sum_{\beta}p_{\beta}(x,t)\frac{z^{\beta}}{\beta!}\quad ; \label{genseries} \end{equation} then termwise differentiation (which we will justify) yields \begin{equation} 0=\mathcal{L}G(x,t,z)=\sum_{\beta}\mathcal{L}p_{\beta}(x,t)\frac{z^{\beta} }{\beta!}\,, \label{verpolyeqn} \end{equation} and it follows that each $p_{\beta}$ solves $\mathcal{L}p_{\beta}=0$. Moreover, from (\ref{genseries}) and (\ref{gendef}) we have \[ \sum_{\beta}p_{\beta}(x,0)\frac{z^{\beta}}{\beta!}=G(x,0,z)=e^{x\cdot z}e^{R(0,z)}=e^{x\cdot z}=\sum_{\beta}\frac{x^{\beta}z^{\beta}}{\beta!}\,, \] and hence $p_{\beta}(x,0)=x^{\beta}$ for each multi-index $\beta$. For a multi-index $\beta$ we stipulate $\left| \beta\right| =\beta _{1}+\cdots+\beta_{n}$, $\beta!=\beta_{1}!\cdots\beta_{n}!$, and for another multi-index $\gamma$ of the same dimension we say $\gamma\leq\beta$, or $\beta\geq\gamma$, whenever $\gamma_{i}\leq\beta_{i}$ for each $i$.
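Before carrying out the expansion in general, it may help to record the generating function in the most familiar case. For the heat operator $\mathcal{H}u=u_{t}-\Delta u$ we have $Q(t,z)=\left| z\right| ^{2}$ and $R(t,z)=t\left| z\right| ^{2}$, so that
\[
G(x,t,z)=e^{x\cdot z}e^{t\left| z\right| ^{2}}=e^{x\cdot z+t\left| z\right| ^{2}}\,,
\]
which is the classical generating function for the heat polynomials of Rosenbloom and Widder \cite{RW1}.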
Beginning with (\ref{gendef}), we have the expansion \begin{equation} G(x,t,z)=\left( \sum_{\gamma}\frac{x^{\gamma}z^{\gamma}}{\gamma!}\right) \left( \sum_{j=0}^{\infty}\frac{R(t,z)^{j}}{j!}\right) \,. \label{expG1} \end{equation} We let $K$ denote the number of indices $\alpha$ in the summation (\ref{defq}) corresponding to nonzero $a_{\alpha}$, so that we may label these indices as $\alpha^{1},\alpha^{2},\cdots,\alpha^{K}$, the corresponding coefficients $\left\{ a_{\alpha}\right\} $ as $a_{1},a_{2},\cdots,a_{K}$, and the corresponding antiderivatives $\left\{ b_{\alpha}\right\} $ as $b_{1} ,b_{2},\cdots,b_{K}$. We recall the general multinomial formula \begin{equation} \left( c_{1}+c_{2}+\cdots+c_{K}\right) ^{j}=\sum_{\left| \sigma\right| =j}\frac{j!}{\sigma!}c^{\sigma}\,, \label{multiexp} \end{equation} where $c:=\left( c_{1},\cdots,c_{K}\right) $, and $\sigma=\left( \sigma _{1},\cdots,\sigma_{K}\right) $ represents a multi-index of length $K$. To economize notation, we introduce vectors \[ a=\left( a_{1},a_{2},\cdots,a_{K}\right) \;\,,\quad \;b=\left( b_{1},b_{2},\cdots,b_{K}\right) \,, \] and further define a \textit{vector of multi-indices} $\overline{\alpha}$ as \[ \overline{\alpha}=\left( \alpha^{1},\alpha^{2},\cdots,\alpha^{K}\right) \,. \] The dot product $\overline{\alpha}\cdot\sigma$ we define as the multi-index (in $\mathbb{R}^{n}$) \[ \overline{\alpha}\cdot\sigma:=\sum_{k=1}^{K}\alpha^{k}\sigma_{k}\,. \] We then apply (\ref{multiexp}) to (\ref{Rdef}) to compute \begin{align*} R(t,z)^{j} & =\left[ b_{1}(t)z^{\alpha^{1}}+b_{2}(t)z^{\alpha^{2}} +\cdots+b_{K}(t)z^{\alpha^{K}}\right] ^{j}\\ & =\sum_{\left| \sigma\right| =j}\frac{j!}{\sigma!}\left( b_{1} (t)z^{\alpha^{1}}\right) ^{\sigma_{1}}\left( b_{2}(t)z^{\alpha^{2}}\right) ^{\sigma_{2}}\cdots\left( b_{K}(t)z^{\alpha^{K}}\right) ^{\sigma_{K}}\\ & =\sum_{\left| \sigma\right| =j}\frac{j!}{\sigma!}b(t)^{\sigma }z^{\overline{\alpha}\cdot\sigma}\;, \end{align*} and substitute this formula into (\ref{expG1}) to arrive at \begin{align} G(x,t,z) & =\left( \sum_{\gamma}\frac{x^{\gamma}z^{\gamma}}{\gamma !}\right) \left( \sum_{j=0}^{\infty}\sum_{\left| \sigma\right| =j} \frac{b(t)^{\sigma}z^{\overline{\alpha}\cdot\sigma}}{\sigma!}\right) \nonumber\\ & =\sum_{\gamma}\sum_{\sigma}\frac{x^{\gamma}b(t)^{\sigma}z^{\gamma +\overline{\alpha}\cdot\sigma}}{\gamma!\sigma!}\,. \label{expG2} \end{align} Note that the first summation in (\ref{expG2}) is over all multi-indices $\gamma$ in $\mathbb{R}^{n}$, while the second is over all multi-indices $\sigma$ in $\mathbb{R}^{K}$. As exponential series converge everywhere, there is no trouble with pointwise convergence of this iterated double summation. However, in order to justify rearrangements and termwise differentiations of this series, we must investigate absolute and uniform convergence. Applying estimates of the form $\left| x^{\alpha}\right| \leq\left| x\right| ^{\left| \alpha\right| }$, for each term of (\ref{expG2}) we obtain the bound \[ \left| \frac{x^{\gamma}b(t)^{\sigma}z^{\gamma+\overline{\alpha}\cdot\sigma} }{\gamma!\sigma!}\right| \leq\frac{\left| x\right| ^{\left| \gamma\right| }\left| b(t)\right| ^{\left| \sigma\right| }\left| z\right| ^{\left| \gamma+\overline{\alpha}\cdot\sigma\right| }}{\gamma!\sigma!}\,, \] where $b(t)$, because of our continuity assumption on the coefficients $\left\{ a_{\alpha}\right\} $, is bounded on compact subsets of $\mathbb{I} $. 
We let $\ell=\max_{\alpha}\left| \alpha\right| $ denote the degree of the operator $\mathcal{L}$, so that \[ \left| \overline{\alpha}\cdot\sigma\right| =\sum_{k=1}^{K}\left| \alpha ^{k}\right| \sigma_{k}\leq\ell\sum_{k=1}^{K}\sigma_{k}=\ell\left| \sigma\right| \,, \] \[ \left| z\right| ^{\left| \gamma+\overline{\alpha}\cdot\sigma\right| }=\left| z\right| ^{\left| \gamma\right| +\left| \overline{\alpha} \cdot\sigma\right| }=\left| z\right| ^{\left| \gamma\right| }\left| z\right| ^{\left| \overline{\alpha}\cdot\sigma\right| }\leq\left| z\right| ^{\left| \gamma\right| }\left( 1+\left| z\right| \right) ^{\ell\left| \sigma\right| }\,. \] Using all these estimates in (\ref{expG2}), along with the general formula \[ \sum_{\beta}\frac{s^{\left| \beta\right| }}{\beta!}=e^{ns}\quad \text{(} s\in\mathbb{R}\text{\textit{, }}\beta\text{ a multi-index in }\mathbb{R} ^{n}\text{),} \] we find that \begin{align} & \sum_{\gamma}\sum_{\sigma}\left| \frac{x^{\gamma}b(t)^{\sigma} z^{\gamma+\overline{\alpha}\cdot\sigma}}{\gamma!\sigma!}\right| \label{bound} \\ & \leq\sum_{\gamma}\frac{\left| x\right| ^{\left| \gamma\right| }\left| z\right| ^{\left| \gamma\right| }}{\gamma!}\sum_{\sigma}\frac{\left| b(t)\right| ^{\left| \sigma\right| }\left( 1+\left| z\right| \right) ^{\ell\left| \sigma\right| }}{\sigma!}\nonumber\\ & =e^{n\left| x\right| \left| z\right| }e^{K\left| b(t)\right| \left( 1+\left| z\right| \right) ^{\ell}}\,,\nonumber \end{align} with convergence uniform when $\left( x,t,z\right) $ is restricted to any compact subset of $\mathbb{R}^{n}\times\mathbb{I}\times\mathbb{R}^{n}$. We also want to justify differentiating termwise the series (\ref{expG2}), arbitrarily with respect to $x$ and once with respect to $t$. Given a multi-index $\tau$ in $\mathbb{R}^{n}$, the formal derivative $\partial _{x}^{\tau}$ of the series is \[ \partial_{x}^{\tau}G(x,t,z)=\sum_{\gamma\geq\tau}\sum_{\sigma}\frac {x^{\gamma-\tau}b(t)^{\sigma}z^{\gamma+\overline{\alpha}\cdot\sigma}}{\left( \gamma-\tau\right) !\sigma!}=z^{\tau}\sum_{\nu}\sum_{\sigma}\frac{x^{\nu }b(t)^{\sigma}z^{\nu+\overline{\alpha}\cdot\sigma}}{\nu!\sigma!}\,. \] Obviously a bound similar to (\ref{bound}) applies to this series, so termwise differentiation is justified. The formal derivative of (\ref{expG2}) with respect to $t$ is \begin{equation} \frac{\partial}{\partial t}G(x,t,z)=\sum_{\gamma}\sum_{\sigma}\frac{x^{\gamma }z^{\gamma+\overline{\alpha}\cdot\sigma}}{\gamma!\sigma!}\frac{\partial }{\partial t}b(t)^{\sigma}\,, \label{timederiv} \end{equation} with \[ \frac{\partial}{\partial t}b(t)^{\sigma}=\frac{\partial}{\partial t}\left[ b_{1}(t)^{\sigma_{1}}b_{2}(t)^{\sigma_{2}}\cdots b_{K}(t)^{\sigma_{K}}\right] =\sum_{k=1}^{K}\sigma_{k}a_{k}(t)b(t)^{\sigma-e_{k}}\,, \] where $e_{k}$ represents the unit multi-index having $1$ in the $k$-th position. With estimates like those in (\ref{bound}) we find that (\ref{timederiv}) is majorized by \[ \sum_{k=1}^{K}\sum_{\gamma}\sum_{\sigma\geq e_{k}}\frac{\left| x\right| ^{\left| \gamma\right| }\left| z\right| ^{\left| \gamma\right| }\left( 1+\left| z\right| \right) ^{\ell\left| \sigma\right| }}{\gamma!\sigma !}\sigma_{k}\left| a(t)\right| \left| b(t)\right| ^{\left| \sigma -e_{k}\right| }\,, \] where we may restrict the summation to $\sigma\geq e_{k}$, since the terms with $\sigma_{k}=0$ vanish.
When we substitute $\sigma=\nu+e_{k}$ this series becomes \begin{align*} & \sum_{k=1}^{K}\sum_{\gamma}\sum_{\nu}\frac{\left| x\right| ^{\left| \gamma\right| }\left| z\right| ^{\left| \gamma\right| }\left( 1+\left| z\right| \right) ^{\ell\left( \left| \nu\right| +1\right) }}{\gamma !\nu!}\left| a(t)\right| \left| b(t)\right| ^{\left| \nu\right| }\\ & =K\left| a(t)\right| \left( 1+\left| z\right| \right) ^{\ell }e^{n\left| x\right| \left| z\right| }e^{K\left| b(t)\right| \left( 1+\left| z\right| \right) ^{\ell}}\,, \end{align*} again with convergence uniform on compact subsets of $\mathbb{R} ^{n}\mathbb{\times I\times R}^{n}$. Thus we may differentiate termwise once with respect to $t$ in (\ref{expG2}). By Fubini's theorem for double series, we may rearrange the summation in (\ref{expG2}), summing on the outside over powers $z^{\beta}$, to rewrite the series as% \[ G(x,t,z)=\sum_{\beta}z^{\beta}\sum_{\gamma,\sigma:\gamma+\overline{\alpha }\cdot\sigma=\beta}\frac{x^{\gamma}b(t)^{\sigma}}{\gamma!\sigma!}=\sum_{\beta }p_{\beta}(x,t)\frac{z^{\beta}}{\beta!}\;\;\;, \] where the functions $\{ p_\beta \}$ are defined as \begin{equation} p_{\beta}(x,t):=\sum_{\gamma,\sigma:\gamma+\overline{\alpha}\cdot\sigma=\beta }\frac{x^{\gamma}b(t)^{\sigma}}{\gamma!\sigma!}\,. \label{firstpoly} \end{equation} (Recall that $\gamma$ ranges over multi-indices in $\mathbb{R}^{n}$ and $\sigma$ over multi-indices in $\mathbb{R}^{K}$.) Now, the only way that $\gamma+\overline{\alpha}\cdot\sigma=\beta$ is possible is that $\overline {\alpha}\cdot\sigma\leq\beta$ and $\gamma=\beta-\overline{\alpha}\cdot\sigma$, and for any such $\sigma$ there is only one corresponding $\gamma$. Thus we may rewrite (\ref{firstpoly}) as a sum over all multi-indices $\sigma$ in $\mathbb{R}^{K}$, obtaining the alternative formulation \begin{equation} p_{\beta}(x,t):=\beta!\sum_{\overline{\alpha}\cdot\sigma\leq\beta} \frac{x^{\beta-\overline{\alpha}\cdot\sigma}b(t)^{\sigma}}{\sigma!\left( \beta-\overline{\alpha}\cdot\sigma\right) !}\,. \label{polyform1} \end{equation} Thus we have derived (\ref{genseries}), and justified as well termwise differentiation in (\ref{verpolyeqn}). All series representations are valid for $(x,t,z)\in\mathbb{R}^{n}\times\mathbb{I\times R}^{n}$. If the operator $\mathcal{L}$ of (\ref{oper1}) contains a \textit{zero order term} -- that is, if $a_{\alpha}$ appears corresponding to $\alpha=\left( 0,\cdots,0\right) $ -- then the summation over $\sigma$ in (\ref{polyform1}) will have an infinite number of terms, as there will be an infinite number of multi-indices $\sigma$ in $\mathbb{R}^{K}$ with $\overline{\alpha}\cdot \sigma\leq\beta$. However, if $\mathcal{L}$ has no zero order term then the number of such $\sigma$ is finite, and the summation in (\ref{polyform1}) is a finite one. In either case, each $p_{\beta}$ is a polynomial in $x$ with coefficients depending on $t$, as there is a finite number of powers $x^{\tau }$ with $\tau\leq\beta$. Note that the leading term in (\ref{polyform1}), corresponding to $\sigma=\left( 0,\cdots,0\right) $, is $x^{\beta}$; thus the degree of $p_{\beta}$ as a polynomial in $x$ is $\left| \beta\right| $. 
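As a quick illustration of (\ref{polyform1}), consider the one space variable operator $\mathcal{L}u=u_{t}-a(t)u_{xx}$, with $a$ continuous on $\mathbb{I}$; here $K=1$, $\alpha^{1}=2$, and $b(t)=\int_{0}^{t}a(s)\,ds$. Taking $\beta=4$, formula (\ref{polyform1}) gives
\[
p_{4}(x,t)=4!\sum_{2\sigma\leq4}\frac{x^{4-2\sigma}b(t)^{\sigma}}{\sigma
!\left( 4-2\sigma\right) !}=x^{4}+12x^{2}b(t)+12b(t)^{2}\,,
\]
and one verifies directly that $\partial_{t}p_{4}=a(t)\partial_{x}^{2}p_{4}$ and $p_{4}(x,0)=x^{4}$. When $a\equiv1$ this is the classical heat polynomial $x^{4}+12x^{2}t+12t^{2}$.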
We summarize formally our deliberations thus far: \begin{theorem} \label{thm1}Let $\mathcal{L}$ be the operator \begin{equation} \mathcal{L}=\frac{\partial}{\partial t}-\sum_{\alpha}a_{\alpha}(t)\partial _{x}^{\alpha}=\frac{\partial}{\partial t}-\sum_{k=1}^{K}a_{k}(t)\partial _{x}^{\alpha^{k}}\,, \label{thm1op} \end{equation} where each $a_{\alpha}$ is continuous on an interval $\mathbb{I}$ containing the origin. Set \[ a=\left( a_{1},a_{2},\cdots,a_{K}\right) \,,\quad b(t)=\int _{0}^{t}a(s)\,ds\,. \] Then the functions $p_{\beta}$, indexed over multi-indices $\beta$ in $\mathbb{R}^{n}$ and defined by (\ref{polyform1}), are polynomials in $x$ of degree $\left| \beta\right| $, and satisfy on $\mathbb{R}^{n}\times \mathbb{I}$ the equation $\mathcal{L}p_{\beta}=0$ as well as the initial condition $p_{\beta}(x,0)=x^{\beta}$. Moreover, the series (\ref{polyform1}) for $p_{\beta}$ is uniformly absolutely convergent on any compact subset of $\mathbb{R}^{n}\times\mathbb{I}$, as are the differentiated series $\partial_{t}p_{\beta}$ and $\partial_{x}^{\tau}p_{\beta}$ for all $\tau$. If $\mathcal{L}$ has no zero order term, then the summation in (\ref{polyform1}) is over only a finite number of indices $\sigma$, and so the sum has a finite number of terms. \end{theorem} If the coefficients $a_{\alpha}$ are constant -- that is, independent of $t$ -- then $Q(t,z)$ and $R(t,z)$ simplify to \[ Q(t,z)=Q(z)=\sum_{\alpha}a_{\alpha}z^{\alpha} \,,\quad R(t,z)=tQ(z)=t\sum_{\alpha}a_{\alpha}z^{\alpha}\,, \] while the formula (\ref{gendef}) for the generating function becomes \begin{equation} G(x,t,z)=e^{x\cdot z}e^{tQ(z)}\,. \label{genconst} \end{equation} Moreover, in this case we have \[ b(t)=\int_{0}^{t}a\,ds=at\;\,,\quad b(t)^{\sigma}=a^{\sigma}t^{\left| \sigma\right| }\,, \] so that (\ref{polyform1}) becomes \begin{equation} p_{\beta}(x,t):=\beta!\sum_{\overline{\alpha}\cdot\sigma\leq\beta} \frac{a^{\sigma}x^{\beta-\overline{\alpha}\cdot\sigma}t^{\left| \sigma\right| }}{\sigma!\left( \beta-\overline{\alpha}\cdot\sigma\right) !}\,. \label{polyform2} \end{equation} If furthermore $\mathcal{L}$ has no zero order term, then this sum is over a finite number of indices $\sigma$ and consequently $p_{\beta}$ is a polynomial in both $x$ and $t$. In this situation the degree of $p_{\beta}$ as a polynomial in $\left( x,t\right) $ is again $\left| \beta\right| $, as the degree of the general term in (\ref{polyform2}) will have the upper bound \begin{align*} \deg\left( x^{\beta-\overline{\alpha}\cdot\sigma}t^{\left| \sigma\right| }\right) & =\left| \beta\right| -\left| \overline{\alpha }\cdot\sigma\right| +\left| \sigma\right| =\left| \beta\right| -\sum_{k=1}^{K}\left| \alpha^{k}\right| \sigma_{k}+\left| \sigma\right| \\ & \leq\left| \beta\right| -\sum_{k=1}^{K}\sigma_{k}+\left| \sigma\right| =\left| \beta\right| \,. \end{align*} \begin{corollary} If the coefficients $a_{\alpha}$ are constant, so that \[ \mathcal{L}=\frac{\partial}{\partial t}-\sum_{\alpha}a_{\alpha}\partial _{x}^{\alpha}\,, \] then the formula for $p_{\beta}$ reduces to (\ref{polyform2}). In particular, if $\mathcal{L}$ has constant coefficients and no zero order term, then each function $p_{\beta}$ is a polynomial in $x$ and $t$, of degree $\left| \beta\right| $.
\end{corollary} \begin{example} Consider the heat operator in $n$ space variables, \[ \mathcal{H}=\frac{\partial}{\partial t}-\Delta_{x}=\frac{\partial}{\partial t}-\sum_{k=1}^{n}\frac{\partial^{2}}{\partial x_{k}^{2}}=\frac{\partial }{\partial t}-Q\left( \partial_{x}\right) \,, \] where in this instance \[ Q(t,z)=Q(z)=\sum_{i=1}^{n}z_{i}^{2}=\sum_{i=1}^{n}z^{2e_{i}}\,, \] with $e_{i}$ the $i$-th unit multi-index. For $\sigma=\left( \sigma _{1},\cdots,\sigma_{n}\right) $ we have \[ \overline{\alpha}\cdot\sigma=2\sigma_{1}e_{1}+\cdots+2\sigma_{n}e_{n} =2\sigma\,, \] while $a^{\sigma}=\left( 1,\cdots,1\right) ^{\sigma}=1$. Thus formula (\ref{polyform2}) simplifies to \[ p_{\beta}(x,t)=\beta!\sum_{2\sigma\leq\beta}\frac{x^{\beta-2\sigma}t^{\left| \sigma\right| }}{\sigma!\left( \beta-2\sigma\right) !}\,. \] These are the classical heat polynomials of Rosenbloom and Widder \cite{RW1}. (These authors used a less compact notation for these polynomials -- see \cite{HIM1} for a transition to the multi-index notation.) \end{example} \begin{example} Kemnitz \cite{KEM1} introduced an analog of heat polynomials for a higher order version of the one-dimensional heat equation, \[ \frac{\partial u}{\partial t}-\frac{\partial^{r}u}{\partial x^{r}}=0\,, \] where $r$ is an integer, $r\geq2$. In this situation there is only one space derivative $\partial^{\alpha}$, with $\alpha=re_{1}$ and corresponding coefficient $a_{\alpha}=1$. The multi-index $\sigma$ reduces to a scalar, with $\overline{\alpha}\cdot\sigma=r\sigma$ and $a^{\sigma}=1$. Formula (\ref{firstpoly}) for the polynomials $\{ p_\beta \}$, with $b(t)=t$, becomes the formula of Kemnitz, \[ p_{\beta}(x,t):=\sum_{\gamma+r\sigma=\beta}\frac{x^{\gamma}t^{\sigma}} {\gamma!\sigma!}\,, \] where now $\beta$, $\gamma$, and $\sigma$ are nonnegative integers. \end{example} \begin{example} Polynomial solutions of the so-called ``higher-order heat equation'', \[ \frac{\partial u}{\partial t}=\left( -1\right) ^{m+1}\frac{\partial^{2m} u}{\partial x^{2m}}\,, \] with $u=u(x,t)$ a function of time $t$ and one space variable $x$, were studied by Haimo and Markett in \cite{HM1,HM2}. Our formula (\ref{polyform2}) yields the same polynomials. The operator of interest is \begin{equation} \mathcal{H}_{m,1}:=\frac{\partial}{\partial t}-\left( -1\right) ^{m+1} \frac{\partial^{2m}}{\partial x^{2m}}\,. \label{hmeqn} \end{equation} Again there is only one space derivative $\partial^{\alpha}$, with $\alpha=2me_{1}$ and corresponding coefficient $a_{\alpha}=\left( -1\right) ^{m+1}$. The multi-index $\sigma$ is a scalar, with $\overline{\alpha} \cdot\sigma=2m\sigma$ and $a^{\sigma}=\left( -1\right) ^{\left( m+1\right) \sigma}$. Formula (\ref{polyform2}) becomes the Haimo-Markett formula \[ p_{\beta}(x,t)=\beta!\sum_{2m\sigma\leq\beta}\left( -1\right) ^{\left( m+1\right) \sigma}\frac{x^{\beta-2m\sigma}t^{\sigma}}{\sigma!\left( \beta-2m\sigma\right) !}\,, \] where now $\beta$ and $\sigma$ are nonnegative integers. \end{example} \begin{example} The analog in $n$ space dimensions of the operator (\ref{hmeqn}) is \begin{equation} \mathcal{H}_{m,n}=\frac{\partial}{\partial t}-\left( -1\right) ^{m+1} \Delta^{m}\,, \label{hmeqnan} \end{equation} with $\Delta$ the Laplace operator in $\mathbb{R}^{n}$. (The factor $\left( -1\right) ^{m+1}$ makes the operator parabolic.) The corresponding function $Q$ of (\ref{defq}) becomes \[ Q(t,z)=Q(z)=\left( -1\right) ^{m+1}\left| z\right| ^{2m}=\left( -1\right) ^{m+1}\left( z_{1}^{2}+\cdots+z_{n}^{2}\right) ^{m}\,.
\] Instead of substituting directly into (\ref{polyform2}) to determine the formula for $p_{\beta}$, it is a little cleaner and more efficient to begin again from the expansion (\ref{expG1}). Since the coefficients of this operator are constant, we have $R(t,z)=tQ(z)$, and thus (\ref{expG1}) becomes \[ G(x,t,z)=\left( \sum_{\gamma}\frac{x^{\gamma}z^{\gamma}}{\gamma!}\right) \left( \sum_{j=0}^{\infty}\frac{t^{j}Q(z)^{j}}{j!}\right) \,, \] with \begin{align*} Q(z)^{j} & =\left( -1\right) ^{\left( m+1\right) j}\left( z_{1} ^{2}+\cdots+z_{n}^{2}\right) ^{mj}\\ & =\left( -1\right) ^{\left( m+1\right) j}\sum_{\left| \sigma\right| =mj}\frac{\left( mj\right) !}{\sigma!}\left( z_{1}^{2}\right) ^{\sigma _{1}}\left( z_{2}^{2}\right) ^{\sigma_{2}}\cdots\left( z_{n}^{2}\right) ^{\sigma_{n}}\\ & =\left( -1\right) ^{\left( m+1\right) j}\sum_{\left| \sigma\right| =mj}\frac{\left| \sigma\right| !}{\sigma!}z^{2\sigma}\,, \end{align*} where in this situation $\sigma$ represents a multi-index in $\mathbb{R}^{n} $. Thus we have \begin{align*} G(x,t,z) & =\sum_{\gamma}\frac{x^{\gamma}z^{\gamma}}{\gamma!}\sum _{j=0}^{\infty}\frac{t^{j}}{j!}\left( -1\right) ^{\left( m+1\right) j} \sum_{\left| \sigma\right| =mj}\frac{\left| \sigma\right| !}{\sigma !}z^{2\sigma}\\ & =\sum_{\gamma}\frac{x^{\gamma}}{\gamma!}\sum_{m\mid\left| \sigma\right| }\left( -1\right) ^{\left( m+1\right) \left| \sigma\right| /m} \frac{\left| \sigma\right| !z^{\gamma+2\sigma}t^{\left| \sigma\right| /m} }{\left( \left| \sigma\right| /m\right) !\sigma!}\,. \end{align*} (The notation $m\mid\left| \sigma\right| $ means $m$ divides $\left| \sigma\right| $ evenly.) Summing first over powers $z^{\beta}$, as in our derivation of (\ref{polyform1}), we rewrite this double sum as \[ G(x,t,z)=\sum_{\beta}p_{\beta}(x,t)\frac{z^{\beta}}{\beta!}\,, \] where \[ p_{\beta}(x,t)=\beta!\sum_{2\sigma\leq\beta,m\mid\left| \sigma\right| }\left( -1\right) ^{\left( m+1\right) \left| \sigma\right| /m} \frac{\left| \sigma\right| !x^{\beta-2\sigma}t^{\left| \sigma\right| /m} }{\left( \left| \sigma\right| /m\right) !\sigma!\left( \beta -2\sigma\right) !}\,. \] These polynomials satisfy $\mathcal{H}_{m,n}p_{\beta}=0$, with $p_{\beta }(x,0)=x^{\beta}$. \end{example} \begin{example} We consider a modified heat operator in $n$ space dimensions, \[ \mathcal{L}u(x,t)=\frac{\partial u(x,t)}{\partial t}-\left[ f(t)u(x,t)+\sum _{i=1}^{n}g_{i}(t)\frac{\partial^{2}u(x,t)}{\partial x_{i}^{2}}\right] \;\,, \] where the functions $f$ and $\left\{ g_{i}\right\} $ are real valued and continuous on an interval containing the origin. The function $Q$ for this operator is \[ Q(t,z)=f(t)+\sum_{i=1}^{n}g_{i}(t)z_{i}^{2}=f(t)z^{0}+\sum_{i=1}^{n} g_{i}(t)z^{2e_{i}}\,. \] For $\sigma=\left( \sigma_{0},\sigma_{1},\cdots,\sigma_{n}\right) $ we have \[ \overline{\alpha}\cdot\sigma=\sigma_{0}(0)+2\sigma_{1}e_{1}+2\sigma_{2} e_{2}+\cdots+2\sigma_{n}e_{n}=2\widetilde{\sigma}\,, \] where $\widetilde{\sigma}=\left( \sigma_{1},\cdots,\sigma_{n}\right) $. We set \[ F(t):=\int_{0}^{t}f(s)\,ds\,,\quad G_{i}(t):=\int_{0}^{t} g_{i}(s)\,ds\quad \text{(}i=1,\cdots,n\text{)\ \ ,} \] and $G:=\left( G_{1},\cdots,G_{n}\right) $, so that \[ b(t)^{\sigma}=F(t)^{\sigma_{0}}G(t)^{\widetilde{\sigma}}\,.
\] The formula (\ref{polyform1})\ for $p_{\beta}$ becomes \begin{align*} p_{\beta}(x,t) & =\beta!\sum_{\sigma:2\widetilde{\sigma}\leq\beta} \frac{x^{\beta-2\widetilde{\sigma}}F(t)^{\sigma_{0}}G(t)^{\widetilde{\sigma}} }{\sigma!\left( \beta-2\widetilde{\sigma}\right) !}=\sum_{\sigma_{0} =0}^{\infty}\frac{F(t)^{\sigma_{0}}}{\sigma_{0}!}\beta!\sum_{2\widetilde {\sigma}\leq\beta}\frac{x^{\beta-2\widetilde{\sigma}}G(t)^{\widetilde{\sigma} }}{\widetilde{\sigma}!\left( \beta-2\widetilde{\sigma}\right) !}\\ & =e^{F(t)}\beta!\sum_{2\tau\leq\beta}\frac{x^{\beta-2\tau}G(t)^{\tau}} {\tau!\left( \beta-2\tau\right) !}\,, \end{align*} where the last summation, a finite one, is over multi-indices $\tau$ in $\mathbb{R}^{n}$. \end{example} We return to the general operator (\ref{intop1}), with generating function (\ref{gendef}) and expansion (\ref{genseries}). Given any multi-index $\tau$ in $\mathbb{R}^{n}$, from (\ref{gendef}) and (\ref{genseries}) we have \begin{equation} \partial_{x}^{\tau}G(x,t,z)=z^{\tau}G(x,t,z)=\sum_{\nu}p_{\nu}(x,t)\frac {z^{\nu+\tau}}{\nu!}=\sum_{\beta\geq\tau}p_{\beta-\tau}\frac{z^{\beta} }{\left( \beta-\tau\right) !}\,. \label{diffgen1} \end{equation} On the other hand, differentiation of (\ref{genseries}) gives \begin{equation} \partial_{x}^{\tau}G(x,t,z)=\sum_{\beta}\partial_{x}^{\tau}p_{\beta} (x,t)\frac{z^{\beta}}{\beta!}\,. \label{diffgen2} \end{equation} Equating coefficients of powers of $z$ in (\ref{diffgen1}) and (\ref{diffgen2} ), we infer that \begin{equation} \partial_{x}^{\tau}p_{\beta}(x,t)=\left\{ \begin{array} [c]{cc} \frac{\beta!}{\left( \beta-\tau\right) !}p_{\beta-\tau}(x,t)\,, & \text{if }\beta\geq\tau\quad \text{,}\\ 0\,, & \text{otherwise.} \end{array} \right. \label{difformula} \end{equation} When we combine (\ref{difformula}) with the equation \[ 0=\mathcal{L}p_{\beta}(x,t)=\partial_{t}p_{\beta}(x,t)-\sum_{\alpha}a_{\alpha }(t)\partial_{x}^{\alpha}p_{\beta}(x,t)\,, \] we acquire the additional formula \begin{equation} \partial_{t}p_{\beta}(x,t)=\beta!\sum_{\alpha\leq\beta}\frac{a_{\alpha} (t)}{\left( \beta-\alpha\right) !}p_{\beta-\alpha}(x,t)\,, \label{difformula2} \end{equation} with the summation over only those $\alpha$ appearing in $\mathcal{L}$ for which $\alpha\leq\beta$. The next result presents a recursion formula for the polynomials $\left\{ p_{\beta}\right\} $, useful in constructing one such polynomial from lower order ones. \begin{theorem} Let $\mathcal{L}$ be the operator (\ref{intop1}), with associated polynomials $\{ p_\beta \}$. Then for any $p_{\beta}$ and for $1\leq i\leq n$ (with $e_{i}$ the unit multi-index in the $i$-th direction), \begin{equation} \beta_{i}p_{\beta}(x,t)=\beta_{i}x_{i}p_{\beta-e_{i}}(x,t)+\beta!\sum _{\alpha\leq\beta}\frac{\alpha_{i}b_{\alpha}(t)}{\left( \beta-\alpha\right) !}p_{\beta-\alpha}(x,t)\,, \label{recursion} \end{equation} where the summation is taken over multi-indices $\alpha$ appearing in $\mathcal{L}$ such that $\alpha\leq\beta$. (If there are no such multi-indices, the summation is vacuous.) \end{theorem} \begin{proof} If $\beta_{i}=0$ then both sides of (\ref{recursion}) vanish; thus we assume $\beta_{i}>0$. 
Beginning with (\ref{polyform1}), we compute \begin{align*} x_{i}p_{\beta-e_{i}}(x,t) & =x_{i}\left( \beta-e_{i}\right) !\sum _{\overline{\alpha}\cdot\sigma\leq\beta-e_{i}}\frac{x^{\beta-e_{i} -\overline{\alpha}\cdot\sigma}b(t)^{\sigma}}{\sigma!\left( \beta -e_{i}-\overline{\alpha}\cdot\sigma\right) !}\\ & =\left( \beta-e_{i}\right) !\sum_{\overline{\alpha}\cdot\sigma\leq \beta-e_{i}}\frac{x^{\beta-\overline{\alpha}\cdot\sigma}b(t)^{\sigma}\left( \beta_{i}-\left( \overline{\alpha}\cdot\sigma\right) _{i}\right) } {\sigma!\left( \beta-\overline{\alpha}\cdot\sigma\right) !}\,. \end{align*} Now, if $\overline{\alpha}\cdot\sigma\leq\beta$ then either $\beta_{i}-\left( \overline{\alpha}\cdot\sigma\right) _{i}=0$ or $\overline{\alpha}\cdot \sigma\leq\beta-e_{i}$; thus the last summation may be taken over all $\sigma$ with $\overline{\alpha}\cdot\sigma\leq\beta$. Then, subtracting this expression from (\ref{polyform1}), we find that \begin{align*} & p_{\beta}(x,t)-x_{i}p_{\beta-e_{i}}(x,t)\\ & =\beta!\sum_{\overline{\alpha}\cdot\sigma\leq\beta}\frac{x^{\beta -\overline{\alpha}\cdot\sigma}b(t)^{\sigma}}{\sigma!\left( \beta -\overline{\alpha}\cdot\sigma\right) !}-\left( \beta-e_{i}\right) !\sum_{\overline{\alpha}\cdot\sigma\leq\beta}\frac{x^{\beta-\overline{\alpha }\cdot\sigma}b(t)^{\sigma}\left( \beta_{i}-\left( \overline{\alpha} \cdot\sigma\right) _{i}\right) }{\sigma!\left( \beta-\overline{\alpha} \cdot\sigma\right) !}\\ & =\left( \beta-e_{i}\right) !\sum_{\overline{\alpha}\cdot\sigma\leq\beta }\frac{x^{\beta-\overline{\alpha}\cdot\sigma}b(t)^{\sigma}\left( \overline{\alpha}\cdot\sigma\right) _{i}}{\sigma!\left( \beta-\overline {\alpha}\cdot\sigma\right) !}\\ & =\left( \beta-e_{i}\right) !\sum_{k=1}^{K}\left( \alpha^{k}\right) _{i}\sum_{\overline{\alpha}\cdot\sigma\leq\beta}\frac{x^{\beta-\overline {\alpha}\cdot\sigma}b(t)^{\sigma}\sigma_{k}}{\sigma!\left( \beta -\overline{\alpha}\cdot\sigma\right) !}\,. \end{align*} As the summand in the last expression vanishes if $\sigma_{k}=0$, we may sum over only those $\sigma$ such that $\sigma_{k}>0$, in which case we may write $\sigma=\tau+e_{k}$ for some other multi-index $\tau$. With this substitution we have \[ \overline{\alpha}\cdot\sigma=\overline{\alpha}\cdot\tau+\overline{\alpha}\cdot e_{k}=\overline{\alpha}\cdot\tau+\alpha^{k}\,, \] and thus \[ p_{\beta}(x,t)-x_{i}p_{\beta-e_{i}}(x,t)=\left( \beta-e_{i}\right) !\sum_{k=1}^{K}\left( \alpha^{k}\right) _{i}\sum_{\overline{\alpha}\cdot \tau+\alpha^{k}\leq\beta}\frac{x^{\beta-\overline{\alpha}\cdot\tau-\alpha^{k} }b(t)^{\tau}b_{k}(t)}{\tau!\left( \beta-\overline{\alpha}\cdot\tau-\alpha ^{k}\right) !}\,. \] In the last summation we may sum over only those $k$ for which $\alpha^{k} \leq\beta$, as otherwise the sum is vacuous. Thus we finally arrive at \begin{align*} p_{\beta}(x,t)-x_{i}p_{\beta-e_{i}}(x,t) & =\left( \beta-e_{i}\right) !\sum_{\alpha\leq\beta}\alpha_{i}b_{\alpha}(t)\sum_{\overline{\alpha}\cdot \tau\leq\beta-\alpha}\frac{x^{\beta-\alpha-\overline{\alpha}\cdot\tau }b(t)^{\tau}}{\tau!\left( \beta-\alpha-\overline{\alpha}\cdot\tau\right) !}\\ & =\left( \beta-e_{i}\right) !\sum_{\alpha\leq\beta}\frac{\alpha _{i}b_{\alpha}(t)}{\left( \beta-\alpha\right) !}p_{\beta-\alpha}(x,t)\,, \end{align*} and thereby (\ref{recursion}). 
\end{proof} \ \ Finally, if we substitute (\ref{difformula}) into (\ref{recursion}) we obtain for $p_{\beta}$ the differential equation \begin{equation} \beta_{i}p_{\beta}(x,t)=x_{i}\frac{\partial}{\partial x_{i}}p_{\beta }(x,t)+\sum_{\alpha}\alpha_{i}b_{\alpha}(t)\partial_{x}^{\alpha}p_{\beta }(x,t)\,. \label{genpde} \end{equation} (We may sum over \textit{all} $\alpha$ appearing in $\mathcal{L}$ since $\partial_{x}^{\alpha}p_{\beta}=0$ whenever $\alpha\leq\beta$ does not hold.) \section{Homogeneous Parabolic Operators} \qquad We specialize to parabolic partial differential operators having constant coefficients, and involving space derivatives only of the highest order $\ell$. The general form is \begin{equation} \mathcal{L}u(x,t)=u_{t}(x,t)-\sum_{\left| \alpha\right| =\ell}a_{\alpha }\partial_{x}^{\alpha}u(x,t)=u_{t}(x,t)-Q(\partial_{x})u(x,t)\,, \label{homogop} \end{equation} where $Q$ is the polynomial \begin{equation} Q(z)=\sum_{\left| \alpha\right| =\ell}a_{\alpha}z^{\alpha}\,. \label{homogq} \end{equation} The parabolicity condition requires that, for all $z\in\mathbb{R}^{n}$ and some $\delta>0$, \begin{equation} \operatorname{Re}\,Q(iz)=\operatorname{Re}\sum_{\left| \alpha\right| =\ell }a_{\alpha}\left( iz\right) ^{\alpha}\leq-\delta\left| z\right| ^{\ell }\,. \label{parcond} \end{equation} This condition dictates that the order $\ell$ of the operator be an even number, as otherwise $\operatorname{Re}\,Q(-iz)=-\operatorname{Re}\,Q(iz)$. According to chapter 9 of the book of Friedman \cite{FRI1}, a \textit{fundamental solution} for $\mathcal{L}$ is given for $x\in \mathbb{R}^{n}$ and$\;t>0$ as the function \begin{equation} K(x,t):=\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n}}e^{ix\cdot z} e^{tQ(iz)}\;dz\quad \text{.} \label{fundsol} \end{equation} Condition (\ref{parcond}) ensures that this integral converges absolutely, and that $K$ can be differentiated under the integral arbitrarily with respect to $x$ and $t$. From the formulas \begin{align*} \partial_{x}^{\alpha}K(x,t) & =\left( 2\pi\right) ^{-n}\int_{\mathbb{R} ^{n}}e^{ix\cdot z}\left( iz\right) ^{\alpha}e^{tQ(iz)}\;dz\,,\\ \partial_{t}K(x,t) & =\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n} }e^{ix\cdot z}Q(iz)e^{tQ(iz)}\;dz \end{align*} it follows that $\mathcal{L}K=0$ in the upper half-space where $t>0$. Since $\ell$ is even we have $Q(-z)=Q(z)$, and consequently from (\ref{fundsol}) that \begin{equation} K(-x,t)=K(x,t)\,. \label{evenfcn} \end{equation} As also demonstrated in \cite{FRI1}, for any multi-index $\alpha$ we have a bound \begin{equation} \left| \partial_{x}^{\alpha}K(x,t)\right| \leq\frac{C_{\alpha}}{t^{\left( n+\left| \alpha\right| \right) /\ell}}\exp\left[ -C_{\alpha}\left( \frac{\left| x\right| ^{\ell}}{t}\right) ^{1/(\ell-1)}\right] \,, \label{fundest} \end{equation} where $C_{\alpha}$ is a constant depending on $\alpha$, the coefficients of $\mathcal{L}$, and the modulus of parabolicity $\delta$. From this estimate and the differential equation $K_{t}=Q\left( \partial_{x}\right) K$ it follows that, for all derivatives $\partial_{x}^{\alpha}\partial_{t}^{j}K$, \[ \lim_{t\rightarrow0^{+}}\partial_{x}^{\alpha}\partial_{t}^{j} K(x,t)=0\quad \text{(}x\neq0\text{)\ \ \ }. \] Therefore, if we extend the definition of $K$ to all of $\mathbb{R}^{n} \times\mathbb{R}$ by setting \[ K(x,t)\equiv0\,,\;\text{for }x\in\mathbb{R}^{n}\text{ and }t\leq0\,, \] then $K$ is of class $C^{\infty}$ in the region $\left( \mathbb{R}^{n} \times\mathbb{R}\right) -\left\{ (0,0)\right\} $, and $\mathcal{L}K=0$ there. 
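For orientation, in the simplest parabolic case $\mathcal{L}=\mathcal{H}_{1,n}=\partial_{t}-\Delta$ we have $Q(iz)=-\left| z\right| ^{2}$, so that (\ref{parcond}) holds with $\ell=2$ and $\delta=1$, and the integral (\ref{fundsol}) evaluates to the Gauss--Weierstrass kernel
\[
K(x,t)=\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n}}e^{ix\cdot z}e^{-t\left| z\right| ^{2}}\,dz=\left( 4\pi t\right) ^{-n/2}e^{-\left| x\right| ^{2}/(4t)}\quad \text{(}t>0\text{)\ ,}
\]
for which the properties (\ref{evenfcn}) and (\ref{fundest}) can be read off directly.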
As we have seen, since $\mathcal{L}$ has constant coefficients and no zero order derivative, the solutions \begin{equation} p_{\beta}(x,t):=\beta!\sum_{\overline{\alpha}\cdot\sigma\leq\beta} \frac{a^{\sigma}x^{\beta-\overline{\alpha}\cdot\sigma}t^{\left| \sigma\right| }}{\sigma!\left( \beta-\overline{\alpha}\cdot\sigma\right) !}\, \label{parpolys} \end{equation} are polynomials in $x$ and $t$. By the theory in \cite{FRI1} (see in particular Theorems 3 and 6 of chapter 9), each solution $p_{\beta}$, being of polynomial growth with respect to $x$, can be represented in any region of the form $\mathbb{R}^{n}\times(s,\infty)$, $s\in\mathbb{R}$, by the integral \begin{equation} p_{\beta}(x,t)=\int_{\mathbb{R}^{n}}K(x-y,t-s)\;p_{\beta}(y,s)\;dy\,. \label{polyrep1} \end{equation} (In fact, it is shown in \cite{FRI1} that this representation is valid when $p_{\beta}$ is replaced by more general solutions $u(x,t)$, of certain restricted exponential growth in the $x$ variable.) Taking in particular $s=0 $, and recalling that $p_{\beta}(x,0)=x^{\beta}$, we have for $x\in \mathbb{R}^{n}$ and $t>0$ the formula \begin{equation} p_{\beta}(x,t)=\int_{\mathbb{R}^{n}}K(x-y,t)\;y^{\beta}\;dy\,. \label{polyrep2} \end{equation} When $\beta$ is the zero multi-index we have $p_{0}\equiv1$ and thus \begin{equation} x\in\mathbb{R}^{n},t>0\Longrightarrow\int_{\mathbb{R}^{n}} K(x-y,t)\;dy=1\quad \text{.} \label{fundint} \end{equation} Following the example of Rosenbloom and Widder \cite{RW1}, as well as of other authors, we introduce \textit{functions} $\left\{ q_{\beta}\right\} $ \textit{associated with the polynomials} $\{ p_\beta \}$, defined in $\mathbb{R}^{n}\times\mathbb{R}$ according to \begin{equation} q_{\beta}(x,t):=\left( -1\right) ^{\left| \beta\right| }\partial _{x}^{\beta}K(x,t)\,, \label{assocfcns} \end{equation} or more precisely, \begin{align} q_{\beta}(x,t) & =\left( -1\right) ^{\left| \beta\right| }\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n}}e^{ix\cdot z}\left( iz\right) ^{\beta}e^{tQ(iz)}\;dz\quad \text{, if }t>0\,,\label{assocform}\\ q_{\beta}(x,t) & =0\quad \text{, if }t\leq0\,.\nonumber \end{align} As a space derivative of the fundamental solution $K$, each $q_{\beta}$ is of class $C^{\infty}$ in $\left( \mathbb{R}^{n}\times\mathbb{R}\right) -\left\{ (0,0)\right\} $ and solves there \[ \mathcal{L}q_{\beta}(x,t)=0\,. \] Also, (\ref{assocfcns}) implies immediately that \begin{equation} \partial_{x}^{\alpha}q_{\beta}=\left( -1\right) ^{\left| \alpha\right| }q_{\alpha+\beta}\,. \label{qderiv} \end{equation} \begin{remark} Rosenbloom and Widder \cite{RW1} defined associate functions -- which they call $\left\{ w_{\beta}\right\} $ -- slightly differently, setting $w_{\beta }=\left( -2\right) ^{\left| \beta\right| }\partial_{x}^{\beta}K$, so that the associate functions turn out to be the Appell transforms of the polynomials $\{ p_\beta \}$. Haimo and Markett \cite{HM1} followed this convention as well. However, for higher order operators there is no apparent analog of the Appell transform, and we have found that omitting this extra factor of $2$, which no longer carries any special meaning, greatly simplifies later expansion formulas. We use the notation $q_{\beta}$ instead of $w_{\beta}$ to point out this subtle distinction between our associate functions and those of \cite{RW1} and \cite{HM1}. \end{remark} The next several results are analogous to results of \cite{RW1}; they will be useful in developing a theory of expansion of general solutions of $\mathcal{L}u=0$ in terms of the polynomials $\{ p_\beta \}$.
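To make the associate functions concrete, consider again the one-dimensional heat operator, for which $K(x,t)=\left( 4\pi t\right) ^{-1/2}e^{-x^{2}/(4t)}$ when $t>0$. In this case (\ref{assocfcns}) gives, for example,
\[
q_{0}(x,t)=K(x,t)\,,\quad q_{1}(x,t)=-\partial_{x}K(x,t)=\frac{x}{2t}K(x,t)\,,\quad
q_{2}(x,t)=\left( \frac{x^{2}}{4t^{2}}-\frac{1}{2t}\right) K(x,t)\,,
\]
each a solution of the heat equation for $t>0$.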
\begin{proposition} For the parabolic operator $\mathcal{L}$ of (\ref{homogop}), and for $x,y\in\mathbb{R}^{n}$ and $t\in\mathbb{R}$, \begin{equation} K(x-y,t)=\sum_{\beta}q_{\beta}(x,t)\frac{y^{\beta}}{\beta!}\,. \label{fundexp} \end{equation} \end{proposition} \begin{proof} If $t\leq0$ then both sides vanish; thus we need only consider $t>0$, in which case (\ref{fundsol}) gives \begin{align*} K(x-y,t) & =\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n}}e^{ix\cdot z}e^{-iy\cdot z}e^{tQ(iz)}\;dz\\ & =\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n}}e^{ix\cdot z}\sum_{\beta }\frac{\left( -y\right) ^{\beta}\left( iz\right) ^{\beta}}{\beta !}e^{tQ(iz)}\;dz\,. \end{align*} Condition (\ref{parcond}), along with \[ \sum_{\beta}\left| \frac{y^{\beta}z^{\beta}}{\beta!}\right| \leq\sum_{\beta }\frac{\left| y\right| ^{\left| \beta\right| }\left| z\right| ^{\left| \beta\right| }}{\beta!}=e^{n\left| y\right| \left| z\right| }\,, \] confirms that we may interchange integration with summation to obtain \[ K(x-y,t)=\sum_{\beta}\frac{y^{\beta}}{\beta!}\left( -1\right) ^{\left| \beta\right| }\left( 2\pi\right) ^{-n}\int_{\mathbb{R}^{n}}e^{ix\cdot z}\left( iz\right) ^{\beta}e^{tQ(iz)}\;dz\,, \] and thus (\ref{fundexp}) by use of (\ref{assocform}). \end{proof} \begin{theorem} For the parabolic operator $\mathcal{L}$ of (\ref{homogop}), with polynomial solutions $\{ p_\beta \}$ and associated functions $\left\{ q_{\beta}\right\} $, we have for $t>0$ the biorthogonality relations \begin{equation} \int_{\mathbb{R}^{n}}p_{\beta}(x,-t)q_{\alpha}(x,t)\;dx=\beta!\;\delta _{\alpha,\beta}=\left\{ \begin{array} [c]{cc} 0\quad \text{,} & \text{if }\alpha\neq\beta,\\ \alpha!\quad \text{,} & \text{if }\alpha=\beta. \end{array} \right. \label{orthog} \end{equation} \end{theorem} \begin{proof} Integration by parts, as justified by the bound (\ref{fundest}), gives \begin{align*} \int_{\mathbb{R}^{n}}p_{\beta}(x,-t)q_{\alpha}(x,t)\;dx & =\left( -1\right) ^{\left| \alpha\right| }\int_{\mathbb{R}^{n}}p_{\beta }(x,-t)\partial_{x}^{\alpha}K(x,t)\;dx\\ & =\int_{\mathbb{R}^{n}}\partial_{x}^{\alpha}p_{\beta}(x,-t)K(x,t)\;dx\,. \end{align*} If $\alpha_{i}>\beta_{i}$ for some $i$, then (\ref{difformula}) shows that $\partial_{x}^{\alpha}p_{\beta}\equiv0$ and hence the integral vanishes. If $\alpha=\beta$ then (\ref{difformula}) gives $\partial_{x}^{\alpha}p_{\beta }=\alpha!$, and then use of (\ref{evenfcn}) and (\ref{fundint}) leads to \[ \int_{\mathbb{R}^{n}}p_{\alpha}(x,-t)q_{\alpha}(x,t)\;dx=\alpha!\int _{\mathbb{R}^{n}}K(0-x,t)\;dx=\alpha!\,. \] The only case left to consider is $\alpha\leq\beta$ with $\alpha_{i}<\beta _{i}$ for some $i$. Then (\ref{difformula}) and the above integration by parts formula give \[ \int_{\mathbb{R}^{n}}p_{\beta}(x,-t)q_{\alpha}(x,t)\;dx=\frac{\beta!}{\left( \beta-\alpha\right) !}\int_{\mathbb{R}^{n}}p_{\beta-\alpha} (x,-t)K(x,t)\;dx\,. \] We apply (\ref{polyrep1}) to the polynomial $p_{\beta-\alpha}$, setting $x=0$, $t=0$, $s=-t$, and use the fact that $K$ is an even function in $x$ to find that \[ p_{\beta-\alpha}(0,0)=\int_{\mathbb{R}^{n}}K(-y,t)\;p_{\beta-\alpha }(y,-t)\;dy=\int_{\mathbb{R}^{n}}p_{\beta-\alpha}(x,-t)K(x,t)\;dx\,. \] But (\ref{parpolys}) shows that $p_{\beta-\alpha}(0,0)=0$ if $\beta>\alpha$. 
\end{proof} \begin{theorem} The functions $\left\{ q_{\beta}\right\} $ solve the differential equations \begin{equation} \left( \beta_{k}+1\right) q_{\beta}(x,t)+x_{k}\frac{\partial}{\partial x_{k}}q_{\beta}(x,t)+t\sum_{\left| \alpha\right| =\ell}\alpha_{k}a_{\alpha }\partial_{x}^{\alpha}q_{\beta}(x,t)=0\,, \label{qeqn1} \end{equation} as well as the algebraic equations \begin{equation} \left( \beta_{k}+1\right) q_{\beta}(x,t)-x_{k}q_{\beta+e_{k}}(x,t)+\left( -1\right) ^{\ell}t\sum_{\left| \alpha\right| =\ell}\alpha_{k}a_{\alpha }q_{\beta+\alpha}(x,t)=0\,. \label{qeqn2} \end{equation} \end{theorem} \begin{proof} Since $q_{\beta}(x,t)\equiv0$ if $t\leq0$, we need only verify (\ref{qeqn1}) for $t>0$. It is sufficient to show that the functions \[ \psi_{\beta}(x,t):=\left( -1\right) ^{\left| \beta\right| }\left( 2\pi\right) ^{n}q_{\beta}(x,t)=\int_{\mathbb{R}^{n}}e^{ix\cdot z}\left( iz\right) ^{\beta}e^{tQ(iz)}\;dz \] solve the same equation. Condition (\ref{parcond}) ensures that we may differentiate under the integral and then integrate by parts to obtain \begin{align*} & \left( \beta_{k}+1\right) \psi_{\beta}(x,t)+x_{k}\frac{\partial}{\partial x_{k}}\psi_{\beta}(x,t)\\ & =\left( \beta_{k}+1\right) \psi_{\beta}(x,t)-i\int_{\mathbb{R}^{n} }\left( \frac{\partial}{\partial z_{k}}e^{ix\cdot z}\right) \left( iz\right) ^{\beta+e_{k}}e^{tQ(iz)}\;dz\\ & =i\int_{\mathbb{R}^{n}}e^{ix\cdot z}\left( iz\right) ^{\beta+e_{k} }e^{tQ(iz)}t\frac{\partial}{\partial z_{k}}Q(iz)\;dz\\ & =i\int_{\mathbb{R}^{n}}e^{ix\cdot z}\left( iz\right) ^{\beta+e_{k} }e^{tQ(iz)}t\sum_{\left| \alpha\right| =\ell}i\alpha_{k}a_{\alpha}\left( iz\right) ^{\alpha-e_{k}}\;dz\\ & =-t\sum_{\left| \alpha\right| =\ell}\alpha_{k}a_{\alpha}\partial _{x}^{\alpha}\psi_{\beta}(x,t)\,. \end{align*} Equation (\ref{qeqn2}) follows immediately from (\ref{qeqn1}) and (\ref{qderiv}). \end{proof} \ \ Formulas (\ref{qeqn1}) and (\ref{qeqn2}) are analogous to (4.11) and (4.22), respectively, of \cite{HM1}. \section{Analogs of Hermite Polynomials} \qquad We let $\mathcal{L}$ remain the parabolic operator of the previous section, \begin{equation} \mathcal{L}u(x,t)=u_{t}(x,t)-\sum_{\left| \alpha\right| =\ell}a_{\alpha }\partial_{x}^{\alpha}u(x,t)\,, \label{homogop2} \end{equation} with constant coefficients and only highest order terms. We take advantage of the homogeneity of the equation to make some further observations. First we note that solutions of $\mathcal{L}u=0$ are preserved under a change of variables $\left( x,t\right) \longrightarrow\left( \lambda x,\lambda^{\ell }t\right) $, -- that is, if $\mathcal{L}u=0$ and $v(x,t)=u\left( \lambda x,\lambda^{\ell}t\right) $ with $\lambda\in\mathbb{R}$, then also $\mathcal{L}v=0$. Moreover, (\ref{homogq}) gives for $Q$ the identity \[ Q(\lambda z)=\lambda^{\ell}Q(z)\,, \] and then formula (\ref{fundsol}) for the fundamental solution yields \begin{equation} K(\lambda x,\lambda^{\ell}t)=\lambda^{-n}K(x,t)\,. \label{fundhomog} \end{equation} From (\ref{parpolys}) we find that \[ p_{\beta}(\lambda x,\lambda^{\ell}t)=\beta!\sum_{\overline{\alpha}\cdot \sigma\leq\beta}\frac{a^{\sigma}\lambda^{\left| \beta\right| -\left| \overline{\alpha}\cdot\sigma\right| }x^{\beta-\overline{\alpha}\cdot\sigma }\lambda^{\ell\left| \sigma\right| }t^{\left| \sigma\right| }} {\sigma!\left( \beta-\overline{\alpha}\cdot\sigma\right) !}\,. 
\] But since $\left| \alpha\right| =\ell$ for each $\alpha$ in (\ref{homogop2} ), in the summation we have \[ \left| \overline{\alpha}\cdot\sigma\right| =\left| \sum_{\left| \alpha\right| =\ell}\alpha\sigma_{\alpha}\right| =\sum_{\left| \alpha\right| =\ell}\left| \alpha\right| \sigma_{\alpha}=\ell\sum_{\left| \alpha\right| =\ell}\sigma_{\alpha}=\ell\left| \sigma\right| \,, \] and thus \begin{equation} p_{\beta}(\lambda x,\lambda^{\ell}t)=\lambda^{\left| \beta\right| }p_{\beta }(x,t)\,. \label{pformula} \end{equation} Finally, from (\ref{assocfcns})\ and (\ref{fundhomog}) we derive \begin{equation} q_{\beta}\left( \lambda x,\lambda^{\ell}t\right) =\lambda^{-n-\left| \beta\right| }q_{\beta}(x,t)\,. \label{qformula} \end{equation} For multi-indices $\beta$ in $\mathbb{R}^{n}$ we introduce functions \begin{equation} h_{\beta}(x):=p_{\beta}(x,-1)\;\,,\quad \;k_{\beta}(x):=q_{\beta }(x,1)\,. \label{homogdefs} \end{equation} Obviously each $h_{\beta}$ is a polynomial in $x$ of degree $\left| \beta\right| $. For $t<0$ we take $\lambda=(-t)^{1/\ell}$ in (\ref{pformula}) to obtain \[ p_{\beta}(x,t)=p_{\beta}\left( \lambda(x/\lambda),\lambda^{\ell} \cdot(-1)\right) =\lambda^{\left| \beta\right| }p_{\beta}\left( x/\lambda,-1\right) \,, \] and thus \begin{equation} p_{\beta}(x,t)=\left( -t\right) ^{\left| \beta\right| /\ell}h_{\beta }\left( \frac{x}{\left( -t\right) ^{1/\ell}}\right) \quad \text{(} t<0\text{)\ \ .} \label{reversep} \end{equation} In a similar manner for $t>0$ we take $\lambda=t^{1/\ell}$ in (\ref{qformula}) and derive \begin{equation} q_{\beta}(x,t)=t^{-\left( n+\left| \beta\right| \right) /\ell}k_{\beta }\left( \frac{x}{t^{1/\ell}}\right) \quad \text{(}t>0\text{)\ \ .} \label{reverseq} \end{equation} The orthogonality relation (\ref{orthog}) for $p_{\beta}$ and $q_{\alpha}$ specializes when $t=1$ to \begin{equation} \int_{\mathbb{R}^{n}}h_{\beta}(x)k_{\alpha}(x)\;dx=\beta!\;\delta _{\alpha,\beta}\,. \label{orthog2} \end{equation} \begin{remark} For the special case of the one-dimensional heat equation, when $\ell=2$ and each $\beta$ is a nonnegative integer, it turns out (see \cite{RW1,WID3}) that the classical Hermite polynomials $H_{\beta}$ are related to the one-dimensional heat polynomials by the equation \begin{equation} p_{\beta}(x,t)=\left( -t\right) ^{\beta/2}H_{\beta}\left( \frac{x}{\left( -4t\right) ^{1/2}}\right) \,. \label{hermites} \end{equation} Comparing this equation with (\ref{reversep}), we see that in the case of the one-dimensional heat equation our polynomials $\left\{ h_{\beta}\right\} $ are related to the Hermite polynomials according to $H_{\beta}(x)=h_{\beta }(2x)$. We choose to work with the functions $\left\{ h_{\beta}\right\} $ because the extra factor $2$, of no particular use for higher order equations, adds unnecessary complications to subsequent calculations. In the case of the higher order heat equation of order $\ell=2q$ in one space variable, Haimo and Markett \cite{HM1} chose to use (\ref{hermites}) to define higher order Hermite polynomials based on the relation \[ p_{\beta}(x,t)=\left( -t\right) ^{\beta/\ell}H_{\beta}\left( \frac {x}{\left( -4t\right) ^{1/\ell}}\right) \,. \] The relation between our polynomials and those of Haimo-Markett is \[ H_{\beta}(x)=h_{\beta}\left( 4^{1/\ell}x\right) =p_{\beta}\left( 4^{1/\ell }x,-1\right) \,. \] It is perhaps this formula that would be the most direct generalization of the Hermite polynomials to higher order parabolic equations in arbitrary space dimensions.
\end{remark} We see from (\ref{genconst}) and (\ref{genseries}) that a generating function for the polynomials $\left\{ h_{\beta}\right\} $ is \[ G(x,-1,z)=e^{x\cdot z}e^{-Q(z)}=\sum_{\beta}p_{\beta}(x,-1)\frac{z^{\beta} }{\beta!}=\sum_{\beta}h_{\beta}(x)\frac{z^{\beta}}{\beta!}\,. \] From (\ref{genpde}) with $b_{\alpha}=ta_{\alpha}$, and from (\ref{recursion}) and (\ref{homogdefs}), it can be checked that the functions $\left\{ h_{\beta}\right\} $ solve the differential equation \begin{equation} \beta_{i}h_{\beta}(x)=x_{i}\frac{\partial}{\partial x_{i}}h_{\beta} (x)-\sum_{\left| \alpha\right| =\ell}\alpha_{i}a_{\alpha}\partial _{x}^{\alpha}h_{\beta}(x)\,, \label{last1} \end{equation} as well as the recursion formula \begin{equation} \beta_{i}h_{\beta}(x)=\beta_{i}x_{i}h_{\beta-e_{i}}(x)-\beta!\sum_{\left| \alpha\right| =\ell,\alpha\leq\beta}\frac{\alpha_{i}a_{\alpha}}{\left( \beta-\alpha\right) !}h_{\beta-\alpha}(x)\,. \label{last2} \end{equation} Likewise, from (\ref{qeqn1}), (\ref{qeqn2}), and (\ref{homogdefs}) we derive \begin{equation} \left( \beta_{i}+1\right) k_{\beta}(x)+x_{i}\frac{\partial}{\partial x_{i} }k_{\beta}(x)+\sum_{\left| \alpha\right| =\ell}\alpha_{i}a_{\alpha} \partial_{x}^{\alpha}k_{\beta}(x)=0\,, \label{last3} \end{equation} \begin{equation} \left( \beta_{i}+1\right) k_{\beta}(x)-x_{i}k_{\beta+e_{i}}(x)+\left( -1\right) ^{\ell}\sum_{\left| \alpha\right| =\ell}\alpha_{i}a_{\alpha }k_{\beta+\alpha}(x)=0\,. \label{last4} \end{equation} \begin{thebibliography}{99} \bibitem{BRA1}L. R. Bragg, \emph{The radial heat polynomials and related functions}, Trans. Amer. Math. Soc. 119 (1965), 270-290. \bibitem{CH1}F. M. Cholewinski and D. T. Haimo, \emph{Expansions in terms of Laguerre heat polynomials and of their temperature transforms}, J. Math. Anal. 24 (1971), 285-322. \bibitem{FIT1}A. Fitouhi, \emph{Heat polynomials for a singular operator on (0,}$\infty\emph{)}$, Constr. Approx. 5 (1989), 241-270. \bibitem{FRI1}A. Friedman, \emph{Partial Differential Equations of Parabolic Type}, Prentice-Hall, Englewood Cliffs, N.J., 1964. \bibitem{HAI0}D. T. Haimo, $\emph{L}^{2}$\emph{\ expansions in terms of generalized heat polynomials and of their Appell transforms}, Pacific J. Math. 15 (1965), 865-875. \bibitem{HAI1}D. T. Haimo, \emph{Expansions in terms of generalized heat polynomials and of their Appell transforms}, J. Math. Mechan. 15 (1966), 735-758. \bibitem{HAI2}D. T. Haimo, \emph{Generalized temperature functions}, Duke Math. J. 33 (1966), 305-322. \bibitem{HAI3}D. T. Haimo, \emph{Series expansions of generalized temperature functions in N dimensions}, Canad. J. Math. 18 (1966), 794-802. \bibitem{HAI4}D. T. Haimo, \emph{Series representation of generalized temperature functions}, SIAM J. Appl. Math. 15 (1967), 359-367. \bibitem{HAI5}D. T. Haimo, \emph{Series expansions and integral representations of generalized temperatures}, Illinois J. Math. 14 (1970), 621-629. \bibitem{HAI6}D. T. Haimo, \emph{Homogeneous generalized temperatures}, SIAM J. Math. Anal. 11 (1980), 473-487. \bibitem{HM1}D. T. Haimo and C. Markett, \emph{A representation theory for solutions of a higher order heat equation, I}, J. Math. Anal. Appl. 168 (1992), 89-107. \bibitem{HM2}D. T. Haimo and C. Markett, \emph{A representation theory for solutions of a higher order heat equation, II}, J. Math. Anal. Appl. 168 (1992), 289-305. \bibitem{HIM1}G. N. Hile and C. P. Mawata, \emph{The behaviour of the heat operator on weighted Sobolev spaces}, Trans. Amer. Math. Soc. 350 (1998), 1407-1428. \bibitem{HS}G. N. 
Hile and A. Stanoyevitch, \emph{Expansions of solutions of higher order evolution equations in series of generalized heat polynomials}, in preparation. \bibitem{KEM1}H. Kemnitz, \emph{Polynomial expansions for solutions of} $D_{x}^{r}u(x,t)=D_{t}u(x,t)$, $r=2,3,4,\ldots$, SIAM J. Math. Anal. 13 (1982), 640-650. \bibitem{LO1}C. Y. Lo, \emph{Polynomial expansions of solutions of } $u_{xx}+\varepsilon^{2}u_{tt}=u_{t}$, J. Reine Angew. Math. 253 (1972), 88-103. \bibitem{RW1}P. C. Rosenbloom and D. V. Widder, \emph{Expansions in terms of heat polynomials and associated functions}, Trans. Amer. Math. Soc. 92 (1959), 220-266. \bibitem{WID1}D. V. Widder, \emph{Series expansions of solutions of the heat equation in n dimensions}, Ann. Mat. Pura Appl. 55 (1961), 389-410. \bibitem{WID2}D. V. Widder, \emph{Expansions in series of homogeneous temperature functions of the first and second kinds}, Duke Math. J. 36 (1969), 495-510. \bibitem{WID3}D. V. Widder, \emph{The Heat Equation}, Academic Press, New York, San Francisco, London, 1975. \end{thebibliography} \end{document}