Research Group of Prof. Dr. J. Garcke
Institute for Numerical Simulation


@inproceedings{Boerm.Garcke:2007,
  author = {S. B\"orm and J. Garcke},
  title = {Approximating Gaussian Processes with ${\mathcal H}^2$-matrices},
  booktitle = {Proceedings of the 18th European Conference on Machine
		  Learning (ECML 2007), Warsaw, Poland, September 17-21,
		  2007},
  year = {2007},
  editor = {Joost N. Kok and Jacek Koronacki and Ramon L\'opez de
		  M\'antaras and Stan Matwin and Dunja Mladeni\'c and Andrzej
		  Skowron},
  volume = {4701},
  pages = {42--53},
  abstract = {Computing the exact solution of Gaussian process
		  regression requires ${\mathcal O}(N^3)$ operations for
		  direct and ${\mathcal O}(N^2)$ for iterative methods, since
		  it involves a densely populated kernel matrix of size $N
		  \times N$, where $N$ denotes the number of data points.
		  This makes large scale learning problems intractable by
		  standard techniques. We propose an alternative approach:
		  the kernel matrix is replaced by a data-sparse
		  approximation, called an ${\mathcal H}^2$-matrix. This
		  matrix can be represented by only ${\mathcal O}(N m)$ units
		  of storage, where $m$ is a parameter controlling the
		  accuracy of the approximation, while the computation of the
		  ${\mathcal H}^2$-matrix scales with ${\mathcal O}(N m \log
		  N)$. Practical experiments demonstrate that our scheme
		  leads to significant reductions in storage requirements and
		  computing times for large data sets in lower dimensional
		  spaces.},
  doi = {10.1007/978-3-540-74958-5_8},
  pdf = {http://garcke.ins.uni-bonn.de/research/pub/gpWithH2.pdf},
  series = {Lecture Notes in Artificial Intelligence},
  publisher = {Springer}
}
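
A minimal sketch of the bottleneck the abstract describes, and of the general idea of replacing the dense kernel matrix by a data-sparse approximation. The stand-in below uses a global Nystroem-style low-rank factorisation with m inducing points; this is not the paper's ${\mathcal H}^2$-matrix construction (which partitions the matrix hierarchically and compresses admissible blocks locally), but it illustrates how a rank parameter m trades accuracy for roughly O(N m) storage. All names, sizes, and parameter values are illustrative assumptions, not taken from the paper.

    import numpy as np

    def gauss_kernel(X, Y, lengthscale=0.5):
        # Dense Gaussian (RBF) kernel matrix between point sets X and Y.
        d2 = ((X[:, None, :] - Y[None, :, :]) ** 2).sum(axis=-1)
        return np.exp(-d2 / (2.0 * lengthscale ** 2))

    rng = np.random.default_rng(0)
    N, noise = 2000, 1e-2
    X = rng.uniform(size=(N, 2))      # low-dimensional inputs, as in the paper's setting
    y = np.sin(4.0 * X[:, 0]) + 0.1 * rng.standard_normal(N)

    # Exact GP regression: factorise the dense N x N kernel matrix.
    # O(N^2) storage and O(N^3) work for the direct solve -- the cost the paper attacks.
    K = gauss_kernel(X, X)
    L = np.linalg.cholesky(K + noise * np.eye(N))
    alpha = np.linalg.solve(L.T, np.linalg.solve(L, y))

    # Data-sparse stand-in: Nystroem low-rank approximation K ~ C W^{-1} C^T
    # built from m inducing points (an illustrative choice, NOT the H^2-matrix algorithm).
    m = 200
    idx = rng.choice(N, size=m, replace=False)
    C = gauss_kernel(X, X[idx])       # N x m block, O(N m) storage
    W = gauss_kernel(X[idx], X[idx])  # m x m block
    # Woodbury identity: (noise*I + C W^{-1} C^T)^{-1} y
    #   = (y - C (noise*W + C^T C)^{-1} C^T y) / noise, costing O(N m^2).
    inner = noise * W + C.T @ C
    alpha_lr = (y - C @ np.linalg.solve(inner, C.T @ y)) / noise

    print("relative difference in weight vectors:",
          np.linalg.norm(alpha - alpha_lr) / np.linalg.norm(alpha))

In the paper's setting the same role is played by the ${\mathcal H}^2$-matrix, whose ${\mathcal O}(N m)$ storage and fast matrix-vector product make direct and iterative solvers affordable for large N.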