Research Group of Prof. Dr. J. Garcke
Institute for Numerical Simulation


@article{Garcke.Griebel.Thess:2000,
  author = {J. Garcke and M. Griebel and M. Thess},
  title = {Data mining with sparse grids},
  year = {2001},
  journal = {Computing},
  volume = {67},
  number = {3},
  doi = {10.1007/s006070170007},
  pages = {225--253},
  optnote = {also as SFB 256 Preprint 675, Institut f\"ur Angewandte
		  Mathematik, Universit\"at Bonn, 2000},
  ps = {http://wissrech.ins.uni-bonn.de/research/pub/garcke/datamine15.ps.gz},
  pdf = {http://link.springer-ny.com/link/service/journals/00607/papers/1067003/10670225.pdf},
  http = {http://link.springer-ny.com/link/service/journals/00607/bibs/1067003/10670225.htm},
  abstract = {We present a new approach to the classification problem
		  arising in data mining. It is based on the regularization
		  network approach but, in contrast to the other methods
		  which employ ansatz functions associated to data points, we
		  use a grid in the usually high-dimensional feature space
		  for the minimization process. To cope with the curse of
		  dimensionality, we employ sparse grids. Thus, only
		  $O(h_n^{-1} n^{d-1})$ instead of $O(h_n^{-d})$ grid points
		  and unknowns are involved. Here $d$ denotes the dimension
		  of the feature space and $h_n = 2^{-n} $ gives the mesh
		  size. To be precise, we suggest using the sparse grid
		  combination technique where the classification problem is
		  discretized and solved on a certain sequence of
		  conventional grids with uniform mesh sizes in each
		  coordinate direction. The sparse grid solution is then
		  obtained from the solutions on these different grids by
		  linear combination. In contrast to other sparse grid
		  techniques, the combination method is simpler to use and
		  can be parallelized in a natural and straightforward way.
		  
		  We describe the sparse grid combination technique for the
		  classification problem in terms of the regularization
		  network approach. We then give implementational details
		  and discuss the complexity of the algorithm. It turns out
		  that the method scales linearly with the number of
		  instances, i.e. the amount of data to be classified.
		  Finally we report on the quality of the classifier built by
		  our new method. Here we consider standard test problems
		  from the UCI repository and problems with huge synthetic
		  data sets in up to 9 dimensions. It turns out that our new
		  method achieves correctness rates which are competitive with
		  those of the best existing methods.},
  annote = {article, journal}
}
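
Note: the abstract above describes the sparse grid combination technique, where the problem is solved on a sequence of conventional full grids and the results are combined with alternating binomial coefficients. As an illustration only (not the authors' implementation), the following minimal Python sketch enumerates the component grids and their combination coefficients; the function name combination_grids and the convention of anisotropic levels l with l_i >= 0 and mesh size 2^{-l_i} per direction are assumptions made here for the example, as level-numbering conventions vary in the literature.

    from itertools import product
    from math import comb

    def combination_grids(n, d):
        """Illustrative sketch (assumed convention): the combined solution of
        level n in d dimensions is
            sum_{q=0}^{d-1} (-1)^q * C(d-1, q) * sum_{|l|_1 = n - q} f_l,
        where f_l is the solution on the full grid with mesh size 2^{-l_i}
        in coordinate direction i. Returns (level multi-index, coefficient)
        pairs for all component grids."""
        grids = []
        for q in range(min(d, n + 1)):
            coeff = (-1) ** q * comb(d - 1, q)
            # all multi-indices l with l_i >= 0 and |l|_1 == n - q
            for l in product(range(n - q + 1), repeat=d):
                if sum(l) == n - q:
                    grids.append((l, coeff))
        return grids

    # Example: n = 3, d = 2 reproduces the classical two-dimensional rule
    # f_3^c = sum_{l1+l2=3} f_(l1,l2) - sum_{l1+l2=2} f_(l1,l2).
    for level, coeff in combination_grids(3, 2):
        print(level, coeff)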