@inproceedings{cogprints2124,
          editor = {Pat Langley},
           title = {Solving the Multiple-Instance Problem: A Lazy Learning Approach},
          author = {Jun Wang and Jean-Daniel Zucker},
       booktitle = {Proceedings of the Seventeenth International Conference on Machine Learning (ICML 2000)},
       publisher = {Morgan Kaufmann},
            year = {2000},
           pages = {1119--1125},
        keywords = {multiple-instance problem,
multiple-instance learning,
lazy learning,
nearest neighbor
},
             url = {http://cogprints.org/2124/},
        abstract = {As opposed to traditional supervised learning, multiple-instance learning 
    concerns the problem of classifying a bag of instances, given bags that are 
    labeled by a teacher as being overall positive or negative. Current research 
    mainly concentrates on adapting traditional concept learning to solve this 
    problem. In this paper we investigate the use of lazy learning and Hausdorff 
    distance to approach the multiple-instance problem. We present two variants of 
    the K-nearest neighbor algorithm, called Bayesian-KNN and Citation-KNN, that solve 
    the multiple-instance problem. Experiments on the drug discovery benchmark data 
    show that both algorithms are competitive with the best ones conceived in the 
    concept learning framework. Further work includes exploring a combination of 
    lazy and eager multiple-instance classifiers.}
}
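
The abstract describes lazy, nearest-neighbor classification of bags using the Hausdorff distance. The sketch below (outside the entry, so BibTeX ignores it) illustrates the classical Hausdorff distance between two bags and a simple majority-vote k-NN over bags; it is an assumption-laden illustration, not the paper's exact Bayesian-KNN or Citation-KNN formulation, and the Euclidean point distance and helper names are my own choices.

import numpy as np

def hausdorff(bag_a: np.ndarray, bag_b: np.ndarray) -> float:
    """Classical Hausdorff distance between two bags (rows are instances)."""
    # Pairwise Euclidean distances between every instance in A and every instance in B.
    d = np.linalg.norm(bag_a[:, None, :] - bag_b[None, :, :], axis=-1)
    # h(A, B) = max over a in A of the distance to its nearest b in B; symmetrize.
    return max(d.min(axis=1).max(), d.min(axis=0).max())

def knn_predict(query_bag, train_bags, train_labels, k=3):
    """Label a query bag by majority vote among its k nearest training bags."""
    dists = [hausdorff(query_bag, b) for b in train_bags]
    nearest = np.argsort(dists)[:k]
    votes = [train_labels[i] for i in nearest]
    return max(set(votes), key=votes.count)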