@unpublished{cogprints1287,
  author   = {Rachkovskij, Dmitri A. and Kussul, Ernst M.},
  title    = {Building Large-Scale Hierarchical Models of the World with Binary Sparse Distributed Representations},
  month    = nov,
  year     = {2000},
  note     = {Available at CogPrints},
  url      = {http://cogprints.org/1287/},
  keywords = {analogy, analogical mapping, analogical retrieval, APNN, associative-projective neural networks, binary coding, binding, categories, chunking, compositional distributed representations, concepts, concept hierarchy, connectionist symbol processing, context-dependent thinning, distributed memory, distributed representations, Hebb, long-term memory, nested representations, neural assemblies, part-whole hierarchy, representation of structure, sparse coding, taxonomy hierarchy, thinning, working memory, world model},
  abstract = {Many researchers agree on the basic architecture of the ``world model'' where knowledge about the world required for organization of agent's intelligent behavior is represented. However, most proposals on possible implementation of such a model are far from being plausible both from computational and neurobiological points of view. Implementation ideas based on distributed connectionist representations offer a huge information capacity, flexibility of similarity representation, and possibility to use a distributed neural network memory. However, for a long time distributed representations suffered from the ``superposition catastrophe''. Local representations are vivid, pictorial and easily interpretable, allow for an easy manual construction of hierarchical structures and an economical computer simulation of toy tasks. The problems of local representations show up with scaling to the real world models, and it is unclear how to solve them under reasonable requirements imposed on memory size and speed. We discuss the architecture of Associative-Projective Neural Networks (APNNs) that is based on binary sparse distributed representations of fixed dimensionality for items of various complexity and generality, and provides a promise for scaling up to the full-sized model of the real world. An on-the-fly binding procedure proposed for APNNs overcomes the superposition catastrophe, permitting representation of the order and grouping of structure components. These representations allow a simple estimation of structures' similarity, as well as finding various kinds of associations based on their context-dependent similarity. Structured distributed auto-associative neural network is used as long-term memory, wherein representations of items organized into part-whole (compositional) and concept (generalization) hierarchies are built. Examples of schematic APNN architectures and processes for recognition, prediction, reaction, analogical reasoning, and other tasks required for functioning of an intelligent system, as well as APNN implementations, are considered.},
}