@inproceedings{cogprints4990,
  author        = {Yuya Hattori and Hideki Kozima and Kazunori Komatani and Tetsuya Ogata and Hiroshi G. Okuno},
  title         = {Robot Gesture Generation from Environmental Sounds Using Inter-modality Mapping},
  booktitle     = {Proceedings of the Fifth International Workshop on Epigenetic Robotics: Modeling Cognitive Development in Robotic Systems},
  editor        = {Luc Berthouze and Fr{\'e}d{\'e}ric Kaplan and Hideki Kozima and Hiroyuki Yano and J{\"u}rgen Konczak and Giorgio Metta and Jacqueline Nadel and Giulio Sandini and Georgi Stojanov and Christian Balkenius},
  series        = {Lund University Cognitive Studies},
  volume        = {123},
  publisher     = {Lund University Cognitive Studies},
  year          = {2005},
  pages         = {139--140},
  keywords      = {iconic gesture generation, inter-modal learning, auditory distance, Keepon robot},
  url           = {http://cogprints.org/4990/},
  abstract      = {We propose a motion generation model in which robots presume the sound source of an environmental sound and imitate its motion. Sharing environmental sounds between humans and robots enables them to share environmental information. It is difficult to transmit environmental sounds in human-robot communications. We approached this problem by focusing on the iconic gestures. Concretely, robots presume the motion of the sound source object and map it to the robot motion. This method enabled robots to imitate the motion of the sound source using their bodies.},
  internal-note = {Entry retyped from @misc (CogPrints auto-export) to @inproceedings; booktitle/series inferred from editors, volume 123, and publisher (EpiRob 2005) -- verify against http://cogprints.org/4990/},
}