@article{oai:kanazawa-u.repo.nii.ac.jp:00001860,
  author  = {積山, 薫},
  journal = {金沢大学文学部論集. 行動科学科篇},
  month   = {Feb},
  note    = {In English-speaking cultures, it has been reported that when auditory speech is presented in synchrony with discrepant visual (lip-read) speech, subjects often report hearing sounds that integrate information from the two modalities (the "McGurk effect"). A cross-language study on the McGurk effect was carried out to examine differences between Japanese and English suggested by Sekiyama and Tohkura (1991). The stimulus materials were ten syllables (/ba, pa, ma, wa, da, ta, na, ga, ka, ra/) pronounced by a Japanese and an American speaker. The ten auditory and ten visual syllables pronounced by each speaker were cross-dubbed, resulting in 100 auditory-visual stimuli. The Japanese syllables were presented to 14 Japanese and 10 American subjects; the English syllables were presented to different groups of subjects, 12 Japanese and 10 American. The stimuli were presented in both quiet and noise-added conditions. The subjects were asked to check for incompatibility between what they heard and what they saw, as well as to report what they heard. The results showed that the Japanese subjects were more sensitive to auditory-visual discrepancy and less prone to the McGurk effect than the Americans. The size of the McGurk effect was highly negatively correlated with the frequency of reported incompatibility. These results demonstrate that Japanese listeners tend to keep the two modalities of information separate unless visual support is necessary, whereas Americans integrate them readily., 金沢大学文学部},
  pages   = {29--62},
  title   = {McGurk Effect and Incompatibility: A Cross-Language Study on Auditory-Visual Speech Perception},
  volume  = {14},
  year    = {1994}
}