# Winter Storm 2015 Modeling Workshop
# Word Learning Model
# ==explosion.R==
# Defines a parallel word learner with optional cost
# or benefit to future learning from learned words.
# explosion(TOTALWORDS = total words to be learned,
#           MEAN_T2L  = mean number of time steps to learn a word,
#           STD_DV2L  = standard deviation in word difficulty)
# =================================================================
# McMurray, B. (2007). Defusing the childhood vocabulary explosion.
# Science, 317(5838), 631-631.
#
# code author: Rachael Richardson
# rachaelr@umd.edu

explosion <- function(TOTALWORDS = 10000,
                      MEAN_T2L = 4000,
                      STD_DV2L = 800) {
  # Draw one acquisition threshold per word: most words require about
  # MEAN_T2L time steps, with spread STD_DV2L (normally distributed
  # difficulty). rnorm's first argument is the sample count; the
  # original passed a length-TOTALWORDS vector, which rnorm silently
  # interprets as its length. The original's extra sample() reshuffle
  # of iid draws was a distributional no-op and has been dropped.
  vocabulary <- rnorm(TOTALWORDS, MEAN_T2L, STD_DV2L)

  # Plot the sampled acquisition thresholds, one point per word.
  plot(seq_len(TOTALWORDS) - 1, vocabulary,
       type = "p",
       xlab = "word index",
       ylab = "learning threshold (time steps)",
       main = "samples from a vocabulary of normally distributed difficulty")

  # Run the baseline learner on the vocabulary.
  # learner() is defined elsewhere in this project.
  wordsknown <- learner(vocabulary)

  # Plot cumulative vocabulary over time; clip both axes for neatness.
  cutoff <- 6000
  plot(seq_along(wordsknown) - 1, wordsknown,
       type = "l",
       xlim = c(0, cutoff), xlab = "time steps to acquisition",
       ylim = c(0, cutoff), ylab = "total # of words known",
       main = "cumulative vocabulary")

  # Condition: learning a word makes future words cheaper to learn
  # (decreasing cost). Overlay results in blue.
  withbonus <- learner(vocabulary, offset = -0.05)
  lines(seq_along(withbonus) - 1, withbonus, col = "blue")

  # Condition: learning a word makes future words costlier to learn
  # (increasing cost). Overlay results in red.
  withpenalty <- learner(vocabulary, offset = 0.05)
  lines(seq_along(withpenalty) - 1, withpenalty, col = "red")
}