Partial Solution Log for HW 23
==============================

Plotting answers, part (i)

> library(MASS)
> hist(geyser$wait, nclass=32, prob=T)
> pts = seq(40, 110, length=101)
> dens1 = c(outer(pts, geyser$wait, function(x,y) dlogis(x,y,.5)) %*%
            rep(1/299,299))
> dens2 = c(outer(pts, geyser$wait, function(x,y) dlogis(x,y,3)) %*%
            rep(1/299,299))
> lines(pts, dens1, lty=2)
> lines(pts, dens2, lty=5)

---------------------------------
Density estimates, part (ii)

  Start by fitting a mixed normal to the log waiting times, then
  transform back to the original scale (via dlnorm) at the end.

> lLk = function(x, pwt, mn, scal, dens = dlogis) {
     invsc = 1/scal
     dnsmat = invsc*matrix(dens(invsc*outer(mn, x, "-")), ncol=length(x))
     sum(log(pwt %*% dnsmat))   }

> tmpft = nlm(function(w) -lLk(log(geyser$wait), c(plogis(w[1]),
       1-plogis(w[1])), w[2:3], exp(w[4:5]), dens=dnorm),
       c(0, log(50), log(80), 0, 0), print.level=1, iterlim=100)
iteration = 0
Step:
[1] 0 0 0 0 0
Parameter:
[1] 0.000000 3.912023 4.382027 0.000000 0.000000
Function Value
[1] 290.7419
Gradient:
[1]   3.99967 -49.32411  17.10582 122.55733 145.38466

iteration = 27
Parameter:
[1] -0.7073342  4.0009645  4.3892868 -2.3041704 -2.4458906
Function Value
[1] -118.1357
Gradient:
[1]  7.815970e-10 -2.287041e-07  4.279813e-07  1.498690e-08 -5.888530e-08

Relative gradient close to zero.  Current iterate is probably solution.

## This gives the fitted 2-component normal mixture, with prob weights
> c(plogis(tmpft$est[1]), 1-plogis(tmpft$est[1]))
[1] 0.3301881 0.6698119
## means 4.00096 4.38929 and
## log stdev's -2.30417 -2.44589
## (so std.dev's = 0.09984, 0.08665)

## Now the 3-component fit:
> tmpft2 = nlm(function(w) -lLk(log(geyser$wait), c(plogis(w[1]),
       (1-plogis(w[1]))*c(plogis(w[2]), 1-plogis(w[2]))), w[3:5],
       exp(w[6:8]), dens=dnorm),
       c(-.7, 0, log(c(54,73,80)), 0, 0, 0), print.level=1, iterlim=100)
iteration = 0
Step:
[1] 0 0 0 0 0 0 0 0
Parameter:
[1] -0.700000  0.000000  3.988984  4.290459  4.382027  0.000000  0.000000
[8]  0.000000
Function Value
[1] 285.2272
Gradient:
[1]   2.1309415  -0.3256010 -25.4599943   2.6885465  11.5075272  86.2527909
[7]  97.0297739  95.2081323

iteration = 47
Parameter:
[1] -1.2579758  0.8699166  4.2238888  4.3998658  3.9655837 -1.7914966 -2.5778584
[8] -2.5495736
Function Value
[1] -119.4960
Gradient:
[1] -6.193250e-06 -2.109815e-05 -4.403482e-05  1.224016e-04  9.365365e-05
[6]  1.518470e-05 -4.008558e-06 -2.453067e-05

Last global step failed to locate a point lower than x.  Either x is an
approximate local minimum of the function, the function is too non-linear
for this algorithm, or steptol is too large.

### This minimization is less stable!

## This gives the fitted 3-component normal mixture, with prob weights
> c(plogis(tmpft2$est[1]), (1-plogis(tmpft2$est[1]))*
     c(plogis(tmpft2$est[2]), 1-plogis(tmpft2$est[2])))
[1] 0.2213225 0.5487561 0.2299214
## means 4.22389 4.39987 3.96558 and
## log stdev's -1.7915 -2.57786 -2.54957
## (so std.dev's = 0.1667 0.0759 0.07812)

> lines(pts, .3301881*dlnorm(pts,4.00096,exp(-2.30417)) +
             .6698119*dlnorm(pts,4.38929,exp(-2.44589)), lty=7)
> lines(pts, .2213225*dlnorm(pts,4.22389,exp(-1.7915)) +
             .5487561*dlnorm(pts,4.39987,exp(-2.57786)) +
             .2299214*dlnorm(pts,3.96558,exp(-2.54957)), lty=9)
> legend(locator(1), legend=c("Ker,.5","Ker,3","MxLgNrm,2","MxLgNrm,3"),
         lty=c(2,5,7,9))

## The picture is already a little cluttered, but the two- and
## three-component lognormal fits are excellent !!
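
---------------------------------
Optional sketches (not run in the session above)

## The kernel estimates dens1 and dens2 in part (i) are just averages of
## logistic kernels centered at each observation.  A small helper packaging
## that idea (the name kdeLogis is illustrative, not part of the assignment);
## kdeLogis(pts, geyser$wait, .5) should reproduce dens1:
kdeLogis = function(x, obs, scal) {
   nobs = length(obs)
   c(outer(x, obs, function(u,v) dlogis(u, v, scal)) %*% rep(1/nobs, nobs))
}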
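
## A sanity check on the 2-component fit in part (ii): rebuild the mixed
## lognormal density directly from tmpft$estimate (instead of hard-coding
## the rounded values) and confirm it integrates to essentially 1.
p1   = plogis(tmpft$estimate[1])
mns  = tmpft$estimate[2:3]
sds  = exp(tmpft$estimate[4:5])
mix2 = function(x) p1*dlnorm(x, mns[1], sds[1]) + (1-p1)*dlnorm(x, mns[2], sds[2])
integrate(mix2, 0, Inf)       ## should be very close to 1
lines(pts, mix2(pts), lty=7)  ## same curve as the hard-coded lines() call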
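
## Since nlm minimized the negative log-likelihood, tmpft$minimum and
## tmpft2$minimum can be compared directly: the three extra parameters of
## the 3-component fit buy only about 1.4 units of log-likelihood, so a
## quick AIC comparison (5 vs. 8 free parameters) would favor the
## 2-component mixture:
c(AIC2 = 2*5 + 2*tmpft$minimum, AIC3 = 2*8 + 2*tmpft2$minimum)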