
I am trying to optimize the fit of a model describing the amount of litter collected in a network of 0.5 m^2 litter traps located in a mapped plot of trees with known diameters and species. The model has two components: allometric scaling of litter production with tree diameter, and exponential decay of litter fall with distance from the tree.

tree1.litter = alpha*gamma^2 * DBH^Beta/(2*pi)  *  exp(-gamma*z-delta*DBH)
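
Written out as an R function, the per-tree contribution is (a small helper I'm including just to make the model explicit; it is not taken from the paper):

tree.litter.fun <- function(DBH, z, alpha, Beta, gamma, delta){
    #allometric production (alpha * DBH^Beta * exp(-delta*DBH)) spread over
    #distance z by an exponential kernel with rate gamma, normalized by 2*pi
    alpha * gamma^2 * DBH^Beta / (2*pi) * exp(-gamma*z - delta*DBH)
}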

However, our trap data contain inputs from multiple trees (this is the "missing level" referred to in the title):

Obs.Litter = tree1.litter + tree2.litter + ... + treej.litter + error
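
So the expected value for a given trap is just the per-tree contributions summed within that trap; with toy numbers:

#three trees feeding two traps
toy.tree.lit <- c(1.2, 0.4, 0.7)
toy.trap.id <- c(1, 1, 2)
tapply(toy.tree.lit, toy.trap.id, sum)  #expected litter per trap, before error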

So far the results have been messy, even with simulated data. It seems like there should be enough combinations of diameter and distance to constrain the function reasonably well. The analysis follows a published paper whose approach I am trying to reproduce. I have also tried an analysis of log(Obs.Litter), which I think is the way to go, but I am not sure that the way I have written the log version will perform the way you would expect.
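
For reference, the direction I have in mind for the log version is to treat log(Obs.Litter) as normal around the log of the summed trap expectation. This is only a sketch (whether it is the right formulation is part of my question), and it assumes every trap receives at least one tree and that Obs.Litter is ordered by trap id:

log.Litter.Func.alt <- function(alpha, Beta, gamma, delta, sigma, D, Z, trap.id, Obs.Litter){
    #per-tree expected litter on the natural scale
    mu.tree <- alpha * gamma^2 * D^Beta / (2*pi) * exp(-gamma*Z - delta*D)
    #sum the tree contributions within each trap, then take logs
    log.mu.trap <- log(tapply(mu.tree, trap.id, sum))
    #normal error on the log scale (equivalent to lognormal error on the raw scale)
    -sum(dnorm(log(Obs.Litter), mean = log.mu.trap, sd = sigma, log = TRUE))
}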

At this point I am just looking for advice of any kind (code-based or conceptual) from people with more experience fitting this sort of "hidden process" nonlinear regression or model-fitting problem. Code to simulate the data and the various fitting attempts is included below. I have had more success estimating these parameters with a Bayesian hierarchical model in OpenBUGS, but only with informative priors.

library(plyr)
########################
##Generate Data#########
########################

#True parameter values used to simulate the data
alpha = 5
Beta = 2
gamma = .2
delta = .02

n = 600 #Number of trees
N.trap = 45 #Number of litter traps

D = rlnorm(n, 2)+5  #generate diameters
Z = runif(n, 0, 25) #generate distances
trap.id = sort(sample(1:N.trap, size = n, replace = T)) #assign trees to traps
tree.lit = (2*pi)^-1*alpha*gamma^2*D^Beta  *  exp(-gamma*Z-delta*D) #generate litter
log.tree.lit = -log(2*pi) + log(alpha) + 2*log(gamma) + Beta*log(D) - gamma*Z - delta*D #log-scale version of tree.lit

litter = data.frame(D=D, Z = Z, trap.id = trap.id, tree.lit = tree.lit)
data = ddply(litter, .(trap.id), summarize, trap.lit = sum(tree.lit), n.trees=length(trap.id) )

trap.lit = data[,2] #trap totals (note: no observation error is added to the simulated data)

#####################################
##### Maximum Likelihood Optimization
#####################################
library(bbmle)

log.Litter.Func <- function(alpha, Beta, gamma, delta, sigma, D, Z, N.trap, trap.id, Obs.Litter){

    #per-tree expected litter on the log scale
    log.Expected.Litter.tree = -log(2*pi) + log(alpha) + 2*log(gamma) + Beta*log(D) - gamma*Z - delta*D
    #back-transform and sum within each trap (so the trap-level value is on the natural scale)
    log.Expected.Litter.trap = rep(0, N.trap)
    for(i in 1:N.trap){
        log.Expected.Litter.trap[i] <- sum(exp(log.Expected.Litter.tree[trap.id==i]))
        }
    #negative log-likelihood: lognormal density evaluated at log(Obs.Litter)
    -sum(dlnorm(log(Obs.Litter), meanlog = log.Expected.Litter.trap, sdlog = sigma, log = TRUE))
    }


Litter.Func <- function(alpha, Beta, gamma, delta, sigma, D, Z, N.trap, trap.id, Obs.Litter){

    #per-tree expected litter on the natural scale
    Expected.Litter.tree = 1/(2*pi) * alpha * gamma^2 * D^Beta * exp(-gamma*Z - delta*D)
    #sum the tree contributions falling into each trap
    Expected.Litter.trap = rep(0, N.trap)
    for(i in 1:N.trap){
        Expected.Litter.trap[i] <- sum(Expected.Litter.tree[trap.id==i])
        }
    #negative log-likelihood: additive normal error around the trap totals
    -sum(dnorm(Obs.Litter, Expected.Litter.trap, sd = sigma, log = TRUE))
    }
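
#Sanity check: evaluate both NLL functions at the true simulation values
#before optimizing, using the objects created above
Litter.Func(alpha = 5, Beta = 2, gamma = .2, delta = .02, sigma = 1,
    D = D, Z = Z, N.trap = N.trap, trap.id = litter$trap.id, Obs.Litter = trap.lit)
log.Litter.Func(alpha = 5, Beta = 2, gamma = .2, delta = .02, sigma = 1,
    D = D, Z = Z, N.trap = N.trap, trap.id = litter$trap.id, Obs.Litter = trap.lit)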


log.fit <- mle2(log.Litter.Func,
    start = list(alpha = 5, gamma = .2, Beta = 2, delta = .02, sigma = 1),
    #upper = list(alpha = 20, gamma = 1, Beta = 4, delta = .2, sigma = 20),
    #lower = list(alpha = .002, gamma = .002, Beta = .0002, delta = .000000002, sigma = .020),
    #method = "L-BFGS-B",
    data = list(D = D, Z = Z, N.trap = N.trap, trap.id = litter$trap.id, Obs.Litter = trap.lit)
)

fit <- mle2(Litter.Func,
    start = list(alpha = 5, gamma = .2, Beta = 2, delta = .02, sigma = 1),
    #upper = list(alpha = 20, gamma = 1, Beta = 4, delta = .2, sigma = 20),
    #lower = list(alpha = .002, gamma = .002, Beta = .0002, delta = .000000002, sigma = .020),
    #method = "L-BFGS-B",
    data = list(D = D, Z = Z, N.trap = N.trap, trap.id = litter$trap.id, Obs.Litter = trap.lit)
)
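
To see how well the optimization recovers the inputs, I compare the estimates against the values used in the simulation (summary, coef and confint all work on mle2 objects; the profile-based confint can be slow):

summary(fit)
summary(log.fit)

#side-by-side comparison of estimates and true values (no observation error was
#added in the simulation, so sigma has no target value here)
rbind(true = c(alpha = 5, Beta = 2, gamma = .2, delta = .02),
      est = coef(fit)[c("alpha", "Beta", "gamma", "delta")])

#profile confidence intervals (slow, and may warn if the likelihood surface is flat)
#confint(fit)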