I have been trying to estimate a two-parameter (a, b) Weibull distribution (location parameter = 0).
To find the estimates I use maximum likelihood together with the Newton-Raphson method.
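For reference, the log-likelihood that the gradient and Hessian below are meant to implement, for a sample t_1, ..., t_n with scale a and shape b, is

$$\ell(a,b) = n\ln b - nb\ln a + (b-1)\sum_{i=1}^{n}\ln t_i - \sum_{i=1}^{n}\left(\frac{t_i}{a}\right)^{b},$$

with first derivatives

$$\frac{\partial\ell}{\partial a} = -\frac{nb}{a} + \frac{b}{a}\sum_{i=1}^{n}\left(\frac{t_i}{a}\right)^{b},\qquad
\frac{\partial\ell}{\partial b} = \frac{n}{b} - n\ln a + \sum_{i=1}^{n}\ln t_i - \sum_{i=1}^{n}\left(\frac{t_i}{a}\right)^{b}\ln\frac{t_i}{a},$$

and the Newton-Raphson update is $\theta_{k+1} = \theta_k - H(\theta_k)^{-1}\nabla\ell(\theta_k)$ for $\theta = (a, b)$.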
import numpy as np

t = np.array(list(range(1, 1000)))  # data input: 1, 2, ..., 999
def gradient(a, b):
    # First derivatives of the log-likelihood w.r.t. a (scale) and b (shape)
    grad_a = -10*b/a + b/a*np.sum((t/a)**b)
    grad_b = 10/b - 10*np.log(a) + np.sum(np.log(t)) - np.sum(((t/a)**b)*np.log(t/a))
    grad_matrix = np.array([grad_a, grad_b])
    return grad_matrix
def hessian(a, b):
    # Second derivatives of the log-likelihood w.r.t. a and b (diagonal terms only)
    hess_a = 10*b/a**2 + (b*(b+1)/a**2)*np.sum((t/a)**b)
    hess_b = 10/(b**2) + np.sum((t/a)**b * (np.log(t/a))**2)
    hessians = np.array([hess_a, hess_b])
    return hessians
iters = 0
a0, b0 = 2, 6  # initial condition
while iters < 4:
    if hessian(a0, b0).any() == 0.0:
        print('Divide by zero error!')
    else:
        # Newton-Raphson style update, one parameter at a time
        a = a0 - gradient(a0, b0)[0]/hessian(a0, b0)[0]
        b = b0 - gradient(a0, b0)[1]/hessian(a0, b0)[1]
        print('Iteration-%d, a = %0.6f, b= %0.6f, e1 = %0.6f, e2 = %0.6f' % (iters, a, b, a - a0, b - b0))
        if gradient(a, b)[0] == 0 and gradient(a, b)[1] == 0:
            break
        else:
            a0, b0 = a, b
    iters = iters + 1

print(a, b)
print(iters)
Output:
Iteration-0, a = 1.714391, b= 6.656262, e1 = -0.285609, e2 = 0.656262
Iteration-1, a = 1.490482, b= 7.249362, e1 = -0.223909, e2 = 0.593099
Iteration-2, a = 1.309805, b= 7.794966, e1 = -0.180677, e2 = 0.545604
Iteration-3, a = 1.160878, b= 8.303167, e1 = -0.148927, e2 = 0.508201
With more iterations a tends to zero and b blows up to infinity, and neither of those is the correct estimate. It feels like I am doing something wrong when defining the gradient and the Hessian, even though they appear to be the correct first and second derivatives of the log-likelihood function.
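To sanity-check the derivatives, this is a minimal sketch of what I would compare against (assuming scipy is available): a numerical gradient of the negative log-likelihood at the starting point, plus a library fit with the location fixed at 0 as a reference.

import numpy as np
from scipy import optimize, stats

t = np.array(list(range(1, 1000)))  # same data as above

def negloglik(params):
    # Negative Weibull log-likelihood, loc = 0, params = (scale a, shape b)
    a, b = params
    n = len(t)
    return -(n*np.log(b) - n*b*np.log(a)
             + (b - 1)*np.sum(np.log(t)) - np.sum((t/a)**b))

# Numerical gradient at the starting point (2, 6); signs are flipped relative
# to gradient(2, 6) above because this is the *negative* log-likelihood
print(optimize.approx_fprime(np.array([2.0, 6.0]), negloglik, 1e-6))

# Reference fit with the location fixed at 0: returns (shape, loc, scale)
print(stats.weibull_min.fit(t, floc=0))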