A Summary of Three Optimization Methods for Finding Extrema

Solutions:

1. Steepest descent: the solution is worked out by hand on the attached calculation worksheet.
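The worksheet itself is not reproduced here. As a cross-check, below is a minimal sketch (not the worksheet's method verbatim) of steepest descent with an exact line search on this objective: since f(x) = x1^2 + 2*x2^2 - 4*x1 + 4 = (x1 - 2)^2 + 2*x2^2 is quadratic with constant Hessian G = diag(2, 4), the exact step is alpha = (g.g)/(g.G.g). The helper names G and grad are illustrative. Its first step lands on (4/3, -1/3), the second point plotted below; later iterates may differ from the hand computation if the worksheet used a different step rule.

import numpy as np

# f(x) = x1^2 + 2*x2^2 - 4*x1 + 4 = (x1 - 2)^2 + 2*x2^2
G = np.array([[2.0, 0.0],
              [0.0, 4.0]])  # constant Hessian of the quadratic

def grad(x):  # gradient of f (illustrative helper)
    return np.array([2.0 * x[0] - 4.0, 4.0 * x[1]])

x = np.array([0.0, 1.0])  # initial point (0, 1)
for k in range(3):  # three steepest-descent iterations
    g = grad(x)
    alpha = (g @ g) / (g @ G @ g)  # exact line-search step for a quadratic
    x = x - alpha * g
    print("Iteration %d:" % (k + 1), x)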

The three iterations starting from the initial point (0, 1) are plotted with Python as follows:

import numpy as np
import matplotlib.pyplot as plt

X1 = np.arange(-1.5, 3, 0.05)
X2 = np.arange(-2, 2 + 0.05, 0.05)
[x1, x2] = np.meshgrid(X1, X2)
f = (x1 - 2) ** 2 + 2 * (x2 ** 2)  # the given function, f = x1^2 + 2*x2^2 - 4*x1 + 4
plt.plot([0, 4/3, 14/9, 52/27], [1, -1/3, -1/9, 2/27], 'g*-')  # trajectory of the hand-computed iterates
plt.contour(x1, x2, f, 20)  # draw 20 contour lines of the function
plt.show()

2. Quasi-Newton (DFP) method, iterating from the initial point (0, 1). The Python solution is as follows:

import numpy as np
import sympy as sp

def jacobian(f, x):  # Jacobian (gradient) vector of first derivatives
    a, b = np.shape(x)  # number of variables
    x1, x2 = sp.symbols('x1 x2')  # define the symbols; add more for more variables
    x3 = [x1, x2]  # put the symbols in a list for easy lookup and looping
    df = np.zeros((a, 1))  # float column vector to hold the gradient (n rows for n variables)
    for i in range(a):  # differentiate and evaluate; for n variables, extend the subs dict
        df[i, 0] = sp.diff(f, x3[i]).subs({x1: x[0][0], x2: x[1][0]})
    return df

def hesse(f, x):  # Hessian matrix of second derivatives
    a, b = np.shape(x)
    x1, x2 = sp.symbols('x1 x2')
    x3 = [x1, x2]
    G = np.zeros((a, a))
    for i in range(a):
        for j in range(a):
            G[i, j] = sp.diff(f, x3[i], x3[j]).subs({x1: x[0][0], x2: x[1][0]})  # extend subs for n variables
    return G

def dfp_newton(f, x, iters):
    H = np.eye(2)  # initial positive-definite approximation of the inverse Hessian
    G = hesse(f, x)  # initial Hessian
    epsilon = 1e-3  # threshold on the 2-norm of the gradient g
    xbest = None  # stays None if the loop exhausts without converging
    for i in range(1, iters):
        g = jacobian(f, x)
        if np.linalg.norm(g) < epsilon:
            xbest = [round(v[0]) for v in x]  # copy the result into a list, rounded
            break
        # iteration formulas
        d = -np.dot(H, g)  # search direction
        a = -(np.dot(g.T, d) / np.dot(d.T, np.dot(G, d)))  # exact step length for a quadratic
        x_new = x + a * d  # update x
        print("Result of iteration %d:" % i)
        print(x_new)
        g_new = jacobian(f, x_new)
        y = g_new - g
        s = x_new - x
        # DFP rank-two update of the inverse-Hessian approximation
        H = (H + np.dot(s, s.T) / np.dot(s.T, y)
             - np.dot(H, np.dot(y, np.dot(y.T, H))) / np.dot(y.T, np.dot(H, y)))
        G = hesse(f, x_new)
        x = x_new
    return xbest

x1, x2 = sp.symbols('x1 x2')  # example
x = np.array([[0], [1]])
f = x1 ** 2 + 2 * x2 ** 2 - 4 * x1 + 4
print(dfp_newton(f, x, 20))
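For reference, the H update inside dfp_newton is the standard DFP rank-two correction, matching the code line above:

$$ H_{k+1} = H_k + \frac{s_k s_k^{\top}}{s_k^{\top} y_k} - \frac{H_k y_k y_k^{\top} H_k}{y_k^{\top} H_k y_k}, \qquad s_k = x_{k+1} - x_k, \quad y_k = g_{k+1} - g_k. $$

Since f(x) = (x1 - 2)^2 + 2*x2^2 has its unique minimizer at (2, 0), the rounded result printed by the example should be [2, 0].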

3. Finally, the conjugate gradient method is applied from the initial point (0, 1), implemented in Python:

import random
import numpy as np
import matplotlib.pyplot as plt

def goldsteinsearch(f, df, d, x, alpham, rho, t):
    # Inexact line search satisfying the Goldstein conditions:
    # rho*alpha*g'd <= f(x+alpha*d) - f(x) <= (1-rho)*alpha*g'd
    flag = 0
    a = 0
    b = alpham
    fk = f(x)
    gk = df(x)
    phi0 = fk
    dphi0 = np.dot(gk, d)  # directional derivative of f at x along d
    alpha = b * random.uniform(0, 1)  # random initial trial step in (0, alpham)
    while flag == 0:
        newfk = f(x + alpha * d)
        phi = newfk
        if (phi - phi0) <= (rho * alpha * dphi0):  # sufficient decrease holds
            if (phi - phi0) >= ((1 - rho) * alpha * dphi0):  # step is not too short
                flag = 1
            else:
                a = alpha
                if b < alpham:
                    alpha = (a + b) / 2
                else:
                    alpha = t * alpha  # expand the step
        else:  # decrease insufficient: shrink the interval from the right
            b = alpha
            alpha = (a + b) / 2
    return alpha


def frcg(fun, gfun, x0):  # Fletcher-Reeves conjugate gradient
    maxk = 5000  # maximum number of iterations
    k = 0
    epsilon = 1e-5  # convergence threshold on the gradient norm
    n = np.shape(x0)[0]
    itern = 0
    W = np.zeros((2, 20000))  # storage for the iterates
    log = open("cg.txt", 'w')  # per-iteration gradient-norm log
    while k < maxk:
        W[:, k] = x0
        gk = gfun(x0)
        itern += 1
        itern %= n
        if itern == 1:  # restart with steepest descent every n iterations
            dk = -gk
        else:
            beta = 1.0 * np.dot(gk, gk) / np.dot(g0, g0)  # Fletcher-Reeves beta
            dk = -gk + beta * d0
            gd = np.dot(gk, dk)
            if gd >= 0.0:  # not a descent direction: fall back to steepest descent
                dk = -gk
        if np.linalg.norm(gk) < epsilon:
            break
        alpha = goldsteinsearch(fun, gfun, dk, x0, 1, 0.1, 2)
        x0 += alpha * dk
        log.write(str(k) + ' ' + str(np.linalg.norm(gk)) + "\n")
        print(k, alpha)
        g0 = gk
        d0 = dk
        k += 1
    log.close()
    W = W[:, 0:k + 1]  # record of the iterates
    return [x0, fun(x0), k, W]

def fun(x):
    return (x[0] - 2) ** 2 + 2 * x[1] ** 2

def gfun(x):
    return np.array([2 * x[0] - 4, 4 * x[1]])

if __name__ == "__main__":
    X1 = np.arange(-0.5, 3.5, 0.05)
    X2 = np.arange(-1, 1.5, 0.05)
    [x1, x2] = np.meshgrid(X1, X2)
    f = (x1 - 2) ** 2 + 2 * (x2 ** 2)  # the given function
    plt.contour(x1, x2, f, 20)  # draw 20 contour lines of the function
    x0 = np.array([0, 1.0])
    x = frcg(fun, gfun, x0)
    print(x[0], x[2])
    W = x[3]
    print(W[0, :], W[1, :])
    plt.plot(W[0, :], W[1, :], 'g*-')  # plot the trajectory of the converging iterates
    plt.show()
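The direction update implemented in frcg is the standard Fletcher-Reeves rule, matching the beta line in the code:

$$ d_k = -g_k + \beta_k d_{k-1}, \qquad \beta_k = \frac{g_k^{\top} g_k}{g_{k-1}^{\top} g_{k-1}}, $$

with a restart to the pure steepest-descent direction d_k = -g_k every n iterations (the itern % n bookkeeping) and whenever the computed d_k fails to be a descent direction.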

4. Characteristics of the three algorithms (the contour lines and iteration trajectories are plotted above)

1. Steepest descent: each iteration moves along the negative gradient. The step adjustment is often too large and the direction does not track where the function actually needs to go, so the iterates easily zigzag, which slows convergence.

2. Quasi-Newton method: to reduce computation, a positive-definite matrix is maintained in place of the (inverse) Hessian when solving for the extremum. Its convergence is superlinear, close to Newton's second-order rate, and therefore fast, but with a poorly chosen initial point it may converge slowly or fail to converge.

3. Conjugate gradient method: one of the solvers most widely used in industry. It requires little computation per step and avoids forming and inverting the Hessian, and because its descent direction does not simply follow the gradient, it avoids the large zigzag effect. It is among the more stable and commonly used optimization methods.