The simplest particle swarm optimization (PSO) example [python 3] by 바죠

particle swarm optimization algorithm



A single candidate solution is a vector
\[\vec{x}\]
and its quality is measured by the objective function value
\[f(\vec{x})\]
Rather than tracking one solution, we consider a whole set of candidate solutions at the same time,
\[ \{ \vec{x} \} \]
together with their objective values
\[ \{ f(\vec{x}) \} \]
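In code, such a set of candidates and their objective values can be kept in plain Python lists. A minimal sketch, with a simple sphere function standing in for the objective (the full listing below uses functuser instead):

candidates = [[0.0, 0.0], [1.0, 2.0], [-3.0, 0.5]]        # a few trial solutions, the set {x}
values = [sum(xj**2 for xj in x) for x in candidates]     # their objective values, the set {f(x)}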


We use Python lists. We will create many objects of the same form: they all share one structure, but many of them exist at once, and each can be stored as an item of a Python list. The objects can even be instances of a class, since a Python list accepts almost anything as an item.
First we define an object called PARTICLE.
[A particle carries all the information about one candidate: position, velocity, objective value, best objective value so far, and best position so far.]
[Each particle can have its own characteristics; in particular, its motion coefficients can differ from those of the other particles.]
We then define another kind of object built out of these particles.
[It represents the swarm, the collection of particles; the update rule it applies is written out below.]
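For reference, the velocity and position updates implemented below (update_velocity and update_position in the code) follow the standard PSO rule. With w the inertia weight (ww in the code), c1 and c2 the cognitive and social coefficients, and r1, r2 fresh uniform random numbers in [0, 1):
\[ \vec{v}_i \leftarrow w \, \vec{v}_i + c_1 r_1 \left( \vec{p}_i - \vec{x}_i \right) + c_2 r_2 \left( \vec{g} - \vec{x}_i \right) \]
\[ \vec{x}_i \leftarrow \vec{x}_i + \vec{v}_i \]
Here p_i is the best position particle i has visited so far (position_best_i in the code) and g is the best position found by the whole swarm (position_best_g).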


To make the PSO calculation more efficient, a local optimization step is used: each particle's position is polished with a Nelder-Mead minimization before the swarm update (see the small example below and the evaluate method in the listing).
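As a minimal sketch of that local step, with a simple sphere function standing in for the objective (in the listing below, the evaluate method does the same thing, starting from the particle's current position and using functuser as the objective):

import numpy as np
from scipy.optimize import minimize

def sphere(x):
    return float(np.sum(np.asarray(x)**2))

# polish a rough guess with the derivative-free Nelder-Mead simplex method
res = minimize(sphere, x0=[1.3, -0.7], method='nelder-mead', options={'xatol': 1e-6})
print(res.x, res.fun)   # refined position and its objective value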

------------------------------------------------------------------------------------------------------------------------
import random
import numpy as np
from scipy.optimize import minimize
def functuser(x):
#   objective function; case selects the test problem:
#   1: sphere, 2: Rastrigin, 3: Rosenbrock, 4: Styblinski-Tang
    case=3

    if case == 1:
       total=0.
       for j in range(len(x)):
           total+=(x[j])**2
    if case == 2:
#    Rastrigin
       total=10.*len(x)
       for j in range(len(x)):
           total+=x[j]**2-10.*np.cos(2.*np.pi*x[j])
    if case == 3:
#   Rosenbrock
       xarray0=np.zeros(len(x))
       for j in range(len(x)):
          xarray0[j]=x[j]
       total=sum(100.0*(xarray0[1:]-xarray0[:-1]**2.0)**2.0 + (1-xarray0[:-1])**2.0)
    if case == 4:
#   Styblinski-Tang
       total=0.
       for j in range(len(x)):
           total+=(x[j]**4-16.*x[j]**2+5.*x[j])/2.

    return total
# a single particle: position, velocity, objective value, personal best, and its own ww, c1, c2 coefficients
class PARTICLE:
    def __init__(self,startx0,ww,c1,c2,xbounds,lverbo):
        self.position_i=[]
        self.velocity_i=[]
        self.position_best_i=[]
        self.obj_best_i=9e99
        self.obj_i=9e99
        self.dimensions=len(startx0)
        self.ww=ww+(random.random()-0.5)*0.2
        self.c1=c1+(random.random()-0.5)*0.2*1.
        self.c2=c2+(random.random()-0.5)*0.2*1.
        if lverbo:
           print(self.ww,self.c1,self.c2)
        for j in range(self.dimensions):
            self.velocity_i.append(random.uniform(-1,1))
            self.position_i.append( startx0[j]*( 1.+(random.random()-0.5)*2.) )
        if random.random() < 0.8:
           for j in range(self.dimensions):
               self.position_i[j]=xbounds[j][0]+(xbounds[j][1]-xbounds[j][0])*random.random()
        for j in range(self.dimensions):
            if self.position_i[j] > xbounds[j][1]:
               self.position_i[j]=xbounds[j][0]+(xbounds[j][1]-xbounds[j][0])*random.random()
            if self.position_i[j] < xbounds[j][0]:
               self.position_i[j]=xbounds[j][0]+(xbounds[j][1]-xbounds[j][0])*random.random()
        self.position_best_i=self.position_i.copy()
    def evaluate(self,objfunct):
#       local refinement: instead of only evaluating objfunct at the current position,
#       polish the position with a derivative-free Nelder-Mead minimization
#       self.obj_i=objfunct(self.position_i)
        xarray0=np.zeros(self.dimensions)
        for j in range(self.dimensions):
            xarray0[j]=self.position_i[j]
        res=minimize(objfunct,xarray0,method='nelder-mead',options={'xatol':1e-6,'disp':True})
        self.position_i=res.x.copy()
        self.obj_i=res.fun
        if self.obj_i < self.obj_best_i :
           self.position_best_i=self.position_i.copy()
           self.obj_best_i=self.obj_i
    def update_velocity(self,position_best_g):
#       inertia term + cognitive (personal best) pull + social (global best) pull
        for j in range(self.dimensions):
            vc=self.c1*(self.position_best_i[j]-self.position_i[j])*random.random()
            vs=self.c2*(position_best_g[j]-self.position_i[j])*random.random()
            self.velocity_i[j]=self.ww*self.velocity_i[j]+vc+vs
    def update_position(self,xbounds):
#       move by the velocity; re-randomize any component that leaves the box bounds
        for j in range(self.dimensions):
            self.position_i[j]=self.position_i[j]+self.velocity_i[j]
            if self.position_i[j] > xbounds[j][1]:
               self.position_i[j]=xbounds[j][0]+(xbounds[j][1]-xbounds[j][0])*random.random()
            if self.position_i[j] < xbounds[j][0]:
               self.position_i[j]=xbounds[j][0]+(xbounds[j][1]-xbounds[j][0])*random.random()
# the swarm: creates nparticles PARTICLE objects and repeats the evaluate / update steps for maxiter iterations
class PSO():
    def __init__(self, objfunct, startx0, xbounds, ww=0.5, c1=1.0, c2=2.0, nparticles=50, maxiter=50000, verbose=False):
        obj_best_g=9e99
        position_best_g=[]
        swarm=[]
        for _ in range(nparticles):
            swarm.append(PARTICLE(startx0,ww,c1,c2,xbounds,verbose))
        it=0
        while it < maxiter:
            if verbose:
               print(f'iter: {it:>6d} best solution: {obj_best_g:16.8e}')
            for i in range(nparticles):
                swarm[i].evaluate(objfunct)
                if swarm[i].obj_i < obj_best_g :
                   position_best_g=list(swarm[i].position_i)
                   obj_best_g=float(swarm[i].obj_i)
            for i in range(nparticles):
                swarm[i].update_velocity(position_best_g)
                swarm[i].update_position(xbounds)
            it+=1
        print('\nfinal solution:')
        print(f'   > {position_best_g}')
        print(f'   > {obj_best_g}\n')
        if True:
#          report every particle's personal best, sorted by objective value
           abc=np.zeros(nparticles)
           abcvec=np.zeros((nparticles,len(startx0)))
           for i in range(nparticles):
               abc[i]=swarm[i].obj_best_i
               abcvec[i]=swarm[i].position_best_i
           idx=abc.argsort()
           abc=abc[idx]
           abcvec=abcvec[idx,:]
           for i in range(nparticles):
               print(abc[i])
               print(abcvec[i,:])

# driver: a 10-dimensional problem, starting from the origin, with box bounds [-20, 20] in every dimension
startx0=[]
xbounds=[]
for j in range(10):
    startx0.append(0.)
for j in range(len(startx0)):
    xbounds.append((-20., 20.))
PSO(functuser, startx0, xbounds, ww=0.5, c1=1.0, c2=2.0, nparticles=50, maxiter=50000, verbose=True)

------------------------------------------------------------------------------------------------------------------------


------------------------------------------------------------------------------------------------------------------------
from pyswarms.single.global_best import GlobalBestPSO
import numpy as np
# create a parameterized version of the classic Rosenbrock unconstrained optimization function
def rosenbrock_with_args(x, a, b, c=0):
    f = (a - x[:, 0]) ** 2 + b * (x[:, 1] - x[:, 0] ** 2) ** 2 + c
    return f
# instantiate the optimizer
x_max = 10 * np.ones(2)
x_min = -1 * x_max
bounds = (x_min, x_max)
options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}
optimizer = GlobalBestPSO(n_particles=50, dimensions=2, options=options, bounds=bounds)
# now run the optimization, pass a=1 and b=100 as a tuple assigned to args
cost, pos = optimizer.optimize(rosenbrock_with_args, 1000, a=1, b=100, c=0)
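optimize returns the best objective value and the position where it was found, so the result can be inspected directly (the optimizer object also keeps a cost_history attribute for later inspection):

print(cost, pos)   # best objective value and best position found by the swarm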


------------------------------------------------------------------------------------------------

import numpy as np
import os
import os.path
from scipy.optimize import dual_annealing
from scipy.optimize import minimize
from scipy import optimize
# append a single line to a text file, adding a newline first only if the file already has content
def append_new_line(file_name, text_to_append):
    with open(file_name, "a+") as file_object:
        file_object.seek(0)
        data = file_object.read(100)
        if len(data) > 0:
            file_object.write("\n")
        file_object.write(text_to_append)
# append several lines to a text file, one per line
def append_multiple_lines(file_name, lines_to_append):
    with open(file_name, "a+") as file_object:
        appendEOL = False
        file_object.seek(0)
        data = file_object.read(100)
        if len(data) > 0:
            appendEOL = True
        for line in lines_to_append:
            if appendEOL == True:
                file_object.write("\n")
            else:
                appendEOL = True
            file_object.write(line)

# `func` was left undefined in the original snippet; a simple 10-dimensional
# sphere objective stands in here so the example runs as written
def func(x):
    return float(sum(xj**2 for xj in x))

lw = [-15.0] * 10
up = [15.0] * 10
if True:
   ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234, maxiter=20)
if False:
   ret=optimize.differential_evolution(func, bounds=list(zip(lw, up)), maxiter=10)
if False:
   x0=[ 0. for i in range(10)]
   ret = minimize(func, x0, method='Nelder-Mead', tol=1e-6)
lines=[]
lines.append(str(ret.fun))
for i in range(len(ret.x)):
    lines.append(str(ret.x[i]))
append_multiple_lines('./OUTPUT', lines)
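Whichever branch is enabled, SciPy returns an OptimizeResult, so ret.fun and ret.x are available in every case and the same write-out code works. Reading the file back is just as simple (a minimal sketch, assuming a single run wrote one number per line to ./OUTPUT):

import numpy as np
vals = np.loadtxt('./OUTPUT')
best_fun, best_x = vals[0], vals[1:]   # first line: best objective value, remaining lines: best position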


