import numpy as np


def felli(x):
    """Ill-conditioned ellipsoid test function.

    f(x) = sum_i 1e6**(i/(N-1)) * x_i**2, i.e. axis scalings spread
    log-uniformly from 1 to 1e6 (condition number 1e6).

    Parameters
    ----------
    x : np.ndarray, shape (N,), N >= 2

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If the dimension N is smaller than 2 (the exponent N-1 would be 0).
    """
    N = x.shape[0]
    if N < 2:
        raise ValueError("dimension must be greater than one")
    exponents = np.arange(N) / (N - 1)
    return float(np.sum((1e6 ** exponents) * (x ** 2)))


def escma(func, *, N=10, xmean=None, sigma=0.5, stopfitness=1e-14,
          stopeval=2000, func_args=(), func_kwargs=None, seed=0,
          bestEver=np.inf, noImproveGen=0, absTolImprove=1e-12,
          maxNoImproveGen=100, sigmaImprove=1e-12):
    """Minimize ``func`` with a (mu/mu_w, lambda) CMA-ES.

    Follows Hansen's purecma / Matlab reference implementation:
    rank-mu + rank-one covariance update and cumulative step-size
    adaptation (CSA), plus a simple stagnation-based early stop.

    Parameters
    ----------
    func : callable
        Objective, called as ``func(x, *func_args, **func_kwargs)`` with a
        1-D ndarray ``x``; must return a scalar.
    N : int
        Problem dimension (ignored if ``xmean`` is given).
    xmean : array_like or None
        Initial mean; random in [0, 1)^N if None.
    sigma : float
        Initial step size.
    stopfitness : float
        Terminate when the best fitness drops to this value or below.
    stopeval : int or None
        Budget of function evaluations; ``None`` selects ``1e3 * N**2``.
    func_args, func_kwargs
        Extra positional/keyword arguments forwarded to ``func``.
    seed : int or None
        Seed for ``np.random``; ``None`` leaves the global state untouched.
    bestEver, noImproveGen : float, int
        Initial state of the stagnation tracker (kept as parameters for
        backward compatibility; normally left at their defaults).
    absTolImprove : float
        Minimum decrease of the best fitness that counts as an improvement.
    maxNoImproveGen : int
        Abort after this many consecutive non-improving generations.
    sigmaImprove : float
        Abort when sigma falls below this threshold.

    Returns
    -------
    np.ndarray
        Best individual of the final generation.
    """
    if func_kwargs is None:
        func_kwargs = {}
    if seed is not None:
        np.random.seed(seed)

    # Initialization (taken from parameters instead of hard-coded values)
    if xmean is None:
        xmean = np.random.rand(N)
    else:
        xmean = np.asarray(xmean, dtype=float)
        N = xmean.shape[0]  # the supplied mean defines the dimension
    if stopeval is None:
        stopeval = int(1e3 * N ** 2)

    # Strategy parameter setting: selection
    lambda_ = 4 + int(np.floor(3 * np.log(N)))  # population size
    mu = lambda_ / 2.0
    # muXone recombination weights (log-decreasing)
    weights = np.log(mu + 0.5) - np.log(np.arange(1, int(mu) + 1))
    mu = int(np.floor(mu))  # number of selected parents
    weights = weights / np.sum(weights)
    mueff = np.sum(weights) ** 2 / np.sum(weights ** 2)  # variance-effective mu

    # Strategy parameter setting: adaptation
    cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)       # time constant for pc
    cs = (mueff + 2) / (N + mueff + 5)                   # time constant for ps
    c1 = 2 / ((N + 1.3) ** 2 + mueff)                    # rank-one learning rate
    cmu = min(1 - c1,
              2 * (mueff - 2 + 1 / mueff) / ((N + 2) ** 2 + 2 * mueff))  # rank-mu
    damps = 1 + 2 * max(0, np.sqrt((mueff - 1) / (N + 1)) - 1) + cs  # sigma damping

    # Initialize dynamic (internal) strategy parameters and constants
    pc = np.zeros(N)          # evolution path for C
    ps = np.zeros(N)          # evolution path for sigma
    B = np.eye(N)             # eigenvectors of C
    D = np.eye(N)             # sqrt of eigenvalues of C (diagonal matrix)
    C = B @ D @ (B @ D).T     # covariance matrix, initially identity
    eigeneval = 0             # eval count at last eigendecomposition
    chiN = np.sqrt(N) * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))  # E||N(0,I)||

    # Generation loop
    counteval = 0
    arx = np.zeros((N, lambda_))       # sampled individuals (columns)
    arz = np.zeros((N, lambda_))       # underlying N(0, I) samples
    arfitness = np.zeros(lambda_)
    gen = 0
    print(f' [CMA-ES] Start: lambda = {lambda_}, sigma ={round(sigma, 6)}, stopeval = {stopeval}')
    while counteval < stopeval:
        gen += 1

        # Generate and evaluate lambda offspring
        for k in range(lambda_):
            arz[:, k] = np.random.randn(N)
            arx[:, k] = xmean + sigma * (B @ D @ arz[:, k])
            arfitness[k] = float(func(arx[:, k], *func_args, **func_kwargs))
            counteval += 1

        # Sort by fitness and compute weighted mean into xmean
        arindex = np.argsort(arfitness)
        arfitness = arfitness[arindex]
        xmean = arx[:, arindex[:mu]] @ weights
        zmean = arz[:, arindex[:mu]] @ weights

        # Stagnation check: count generations without sufficient improvement
        fbest = arfitness[0]
        if bestEver - fbest > absTolImprove:
            bestEver = fbest
            noImproveGen = 0
        else:
            noImproveGen += 1
        if gen == 1 or gen % 50 == 0:
            print(f' [CMA-ES] Gen {gen}, best = {round(fbest, 6)}, sigma = {sigma:.3g}')
        if noImproveGen >= maxNoImproveGen:
            print(f' [CMA-ES] Abbruch: keine Verbesserung > {round(absTolImprove, 3)} in {maxNoImproveGen} Generationen.')
            break
        if sigma < sigmaImprove:
            print(f' [CMA-ES] Abbruch: sigma zu klein {sigma:.3g}')
            break

        # Cumulation: update evolution paths
        ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * (B @ zmean)
        norm_ps = np.linalg.norm(ps)
        # hsig stalls the pc update when ||ps|| is unexpectedly large
        hsig = (norm_ps
                / np.sqrt(1 - (1 - cs) ** (2 * counteval / lambda_))
                / chiN) < (1.4 + 2 / (N + 1))
        hsig = 1.0 if hsig else 0.0
        pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * (B @ D @ zmean)

        # Adapt covariance matrix C (rank-one + rank-mu update)
        BDz = B @ D @ arz[:, arindex[:mu]]
        C = ((1 - c1 - cmu) * C
             + c1 * (np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C)
             + cmu * BDz @ np.diag(weights) @ BDz.T)

        # Adapt step-size sigma (CSA)
        sigma = sigma * np.exp((cs / damps) * (norm_ps / chiN - 1))

        # Update B and D from C (eigendecomposition, amortized to O(N^2))
        if counteval - eigeneval > lambda_ / ((c1 + cmu) * N * 10):
            eigeneval = counteval
            C = (C + C.T) / 2.0  # enforce symmetry before eigh
            eigvals, B = np.linalg.eigh(C)
            # Clamp tiny negative eigenvalues (numerical noise) so that
            # np.sqrt cannot produce NaN and poison the search distribution.
            D = np.diag(np.sqrt(np.maximum(eigvals, 0.0)))

        # Break if fitness is good enough
        if arfitness[0] <= stopfitness:
            print(' [CMA-ES] stopfitness erreicht.')
            break

        # Escape flat fitness by inflating sigma (standard CMA-ES heuristic)
        if arfitness[0] == arfitness[int(np.ceil(0.7 * lambda_)) - 1]:
            sigma = sigma * np.exp(0.2 + cs / damps)
            # print("warning: flat fitness, consider reformulating the objective")

    # Final message and result
    xmin = arx[:, arindex[0]]
    bestValue = arfitness[0]
    print(f' [CMA-ES] Ende: Gen = {gen}, best = {round(bestValue, 6)}')
    return xmin


if __name__ == "__main__":
    xmin = escma(felli, N=10)  # the objective function is passed in
    print("Bestes gefundenes x:", xmin)
    print("f(xmin) =", felli(xmin))