# Golden-Section-Search:
# one iteration: shrink the bracketing triple trpl = (a, b, c) with a < b < c and f(b) < f(a), f(c)

phi = (1+math.sqrt(5))/2
a, b, c = trpl
fa, fb, fc = f(trpl)   # assumes f evaluates element-wise on arrays; only fb is needed below
if b - a > c - b:
   d = a + (b - a) / phi
   fd = f (d)
   if (fd < fb):  trpl = [a, d, b]
   else:  trpl = [d, b, c]
else:
   d = c + (b - c) / phi
   fd = f (d)
   if (fd < fb):  trpl = [b, d, c]
   else:  trpl = [a, b, d]
trpl = np.array (trpl)
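
# Below: a minimal, self-contained sketch that drives the update above in a loop; the
# test function, the starting triple and the iteration count are assumptions for illustration.

import math
import numpy as np

def f (x):  return (x - 1.3) ** 2          # hypothetical 1-D test function

phi = (1 + math.sqrt (5)) / 2
trpl = np.array ([-2.0, 0.5, 4.0])         # bracketing triple with f(b) < f(a), f(c)
for k in range (50):
   a, b, c = trpl
   fb = f (b)
   if b - a > c - b:
      d = a + (b - a) / phi
      trpl = [a, d, b] if f (d) < fb else [d, b, c]
   else:
      d = c + (b - c) / phi
      trpl = [b, d, c] if f (d) < fb else [a, b, d]
   trpl = np.array (trpl)
print (trpl[1])                            # should approach the minimizer 1.3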


# Coordinate-Descent:

def f (x, y):
   return x ** 2 + y ** 2 - x * y
   # df/dx = 2x - y = 0  =>  x = y / 2
   # df/dy = 2y - x = 0  =>  y = x / 2

if i % 2 == 1:
   y = x / 2
else:
   x = y / 2
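
# Below: a self-contained sketch of the alternating update for this f; the start point and
# the number of sweeps are assumptions.

x, y = 3.0, -1.0
for i in range (40):
   if i % 2 == 1:
      y = x / 2      # exact minimization over y with x fixed
   else:
      x = y / 2      # exact minimization over x with y fixed
print (x, y)          # converges towards the minimizer (0, 0)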


# Pattern-Search:

fmm = f (x, y)          # naming: m = middle, u = minus step, o = plus step (per coordinate)
fum = f (x - size, y)
fom = f (x + size, y)
fmu = f (x, y - size)
fmo = f (x, y + size)
fv = np.array ([fmm,fum,fom,fmu,fmo])
best = np.argmin (fv)
if best == 0:  size /= 2
if best == 1:  x -= size
if best == 2:  x += size
if best == 3:  y -= size
if best == 4:  y += size
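
# Below: a self-contained driver for the pattern-search step, reusing the f(x, y) from the
# Coordinate-Descent section; start point, initial mesh size and stopping rule are assumptions.

import numpy as np

def f (x, y):  return x ** 2 + y ** 2 - x * y

x, y, size = 2.0, -1.5, 1.0
for k in range (200):
   fv = np.array ([f (x, y), f (x - size, y), f (x + size, y), f (x, y - size), f (x, y + size)])
   best = np.argmin (fv)
   if best == 0:  size /= 2          # no neighbour improves: refine the mesh
   elif best == 1:  x -= size
   elif best == 2:  x += size
   elif best == 3:  y -= size
   elif best == 4:  y += size
   if size < 1e-8:  break
print (x, y)                          # should be close to the minimizer (0, 0)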


# Nelder-Mead:

# x ... 3x2 np.array
fv = fc (x[:,0], x[:,1])   # for f(x) = fc(x[0], x[1]), when x is a vector
si = np.argsort (fv)
fv = fv[si]
x = x[si]
a = (x[0] + x[1]) / 2          # centroid of the two best vertices
r = a + (a - x[2])             # reflection of the worst vertex
e = a + 2 * (a - x[2])         # expansion
c = a - 0.5 * (a - x[2])       # contraction
if f (e) < f (r) < fv[0]:  x[2] = e
elif f (r) < fv[1]:  x[2] = r
elif f (c) < min (f (r), fv[2]):  x[2] = c
else:                          # shrink the simplex towards the best vertex
   x[1] = (x[0] + x[1]) / 2
   x[2] = (x[0] + x[2]) / 2
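
# Below: a self-contained sketch of the simplified Nelder-Mead step applied to the quadratic
# from the Coordinate-Descent section; the starting simplex and iteration count are assumptions.

import numpy as np

def fc (x0, x1):  return x0 ** 2 + x1 ** 2 - x0 * x1
def f (x):  return fc (x[0], x[1])

x = np.array ([[2.0, 1.0], [1.5, -1.0], [-0.5, 2.0]])   # 3x2 simplex
for k in range (100):
   fv = fc (x[:,0], x[:,1])
   si = np.argsort (fv)
   fv = fv[si];  x = x[si]
   a = (x[0] + x[1]) / 2
   r = a + (a - x[2])
   e = a + 2 * (a - x[2])
   c = a - 0.5 * (a - x[2])
   if f (e) < f (r) < fv[0]:  x[2] = e
   elif f (r) < fv[1]:  x[2] = r
   elif f (c) < min (f (r), fv[2]):  x[2] = c
   else:
      x[1] = (x[0] + x[1]) / 2
      x[2] = (x[0] + x[2]) / 2
print (x[0])                                             # best vertex, should approach (0, 0)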


# Backtracking

fa = f (x)
while True:
   fn = f (x + alpha * d)
   if fn < fa:  break
   alpha *= 0.7
x += alpha * d
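
# Below: a self-contained gradient-descent driver around the backtracking rule above; the test
# function, start point and initial step length are assumptions.

import numpy as np

def f (x):  return x[0] ** 2 + 10 * x[1] ** 2
def df (x):  return np.array ([2 * x[0], 20 * x[1]])

x = np.array ([3.0, -1.0])
for k in range (100):
   d = - df (x)
   alpha = 1.0                # reset the trial step length in every iteration
   fa = f (x)
   while True:
      fn = f (x + alpha * d)
      if fn < fa:  break
      alpha *= 0.7
   x += alpha * d
print (x)                      # should approach the minimizer (0, 0)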


# Wolfe 1 (sufficient decrease / Armijo condition; c1 = 0.3, assuming d = -df(x))

fa = f (x)
while True:
   fn = f (x + alpha * d)
   if fn < fa - 0.3 * alpha * np.dot (d, d):  break
   alpha *= 0.7
x += alpha * d


# Wolfe 2 (curvature condition; c2 = 0.2, assuming d = -df(x))

while True:
   fn = f (x + alpha * d)
   dn = - df (x + alpha * d)
   if math.fabs (np.dot (dn, d)) < 0.2 * np.dot (d, d):  break
   alpha *= 0.7
x += alpha * d


# Momentum

d = - df (x)
v = beta * v + alpha * d
x += v
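
# Below: a self-contained driver for the momentum update on the quadratic from the Backtracking
# sketch; the hyper-parameters and the start point are assumptions.

import numpy as np

def df (x):  return np.array ([2 * x[0], 20 * x[1]])

x = np.array ([3.0, -1.0])
v = np.zeros (2)
alpha = 0.01;  beta = 0.9
for k in range (300):
   d = - df (x)
   v = beta * v + alpha * d
   x += v
print (x)                      # should approach the minimizer (0, 0)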


# Nesterov

dn = - df (x + beta * v)   # gradient at the look-ahead point x + beta * v
v = beta * v + alpha * dn
x += v


# Adagrad

d = - df (x)
s += d * d
x += 2 / (0.000001 + np.sqrt (s)) * d


# RMSprop

d = - df (x)
s = 0.8 * s + 0.2 * d * d
x += 2 / (0.000001 + np.sqrt (s)) * d


# Adam

d = - df (x)
v = beta * v + (1 - beta) * d
vh = v / (1.0 - beta ** (i + 1))      # bias-corrected first moment
s = gamma * s + (1.0 - gamma) * d * d
sh = s / (1.0 - gamma ** (i + 1))     # bias-corrected second moment
x += alpha / (0.000001 + np.sqrt (sh)) * vh
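
# Below: a self-contained driver for the Adam update above on the quadratic from the Backtracking
# sketch; the hyper-parameters, start point and iteration count are assumptions.

import numpy as np

def df (x):  return np.array ([2 * x[0], 20 * x[1]])

x = np.array ([3.0, -1.0])
v = np.zeros (2);  s = np.zeros (2)
alpha = 0.1;  beta = 0.9;  gamma = 0.999
for i in range (500):
   d = - df (x)
   v = beta * v + (1 - beta) * d
   vh = v / (1.0 - beta ** (i + 1))
   s = gamma * s + (1.0 - gamma) * d * d
   sh = s / (1.0 - gamma ** (i + 1))
   x += alpha / (0.000001 + np.sqrt (sh)) * vh
print (x)                      # should end up close to the minimizer (0, 0)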

# Conjugate-Gradient

x = x0
g = df (x)
d = np . zeros (2)
for i in range (...):
   dold = d;  gold = g
   g = df (x)
   beta = np . dot (g, g) / np . dot (gold, gold)   # Fletcher-Reeves
   d = -g + beta * dold
   alpha = line_search (f, x, d)
   x = x + alpha * d
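
# Below: a self-contained sketch with an exact line search for the quadratic f(x) = 0.5 x^T A x;
# the matrix A, the start point and the exact-step formula are assumptions for illustration.

import numpy as np

A = np . array ([[2.0, 0.0], [0.0, 20.0]])
def f (x):  return 0.5 * x @ A @ x
def df (x):  return A @ x
def line_search (f, x, d):  return - (df (x) @ d) / (d @ A @ d)   # exact minimizer along d

x = np . array ([3.0, -1.0])
g = df (x)
d = np . zeros (2)
for i in range (2):            # CG minimizes an n-dimensional quadratic in n steps
   dold = d;  gold = g
   g = df (x)
   beta = np . dot (g, g) / np . dot (gold, gold)
   d = -g + beta * dold
   alpha = line_search (f, x, d)
   x = x + alpha * d
print (x)                       # (0, 0) up to rounding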

# Newton

d = np . linalg . solve (ddf (x), - df (x))
x = x + d
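
# Below: a one-step check on the quadratic from the Conjugate-Gradient sketch (a single Newton
# step minimizes a quadratic exactly); the matrix A and the start point are assumptions.

import numpy as np

A = np . array ([[2.0, 0.0], [0.0, 20.0]])
def df (x):  return A @ x
def ddf (x):  return A

x = np . array ([3.0, -1.0])
d = np . linalg . solve (ddf (x), - df (x))
x = x + d
print (x)                       # (0, 0) up to rounding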

# Gauss-Newton
# for nonlinear least squares f(x) = 0.5 * ||g(x)||^2 with residual g(x) and Jacobian dg(x)

A = dg (x)
d = np . linalg . solve (np . einsum ('ij,ik', A, A), - np . einsum ('ij,i', A, g(x)))
x = x + d
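
# Below: a self-contained sketch of a few Gauss-Newton steps on a small zero-residual problem;
# the residual g, its Jacobian dg and the start point are assumptions for illustration.

import numpy as np

def g (x):   return np . array ([x[0] ** 2 - 1.0, x[0] * x[1] - 1.0, x[1] - 1.0])
def dg (x):  return np . array ([[2 * x[0], 0.0], [x[1], x[0]], [0.0, 1.0]])

x = np . array ([2.0, 0.5])
for k in range (10):
   A = dg (x)
   d = np . linalg . solve (np . einsum ('ij,ik', A, A), - np . einsum ('ij,i', A, g (x)))
   x = x + d
print (x, g (x))                # should converge to x = (1, 1) with residuals near 0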

# Trust-Region-Newton (damped Newton step; lam controls the regularization)

d = np . linalg . solve (ddf (x) + lam * np . identity (2), - df (x))
x = x + d

# Levenberg-Marquardt

A = dg (x)
B = np . einsum ('ij,ik', A, A)
b = - np . einsum ('ij,i', A, g(x))
while True:
   d = np . linalg . solve (B + lam * np . identity (2), b)
   if f (x + d) <= f (x):
      lam = lam / 2
      break
   else:
      lam = lam * 2
x = x + d

# Powell's Dog-Leg

A = dg (x)
B = np . einsum ('ij,ik', A, A)
b = - np . einsum ('ij,i', A, g(x))
while True:
   dgn = np . linalg . solve (B, b)                   # Gauss-Newton step
   dgd = -dg(x).T @ g(x)                              # steepest-descent direction of 0.5 * ||g(x)||^2
   t = (dgd @ dgd) / ((dg(x) @ dgd) @ (dg(x) @ dgd))  # exact step length along dgd
   dgd = t * dgd                                      # Cauchy point
   if dgn @ dgn < r ** 2:
      d = dgn
   elif dgd @ dgd < r ** 2:
      ea = (dgn - dgd) @ (dgn - dgd);  eb = 2 * (dgn - dgd) @ dgd;  ec = dgd @ dgd - r ** 2
      beta = (-eb + np . sqrt (eb ** 2 - 4 * ea * ec)) / (2 * ea)
      d = beta * dgn + (1 - beta) * dgd
   else:
      d = dgd * r / np . sqrt (dgd @ dgd)
   if f (x + d) <= f (x):
      r = r * 2
      break
   else:
      r = r * 0.8
x = x + d

# BFGS

x = x0
G = 0.01 * np . identity (2)   # initial approximation of the inverse Hessian
for i in range (...):
   g = df (x)
   d = -G @ g
   while d @ df (x + d) < d @ g * 0.6:  d *= 1.5   # crude step lengthening until a curvature condition holds
   e = df (x + d) - df (x)                         # gradient difference y
   b = 1.0 / (d @ e)
   U = (np . identity (2) -  np . outer (e, d) * b)
   G = U.T @ G @ U + np . outer (d, d) * b         # BFGS update of the inverse-Hessian approximation
   x = x + d

# L-BFGS
# db ... history of steps s, eb ... history of gradient differences y;
# recursive form of the two-loop recursion, using at most the last 5 pairs

def lbfgs (p, k, i):
   if i == 5 or k - i == 0:
      return (db[k-1] @ eb[k-1]) / (eb[k-1] @ eb[k-1]) * p   # initial scaling (s @ y) / (y @ y)
   a = (db[k-i] @ p) / (db[k-i] @ eb[k-i])
   q = lbfgs (p - a * eb[k-i], k, i + 1)
   return q + (a - (eb[k-i] @ q) / (db[k-i] @ eb[k-i])) * db[k-i]

x = x0
db = np . zeros ((..., 2))
eb = np . zeros ((..., 2))
d = - 0.01 * df (x)
db[0] = d
eb[0] = df (x + d) - df (x)
x = x + d
for k in range (1, ...):
   g = df (x)
   d = - lbfgs (g, k, 1)
   while d @ df (x + d) < d @ g * 0.6:  d *= 1.5
   db[k] = d
   eb[k] = df (x + d) - g
   x = x + d

# Projected Gradient-Descent
# projection of x onto the L1 ball { y : ||y||_1 <= 1 } (assumes ||x||_1 >= 1)

a = np . abs (x)
b = np . sort (a) [::-1]
s = 0; k = 0
while k < np . size (b) and (s + b[k] - 1) / (k + 1) < b[k]:  s = s + b[k]; k = k + 1
c = (s - 1) / k
y = np . zeros (np . size (x))
for i in range (np . size (x)):
   if np . abs (x[i]) > c:  y[i] = x[i] - np . sign (x[i]) * c
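
# Below: a self-contained sketch of projected gradient descent that uses the projection above
# (wrapped in proj, with a guard for points already inside the ball); the objective, start point
# and step length are assumptions for illustration.

import numpy as np

def proj (x):                        # projection onto { y : ||y||_1 <= 1 }
   a = np . abs (x)
   if np . sum (a) <= 1:  return x.copy ()          # already feasible
   b = np . sort (a) [::-1]
   s = 0; k = 0
   while k < np . size (b) and (s + b[k] - 1) / (k + 1) < b[k]:  s = s + b[k]; k = k + 1
   c = (s - 1) / k
   y = np . zeros (np . size (x))
   for i in range (np . size (x)):
      if np . abs (x[i]) > c:  y[i] = x[i] - np . sign (x[i]) * c
   return y

def df (x):  return 2 * (x - np . array ([2.0, 1.0]))   # gradient of ||x - (2, 1)||^2

x = np . array ([0.0, 0.0])
for k in range (100):
   x = proj (x - 0.1 * df (x))
print (x)                             # should approach (1, 0), the closest point of the ball to (2, 1)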

# Proximal Gradient-Descent
# soft-threshold (proximal operator of th * ||q||_1), applied component-wise:
for i in range (np . size (q)):
   if q[i] > th: q[i] -= th
   elif q[i] > -th: q[i] = 0
   else: q[i] += th
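
# Below: a self-contained ISTA-style sketch, i.e. a gradient step on the smooth part followed by
# the soft-threshold above, minimizing 0.5 * ||A x - b||^2 + th * ||x||_1; A, b, the step size
# alpha and th are assumptions for illustration.

import numpy as np

A = np . array ([[1.0, 0.3], [0.3, 2.0], [0.5, 0.1]])
b = np . array ([1.0, 0.5, 0.2])
th = 0.1
alpha = 0.2                           # step size, kept below 1 / lambda_max(A^T A)
x = np . zeros (2)
for k in range (200):
   q = x - alpha * A.T @ (A @ x - b)  # gradient step on the smooth part
   for i in range (np . size (q)):    # soft-threshold with level alpha * th
      if q[i] > alpha * th: q[i] -= alpha * th
      elif q[i] > -alpha * th: q[i] = 0
      else: q[i] += alpha * th
   x = q
print (x)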

# Simplex Method
AA = np . array ([[1., 1, 3], [1, 6, 1], [6, 1, 1]])  # AA x >= b
b = np . array ([-2.5, -2.5, -2.5])
cc = np . array ([1.,1,0.5])  # min cc^T x
A = np . concatenate ((AA, -AA, - np . identity (len (cc))), axis = 1)  # standard form: x = x+ - x-, slack s with AA (x+ - x-) - s = b
c = np . concatenate ((cc, -cc, np . zeros (len (cc))))
kappa = np . array ([6, 7, 8])  # initial basis: the slack columns (feasible since b <= 0)
while True:
   kappa_bar = np . setdiff1d (np . arange (0, len (c)), kappa)
   x = np . zeros (len (c));  x[kappa] = np . linalg . solve (A [:, kappa], b)
   lam = np . linalg . solve (A [:, kappa].T, c [kappa])
   mu = c [kappa_bar] - A [:, kappa_bar] . T @ lam
   if all (mu >= -0.00000001):  break
   q = kappa_bar [np . argmin (mu)]
   d = np . linalg . solve (A [:, kappa], A [:, q])
   if all (d <= 0):  print ("unbounded");  break
   p = kappa [np . argmin (np . where (d > 0, x[kappa] / d, 100000000))]
   kappa = np . union1d (np . setdiff1d (kappa, [p]), [q])
x = x[0:3] - x[3:6]  # recover the original variables from the split x = x+ - x-
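
# Below: an optional cross-check of the result with scipy.optimize.linprog (assuming scipy is
# available); bounds=(None, None) because the original variables are free.

from scipy.optimize import linprog
res = linprog (cc, A_ub = -AA, b_ub = -b, bounds = (None, None))   # AA x >= b  <=>  -AA x <= -b
print (x, res.x)                       # both should be approximately (-0.263, -0.263, -0.658)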

# Penalty

def fg (x, l):  return f(x) + l * g(x) ** 2
def dfg (x, l):  return df(x) + l * 2 * g(x) * dg(x)
#def fg2 (x, l):  return f(x) + l * np . abs (g(x))
#def dfg2 (x, l):  return df(x) + l * (1 if g(x) > 0 else -1) * dg(x)

for k in range (...):
   d = -0.1 * dfg (x, l)
   x += d
   l *= 1.05   # increase the penalty weight
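
# Below: a self-contained sketch of the quadratic-penalty method on a small equality-constrained
# problem; f, g, the penalty schedule and the inner loop with a step length that shrinks as the
# penalty weight l grows are assumptions for illustration.

import numpy as np

def f (x):   return (x[0] - 1) ** 2 + (x[1] - 2) ** 2
def df (x):  return np.array ([2 * (x[0] - 1), 2 * (x[1] - 2)])
def g (x):   return x[0] + x[1]                 # constraint g(x) = 0
def dg (x):  return np.array ([1.0, 1.0])

def dfg (x, l):  return df (x) + l * 2 * g (x) * dg (x)

x = np.array ([0.0, 0.0])
l = 1.0
for outer in range (30):
   for inner in range (200):                    # roughly minimize the penalized function for this l
      x += -(0.1 / (1.0 + l)) * dfg (x, l)      # damped step keeps plain gradient descent stable
   l *= 2
print (x, g (x))                                 # should approach (-0.5, 0.5) with g(x) -> 0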

# Augmented Lagrangian

def fg (x, l, lam):  return f(x) - lam * g(x) + 0.5 * l * g(x) ** 2
def dfg (x, l, lam):  return df(x) - (lam - l * g(x)) * dg(x)
l = 0.5
lam = 0

for k in range (...):
   d = -0.1 * dfg (x, l, lam)
   x += d
   lam = lam - l * g(x)   # multiplier update
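
# Below: a self-contained sketch on the same test problem as in the Penalty sketch; f, g, the
# inner/outer iteration counts and the fixed penalty weight l are assumptions.  Unlike the pure
# penalty method, l can stay moderate because the multiplier lam does the work.

import numpy as np

def f (x):   return (x[0] - 1) ** 2 + (x[1] - 2) ** 2
def df (x):  return np.array ([2 * (x[0] - 1), 2 * (x[1] - 2)])
def g (x):   return x[0] + x[1]                 # constraint g(x) = 0
def dg (x):  return np.array ([1.0, 1.0])

def dfg (x, l, lam):  return df (x) - (lam - l * g (x)) * dg (x)

x = np.array ([0.0, 0.0])
l = 0.5
lam = 0.0
for outer in range (50):
   for inner in range (200):                    # roughly minimize the augmented Lagrangian
      x += -0.1 * dfg (x, l, lam)
   lam = lam - l * g (x)                        # multiplier update
print (x, g (x), lam)                            # should approach (-0.5, 0.5), g(x) -> 0, lam -> -3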

# Active Set

p = -1                # index of the constraint that most recently became inactive
for k in range (...):
   G = ddf (x)  # Hessian matrix
   c = df (x) - G @ x  # linear term of the quadratic model (gradient = G x + c)
   AA = np . block ([[G, -A[alpha].T], [A[alpha], np . zeros ((len (alpha), len (alpha)))]])
   bb = np . concatenate ((-G @ x - c, -A[alpha] @ x + b[alpha]))
   dd = np . linalg . solve (AA, bb)
   d = dd[:n]
   lam = dd[n:]
   beta = (b - A @ x) / (A @ d)
   beta2 = np . where (beta >= 0, beta, 1000000)  # exclude negative beta
   beta2[alpha] = 1000000  # exclude constraints already in the active set
   if p != -1:  beta2[p] = 1000000  # exclude the index that just became inactive (its beta_p would still be 0)
   q = np . argmin (beta2)
   if beta2[q] < 1.0:  # with beta > 1 we would overshoot the optimum
      x = x + beta[q] * d
   else:
      x = x + d
      q = -1
   if len (lam) > 0:
      pl = np . argmin (lam)  # pl is the index into lambda
      if lam[pl] < 0:
         p = alpha [pl]  # p is the row index in A
         alpha = np . setdiff1d (alpha, [p])
      else:
         p = -1
   if q != -1:  alpha = np . union1d (alpha, [q])
   if p == -1 and q == -1 and np . allclose (d, 0):  break

# Linear Interior-Point

c = np . array ([0.3, 1.0])
A = np . array ([[0.5, 1.0], [-0.5, 1.0], [-1.0, 1.0]])
b = np . array ([0.2, 0.2, -0.2])
n = len (c);  m = len (A)

AA = np . block ([A, -A, - np . eye (m)])
cc = np . concatenate ((c, -c, np . zeros (m)))
nn = 2 * n + m
xx = ...   # strictly positive start point (interior)
mu = ...   # strictly positive dual start
lam = ...
sigma = 0.01  # centering parameter

for k in range (...):
   x = xx[0:n] - xx[n:2*n]
   G = np . block (
     [ [np . zeros ((nn, nn)), AA.T, np . eye (nn)],
       [AA, np . zeros ((m, m)), np . zeros ((m, nn))],
       [np . diag (mu), np . zeros ((nn, m)), np . diag (xx)]
     ])
   u = mu @ xx / nn   # average complementarity
   g = np . concatenate ((AA.T @ lam + mu - cc, AA @ xx - b, mu * xx - np . full (nn, sigma * u)))
   d = np . linalg . solve (G, -g)
   dxx = d[0:nn]
   dlam = d[nn:nn+m]
   dmu = d[nn+m:]
   alpha = 1.0
   while True:
      u = (mu + alpha * dmu) @ (xx + alpha * dxx) / nn
      if np . all ((mu + alpha * dmu) * (xx + alpha * dxx) >= sigma * u): break
      alpha *= 0.5
   if alpha < 0.001: break
   xx += alpha * dxx
   lam += alpha * dlam
   mu += alpha * dmu


# Nonlinear Interior-Point
# (no equality constraints, only a single inequality constraint h(x) >= 0, handled via a slack y)

def f (x):  ...
def df (x):  ...
def ddf (x):  ...

def h (x): ...
def dh (x): ...
def ddh (x): ...

sigma = 0.01

x = np . array ([0.5, 2.5])
y = np.array ([h(x)])   # slack variable, requires h(x) > 0 at the start point
mu = np . array ([1.0])
for k in range (...):
   u = mu @ y / 1
   print (mu, y, u, h(x))
   L = np.block (                            # Jacobian of the residual l below w.r.t. (x, y, mu)
     [ [ddf(x) - ddh(x) * mu, np . zeros((2,1)), - np.array([dh(x)]).T],
       [np . zeros ((1,2)), np . diag (mu), np . diag (y)],
       [np.array([dh(x)]), - np.eye(1), np.zeros((1,1))]
     ])
   l = np . concatenate ((df(x) - dh(x) * mu, mu * y - sigma * u, h(x)-y))
   d = np . linalg . solve (L, -l)
   dx = d[0:2];  dy = d[2:3];  dmu = d[3:]
   alpha = 1.0
   while True:
      u = (mu + alpha * dmu) @ (y + alpha * dy) / 1
      if np . all ((mu + alpha * dmu) * (y + alpha * dy) >= sigma * u): break
      alpha *= 0.5
   if alpha < 0.001: break
   x += alpha * dx
   y += alpha * dy
   mu += alpha * dmu

