76 commits
1f18253
build the base mapping class, and exponential and taylor children
artofscience Jan 6, 2022
c4fd413
change word
artofscience Jan 6, 2022
4247cbd
small changes
artofscience Jan 6, 2022
7c660fc
Merge remote-tracking branch 'origin/main' into combine_approx_interv
artofscience Jan 9, 2022
50cc3b4
create a general mapping class and children problem, subproblem and a…
artofscience Jan 9, 2022
4fdaf1d
apply feedback max
artofscience Jan 10, 2022
c42d484
changed mapping.py drastically and made a first test; passed :)
artofscience Jan 10, 2022
bec6b7f
some more tests, at some point breaks down (currently commented out)
artofscience Jan 10, 2022
587f750
delete subproblem for now (will add later)
artofscience Jan 10, 2022
e3c89f9
fixed an error, second order sensitivities not correct yet
artofscience Jan 10, 2022
6889ef4
updated structure:
artofscience Jan 11, 2022
a8c9cda
Update sao/mappings/mapping.py
artofscience Jan 11, 2022
6c6c87c
small change
artofscience Jan 11, 2022
0468d9c
Merge remote-tracking branch 'origin/combine_approx_interv' into comb…
artofscience Jan 11, 2022
922fd77
small change
artofscience Jan 11, 2022
604d632
Updated version with approx=intv=mapping
artofscience Jan 11, 2022
940bf85
Without base class
artofscience Jan 11, 2022
bb3178f
updated emptymap structure
artofscience Jan 11, 2022
c05b9c0
remove problem from mapping
artofscience Jan 11, 2022
9815a7d
update test
artofscience Jan 11, 2022
edb155f
update test
artofscience Jan 11, 2022
3accbb0
generated an empty(mapping)
artofscience Jan 11, 2022
88d9ac6
Apply suggestions from code review
artofscience Jan 11, 2022
1c3e717
Update sao/mappings/mapping.py
artofscience Jan 11, 2022
29f3390
generate linear mapping as empty mapping
artofscience Jan 11, 2022
356b083
small typo
artofscience Jan 11, 2022
f47223c
fixed error in chain rule, taylor with exp not working yet
artofscience Jan 12, 2022
7ccd46a
taylor approximation is working! (does not obey chain rule)
artofscience Jan 12, 2022
9894916
test Taylor1(Taylor1(Rec(Lin)))
artofscience Jan 12, 2022
57a768c
implemented Taylor2 and made drastic simplifications to taylor1/2
artofscience Jan 13, 2022
ac0c8b9
taylor2 working
artofscience Jan 13, 2022
198ef9e
small improvements
artofscience Jan 14, 2022
9351ea8
moved dx/dy ddx/ddy to mapping update. T2 not working..
artofscience Jan 14, 2022
280e57c
ARNOUD
artofscience Jan 14, 2022
1c2445e
test approx of approx (aoa)
artofscience Jan 14, 2022
87a9d57
Merge remote-tracking branch 'origin/main' into combine_approx_interv
artofscience Jan 16, 2022
0d876d0
test approx of approx
artofscience Jan 16, 2022
84bc0da
test approx of approx
artofscience Jan 30, 2022
f74996f
small modification to aoa:
artofscience Jan 30, 2022
d72c5d2
additional tests
artofscience Jan 30, 2022
d0f5ae5
small mods
artofscience Jan 30, 2022
9e5d279
test_dqa spherical nonspherical
artofscience Jan 30, 2022
a9d4c28
Update pytest.yml
artofscience Jan 30, 2022
f4053fd
remove numba
artofscience Jan 30, 2022
3fbb3a9
Merge remote-tracking branch 'origin/combine_approx_interv' into comb…
artofscience Jan 30, 2022
933feec
comment assert
artofscience Jan 30, 2022
bfebf3e
Update pytest.yml
artofscience Jan 30, 2022
c6d08fd
fix bug spherical
artofscience Jan 30, 2022
5339f62
added positive/negative and conlin
artofscience Jan 30, 2022
c8c4cb4
test conlin
artofscience Jan 30, 2022
e626ebd
reformatting structure of files
artofscience Jan 30, 2022
3e14701
small mod to imports
artofscience Jan 30, 2022
b37d7f3
test mapping some initial steps
artofscience Jan 30, 2022
a3a27cf
test Mixed Mapping (MM)
artofscience Jan 30, 2022
99999a2
small mod
artofscience Jan 30, 2022
2aab75f
add mma (not tested yet!)
artofscience Jan 30, 2022
f61beb3
add mma (not tested yet!)
artofscience Jan 30, 2022
1c7fc17
modification of exponential initialisation
artofscience Jan 30, 2022
f65e659
modification of exponential initialisation
artofscience Jan 30, 2022
9053309
added sum mapping
artofscience Jan 31, 2022
a149a77
added sum mapping
artofscience Jan 31, 2022
5c96a90
new mapping class (simplicity = key)
artofscience Feb 1, 2022
978329a
introduced new and simplified mixed mapping
artofscience Feb 1, 2022
b0b00b8
introduced new and simplified mixed mapping
artofscience Feb 1, 2022
808c2ed
introduced new and simplified mixed mapping
artofscience Feb 1, 2022
c9f9a88
introduced new and simplified mixed mapping
artofscience Feb 1, 2022
9256139
changes based on max's input
artofscience Feb 7, 2022
5d2eb28
updated the update method (based on discussion max and stijn) not wor…
artofscience Feb 9, 2022
f072031
Commenting the approximations.py file
artofscience Jun 25, 2022
a8846ef
Updated test change of variable
artofscience Jun 25, 2022
d3a6bd0
Better commenting approximations.py
artofscience Jun 25, 2022
365224f
Modified mapping based on max's comments
artofscience Jun 25, 2022
40869d7
Some additional comments and setup of test_mapping and test_approx
artofscience Jun 25, 2022
2008c39
Comment mapping.py
artofscience Jun 26, 2022
de1405c
Added new test of mapping (update function)
artofscience Jun 26, 2022
e662767
Modifed "global" update, such that local "_update" does not require o…
artofscience Jun 26, 2022
2 changes: 1 addition & 1 deletion problems/n_dim/square.py
@@ -16,7 +16,7 @@ def __init__(self, n):
super().__init__()
self.x_min = 1e-3 * np.ones(n) # cuz a subproblem uses both, whereas a problem only has x_min
self.x_max = np.ones(n) # cuz a subproblem uses both, whereas a problem only has x_max
- self.x0 = np.linspace(0.8, 0.9, n)
+ self.x0 = np.linspace(1, 2, n)
self.n = n
self.m = 1
self.f = np.zeros(n)
73 changes: 73 additions & 0 deletions sao/intervening_variables/mma.py
@@ -1,5 +1,6 @@
import numpy as np

from .intervening import Intervening
from .exponential import Exponential
from .split import PositiveNegative

@@ -195,3 +196,75 @@ def get_asymptotes(self, x):
low = x - self.dist * self.dx
upp = x + self.dist * self.dx
return low, upp


class MMA07(Intervening):
"""
MMA implementation as proposed in the note "MMA and GCMMA - two methods for nonlinear optimization"
Krister Svanberg (KTH Stockholm), 2007

Some practical considerations:
1. Scale the constraint such that 1 < fj(x) = gj(x) - gjmax < 100 and 1 < f0(x) < 100
2. Scale variables such that 0.1 < ximax - ximin < 100
3. When working with artificial variables, start with cj = 100, and increase by factors of 10 until all yj are zero
"""

def __init__(self, x_min=0.0, x_max=1.0, sinit=0.5, sincr=1.2, sdecr=0.7, asybound=10.0, oscillation_tol=1e-10,
p=-1, factor=0.01):
super().__init__(p=p, factor=factor)
self.x, self.xold1, self.xold2 = None, None, None
self.dx = x_max - x_min

self.asybound = asybound
self.sinit = sinit
self.sincr = sincr
self.sdecr = sdecr
self.oscillation_tol = oscillation_tol

self.dist = None
self.dist_min, self.dist_max = 1 / (self.asybound ** 2), self.asybound

def y(self, x):
# Mirrors dydx/ddyddx below: evaluate the right or left branch depending on the sign.
return np.where(self.positive, self.right.y(x), self.left.y(x))

def dydx(self, x):
return np.where(self.positive, self.right.dydx(x), self.left.dydx(x))

def ddyddx(self, x):
return np.where(self.positive, self.right.ddyddx(x), self.left.ddyddx(x))

def clip(self, x):
self.left.clip(x)
self.right.clip(x)
return x


def get_asymptotes(self, x):
"""Increases or decreases the asymptote interval based on oscillations in the design vector."""
self.xold2, self.xold1, self.x = self.xold1, self.x, x.copy()
if self.dist is None:
self.dist = np.full_like(self.x, self.sinit)

if self.xold2 is None:
# Initial values of asymptotes
low = x - self.dist * self.dx
upp = x + self.dist * self.dx
else:
# Update scheme for the asymptotes:
# if the signs of (x_k - xold1) and (xold1 - xold2) are opposite, variable x_i oscillates and its
# asymptotes are tightened; if the signs are equal, the asymptotes slow down convergence and are relaxed

# check for oscillations in variables (if > 0: no oscillations, if < 0: oscillations)
oscillation = ((x - self.xold1) * (self.xold1 - self.xold2)) / self.dx

# enlarge the asymptote factor for monotonically changing variables, shrink it for oscillating ones
self.dist[oscillation > +self.oscillation_tol] *= self.sincr
self.dist[oscillation < -self.oscillation_tol] *= self.sdecr

# Clip the asymptote factor (in place)
np.clip(self.dist, self.dist_min, self.dist_max, out=self.dist)

# update lower and upper asymptotes
low = x - self.dist * self.dx
upp = x + self.dist * self.dx
return low, upp
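For readers skimming the diff, the asymptote update in MMA07.get_asymptotes boils down to the following standalone sketch. The helper name update_asymptotes and the example arrays are illustrative only and not part of this pull request; the parameter names follow the class above.

import numpy as np

def update_asymptotes(x, xold1, xold2, dist, dx,
                      sincr=1.2, sdecr=0.7, asybound=10.0, tol=1e-10):
    # Sign test per variable: (x_k - x_{k-1}) * (x_{k-1} - x_{k-2})
    #   > 0 -> monotone progress, enlarge the asymptote interval
    #   < 0 -> oscillation, shrink it
    oscillation = (x - xold1) * (xold1 - xold2) / dx
    dist = dist.copy()
    dist[oscillation > +tol] *= sincr
    dist[oscillation < -tol] *= sdecr
    dist = np.clip(dist, 1.0 / asybound ** 2, asybound)  # keep the factor bounded
    return x - dist * dx, x + dist * dx

# The middle variable oscillates, so its interval shrinks while the others grow.
x = np.array([0.5, 0.6, 0.7])
xold1 = np.array([0.4, 0.7, 0.6])
xold2 = np.array([0.3, 0.6, 0.5])
low, upp = update_asymptotes(x, xold1, xold2, dist=np.full(3, 0.5), dx=np.ones(3))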
120 changes: 120 additions & 0 deletions sao/mappings/approximations.py
@@ -0,0 +1,120 @@
import numpy as np
from .mapping import Mapping, Linear


class LinearApproximation(Mapping):
"""
Linear Approximation (LA) f[x] of function g[x] at x0.

f[x] = g[x0] + g'[x0]*(x - x0)
= (g[x0] - g'[x0]*x0) + g'[x0]*x

f'[x] = g'[x0]

f''[x] = 0
"""

def __init__(self, mapping=Linear()):
"""
Initialization of LA.

:param mapping: The dependent mapping.

If no mapping is provided, the dependent mapping defaults to Linear(), which ends the chain.
"""
super().__init__(mapping) # Initialization of dependent mapping.
self.g0, self.dg0 = None, None

def _update(self, x0, dg0, ddg0=0):
self.g0 = -dg0 * x0
self.dg0 = dg0
return self._g(x0), self._dg(dg0), self._ddg(ddg0) # Why not return self.g0, self.dg0, self.ddg0?

def _g(self, x):
"""
Function value of LA function at x.

f[x] = g[x0] + g'[x0]*(x - x0)
= (g[x0] - g'[x0]*x0) + g'[x0]*x
= g[x0] + a + b*x, with
a = -g'[x0]*x0
b = g'[x0]

:param x: Incoming variable (or function) value
:return: Function value at x
"""
return self.g0 + self.dg0 * x # Excludes g[x0] (on purpose)

def _dg(self, x): return self.dg0

def _ddg(self, x): return np.zeros_like(x)


class DiagonalQuadraticApproximation(LinearApproximation):
"""
Diagonal Quadratic Approximation (DQA) f[x] of function g[x] at x0.

DQA builds on top of LA.
"Diagonal quadratic" means only the separable (diagonal) second-order terms g''[x0] are used, so no mixed second derivatives appear.

f[x] = g[x0] + g'[x0]*(x - x0) + 1/2*g''[x0]*(x - x0)^2
= (g[x0] - g'[x0]*x0 + 1/2*g''[x0]*x0^2) + (g'[x0] - g''[x0]*x0)*x + 1/2*g''[x0]*x^2
= LA[x] + 1/2*g''[x0]*(x0^2 - 2*x0*x + x^2)
= (LA[x] + 1/2*g''[x0]*x0^2) - g''[x0]*x0*x + 1/2*g''[x0]*x^2

f'[x] = (LA'[x] - g''[x0]*x0) + g''[x0]*x

f''[x] = LA''[x] + g''[x0], with LA''[x] = 0
"""

def __init__(self, mapping=Linear()):
super().__init__(mapping)
self.ddg0 = None

def _update(self, x0, dg0, ddg0=0):

super()._update(x0, dg0)
self.g0 += 0.5 * ddg0 * x0 ** 2
self.dg0 -= ddg0 * x0
self.ddg0 = ddg0

def _g(self, x):
"""
Function value of DQA function at x.

f[x] = g[x0] + g'[x0]*(x - x0) + 1/2*g''[x0]*(x - x0)^2
= (g[x0] - g'[x0]*x0 + 1/2*g''[x0]*x0^2) + (g'[x0] - g''[x0]*x0)*x + 1/2*g''[x0]*x^2
= g[x0] + a + b*x + c*x^2, with
a = -g'[x0]*x0 + 1/2*g''[x0]*x0^2
b = g'[x0] - g''[x0]*x0
c = 1/2*g''[x0]

:param x: Incoming variable (or function) value
:return: Function value at x
"""

return self.g0 + self.dg0 * x + 0.5 * self.ddg0 * x ** 2

def _dg(self, x):
"""
First derivative of DQA function at x.

f'[x] = (g'[x0] - g''[x0]*x0) + g''[x0]*x

:param x: Incoming variable (or function) value
:return: First derivative at x
"""

return self.dg0 + self.ddg0 * x

def _ddg(self, x):
"""
Second derivative of DQA function at x.

f''[x] = g''[x0]

:param x: Incoming variable (or function) value
:return: Second derivative at x
"""

return self.ddg0
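As a quick sanity check of the coefficient bookkeeping in DiagonalQuadraticApproximation._update above (constant, linear and quadratic parts, with g[x0] deliberately excluded), the standalone snippet below verifies that the intercept form a + b*x + c*x^2 reproduces the Taylor form. The test function g(x) = x^3 and the numbers are made up for illustration and are not part of the diff.

import numpy as np

g, dg, ddg = lambda x: x ** 3, lambda x: 3 * x ** 2, lambda x: 6 * x

x0 = np.array([1.5])
a = -dg(x0) * x0 + 0.5 * ddg(x0) * x0 ** 2   # g0 as stored by _update (excludes g[x0])
b = dg(x0) - ddg(x0) * x0                    # dg0 as stored by _update
c = 0.5 * ddg(x0)                            # half of the stored ddg0

x = np.array([1.7])
taylor = dg(x0) * (x - x0) + 0.5 * ddg(x0) * (x - x0) ** 2
assert np.allclose(a + b * x + c * x ** 2, taylor)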
52 changes: 52 additions & 0 deletions sao/mappings/change_of_variable.py
@@ -0,0 +1,52 @@
from abc import abstractmethod, ABC
from .mapping import Mapping, Linear, Conditional
import numpy as np


class Exponential(Mapping):
def __init__(self, p=1, mapping=Linear(), xlim=1e-10):
super().__init__(mapping)
assert p != 0, f"Invalid power x^{p}, will result in zero division."
self.p, self.xlim = p, xlim

def _clip(self, x): return np.maximum(x, self.xlim, out=x) if self.p < 0 else x

def _g(self, x): return x ** self.p

def _dg(self, x): return self.p * x ** (self.p - 1)

def _ddg(self, x): return self.p * (self.p - 1) * x ** (self.p - 2)


class MMAp(Conditional, ABC):
def __init__(self, p=-1, factor=1e-3, low=-10.0, upp=10.0):
super().__init__(Exponential(p), Exponential(p))
self.low, self.upp = low, upp
self.factor = factor

def update(self, x0, dg0, ddg0=0):
super().update(x0, dg0, ddg0)
[self.low, self.upp] = self.get_asymptotes(x0)

@abstractmethod
def get_asymptotes(self, x): pass

def _g(self, x):
return super().g(np.where(self.condition, self.upp - x, x - self.low))

def _dg(self, x):
return super().dg(np.where(self.condition, self.upp - x, x - self.low)) * np.where(self.positive, -1, +1)

def _ddg(self, x):
return super().ddg(np.where(self.condition, self.upp - x, x - self.low))

def clip(self, x):
return np.clip(x, self.low + self.factor, self.upp - self.factor, out=x)


class ConLin(Conditional):
def __init__(self): super().__init__(Exponential(-1), Exponential(1))


class MMA(MMAp, ABC):
def __init__(self): super().__init__(-1)
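To see what the Exponential(p=-1) branch used by ConLin and MMA amounts to when combined with a first-order approximation, here is a standalone sketch of an expansion in the intervening variable y = 1/x. The helper conlin_reciprocal is illustrative only and not part of the diff.

import numpy as np

def conlin_reciprocal(g0, dg0, x0):
    """First-order expansion of g in y = 1/x: f(x) = g(x0) + dg(x0) * dx/dy * (1/x - 1/x0)."""
    dxdy = 1.0 / (-1.0 / x0 ** 2)  # inverse of dy/dx = -1/x^2 at x0
    return lambda x: g0 + dg0 * dxdy * (1.0 / x - 1.0 / x0)

# For g(x) = 1/x the reciprocal expansion is exact, so the approximation matches g away from x0.
x0 = 2.0
f = conlin_reciprocal(g0=1.0 / x0, dg0=-1.0 / x0 ** 2, x0=x0)
assert np.isclose(f(3.0), 1.0 / 3.0)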