diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index c4a5dd0..aedfd22 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -8,81 +8,73 @@
+
+
+
-
+
-
-
+
+
-
-
-
-
-
-
+
-
+
-
-
-
-
-
-
+
+
-
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
+
+
-
+
+
+
+
+
+
+
+
+
+
-
-
+
+
-
-
-
+
@@ -99,36 +91,36 @@
- no_prior_no_exploration
- no_prior_with_exploration_ei_noise
- no_p
- open
- model.WO_obj_ca
- model.WO_con1_model_ca, model.WO_con2_model_ca
- n_iter
- n_iter
- obj_setting
- self.obj
- no_prior_wit
- con_empty, con_empty
- con_empty
- Benoit
- ca_
- scale_inputs=True
- benoir
- sqrt
- 29
- samples_
- backtr
- obj
- obj_
- noise
- compute_
+ bio
+ 75
+ qua
+ exploration_n
+ exploration_no
+ .png
+ new2_
+ eval
+ self.ytrain
+ Use_GP_for_model
+ GPs_construction_ca
+ construct_opt_ca
+ Ellipse_sampling
+ construct_opt_ca_with_GPception
+ data_handling
+ compute_data
+ GPs_c
+ funcs_model
+ self.Use
+ self.ndat
+ restorat
+ plots_
+ GPs_construction
+ plots_RT
+ bio_con1_ca_RK4
+ obj_min
plot_obj
- [6.
- EXplore_n
- noise = [0.5**2, 5e-8, 5e-8]
- self.obj
+ noise
+ compute
+ 23
Benoit_Model
@@ -137,6 +129,13 @@
np.array([1.1,-0.1])
noise = [np.sqrt(1e-3)]*2
con1_model
+ u = np.array(u1) * (self.u_max - self.u_min) + self.u_min
+ u1 = np.array(u0).reshape((self.nk,2), order='F' )
+
+ return -pcon1
+ Matern52
+ 0.1
+ 'LCB'
@@ -150,20 +149,32 @@
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
+
+
+
+
+
+
@@ -175,14 +186,37 @@
-
-
+
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -190,10 +224,19 @@
-
+
+
+
+
+
+
+
+
+
+
@@ -210,7 +253,7 @@
-
+
@@ -218,11 +261,11 @@
-
+
-
+
@@ -245,7 +288,7 @@
-
+
@@ -255,23 +298,22 @@
-
-
+
-
-
-
+
+
+
-
-
+
+
-
-
-
-
+
+
+
+
@@ -287,288 +329,274 @@
file://$PROJECT_DIR$/run_WO/main_WO_prior_EI.py
- 836
+ 865
file://$PROJECT_DIR$/run_WO/main_WO_prior_EI.py
- 30
-
+ 284
+
+
+
+ file://$PROJECT_DIR$/run_WO/main_WO_prior_EI.py
+ 282
+
+
+
+ file://$PROJECT_DIR$/sub_uts/systems.py
+ 585
+
+
+
+ file://$PROJECT_DIR$/run_WO/main_WO_prior_EI.py
+ 246
+
file://$PROJECT_DIR$/run_WO/main_WO_prior_EI.py
- 94
-
+ 244
+
+
+
+ file://$PROJECT_DIR$/run_WO/main_WO_prior_EI.py
+ 181
+
+
+
+ file://$PROJECT_DIR$/run_simple/main_simple.py
+ 754
+
file://$PROJECT_DIR$/run_simple/main_simple.py
- 29
-
+ 220
+
+
+
+ file://$PROJECT_DIR$/plots_RTO.py
+ 616
+
+
+
+ file://$PROJECT_DIR$/plots_RTO.py
+ 627
+
+
+
+ file://$PROJECT_DIR$/run_Bio/main_WO_prior_EI2.py
+ 31
+
+
+
+ file://$PROJECT_DIR$/plots_RTO.py
+ 797
+
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
-
+
-
-
+
+
-
+
-
+
-
-
+
+
-
+
+
-
-
+
+
-
+
-
-
-
-
+
+
-
+
-
-
-
-
-
+
+
-
+
-
-
-
-
-
+
+
-
+
-
-
-
-
-
+
+
-
+
-
-
+
+
-
-
-
-
-
-
+
+
+
-
+
-
-
+
+
-
+
-
-
+
+
+
+
+
-
+
-
-
+
+
-
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
+
+
-
+
-
-
+
+
-
+
+
+
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
-
-
-
-
-
+
+
-
+
-
-
+
+
-
+
-
-
+
+
-
+
-
-
+
+
-
-
-
-
-
-
+
-
+
-
-
-
-
-
-
-
-
+
-
+
-
-
+
+
-
-
-
-
-
-
+
-
+
-
-
-
-
-
-
-
-
-
-
+
+
-
+
-
-
+
-
-
+
-
+
-
-
-
-
-
-
-
-
-
-
+
+
diff --git a/__pycache__/plots_RTO.cpython-37.pyc b/__pycache__/plots_RTO.cpython-37.pyc
new file mode 100644
index 0000000..1477a21
Binary files /dev/null and b/__pycache__/plots_RTO.cpython-37.pyc differ
diff --git a/plots_RTO.py b/plots_RTO.py
index 0c4bf06..30da36a 100644
--- a/plots_RTO.py
+++ b/plots_RTO.py
@@ -725,4 +725,157 @@ def plot_obj_noise(obj):
plt.tight_layout()
plt.savefig('figs_noise_WO/noexplore_prior.png', dpi=400)
plt.close()
- return print(1)
\ No newline at end of file
+ return print(1)
+
+
+#
+#
+# X_opt_mc, y_opt_mc, TR_l_mc, xnew_mc, backtrack_1_mc = pickle.load(
+# open('prior_with_exploration_ei_no_red_constraint_violation.p', 'rb'))
+# plant = Bio_system(nk=6)
+# cons_system = [] # l.WO_obj_ca
+# for k in range(plant.nk):
+# cons_system.append(functools.partial(plant.bio_con1_ca, k + 1))
+# cons_system.append(functools.partial(plant.bio_con2_ca, k + 1))
+#
+# obj_system = plant.bio_obj_ca
+# cons_h = np.zeros([8, 63, 12])
+# obj = np.zeros([8, 63])
+# for i in range(8):
+# max_obj = 0.
+#
+# for j in range(63):
+# for ik in range(12):
+# cons_h[i, j, ik] = cons_system[ik](np.array(X_opt_mc)[i, j])
+# if j >= 13:
+# # for k in range(100):
+# p = 0.
+# if (cons_h[i, j,:] > 1e-7).all(): # not(np.array(backtrack_1_mc)[i,j-13]):#cons_system[ik](np.array(X_opt_mc)[i,j-k])>0:
+# # p+=1
+# print(cons_h[i, j, :] > 1e-7)
+# if -obj_system(np.array(X_opt_mc)[i, j]) > max_obj:
+# max_obj = -obj_system(np.array(X_opt_mc)[i, j])
+# obj[i, j] = max_obj
+# else:
+# obj[i, j] = max_obj
+# else:
+# obj[i, j] = max_obj
+#
+# # if p==1 or j== 13:
+# # obj[i,j] = obj_system(np.array(X_opt_mc)[i,j-k])
+# # break
+#
+# else:
+# obj[i, j] = -obj_system(np.array(X_opt_mc)[i, j])
+
+# csfont = {'fontname': 'Times New Roman'}
+#
+# # plt.rcParams['font.sans-serif'] = "Arial"
+# plt.rcParams['font.family'] = "Times New Roman"
+# ni = 50
+# ft = int(20)
+# font = {'size': ft}
+# plt.rc('font', **font)
+# plt.rc('text', usetex=True)
+# params = {'legend.fontsize': 15,
+# 'legend.handlelength': 2}
+# plt.rcParams.update(params)
+# plt.plot(np.linspace(1, 50, 50),obj[0,13:].T,color='#AA3939', label='Proposed')
+# plt.plot(np.linspace(1, 50, 50),obj[[0,1,2,3,4,5,7],13:].T,color='#AA3939')
+#
+# plt.plot(np.linspace(1, 50, 50), [0.171] * ni, 'k--', label='Real Optimum')
+#
+# plt.xlabel('RTO-iter')
+# plt.ylabel('Objective')
+# plt.xlim(1, 50)
+# plt.legend()
+# plt.tick_params(right=True, top=True, left=True, bottom=True)
+# plt.tick_params(axis="y", direction="in")
+# plt.tick_params(axis="x", direction="in")
+# plt.tight_layout()
+# plt.savefig('figs_WO/EI_bio.png', dpi=400)
+# plt.close()
+X_opt_mc, y_opt_mc, TR_l_mc, xnew_mc, backtrack_1_mc = pickle.load(open('no_prior_with_exploration_ei_no_red_constraint_violation_GP.p', 'rb'))
+X_opt_mc_model, y_opt_mc, TR_l_mc, xnew_mc, backtrack_1_mc = pickle.load(open('prior_with_exploration_ei_no_red_constraint_violation.p', 'rb'))
+csfont = {'fontname': 'Times New Roman'}
+
+# plt.rcParams['font.sans-serif'] = "Arial"
+plt.rcParams['font.family'] = "Times New Roman"
+ni = 50
+ft = int(20)
+font = {'size': ft}
+plt.rc('font', **font)
+plt.rc('text', usetex=True)
+params = {'legend.fontsize': 15,
+ 'legend.handlelength': 2}
+plt.rcParams.update(params)
+u1_opt = np.array([282.21090606, 282.21090606, 281.84839532, 280.74131362, 120.00896333, 120.01589492,
+ 222.34601803])
+# plt.step(np.linspace(0,6,7),(40-0)*np.array([np.array(X_opt_mc)[3,-1,1].T,*np.array(X_opt_mc)[3,-1,1::2].T])+0, where='pre',
+# color='#AA3939', label='Proposed')
+plt.step(np.linspace(0,6,7),u1_opt, where='pre',
+ color='#AA3939', label='Optimal')
+u1 = np.array(X_opt_mc)[:,:,::2]
+
+plt.fill_between(np.linspace(0,6,7),
+ np.quantile((400-120)*np.array([np.array(u1)[:,-1,1].T,
+ *np.array(u1)[:,-1].T])+120,0.05,axis=1),
+ np.quantile((400-120)*np.array([np.array(u1)[:,-1,1].T,
+ *np.array(u1)[:,-1].T])+120,0.95,axis=1),'-',step='pre'
+ ,color='#226666',alpha=0.2)
+plt.step(np.linspace(0,6,7),((400-120)*np.array([np.array(u1)[:,-1,1].T,*np.array(u1)[:,-1].T])).mean(1)+120,'--',where='pre'
+ ,color='#226666', label='Proposed')
+
+plt.ylabel('$I [\mu$mol m$^{-2}$s$^{-1}]$ ')
+plt.xlabel('Normalized time [-]')
+plt.xlim(0, 6)
+plt.legend()
+plt.tick_params(right=True, top=True, left=True, bottom=True)
+plt.tick_params(axis="y", direction="in")
+plt.tick_params(axis="x", direction="in")
+plt.tight_layout()
+plt.savefig('figs_WO/I2.png', dpi=400)
+plt.close()
+
+#
+#X_opt_mc, y_opt_mc, TR_l_mc, xnew_mc, backtrack_1_mc = pickle.load(open('no_prior_with_exploration_ei_no_red_constraint_violation_GP.p', 'rb'))
+X_opt_mc_model, y_opt_mc, TR_l_mc, xnew_mc, backtrack_1_mc = pickle.load(open('prior_with_exploration_ei_no_red_constraint_violation.p', 'rb'))
+csfont = {'fontname': 'Times New Roman'}
+
+# plt.rcParams['font.sans-serif'] = "Arial"
+plt.rcParams['font.family'] = "Times New Roman"
+ni = 50
+ft = int(20)
+font = {'size': ft}
+plt.rc('font', **font)
+plt.rc('text', usetex=True)
+params = {'legend.fontsize': 15,
+ 'legend.handlelength': 2}
+plt.rcParams.update(params)
+
+# plt.step(np.linspace(0,6,7),(40-0)*np.array([np.array(X_opt_mc)[3,-1,1].T,*np.array(X_opt_mc)[3,-1,1::2].T])+0, where='pre',
+# color='#AA3939', label='Proposed')
+u2_opt = np.array([25.06421834, 25.06421834, 16.03066446, 37.62476903, 39.99997563, 39.9999762, 39.99997472])
+plt.step(np.linspace(0,6,7),u2_opt, where='pre',
+ color='#AA3939', label='Optimal')
+u2 = np.array(X_opt_mc)[:,:,1::2]
+plt.fill_between(np.linspace(0,6,7),
+ np.quantile((40)*np.array([np.array(u2)[:,-1,1].T,
+ *np.array(u2)[:,-1].T]),0.05,axis=1),
+ np.quantile((40)*np.array([np.array(u2)[:,-1,1].T,
+ *np.array(u2)[:,-1].T]),0.95,axis=1),'-',step='pre'
+ ,color='#226666',alpha=0.2)
+plt.step(np.linspace(0,6,7),((40)*np.array([np.array(u2)[:,-1,1].T,*np.array(u2)[:,-1].T])).mean(1),'--',where='pre'
+ ,color='#226666', label='Proposed')
+
+plt.ylabel('$F_{\sf N} [$mg L$^{-1}$h$^{-1}]$ ')
+plt.xlabel('Normalized time [-]')
+plt.xlim(0, 6)
+plt.legend()
+plt.tick_params(right=True, top=True, left=True, bottom=True)
+plt.tick_params(axis="y", direction="in")
+plt.tick_params(axis="x", direction="in")
+plt.tight_layout()
+plt.savefig('figs_WO/f_N2.png', dpi=400)
+plt.close()
+
diff --git a/run_Bio/__init__.py b/run_Bio/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/run_Bio/figs_WO/EI_bio_GP.png b/run_Bio/figs_WO/EI_bio_GP.png
new file mode 100644
index 0000000..303261c
Binary files /dev/null and b/run_Bio/figs_WO/EI_bio_GP.png differ
diff --git a/run_Bio/figs_WO/I.png b/run_Bio/figs_WO/I.png
new file mode 100644
index 0000000..d73f100
Binary files /dev/null and b/run_Bio/figs_WO/I.png differ
diff --git a/run_Bio/figs_WO/I2.png b/run_Bio/figs_WO/I2.png
new file mode 100644
index 0000000..5da8b88
Binary files /dev/null and b/run_Bio/figs_WO/I2.png differ
diff --git a/run_Bio/figs_WO/f_N.png b/run_Bio/figs_WO/f_N.png
new file mode 100644
index 0000000..488353a
Binary files /dev/null and b/run_Bio/figs_WO/f_N.png differ
diff --git a/run_Bio/figs_WO/f_N2.png b/run_Bio/figs_WO/f_N2.png
new file mode 100644
index 0000000..e6217a7
Binary files /dev/null and b/run_Bio/figs_WO/f_N2.png differ
diff --git a/run_Bio/main_WO_prior_EI2.py b/run_Bio/main_WO_prior_EI2.py
new file mode 100644
index 0000000..a212f55
--- /dev/null
+++ b/run_Bio/main_WO_prior_EI2.py
@@ -0,0 +1,993 @@
+import time
+import random
+import numpy as np
+import numpy.random as rnd
+from scipy.spatial.distance import cdist
+
+import sobol_seq
+from scipy.optimize import minimize
+from scipy.optimize import broyden1
+from scipy import linalg
+import scipy
+import matplotlib.pyplot as plt
+import functools
+from matplotlib.patches import Ellipse
+from run_Bio.samples_eval_gen import samples_generation
+from casadi import *
+
+from sub_uts.systems2 import *
+from sub_uts.utilities_4 import *
+from plots_RTO import compute_obj, plot_obj, plot_obj_noise
+
+import pickle
+from plots_RTO import Plot
+#----------1) EI-NO PRIOR-UNKNOWN NOISE----------#
+#---------------------------------------------#
+if not(os.path.exists('figs_WO')):
+ os.mkdir('figs_WO')
+if not(os.path.exists('figs_noise_WO')):
+ os.mkdir('figs_noise_WO')
+
+#------------------------------------------------------------------
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+plant = Bio_system(nk=6)
+
+obj_system = plant.bio_obj_ca
+cons_system = [] # l.WO_obj_ca
+for k in range(plant.nk):
+ cons_system.append(functools.partial(plant.bio_con1_ca, k + 1))
+ cons_system.append(functools.partial(plant.bio_con2_ca, k + 1))
+
+X, start = samples_generation(cons_system, obj_system, plant.nk*plant.nu)
+
+for i in range(8):
+
+
+
+ plant = Bio_system(nk=6)
+ model = Bio_model(nk=6, empty=False)#empy=True)
+
+ u = [0.]*plant.nk*plant.nu
+ xf = plant.bio_obj_ca(u)
+ x1 = plant.bio_con1_ca(1,u)
+ x2 = plant.bio_con2_ca(1,u)
+ functools.partial(plant.bio_con1_ca,1)
+ obj_model = model.bio_obj_ca_RK4#mode
+ F = model.bio_model_ca()
+ cons_model = []# l.WO_obj_ca
+ for k in range(model.nk):
+ cons_model.append(functools.partial(model.bio_con1_ca_RK4, k+1))
+ cons_model.append(functools.partial(model.bio_con2_ca_RK4, k+1))
+
+
+
+ # obj_model = obj_empty
+ # cons_model = []# l.WO_obj_ca
+ # for k in range(model.nk):
+ # cons_model.append(con_empty)
+ # cons_model.append(con_empty)
+
+
+
+ x = np.random.rand(model.nk*model.nu)
+ x1= F(model.x0, x[:2])
+ print(model.bio_con1_ca_f(x1), model.bio_con1_ca_RK4(1, x))
+
+ n_iter = 50
+ bounds = ([[0., 1.]] * model.nk*model.nu)#[[0.,1.],[0.,1.]]
+ #X = pickle.load(open('initial_data_bio_12_ca_new.p','rb'))
+ Xtrain = X[:model.nk*model.nu+1]#1.*(np.random.rand(model.nk*model.nu+500,model.nk*model.nu))+0.#np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+#
+#1.*(np.random.rand(model.nk*model.nu+500,model.nk*model.nu))+0.#np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = X[start]#model.nk*model.nu+1]#np.array([*[0.6]*model.nk,*[0.8]*model.nk])#
+
+ Delta0 = 0.25
+ Delta_max =5.; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=40,
+ multi_hyper=10, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=False, model=model)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_with_exploration_ei.p','wb'))
+
+
+#-----------------Model---------
+
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+
+
+ plant = Bio_system()
+ model = Bio_model()
+
+ u = [0.]*plant.nk*plant.nu
+ xf = plant.bio_obj_ca(u)
+ x1 = plant.bio_con1_ca(1,u)
+ x2 = plant.bio_con2_ca(1,u)
+ functools.partial(plant.bio_con1_ca,1)
+ obj_model = model.bio_obj_ca_RK4#mode
+ F = model.bio_model_ca()
+ cons_model = []# l.WO_obj_ca
+ for k in range(model.nk):
+ cons_model.append(functools.partial(model.bio_con1_ca_RK4, k+1))
+ cons_model.append(functools.partial(model.bio_con2_ca_RK4, k+1))
+
+
+
+ # obj_model = obj_empty
+ # cons_model = []# l.WO_obj_ca
+ # for k in range(model.nk):
+ # cons_model.append(con_empty)
+ # cons_model.append(con_empty)
+
+ obj_system = plant.bio_obj_ca
+ cons_system = []# l.WO_obj_ca
+ for k in range(model.nk):
+ cons_system.append(functools.partial(plant.bio_con1_ca, k+1))
+ cons_system.append(functools.partial(plant.bio_con2_ca, k+1))
+
+ x = np.random.rand(24)
+ print(F(model.x0, x[:2])[1] / 800 - 1, -model.bio_con1_ca_RK4(1, x))
+
+ n_iter = 8
+ bounds = ([[0., 1.]] * model.nk*model.nu)#[[0.,1.],[0.,1.]]
+ X = pickle.load(open('initial_data_bio_12_ca_new.p','rb'))
+ Xtrain = X[:model.nk*model.nu+1]#1.*(np.random.rand(model.nk*model.nu+500,model.nk*model.nu))+0.#np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+#
+#1.*(np.random.rand(model.nk*model.nu+500,model.nk*model.nu))+0.#np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = X[18]#model.nk*model.nu+1]#np.array([*[0.6]*model.nk,*[0.8]*model.nk])#
+
+ Delta0 = 0.5
+ Delta_max =5.; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=40,
+ multi_hyper=10, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=False, model=model)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_with_exploration_ei.p','wb'))
+
+
+
+
+#-----------------------------------------------------------------------#
+#----------2) EI-PRIOR-UNKNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ model = WO_model()
+ plant = WO_system()
+
+ obj_model = model.WO_obj_ca
+ cons_model = [model.WO_con1_model_ca, model.WO_con2_model_ca]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=3, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('with_prior_with_exploration_ei.p','wb'))
+#-----------------------------------------------------------------------#
+#----------3) EI-PRIOR-KNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ model = WO_model()
+ plant = WO_system()
+
+ obj_model = model.WO_obj_ca
+ cons_model = [model.WO_con1_model_ca, model.WO_con2_model_ca]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = [0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=3, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('with_prior_with_exploration_ei_noise.p','wb'))
+#-----------------------------------------------------------------------#
+#----------4) EI-NO PRIOR-KNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+
+ plant = WO_system()
+
+ obj_model = obj_empty
+ cons_model = [con_empty, con_empty]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = [0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=3, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_with_exploration_ei_noise.p','wb'))
+#-----------------------------------------------------------------------#
+
+
+#-----------------------------UCB----------------------------------------------#
+#----------1) UCB-NO PRIOR-UNKNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ plant = WO_system()
+
+ obj_model = obj_empty#model.WO_obj_ca
+ cons_model = [con_empty, con_empty]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_with_exploration_ucb.p','wb'))
+#-----------------------------------------------------------------------#
+#----------2) UCB-PRIOR-UNKNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ model = WO_model()
+ plant = WO_system()
+
+ obj_model = model.WO_obj_ca
+ cons_model = [model.WO_con1_model_ca, model.WO_con2_model_ca]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('with_prior_with_exploration_ucb.p','wb'))
+#-----------------------------------------------------------------------#
+#----------3) UCB-PRIOR-KNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ model = WO_model()
+ plant = WO_system()
+
+ obj_model = model.WO_obj_ca
+ cons_model = [model.WO_con1_model_ca, model.WO_con2_model_ca]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = [0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('with_prior_with_exploration_ucb_noise.p','wb'))
+#-----------------------------------------------------------------------#
+#----------4) UCB-NO PRIOR-KNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+
+ plant = WO_system()
+
+ obj_model = obj_empty
+ cons_model = [con_empty, con_empty]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = [0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_with_exploration_ucb_noise.p','wb'))
+#-----------------------------------------------------------------------#
+
+
+#-----------------------------No exploration----------------------------------------------#
+#----------1) noexplore-NO PRIOR-UNKNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ plant = WO_system()
+
+ obj_model = obj_empty#model.WO_obj_ca
+ cons_model = [con_empty, con_empty]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=1, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_no_exploration.p','wb'))
+#-----------------------------------------------------------------------#
+#----------2) noexplorePRIOR-UNKNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ model = WO_model()
+ plant = WO_system()
+
+ obj_model = model.WO_obj_ca
+ cons_model = [model.WO_con1_model_ca, model.WO_con2_model_ca]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = None#[0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=1, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('with_prior_no_exploration.p','wb'))
+#-----------------------------------------------------------------------#
+#----------3) no exploration-PRIOR-KNOWN NOISE----------#
+#---------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+ model = WO_model()
+ plant = WO_system()
+
+ obj_model = model.WO_obj_ca
+ cons_model = [model.WO_con1_model_ca, model.WO_con2_model_ca]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = [0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=1, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('with_prior_no_exploration_noise.p','wb'))
+#-----------------------------------------------------------------------#
+#----------4) Noexplore-NO PRIOR-KNOWN NOISE----------#
+#---------------------------------------------#
+#----------------------------------------------#
+np.random.seed(0)
+X_opt_mc = []
+y_opt_mc = []
+TR_l_mc = []
+xnew_mc = []
+backtrack_1_mc = []
+
+for i in range(30):
+
+
+ plant = WO_system()
+
+ obj_model = obj_empty
+ cons_model = [con_empty, con_empty]
+ obj_system = plant.WO_obj_sys_ca
+ cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
+
+
+
+
+ n_iter = 20
+ bounds = [[4.,7.],[70.,100.]]
+ Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ #Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
+ samples_number = Xtrain.shape[0]
+ data = ['data0', Xtrain]
+ u0 = np.array([6.9,83])
+
+ Delta0 = 0.25
+ Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
+ TR_scaling_ = False
+ TR_curvature_ = False
+ inner_TR_ = False
+ noise = [0.5**2, 5e-8, 5e-8]
+
+
+ ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
+ Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, np.array(bounds),obj_setting=1, noise=noise, multi_opt=30,
+ multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+
+ print('Episode: ',i)
+ if not TR_curvature_:
+ X_opt, y_opt, TR_l, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ else:
+ X_opt, y_opt, TR_l, TR_l_angle, xnew, backtrack_l = ITR_GP_opt.RTO_routine()
+ backtrack_l = [False, *backtrack_l]
+ X_opt_mc += [X_opt]
+ y_opt_mc += [y_opt]
+ TR_l_mc += [TR_l]
+ xnew_mc += [xnew]
+ backtrack_1_mc += [backtrack_l]
+print(2)
+pickle.dump([X_opt_mc, y_opt_mc,TR_l_mc, xnew_mc, backtrack_1_mc], open('no_prior_no_exploration_noise.p','wb'))
+#-----------------------------------------------------------------------#
+
+
+
+Plot('no_prior_with_exploration_ei')
+Plot('with_prior_with_exploration_ei')
+Plot('with_prior_with_exploration_ei_noise')
+Plot('no_prior_with_exploration_ei_noise')
+Plot('no_prior_with_exploration_ucb')
+Plot('with_prior_with_exploration_ucb')
+Plot('with_prior_with_exploration_ucb_noise')
+Plot('no_prior_with_exploration_ucb_noise')
+Plot('no_prior_no_exploration')
+Plot('with_prior_no_exploration')
+Plot('with_prior_no_exploration_noise')
+Plot('with_prior_no_exploration_noise')
+Plot('no_prior_no_exploration_noise')
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Plot a sin curve using the x and y axes.
+n_points = 20
+u_1 = np.linspace(4., 7., n_points)
+u_2 = np.linspace(70., 100., n_points)
+
+# --- plotting functions --- #
+plant_f_grid = np.zeros((n_points, n_points)) # f_copy
+for u1i in range(len(u_1)):
+ for u2i in range(len(u_2)):
+ try:
+ plant_f_grid[u1i, u2i] = obj_system(np.array([u_1[u1i], u_2[u2i]]))
+ except:
+ print('point ', (u1i, u2i), ' failed')
+
+plant_f_grid = plant_f_grid.T
+# normalizing plot
+np_bounds = np.array(bounds)
+np_diff_bounds = np_bounds[:, 1] - np_bounds[:, 0]
+u_1 = np.linspace(4., 7., n_points) / np_diff_bounds[0]
+u_2 = np.linspace(70., 100., n_points) / np_diff_bounds[1]
+# --- constraints mapping manually set (lazyness) --- #
+g1 = [[4.0000, 4.1000, 4.2000, 4.3000, 4.4000, 4.5000, 4.6000, 4.7000, 4.8000, 4.9000, \
+ 5.0000, 5.1000, 5.2000, 5.3000, 5.4000, 5.5000, 5.6000, 5.7000, 5.8000, 5.9000, 6.0000, \
+ 6.1000, 6.2000, 6.3000, 6.4000, 6.5000, 6.6000, 6.7000, 6.8000, 6.9000, 7.0000], \
+ [83.8649, 82.9378, 82.0562, 81.2152, 80.4109, 79.6398, 78.8988, 78.1846, 77.4959, \
+ 76.8299, 76.1849, 75.5589, 74.9507, 74.3586, 73.7814, 73.2178, 72.6674, 72.1277, \
+ 71.5987, 71.0793, 70.5673, 70.0665, 69.5730, 69.0863, 68.6056, 68.1305, 67.6604, \
+ 67.1943, 66.7337, 66.2754, 65.8211]]
+g2 = [[4.0000, 4.1000, 4.2000, 4.3000, 4.4000, 4.5000, 4.6000, 4.7000, 4.8000, 4.9000, \
+ 5.0000, 5.1000, 5.2000, 5.3000, 5.4000, 5.5000, 5.6000, 5.7000, 5.8000, 5.9000, 6.0000, \
+ 6.1000, 6.2000, 6.3000, 6.4000, 6.5000, 6.6000, 6.7000, 6.8000, 6.9000, 7.0000], \
+ [78.0170, 78.6381, 79.2771, 79.9189, 80.5635, 81.2110, 81.8619, 82.5181, 83.1687, \
+ 83.8277, 84.4897, 85.1547, 85.8229, 86.4940, 87.1413, 87.8557, 88.5343, 89.2170, \
+ 89.9037, 90.5943, 91.2896, 91.9891, 92.6930, 93.4017, 94.1155, 94.7974, 95.5153, \
+ 96.2377, 96.9647, 97.6964, 98.4330]]
+
+# Contour plot
+# f_copy = f.reshape((n_points,n_points),order='F')
+
+
+fig, ax = plt.subplots()
+CS = ax.contour(u_1, u_2, plant_f_grid, 50)
+ax.plot(g1[0] / np_diff_bounds[0], g1[1] / np_diff_bounds[1], 'black', linewidth=3)
+ax.plot(g2[0] / np_diff_bounds[0], g2[1] / np_diff_bounds[1], 'black', linewidth=3)
+ax.plot(X_opt[samples_number:, 0] / np_diff_bounds[0], X_opt[samples_number:, 1] / np_diff_bounds[1], 'ro')
+ax.plot(X_opt[:samples_number, 0] / np_diff_bounds[0], X_opt[:samples_number, 1] / np_diff_bounds[1], '*')
+tr_bktrc = 0
+for i in range(X_opt[samples_number:, :].shape[0]):
+ if backtrack_l[i] == False:
+ x_pos = X_opt[samples_number + i, 0] / np_diff_bounds[0]
+ y_pos = X_opt[samples_number + i, 1] / np_diff_bounds[1]
+ # plt.text(x_pos, y_pos, str(i))
+ if TR_scaling_:
+ if TR_curvature_:
+ e2 = Ellipse((x_pos, y_pos), TR_l[i][0], TR_l[i][1],
+ facecolor='None', edgecolor='black', angle=TR_l_angle[i], linestyle='--', linewidth=1)
+ ax.add_patch(e2)
+ else:
+ e2 = Ellipse((x_pos, y_pos), TR_l[i][0][0], TR_l[i][1][0],
+ facecolor='None', edgecolor='black', angle=0, linestyle='--', linewidth=1)
+ ax.add_patch(e2)
+ else:
+ circle1 = plt.Circle((x_pos, y_pos), radius=TR_l[i], color='black', fill=False, linestyle='--')
+ ax.add_artist(circle1)
+
+ax.plot(xnew[:, 0] / np_diff_bounds[0], xnew[:, 1] / np_diff_bounds[1], 'yo')
+ax.set_title('Contour plot')
+# plt.axis([4.,7., 70.,100.])
+plt.show()
+
+# compute all objective function values
+obj_list = []
+for p_i in range(X_opt[samples_number:, :].shape[0]):
+ obj_list.append(obj_system(X_opt[samples_number + p_i, :]) + 100.)
+
+fig, ax = plt.subplots()
+ax.plot(obj_list)
+plt.yscale('log')
+ax.set_title('Objective function vs iterations')
+plt.show()
+
+# compute all objective function values
+if not TR_scaling_:
+ TR_list = []
+ for p_i in range(len(TR_l)):
+ TR_list.append(TR_l[p_i])
+
+fig, ax = plt.subplots()
+if not TR_scaling_:
+ ax.plot(TR_list)
+elif not TR_curvature_:
+ ax.plot(np.linalg.norm(TR_l, axis=(1, 2)))
+else:
+ ax.plot(np.linalg.norm(TR_l, axis=(1)))
+plt.yscale('log')
+ax.set_title('TR vs iterations')
+plt.show()
\ No newline at end of file
diff --git a/run_Bio/no_prior_with_exploration_ei_no_red_constraint_violation_GP.p b/run_Bio/no_prior_with_exploration_ei_no_red_constraint_violation_GP.p
new file mode 100644
index 0000000..ef29adc
Binary files /dev/null and b/run_Bio/no_prior_with_exploration_ei_no_red_constraint_violation_GP.p differ
diff --git a/run_Bio/prior_with_exploration_ei_no_red_constraint_violation.p b/run_Bio/prior_with_exploration_ei_no_red_constraint_violation.p
new file mode 100644
index 0000000..2b728b0
Binary files /dev/null and b/run_Bio/prior_with_exploration_ei_no_red_constraint_violation.p differ
diff --git a/run_Bio/samples_eval_gen.py b/run_Bio/samples_eval_gen.py
new file mode 100644
index 0000000..346e6c3
--- /dev/null
+++ b/run_Bio/samples_eval_gen.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+def samples_generation(predictor_plant, objective, nx):
+ n = 2 * nx + 1
+ X = np.random.rand(10*n,nx)
+ f = np.zeros([10*n,nx])
+ min = np.inf
+ p = 0
+ for i in range(10*n):
+ k = 0
+ for j in range(np.shape(predictor_plant)[0]):
+ if predictor_plant[j](X[i])>=0 and abs(objective(X[i]))<0.13:
+ k += 1
+ if k==np.shape(predictor_plant)[0]:
+ p += 1
+ f[p-1, :] = X[i]
+ if min > objective(X[i]):
+ min = objective(X[i])
+ mink = p-1
+ if p>=n:break
+
+ return f[:p], mink
\ No newline at end of file
diff --git a/run_WO/.idea/misc.xml b/run_WO/.idea/misc.xml
new file mode 100644
index 0000000..a2e120d
--- /dev/null
+++ b/run_WO/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/run_WO/.idea/modules.xml b/run_WO/.idea/modules.xml
new file mode 100644
index 0000000..5543867
--- /dev/null
+++ b/run_WO/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/run_WO/.idea/run_WO.iml b/run_WO/.idea/run_WO.iml
new file mode 100644
index 0000000..7c9d48f
--- /dev/null
+++ b/run_WO/.idea/run_WO.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/run_WO/.idea/vcs.xml b/run_WO/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/run_WO/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/run_WO/.idea/workspace.xml b/run_WO/.idea/workspace.xml
new file mode 100644
index 0000000..05a788e
--- /dev/null
+++ b/run_WO/.idea/workspace.xml
@@ -0,0 +1,86 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 1599739200538
+
+
+ 1599739200538
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/run_WO/Bio_EI_model.p b/run_WO/Bio_EI_model.p
new file mode 100644
index 0000000..2203a1d
Binary files /dev/null and b/run_WO/Bio_EI_model.p differ
diff --git a/run_WO/figs_WO/EI_bio.png b/run_WO/figs_WO/EI_bio.png
new file mode 100644
index 0000000..b323c16
Binary files /dev/null and b/run_WO/figs_WO/EI_bio.png differ
diff --git a/run_WO/initial_data_bio.p b/run_WO/initial_data_bio.p
new file mode 100644
index 0000000..2ab323f
Binary files /dev/null and b/run_WO/initial_data_bio.p differ
diff --git a/run_WO/initial_data_bio_12.p b/run_WO/initial_data_bio_12.p
new file mode 100644
index 0000000..6e167b1
Binary files /dev/null and b/run_WO/initial_data_bio_12.p differ
diff --git a/run_WO/initial_data_bio_12_ca.p b/run_WO/initial_data_bio_12_ca.p
new file mode 100644
index 0000000..4be4854
Binary files /dev/null and b/run_WO/initial_data_bio_12_ca.p differ
diff --git a/run_WO/initial_data_bio_12_ca_new.p b/run_WO/initial_data_bio_12_ca_new.p
new file mode 100644
index 0000000..ca4b043
Binary files /dev/null and b/run_WO/initial_data_bio_12_ca_new.p differ
diff --git a/run_WO/main_WO_prior_EI.py b/run_WO/main_WO_prior_EI.py
index 3c0eb76..2252670 100644
--- a/run_WO/main_WO_prior_EI.py
+++ b/run_WO/main_WO_prior_EI.py
@@ -16,7 +16,7 @@
from casadi import *
from sub_uts.systems import *
-from sub_uts.utilities_2 import *
+from sub_uts.utilities_4 import *
from plots_RTO import compute_obj, plot_obj, plot_obj_noise
import pickle
@@ -28,6 +28,7 @@
if not(os.path.exists('figs_noise_WO')):
os.mkdir('figs_noise_WO')
+<<<<<<< Updated upstream
# obj_no_prior_with_exploration_ei = compute_obj('no_prior_with_exploration_ei')
# obj_with_prior_with_exploration_ei = compute_obj('with_prior_with_exploration_ei')
# obj_with_prior_with_exploration_ei_noise = compute_obj('with_prior_with_exploration_ei_noise')
@@ -89,8 +90,11 @@
# Plot('with_prior_no_exploration')
# Plot('with_prior_no_exploration_noise')
# Plot('no_prior_no_exploration_noise')
+# plot_obj(compute_obj)
+# plot_obj_noise(compute_obj)
+=======
plot_obj(compute_obj)
-plot_obj_noise(compute_obj)
+>>>>>>> Stashed changes
np.random.seed(0)
X_opt_mc = []
@@ -101,23 +105,48 @@
for i in range(30):
- plant = WO_system()
-
- obj_model = obj_empty#model.WO_obj_ca
- cons_model = [con_empty, con_empty]
- obj_system = plant.WO_obj_sys_ca
- cons_system = [plant.WO_con1_sys_ca, plant.WO_con2_sys_ca]
-
-
- n_iter = 20
- bounds = [[4.,7.],[70.,100.]]
- Xtrain = np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+ plant = Bio_system()
+ model = Bio_model()
+
+ u = [0.]*plant.nk*plant.nu
+ xf = plant.bio_obj_ca(u)
+ x1 = plant.bio_con1_ca(1,u)
+ x2 = plant.bio_con2_ca(1,u)
+ functools.partial(plant.bio_con1_ca,1)
+ obj_model = model.bio_obj_ca#mode
+ F = model.bio_model_ca()
+ cons_model = []# l.WO_obj_ca
+ for k in range(model.nk):
+ cons_model.append(functools.partial(model.bio_con1_ca, k+1))
+ cons_model.append(functools.partial(model.bio_con2_ca, k+1))
+
+ # obj_model = obj_empty
+ # cons_model = []# l.WO_obj_ca
+ # for k in range(model.nk):
+ # cons_model.append(con_empty)
+ # cons_model.append(con_empty)
+
+ obj_system = plant.bio_obj_ca
+ cons_system = []# l.WO_obj_ca
+ for k in range(model.nk):
+ cons_system.append(functools.partial(plant.bio_con1_ca, k+1))
+ cons_system.append(functools.partial(plant.bio_con2_ca, k+1))
+
+ x = np.random.rand(24)
+ print(F(model.x0, x[0])[1] / 800 - 1, model.bio_con1_ca(1, x))
+
+ n_iter = 50
+ bounds = ([[0., 1.]] * model.nk*model.nu)#[[0.,1.],[0.,1.]]
+ X = pickle.load(open('initial_data_bio_12.p','rb'))
+ Xtrain = X[:model.nk*model.nu+1]#1.*(np.random.rand(model.nk*model.nu+500,model.nk*model.nu))+0.#np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
+#
+#1.*(np.random.rand(model.nk*model.nu+500,model.nk*model.nu))+0.#np.array([[5.7, 74.],[6.35, 74.9],[6.6,75.],[6.75,79.]]) #U0
#Xtrain = np.array([[7.2, 74.],[7.2, 80],[6.7,75.]])#,[6.75,83.]]) #U0
samples_number = Xtrain.shape[0]
data = ['data0', Xtrain]
- u0 = np.array([6.9,83])
+ u0 = X[model.nk*model.nu+1]#np.array([*[0.6]*model.nk,*[0.8]*model.nk])#
Delta0 = 0.25
Delta_max =0.7; eta0=0.2; eta1=0.8; gamma_red=0.8; gamma_incr=1.2;
@@ -129,9 +158,9 @@
ITR_GP_opt = ITR_GP_RTO(obj_model, obj_system, cons_model, cons_system, u0, Delta0,
Delta_max, eta0, eta1, gamma_red, gamma_incr,
- n_iter, data, np.array(bounds),obj_setting=3, noise=noise, multi_opt=30,
- multi_hyper=15, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
- store_data=True, inner_TR=inner_TR_, scale_inputs=True)
+ n_iter, data, np.array(bounds),obj_setting=2, noise=noise, multi_opt=3,
+ multi_hyper=10, TR_scaling=TR_scaling_, TR_curvature=TR_curvature_,
+ store_data=True, inner_TR=inner_TR_, scale_inputs=False, model=model)
print('Episode: ',i)
if not TR_curvature_:
diff --git a/run_WO/no_prior_with_exploration_ei.p b/run_WO/no_prior_with_exploration_ei.p
index b79d179..e26f99e 100644
Binary files a/run_WO/no_prior_with_exploration_ei.p and b/run_WO/no_prior_with_exploration_ei.p differ
diff --git a/run_WO/no_prior_with_exploration_ei_bio.p b/run_WO/no_prior_with_exploration_ei_bio.p
new file mode 100644
index 0000000..cb9e92d
Binary files /dev/null and b/run_WO/no_prior_with_exploration_ei_bio.p differ
diff --git a/run_WO/prior_model_RK4_20intervals.p b/run_WO/prior_model_RK4_20intervals.p
new file mode 100644
index 0000000..ca95081
Binary files /dev/null and b/run_WO/prior_model_RK4_20intervals.p differ
diff --git a/run_WO/prior_with_exploration_ei_no_red_constraint_violation.p b/run_WO/prior_with_exploration_ei_no_red_constraint_violation.p
new file mode 100644
index 0000000..2b728b0
Binary files /dev/null and b/run_WO/prior_with_exploration_ei_no_red_constraint_violation.p differ
diff --git a/sub_uts/__pycache__/systems.cpython-37.pyc b/sub_uts/__pycache__/systems.cpython-37.pyc
new file mode 100644
index 0000000..391b500
Binary files /dev/null and b/sub_uts/__pycache__/systems.cpython-37.pyc differ
diff --git a/sub_uts/__pycache__/systems2.cpython-37.pyc b/sub_uts/__pycache__/systems2.cpython-37.pyc
new file mode 100644
index 0000000..dc881dc
Binary files /dev/null and b/sub_uts/__pycache__/systems2.cpython-37.pyc differ
diff --git a/sub_uts/__pycache__/utilities_2.cpython-37.pyc b/sub_uts/__pycache__/utilities_2.cpython-37.pyc
new file mode 100644
index 0000000..9ae4b99
Binary files /dev/null and b/sub_uts/__pycache__/utilities_2.cpython-37.pyc differ
diff --git a/sub_uts/__pycache__/utilities_4.cpython-37.pyc b/sub_uts/__pycache__/utilities_4.cpython-37.pyc
new file mode 100644
index 0000000..252b1c5
Binary files /dev/null and b/sub_uts/__pycache__/utilities_4.cpython-37.pyc differ
diff --git a/sub_uts/systems.py b/sub_uts/systems.py
index 991d21c..fb9f3d4 100644
--- a/sub_uts/systems.py
+++ b/sub_uts/systems.py
@@ -307,483 +307,457 @@ def obj_empty(u):
return f
-#
-# def DAE_model():
-# # Parameters
-# Fa = 1.8275
-# Mt = 2105.2
-# # kinetic parameters
-# phi1 = - 3.
-# psi1 = -17.
-# phi2 = - 4.
-# psi2 = -29.
-# # Reference temperature
-# Tref = 110. + 273.15 # [=] K.
-# # Define vectors with names of states
-# states = ['x']
-# nd = len(states)
-# xd = SX.sym('xd', nd)
-# for i in range(nd):
-# globals()[states[i]] = xd[i]
-#
-# # Define vectors with names of algebraic variables
-# algebraics = ['Xa', 'Xb', 'Xe', 'Xp', 'Xg']
-# na = len(algebraics)
-# xa = SX.sym('xa', na)
-# for i in range(na):
-# globals()[algebraics[i]] = xa[i]
-#
-# # Define vectors with banes of input variables
-# inputs = ['Fb', 'Tr']
-# nu = len(inputs)
-# u = SX.sym("u", nu)
-# for i in range(nu):
-# globals()[inputs[i]] = u[i]
-#
-# k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
-# k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
-#
-# # reaction rate
-# Fr = Fa + Fb
-# r1 = k1 * Xa * Xb * Xb * Mt
-# r2 = k2 * Xa * Xb * Xp * Mt
-# ODEeq = [0 * x]
-#
-# # Declare algebraic equations
-# Aeq = []
-#
-# Aeq += [Fa - r1 - r2 - Fr * Xa]
-# Aeq += [Fb - 2 * r1 - r2 - Fr * Xb]
-# Aeq += [+ 2 * r1 - Fr * Xe]
-# Aeq += [+ r1 - r2 - Fr * Xp]
-# Aeq += [+ 3 * r2 - Fr * Xg]
-#
-# return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs
-#
-#
-# def integrator_model():
-# """
-# This function constructs the integrator to be suitable with casadi environment, for the equations of the model
-# and the objective function with variable time step.
-# inputs: NaN
-# outputs: F: Function([x, u, dt]--> [xf, obj])
-# """
-#
-# xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = DAE_model()
-# VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
-# solver = rootfinder('solver', 'newton', VV)
-#
-# # model = functools.partial(solver, np.zeros(np.shape(xa)))
-# return solver
-#
-#
-# def WO_obj_ca(u):
-# solver = integrator_model()
-# x = solver(np.zeros(5), u)
-# Fb = u[0]
-# Tr = u[1]
-# Fa = 1.8275
-# Fr = Fa + Fb
-#
-# obj = -(1043.38 * x[3] * Fr +
-# 20.92 * x[2] * Fr -
-# 79.23 * Fa -
-# 118.34 * Fb)
-#
-# return obj
-#
-#
-# def WO_con1_model_ca(u):
-# solver = integrator_model()
-# x = solver(np.zeros(5), u)
-# pcon1 = x[0] - 0.12 # + 5e-4*np.random.normal(1., 1)
-# return -pcon1.toarray()[0]
-#
-#
-# def WO_con2_model_ca(u):
-# solver = integrator_model()
-# x = solver(np.zeros(5), u)
-# pcon2 = x[4] - 0.08 # + 5e-4*np.random.normal(1., 1)
-# return -pcon2.toarray()[0]
-#
-# # Parameters
-#
-#
-#
-#
-# def DAE_system():
-# Fa = 1.8275
-# Mt = 2105.2
-# # kinetic parameters
-# phi1 = - 3.
-# psi1 = -17.
-# phi2 = - 4.
-# psi2 = -29.
-# # Reference temperature
-# Tref = 110. + 273.15 # [=] K.
-#
-# # Define vectors with names of states
-# states = ['x']
-# nd = len(states)
-# xd = SX.sym('xd', nd)
-# for i in range(nd):
-# globals()[states[i]] = xd[i]
-#
-# # Define vectors with names of algebraic variables
-# algebraics = ['Xa', 'Xb', 'Xc', 'Xe', 'Xp', 'Xg']
-# na = len(algebraics)
-# xa = SX.sym('xa', na)
-# for i in range(na):
-# globals()[algebraics[i]] = xa[i]
-#
-# inputs = ['Fb', 'Tr']
-# nu = len(inputs)
-# u = SX.sym("u", nu)
-# for i in range(nu):
-# globals()[inputs[i]] = u[i]
-#
-# # Reparametrization
-# k1 = 1.6599e6 * np.exp(-6666.7 / (Tr + 273.15))
-# k2 = 7.2117e8 * np.exp(-8333.3 / (Tr + 273.15))
-# k3 = 2.6745e12 * np.exp(-11111. / (Tr + 273.15))
-#
-# # reaction rate
-# Fr = Fa + Fb
-# r1 = k1 * Xa * Xb * Mt
-# r2 = k2 * Xb * Xc * Mt
-# r3 = k3 * Xc * Xp * Mt
-#
-# # residual for x
-# x_res = np.zeros((6, 1))
-# x_res[0, 0] = (Fa - r1 - Fr * Xa) / Mt
-# x_res[1, 0] = (Fb - r1 - r2 - Fr * Xb) / Mt
-# x_res[2, 0] = (+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt
-# x_res[3, 0] = (+ 2 * r2 - Fr * Xe) / Mt
-# x_res[4, 0] = (+ r2 - 0.5 * r3 - Fr * Xp) / Mt
-# x_res[5, 0] = (+ 1.5 * r3 - Fr * Xg) / Mt
-# # Define vectors with banes of input variables
-#
-# ODEeq = [0 * x]
-#
-# # Declare algebraic equations
-# Aeq = []
-#
-# Aeq += [(Fa - r1 - Fr * Xa) / Mt]
-# Aeq += [(Fb - r1 - r2 - Fr * Xb) / Mt]
-# Aeq += [(+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt]
-# Aeq += [(+ 2 * r2 - Fr * Xe) / Mt]
-# Aeq += [(+ r2 - 0.5 * r3 - Fr * Xp) / Mt]
-# Aeq += [(+ 1.5 * r3 - Fr * Xg) / Mt]
-#
-# return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs
-#
-#
-# def integrator_system():
-# """
-# This function constructs the integrator to be suitable with casadi environment, for the equations of the model
-# and the objective function with variable time step.
-# inputs: NaN
-# outputs: F: Function([x, u, dt]--> [xf, obj])
-# """
-#
-# xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = DAE_system()
-# VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
-# solver = rootfinder('solver', 'newton', VV)
-#
-# return solver
-#
-#
-# def WO_obj_sys_ca(u):
-# solver = integrator_system()
-# x = solver(np.zeros(6), u)
-# Fb = u[0]
-# Tr = u[1]
-# Fa = 1.8275
-# Fr = Fa + Fb
-#
-# obj = -(1043.38 * x[4] * Fr +
-# 20.92 * x[3] * Fr -
-# 79.23 * Fa -
-# 118.34 * Fb)
-#
-# return obj
-#
-#
-# def WO_con1_sys_ca(u):
-# solver = integrator_system()
-# x = solver(np.zeros(6), u)
-# pcon1 = x[0] - 0.12 # + 5e-4*np.random.normal(1., 1)
-#
-# return -pcon1
-#
-#
-# def WO_con2_sys_ca(u):
-# solver = integrator_system()
-# x = solver(np.zeros(6), u)
-# pcon2 = x[5] - 0.08 # + 5e-4*np.random.normal(1., 1)
-#
-# return -pcon2
-#
-options = {'disp': False, 'maxiter': 10000} # solver options
-# Parameters
-Fa = 1.8275
-Mt = 2105.2
-# kinetic parameters
-phi1 = - 3.
-psi1 = -17.
-phi2 = - 4.
-psi2 = -29.
-# Reference temperature
-Tref = 110. + 273.15 # [=] K.
-
-
-# --- residual function for model opt --- #
-def WO_nonlinear_f_model_opt(x, u_):
- Fb = u_[0]
- Tr = u_[1]
-
- # states of the system
- Xa = x[0] # Mass fraction
- Xb = x[1] # Mass fraction
- Xe = x[2] # Mass fraction
- Xp = x[3] # Mass fraction
- Xg = x[4] # Mass fraction
-
- # Reparametrization
- k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
- k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
-
- # reaction rate
- Fr = Fa + Fb
- r1 = k1 * Xa * Xb * Xb * Mt
- r2 = k2 * Xa * Xb * Xp * Mt
-
- # residual for x
- x_res = np.zeros((5, 1))
- x_res[0, 0] = Fa - r1 - r2 - Fr * Xa
- x_res[1, 0] = Fb - 2 * r1 - r2 - Fr * Xb
- x_res[2, 0] = + 2 * r1 - Fr * Xe
- x_res[3, 0] = + r1 - r2 - Fr * Xp
- x_res[4, 0] = + 3 * r2 - Fr * Xg
-
- return np.sum(x_res ** 2)
-
-
-# --- residual function for model --- #
-def WO_nonlinear_f_model(u_, x):
- Fb = u_[0]
- Tr = u_[1]
-
- # states of the system
- Xa = x[0] # Mass fraction
- Xb = x[1] # Mass fraction
- Xe = x[2] # Mass fraction
- Xp = x[3] # Mass fraction
- Xg = x[4] # Mass fraction
-
- # Reparametrization
- k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
- k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)
-
- # reaction rate
- Fr = Fa + Fb
- r1 = k1 * Xa * Xb * Xb * Mt
- r2 = k2 * Xa * Xb * Xp * Mt
-
- # residual for x
- x_res = np.zeros((5, 1))
- x_res[0, 0] = Fa - r1 - r2 - Fr * Xa
- x_res[1, 0] = Fb - 2 * r1 - r2 - Fr * Xb
- x_res[2, 0] = + 2 * r1 - Fr * Xe
- x_res[3, 0] = + r1 - r2 - Fr * Xp
- x_res[4, 0] = + 3 * r2 - Fr * Xg
-
- return x_res
-
-
-# --- WO model objective --- #
-def WO_Model_obj(u):
- x_guess = np.ones((5, 1)) * 0.2
- WO_f_model = functools.partial(WO_nonlinear_f_model, u)
- x_solved = broyden1(WO_f_model, x_guess, f_tol=1e-12)
-
- # definitions
- Fa = 1.8275
- Fb = u[0]
- Fr = Fa + Fb
+class Bio_system:
+
+
+ def __init__(self):
+ self.nk, self.tf, self.x0, _, _ = self.specifications()
+ self.xd, self.xa, self.u, _, self.ODEeq, self.Aeq, self.u_min, self.u_max,\
+ self.states, self.algebraics, self.inputs, self.nd, self.na, self.nu, \
+ self.nmp,self. modparval= self.DAE_system()
+ self.eval = self.integrator_model()
+ self.Sigma_v = [400.,1e5,1e-2]*diag(np.ones(self.nd))*1e-7*0
+ def specifications(self):
+ ''' Specify Problem parameters '''
+ tf = 240. # final time
+ nk = 12 # sampling points
+ x0 = np.array([1., 150., 0.])
+ Lsolver = 'mumps' # 'ma97' # Linear solver
+ c_code = False # c_code
+
+ return nk, tf, x0, Lsolver, c_code
+
+ def DAE_system(self):
+ # Define vectors with names of states
+ states = ['x', 'n', 'q']
+ nd = len(states)
+ xd = SX.sym('xd', nd)
+ for i in range(nd):
+ globals()[states[i]] = xd[i]
+
+ # Define vectors with names of algebraic variables
+ algebraics = []
+ na = len(algebraics)
+ xa = SX.sym('xa', na)
+ for i in range(na):
+ globals()[algebraics[i]] = xa[i]
- # calculating objective
- obj = -(1043.38 * x_solved[3, 0] * Fr +
- 20.92 * x_solved[2, 0] * Fr -
- 79.23 * Fa -
- 118.34 * Fb)
+ # Define vectors with banes of input variables
+ inputs = ['L', 'Fn']
+ nu = len(inputs)
+ u = SX.sym("u", nu)
+ for i in range(nu):
+ globals()[inputs[i]] = u[i]
- return obj
+ # Define model parameter names and values
+ modpar = ['u_m', 'k_s', 'k_i', 'K_N', 'u_d', 'Y_nx', 'k_m', 'k_sq',
+ 'k_iq', 'k_d', 'K_Np']
+ modparval = [0.0923 * 0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
+ 2.544 * 0.62 * 1e-4, 23.51, 800.0, 0.281, 16.89]
+ nmp = len(modpar)
+ for i in range(nmp):
+ globals()[modpar[i]] = SX(modparval[i])
-# --- WO model con1 --- #
-def WO_Model_con1(u):
- x_guess = np.ones((5, 1)) * 0.2
- WO_f_model = functools.partial(WO_nonlinear_f_model, u)
- x_solved = broyden1(WO_f_model, x_guess, f_tol=1e-12)
+ # Additive measurement noise
+ # Sigma_v = [400.,1e5,1e-2]*diag(np.ones(nd))*1e-6
- # calculating con1
- con1 = x_solved[0, 0] - 0.12
+ # Additive disturbance noise
+ # Sigma_w = [400.,1e5,1e-2]*diag(np.ones(nd))*1e-6
- return -con1
+ # Initial additive disturbance noise
+ # Sigma_w0 = [1.,150.**2,0.]*diag(np.ones(nd))*1e-3
+ # Declare ODE equations (use notation as defined above)
-# --- WO model con1 opt --- #
-def WO_Model_con1_opt(u):
- x_guess = np.ones((5, 1)) * 0.2
+ dx = u_m * L / (L + k_s + L ** 2. / k_i) * x * n / (n + K_N) - u_d * x
+ dn = - Y_nx * u_m * L / (L + k_s + L ** 2. / k_i) * x * n / (n + K_N) + Fn
+ dq = k_m * L / (L + k_sq + L ** 2. / k_iq) * x - k_d * q / (n + K_Np)
- res = minimize(WO_nonlinear_f_model_opt, x_guess, args=(u),
- method='BFGS', options=options, tol=1e-12)
- x_solved = res.x
+ ODEeq = [dx, dn, dq]
- # calculating con1
- con1 = x_solved[0] - 0.12
+ # Declare algebraic equations
+ Aeq = []
- return -con1
+ # Define control bounds
+ u_min = np.array([120., 0.]) # lower bound of inputs
+ u_max = np.array([400., 40.]) # upper bound of inputs
+ # Define objective to be minimized
+ t = SX.sym('t')
-# --- WO model con2 --- #
-def WO_Model_con2(u):
- x_guess = np.ones((5, 1)) * 0.2
- WO_f_model = functools.partial(WO_nonlinear_f_model, u)
- x_solved = broyden1(WO_f_model, x_guess, f_tol=1e-12)
+ return xd, xa, u, 0, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval
- # calculating con1
- con2 = x_solved[4, 0] - 0.08
+ def integrator_model(self):
+ """
+ This function constructs the integrator to be suitable with casadi environment, for the equations of the model
+ and the objective function with variable time step.
+ inputs: NaN
+ outputs: F: Function([x, u, dt]--> [xf, obj])
+ """
- return -con2
+ xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \
+ = self.DAE_system()
+ dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),
+ 'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}
+ opts = {'tf': self.tf / self.nk} # interval length
+ F = integrator('F', 'idas', dae, opts)
+ # model = functools.partial(solver, np.zeros(np.shape(xa)))
+ return F
-# --- WO model con2 opt --- #
-def WO_Model_con2_opt(u):
- x_guess = np.ones((5, 1)) * 0.2
- res = minimize(WO_nonlinear_f_model_opt, x_guess, args=(u),
- method='BFGS', options=options, tol=1e-12)
- x_solved = res.x
+ def bio_obj_ca(self, u0):
+ x = self.x0
+ u0 = np.array(u0).reshape((self.nk,2))
+ u = u0 * (self.u_max - self.u_min) + self.u_min
- # calculating con1
- con2 = x_solved[4] - 0.08
- return -con2
- # Parameters
+ for i in range(self.nk):
+ xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))#self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
+ x = np.array(xd['xf'].T)[0]
-Fa = 1.8275
-Mt = 2105.2
-# kinetic parameters
-phi1 = - 3.
-psi1 = -17.
-phi2 = - 4.
-psi2 = -29.
-# Reference temperature
-Tref = 110. + 273.15 # [=] K.
-
-
-def DAE_system():
- # Define vectors with names of states
- states = ['x']
- nd = len(states)
- xd = SX.sym('xd', nd)
- for i in range(nd):
- globals()[states[i]] = xd[i]
-
- # Define vectors with names of algebraic variables
- algebraics = ['Xa', 'Xb', 'Xc', 'Xe', 'Xp', 'Xg']
- na = len(algebraics)
- xa = SX.sym('xa', na)
- for i in range(na):
- globals()[algebraics[i]] = xa[i]
-
- inputs = ['Fb', 'Tr']
- nu = len(inputs)
- u = SX.sym("u", nu)
- for i in range(nu):
- globals()[inputs[i]] = u[i]
-
- # Reparametrization
- k1 = 1.6599e6 * np.exp(-6666.7 / (Tr + 273.15))
- k2 = 7.2117e8 * np.exp(-8333.3 / (Tr + 273.15))
- k3 = 2.6745e12 * np.exp(-11111. / (Tr + 273.15))
-
- # reaction rate
- Fr = Fa + Fb
- r1 = k1 * Xa * Xb * Mt
- r2 = k2 * Xb * Xc * Mt
- r3 = k3 * Xc * Xp * Mt
-
- # residual for x
- x_res = np.zeros((6, 1))
- x_res[0, 0] = (Fa - r1 - Fr * Xa) / Mt
- x_res[1, 0] = (Fb - r1 - r2 - Fr * Xb) / Mt
- x_res[2, 0] = (+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt
- x_res[3, 0] = (+ 2 * r2 - Fr * Xe) / Mt
- x_res[4, 0] = (+ r2 - 0.5 * r3 - Fr * Xp) / Mt
- x_res[5, 0] = (+ 1.5 * r3 - Fr * Xg) / Mt
- # Define vectors with banes of input variables
-
- ODEeq = [0 * x]
-
- # Declare algebraic equations
- Aeq = []
-
- Aeq += [(Fa - r1 - Fr * Xa) / Mt]
- Aeq += [(Fb - r1 - r2 - Fr * Xb) / Mt]
- Aeq += [(+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt]
- Aeq += [(+ 2 * r2 - Fr * Xe) / Mt]
- Aeq += [(+ r2 - 0.5 * r3 - Fr * Xp) / Mt]
- Aeq += [(+ 1.5 * r3 - Fr * Xg) / Mt]
-
- return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs
-
-
-def integrator_system():
- """
- This function constructs the integrator to be suitable with casadi environment, for the equations of the model
- and the objective function with variable time step.
- inputs: NaN
- outputs: F: Function([x, u, dt]--> [xf, obj])
- """
-
- xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = DAE_system()
- VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
- solver = rootfinder('solver', 'newton', VV)
-
- return solver
-
-
-def WO_obj_sys_ca(u):
- solver = integrator_system()
- x = solver(np.zeros(6), u)
- Fb = u[0]
- Tr = u[1]
- Fa = 1.8275
- Fr = Fa + Fb
- obj = -(1043.38 * x[4] * Fr +
- 20.92 * x[3] * Fr -
- 79.23 * Fa -
- 118.34 * Fb)
+ return -x[-1] + np.random.multivariate_normal([0.]*self.nd,np.array(self.Sigma_v))[-1]
+
+ def bio_con1_ca(self, n, u0):
+ x = self.x0
+ u0 = np.array(u0).reshape((self.nk,2))
+ u = u0 * (self.u_max - self.u_min) + self.u_min
+
+
+
+ for i in range(n):
+ xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))#self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
+ x = np.array(xd['xf'].T)[0]
+ x[1] += np.random.multivariate_normal([0.]*self.nd,np.array(self.Sigma_v))[1]
+ pcon1 = x[1]/800 - 1
+ return -pcon1#.toarray()[0]
+
+ def bio_con2_ca(self, n, u0):
+ x = self.x0
+ u0 = np.array(u0).reshape((self.nk,2) )
+ u = u0* (self.u_max - self.u_min) + self.u_min
+
+
+
+ for i in range(n):
+ xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))#self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
+ x = np.array(xd['xf'].T)[0]
+ x += np.random.multivariate_normal([0.]*self.nd,np.array(self.Sigma_v))
+ pcon1 = x[2]/(0.011 * x[0])-1
+ return -pcon1#.toarray()[0]
+
+
+
class Bio_model:
    """Nominal photo-bioreactor model (no substrate-inhibition terms).

    States: x (biomass), n (nitrate), q (product).
    Controls per sampling interval: L (light) and Fn (nitrate feed), supplied
    scaled to [0, 1] and mapped to [u_min, u_max].
    """

    def __init__(self):
        self.nk, self.tf, self.x0, _, _ = self.specifications()
        (self.xd, self.xa, self.u, _, self.ODEeq, self.Aeq, self.u_min,
         self.u_max, self.states, self.algebraics, self.inputs, self.nd,
         self.na, self.nu, self.nmp, self.modparval) = self.DAE_system()
        self.eval = self.integrator_model()

    def specifications(self):
        """Problem parameters: (nk, tf, x0, linear solver, c_code flag)."""
        tf = 240.  # final time
        nk = 12  # sampling points
        x0 = np.array([1., 150., 0.])
        Lsolver = 'mumps'  # 'ma97'  # linear solver
        c_code = False
        return nk, tf, x0, Lsolver, c_code

    def DAE_system(self):
        """Assemble the symbolic CasADi model (pure ODE, no algebraic part)."""
        # States; the globals() trick makes each symbol addressable by name.
        states = ['x', 'n', 'q']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for i in range(nd):
            globals()[states[i]] = xd[i]

        # No algebraic variables in this model.
        algebraics = []
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for i in range(na):
            globals()[algebraics[i]] = xa[i]

        # Inputs: light intensity L and nitrate feed Fn.
        inputs = ['L', 'Fn']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for i in range(nu):
            globals()[inputs[i]] = u[i]

        # Model parameters (fixed numeric values wrapped as SX constants).
        modpar = ['u_m', 'k_s', 'k_i', 'K_N', 'u_d', 'Y_nx', 'k_m', 'k_sq',
                  'k_iq', 'k_d', 'K_Np']
        modparval = [0.0923 * 0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
                     2.544 * 0.62 * 1e-4, 23.51, 800.0, 0.281, 16.89]
        nmp = len(modpar)
        for i in range(nmp):
            globals()[modpar[i]] = SX(modparval[i])

        # ODE right-hand side (Monod-type kinetics, no inhibition terms).
        dx = u_m * L / (L + k_s) * x * n / (n + K_N) - u_d * x
        dn = - Y_nx * u_m * L / (L + k_s) * x * n / (n + K_N) + Fn
        dq = k_m * L / (L + k_sq) * x - k_d * q / (n + K_Np)
        ODEeq = [dx, dn, dq]

        # No algebraic equations.
        Aeq = []

        # Physical control bounds.
        u_min = np.array([120., 0.])  # lower bound of inputs
        u_max = np.array([400., 40.])  # upper bound of inputs

        return xd, xa, u, 0, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval

    def integrator_model(self):
        """Construct the IDAS integrator over one sampling interval.

        Also caches the ODE right-hand side as a casadi Function in
        self.ODEeq (used by the RK4 variants below).
        Returns F: Function(x0, p=u) -> xf.
        """
        xd, xa, u, _, ODEeq, Aeq, *_ = self.DAE_system()

        # Callable right-hand side for the explicit RK4 integrators.
        self.ODEeq = Function('f', [xd, u], [vertcat(*ODEeq)], ['x0', 'p'], ['xdot'])

        dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),
               'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}
        opts = {'tf': self.tf / self.nk}  # interval length
        return integrator('F', 'idas', dae, opts)

    def _scale_controls(self, u0):
        """Map a flat [0, 1]-scaled control vector to (nk, 2) physical units.

        Fix: the previous reshape(-1, 1) flattened the sequence and broadcast
        it against the 2-vector of bounds, scrambling the control mapping.
        """
        u1 = np.array(u0).reshape((self.nk, 2))
        return u1 * (self.u_max - self.u_min) + self.u_min

    def _sanitize(self, x, u, i):
        """Clamp negative nitrate feeds (column 1) in-place when the current
        state is valid; mirrors the original guard (debug prints removed)."""
        if not np.any(x < 0) and np.any(u[i] < 0):
            u[u[:, 1] < 0., 1] = 0.

    def _rk4_interval(self, x, ui, n_sub):
        """Advance the state across one sampling interval with n_sub RK4 steps."""
        DT = self.tf / self.nk / n_sub
        f = self.ODEeq
        for _ in range(n_sub):
            k1 = f(x0=vertcat(np.array(x)), p=vertcat(ui))['xdot']
            k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)), p=vertcat(ui))['xdot']
            k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(ui))['xdot']
            # Fix: classical RK4 evaluates k4 at x + DT*k3 (was DT*k2).
            k4 = f(x0=vertcat(np.array(x + DT * k3)), p=vertcat(ui))['xdot']
            x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
        return x

    def bio_obj_ca(self, u0):
        """IDAS simulation over all nk intervals; returns -x[-1](tf)."""
        x = self.x0
        u = self._scale_controls(u0)
        for i in range(self.nk):
            self._sanitize(x, u, i)
            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
            x = np.array(xd['xf'].T)[0]
            x[x < 0] = 0.  # remove small negative round-off from the integrator
        return -x[-1]

    def bio_con1_ca(self, n, u0):
        """Constraint 1 after n intervals: x[1] <= 800 (scaled residual),
        returned sign-flipped so >= 0 means feasible."""
        x = self.x0
        u = self._scale_controls(u0)
        for i in range(n):
            self._sanitize(x, u, i)
            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
            x = np.array(xd['xf'].T)[0]
            x[x < 0] = 0.
        pcon1 = x[1] / 800 - 1
        return -pcon1

    def bio_con2_ca(self, n, u0):
        """Constraint 2 after n intervals: x[2] <= 0.011*x[0] (ratio residual),
        returned sign-flipped so >= 0 means feasible."""
        x = self.x0
        u = self._scale_controls(u0)
        for i in range(n):
            self._sanitize(x, u, i)
            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
            x = np.array(xd['xf'].T)[0]
            x[x < 0] = 0.
        pcon2 = x[2] / (0.011 * x[0]) - 1
        return -pcon2

    def bio_obj_ca_RK4(self, u0):
        """RK4 counterpart of bio_obj_ca (4 substeps per interval)."""
        x = self.x0
        u = self._scale_controls(u0)
        for i in range(self.nk):
            self._sanitize(x, u, i)
            x = self._rk4_interval(x, u[i], 4)
            # x is a casadi DM here; clamp component-wise.
            for j in range(self.nd):
                if x[j] < 0:
                    x[j] = 0
        return -x[-1].toarray()[0][0]

    def bio_con1_ca_RK4(self, n, u0):
        """RK4 counterpart of bio_con1_ca."""
        x = self.x0
        u = self._scale_controls(u0)
        for i in range(n):
            self._sanitize(x, u, i)
            x = self._rk4_interval(x, u[i], 4)
            for j in range(self.nd):
                if x[j] < 0:
                    x[j] = 0
        pcon1 = x[1] / 800 - 1
        return -pcon1.toarray()[0][0]

    def bio_con2_ca_RK4(self, n, u0):
        """RK4 counterpart of bio_con2_ca."""
        x = self.x0
        u = self._scale_controls(u0)
        for i in range(n):
            self._sanitize(x, u, i)
            x = self._rk4_interval(x, u[i], 4)
            for j in range(self.nd):
                if x[j] < 0:
                    x[j] = 0
        pcon2 = x[2] / (0.011 * x[0]) - 1
        return -pcon2.toarray()[0][0]

    def bio_model_ca(self):
        """Build a casadi Function for one sampling interval via M=4 RK4 steps
        on [0, 1]-scaled symbolic controls."""
        M = 4  # RK4 steps per interval

        X0 = SX.sym('X0', self.nd)
        U = SX.sym('U', self.nu, 1)
        u = U * (self.u_max - self.u_min) + self.u_min
        DT = self.tf / self.nk / M
        f = self.ODEeq
        X = X0
        for j in range(M):
            k1 = f(X, u)
            k2 = f(X + DT / 2 * k1, u)
            k3 = f(X + DT / 2 * k2, u)
            # Fix: k4 must use k3 (was k2).
            k4 = f(X + DT * k3, u)
            X = X + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
        F = Function('F', [X0, U], [X], ['x0', 'u'], ['xf'])
        return F

    def bio_obj_ca_f(self, x):
        """Objective from a terminal state: negative last component."""
        return -x[-1]

    def bio_con1_ca_f(self, x):
        """Constraint-1 residual from a state: x[1]/800 - 1 (not negated)."""
        pcon1 = x[1] / 800 - 1
        return pcon1

    def bio_con2_ca_f(self, x):
        """Constraint-2 residual from a state: x[2]/(0.011*x[0]) - 1 (not negated)."""
        pcon2 = x[2] / (0.011 * x[0]) - 1
        return pcon2
\ No newline at end of file
diff --git a/sub_uts/systems2.py b/sub_uts/systems2.py
new file mode 100644
index 0000000..330665a
--- /dev/null
+++ b/sub_uts/systems2.py
@@ -0,0 +1,1066 @@
+# v2 includes shaping the TR with the curvature of the problem by a broyden update on derivatives
+# and a BFGS update on the Hessian, however the TR becomes very small in some parts, so the approach
+# does not seem to be too effective.
+
+import time
+import random
+import numpy as np
+import numpy.random as rnd
+from scipy.spatial.distance import cdist
+
+import sobol_seq
+from scipy.optimize import minimize
+from scipy.optimize import broyden1
+from scipy import linalg
+import scipy
+import matplotlib.pyplot as plt
+import functools
+from matplotlib.patches import Ellipse
+
+from casadi import *
+
+
def Benoit_Model(u):
    """Nominal Benoit objective model: f(u) = u1^2 + u2^2 (no cross term)."""
    u1, u2 = u[0], u[1]
    return u1 ** 2 + u2 ** 2
+
+
def con1_model(u):
    """Benoit model constraint g(u) = 1 - u1 + u2^2, returned sign-flipped."""
    return -(1. - u[0] + u[1] ** 2)
+
+
def Benoit_System(u):
    """Benoit 'plant' objective with additive N(0, 1e-3) measurement noise."""
    clean = u[0] ** 2 + u[1] ** 2 + u[0] * u[1]
    return clean + np.random.normal(0., np.sqrt(1e-3))
+
+
def con1_system(u):
    """Noisy Benoit plant constraint (relaxed by -2), sign-flipped."""
    residual = 1. - u[0] + u[1] ** 2 + 2. * u[1] - 2.
    return -(residual + np.random.normal(0., np.sqrt(1e-3)))
+
+
def con1_system_tight(u):
    """Noisy Benoit plant constraint (tight variant, no -2 slack), sign-flipped."""
    residual = 1. - u[0] + u[1] ** 2 + 2. * u[1]
    return -(residual + np.random.normal(0., np.sqrt(1e-3)))
+
+
def Benoit_System_noiseless(u):
    """Noise-free Benoit plant objective: u1^2 + u2^2 + u1*u2."""
    a, b = u[0], u[1]
    return a ** 2 + b ** 2 + a * b
+
+
def con1_system_noiseless(u):
    """Noise-free version of con1_system (relaxed constraint), sign-flipped."""
    a, b = u[0], u[1]
    g1 = 1. - a + b ** 2 + 2. * b - 2.
    return -g1
+
+
def con1_system_tight_noiseless(u):
    """Noise-free version of con1_system_tight, sign-flipped."""
    a, b = u[0], u[1]
    g1 = 1. - a + b ** 2 + 2. * b
    return -g1
+
+
class WO_system:
    """Williams-Otto reactor 'plant' (three-reaction scheme).

    The steady state is obtained by solving the algebraic mass balances with
    a CasADi Newton rootfinder; objective/constraint evaluators optionally add
    measurement noise.
    """
    # Parameters
    Fa = 1.8275
    Mt = 2105.2
    # kinetic parameters (kept for parity with WO_model; unused here)
    phi1 = - 3.
    psi1 = -17.
    phi2 = - 4.
    psi2 = -29.
    # Reference temperature
    Tref = 110. + 273.15  # [=] K.

    def __init__(self):
        self.xd, self.xa, self.u, self.ODEeq, self.Aeq, self.states, self.algebraics, self.inputs = self.DAE_system()
        self.eval = self.integrator_system()

    def DAE_system(self):
        """Build the symbolic algebraic balances; returns the CasADi symbols
        and equation lists."""
        # Fix: bind class constants locally -- bare names inside a method body
        # do not resolve to class attributes (previously a NameError unless a
        # same-named module global happened to exist).
        Fa = self.Fa
        Mt = self.Mt

        # Define vectors with names of states
        states = ['x']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for i in range(nd):
            globals()[states[i]] = xd[i]

        # Algebraic variables (mass fractions)
        algebraics = ['Xa', 'Xb', 'Xc', 'Xe', 'Xp', 'Xg']
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for i in range(na):
            globals()[algebraics[i]] = xa[i]

        # Inputs: feed rate of B and reactor temperature
        inputs = ['Fb', 'Tr']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for i in range(nu):
            globals()[inputs[i]] = u[i]

        # Arrhenius rate constants
        k1 = 1.6599e6 * np.exp(-6666.7 / (Tr + 273.15))
        k2 = 7.2117e8 * np.exp(-8333.3 / (Tr + 273.15))
        k3 = 2.6745e12 * np.exp(-11111. / (Tr + 273.15))

        # reaction rates
        Fr = Fa + Fb
        r1 = k1 * Xa * Xb * Mt
        r2 = k2 * Xb * Xc * Mt
        r3 = k3 * Xc * Xp * Mt

        # Dummy ODE: the model is purely algebraic. (The old dead `x_res`
        # numpy block was removed: it assigned SX expressions into a float
        # ndarray and was never used.)
        ODEeq = [0 * x]

        # Steady-state component balances
        Aeq = []
        Aeq += [(Fa - r1 - Fr * Xa) / Mt]
        Aeq += [(Fb - r1 - r2 - Fr * Xb) / Mt]
        Aeq += [(+ 2 * r1 - 2 * r2 - r3 - Fr * Xc) / Mt]
        Aeq += [(+ 2 * r2 - Fr * Xe) / Mt]
        Aeq += [(+ r2 - 0.5 * r3 - Fr * Xp) / Mt]
        Aeq += [(+ 1.5 * r3 - Fr * Xg) / Mt]

        return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs

    def integrator_system(self):
        """Return a Newton rootfinder solving Aeq(w; u) = 0 for the algebraic
        states, callable as solver(w0, u)."""
        xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_system()
        VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
        solver = rootfinder('solver', 'newton', VV)
        return solver

    def WO_obj_sys_ca(self, u):
        """Noisy plant economic objective (negated profit)."""
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        Fb = u[0]
        Tr = u[1]
        Fa = 1.8275
        Fr = Fa + Fb

        obj = -(1043.38 * x[4] * Fr +
                20.92 * x[3] * Fr -
                79.23 * Fa -
                118.34 * Fb) + 0.5 * np.random.normal(0., 1)

        return obj

    def WO_obj_sys_ca_noise_less(self, u):
        """Noise-free plant economic objective (negated profit)."""
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        Fb = u[0]
        Tr = u[1]
        Fa = 1.8275
        Fr = Fa + Fb

        obj = -(1043.38 * x[4] * Fr +
                20.92 * x[3] * Fr -
                79.23 * Fa -
                118.34 * Fb)  # + 0.5*np.random.normal(0., 1)

        return obj

    def WO_con1_sys_ca(self, u):
        """Noisy constraint 1: Xa <= 0.12, returned sign-flipped."""
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon1 = x[0] - 0.12 + 5e-4 * np.random.normal(0., 1)

        return -pcon1.toarray()[0]

    def WO_con2_sys_ca(self, u):
        """Noisy constraint 2: Xg <= 0.08, returned sign-flipped."""
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon2 = x[5] - 0.08 + 5e-4 * np.random.normal(0., 1)

        return -pcon2.toarray()[0]

    def WO_con1_sys_ca_noise_less(self, u):
        """Noise-free constraint 1: Xa <= 0.12, returned sign-flipped."""
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon1 = x[0] - 0.12  # + 5e-4*np.random.normal(0., 1)

        return -pcon1.toarray()[0]

    def WO_con2_sys_ca_noise_less(self, u):
        """Noise-free constraint 2: Xg <= 0.08, returned sign-flipped."""
        x = self.eval(np.array([0.114805, 0.525604, 0.0260265, 0.207296, 0.0923376, 0.0339309]), u)
        pcon2 = x[5] - 0.08  # + 5e-4*np.random.normal(0., 1)

        return -pcon2.toarray()[0]
+
+
class WO_model:
    """Reduced Williams-Otto model (two-reaction scheme) used as the nominal
    model; algebraic balances solved with a CasADi Newton rootfinder."""
    # Parameters
    Fa = 1.8275
    Mt = 2105.2
    # kinetic parameters
    phi1 = - 3.
    psi1 = -17.
    phi2 = - 4.
    psi2 = -29.
    # Reference temperature
    Tref = 110. + 273.15  # [=] K.

    def __init__(self):
        self.xd, self.xa, self.u, self.ODEeq, self.Aeq, self.states, self.algebraics, self.inputs = self.DAE_model()
        self.eval = self.integrator_model()

    def DAE_model(self):
        """Build the symbolic algebraic balances of the reduced model."""
        # Fix: bind class constants locally -- bare names inside a method body
        # do not resolve to class attributes (previously a NameError unless a
        # same-named module global happened to exist).
        Fa, Mt, Tref = self.Fa, self.Mt, self.Tref
        phi1, psi1, phi2, psi2 = self.phi1, self.psi1, self.phi2, self.psi2

        # Define vectors with names of states
        states = ['x']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for i in range(nd):
            globals()[states[i]] = xd[i]

        # Algebraic variables (mass fractions; no Xc in the reduced scheme)
        algebraics = ['Xa', 'Xb', 'Xe', 'Xp', 'Xg']
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for i in range(na):
            globals()[algebraics[i]] = xa[i]

        # Inputs: feed rate of B and reactor temperature
        inputs = ['Fb', 'Tr']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for i in range(nu):
            globals()[inputs[i]] = u[i]

        # Reparametrised Arrhenius rates around Tref
        k1 = np.exp(phi1) * np.exp((Tref / (Tr + 273.15) - 1) * psi1)
        k2 = np.exp(phi2) * np.exp((Tref / (Tr + 273.15) - 1) * psi2)

        # reaction rates
        Fr = Fa + Fb
        r1 = k1 * Xa * Xb * Xb * Mt
        r2 = k2 * Xa * Xb * Xp * Mt

        # Dummy ODE: the model is purely algebraic.
        ODEeq = [0 * x]

        # Steady-state component balances
        Aeq = []
        Aeq += [Fa - r1 - r2 - Fr * Xa]
        Aeq += [Fb - 2 * r1 - r2 - Fr * Xb]
        Aeq += [+ 2 * r1 - Fr * Xe]
        Aeq += [+ r1 - r2 - Fr * Xp]
        Aeq += [+ 3 * r2 - Fr * Xg]

        return xd, xa, u, ODEeq, Aeq, states, algebraics, inputs

    def integrator_model(self):
        """Return a Newton rootfinder solving Aeq(w; u) = 0 for the algebraic
        states, callable as solver(w0, u)."""
        xd, xa, u, ODEeq, Aeq, states, algebraics, inputs = self.DAE_model()
        VV = Function('vfcn', [xa, u], [vertcat(*Aeq)], ['w0', 'u'], ['w'])
        solver = rootfinder('solver', 'newton', VV)

        return solver

    def WO_obj_ca(self, u):
        """Model economic objective (negated profit, deterministic)."""
        x = self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
        Fb = u[0]
        Tr = u[1]
        Fa = 1.8275
        Fr = Fa + Fb

        obj = -(1043.38 * x[3] * Fr +
                20.92 * x[2] * Fr -
                79.23 * Fa -
                118.34 * Fb)

        return obj

    def WO_con1_model_ca(self, u):
        """Model constraint 1: Xa <= 0.12, returned sign-flipped."""
        x = self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
        pcon1 = x[0] - 0.12  # + 5e-4*np.random.normal(1., 1)
        return -pcon1.toarray()[0]

    def WO_con2_model_ca(self, u):
        """Model constraint 2: Xg <= 0.08, returned sign-flipped."""
        x = self.eval(np.array([0.114805, 0.525604, 0.207296, 0.0923376, 0.0339309]), u)
        pcon2 = x[4] - 0.08  # + 5e-4*np.random.normal(1., 1)
        return -pcon2.toarray()[0]
+
+
def con_empty(u):
    """Trivial placeholder constraint: always zero, independent of u."""
    return -0.
+
+
def obj_empty(u):
    """Trivial placeholder objective: always zero, independent of u."""
    return 0.
+
+
class Bio_system:
    """Photo-bioreactor 'plant' with substrate-inhibition kinetics and
    (optional) additive measurement noise.

    States: x (biomass), n (nitrate), q (product).
    Controls per interval: L (light) and Fn (nitrate feed), given in [0, 1].
    """

    def __init__(self, nk=12):
        self.nk, self.tf, self.x0, _, _ = self.specifications(nk)
        (self.xd, self.xa, self.u, _, self.ODEeq, self.Aeq, self.u_min,
         self.u_max, self.states, self.algebraics, self.inputs, self.nd,
         self.na, self.nu, self.nmp, self.modparval) = self.DAE_system()
        self.eval = self.integrator_model()
        # Measurement-noise covariance; the trailing *0 currently disables it.
        self.Sigma_v = [400., 1e5, 1e-2] * diag(np.ones(self.nd)) * 0  # 1e-7

    def specifications(self, nk):
        """Problem parameters: (nk, tf, x0, linear solver, c_code flag)."""
        tf = 240.  # final time
        x0 = np.array([1., 150., 0.])
        Lsolver = 'mumps'  # 'ma97'  # linear solver
        c_code = False
        return nk, tf, x0, Lsolver, c_code

    def DAE_system(self):
        """Symbolic plant ODE; unlike the nominal Bio_model this includes the
        L**2/k_i substrate-inhibition terms."""
        # States; the globals() trick makes each symbol addressable by name.
        states = ['x', 'n', 'q']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for i in range(nd):
            globals()[states[i]] = xd[i]

        # No algebraic variables.
        algebraics = []
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for i in range(na):
            globals()[algebraics[i]] = xa[i]

        # Inputs: light intensity L and nitrate feed Fn.
        inputs = ['L', 'Fn']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for i in range(nu):
            globals()[inputs[i]] = u[i]

        # Model parameters.
        modpar = ['u_m', 'k_s', 'k_i', 'K_N', 'u_d', 'Y_nx', 'k_m', 'k_sq',
                  'k_iq', 'k_d', 'K_Np']
        modparval = [0.0923 * 0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
                     2.544 * 0.62 * 1e-4, 23.51, 800.0, 0.281, 16.89]
        nmp = len(modpar)
        for i in range(nmp):
            globals()[modpar[i]] = SX(modparval[i])

        # ODE right-hand side with light-inhibition terms.
        dx = u_m * L / (L + k_s + L ** 2. / k_i) * x * n / (n + K_N) - u_d * x
        dn = - Y_nx * u_m * L / (L + k_s + L ** 2. / k_i) * x * n / (n + K_N) + Fn
        dq = k_m * L / (L + k_sq + L ** 2. / k_iq) * x - k_d * q / (n + K_Np)
        ODEeq = [dx, dn, dq]

        # No algebraic equations.
        Aeq = []

        # Physical control bounds.
        u_min = np.array([120., 0.])  # lower bound of inputs
        u_max = np.array([400., 40.])  # upper bound of inputs

        return xd, xa, u, 0, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval

    def integrator_model(self):
        """IDAS integrator over one sampling interval (tf/nk);
        returns F: Function(x0, p=u) -> xf."""
        xd, xa, u, _, ODEeq, Aeq, *_ = self.DAE_system()
        dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),
               'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}
        opts = {'tf': self.tf / self.nk}  # interval length
        return integrator('F', 'idas', dae, opts)

    def bio_obj_ca(self, u0):
        """Simulate all nk intervals; return -x[-1](tf) plus one noise sample."""
        x = self.x0
        u = np.array(u0).reshape((self.nk, 2)) * (self.u_max - self.u_min) + self.u_min
        for i in range(self.nk):
            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
            x = np.array(xd['xf'].T)[0]
        return -x[-1] + np.random.multivariate_normal([0.] * self.nd, np.array(self.Sigma_v))[-1]

    def bio_con1_ca(self, n, u0):
        """Noisy constraint 1 after n intervals: x[1] <= 800 (scaled residual),
        returned sign-flipped so >= 0 means feasible."""
        x = self.x0
        u = np.array(u0).reshape((self.nk, 2)) * (self.u_max - self.u_min) + self.u_min
        for i in range(n):
            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
            x = np.array(xd['xf'].T)[0]
        x[1] += np.random.multivariate_normal([0.] * self.nd, np.array(self.Sigma_v))[1]
        pcon1 = x[1] / 800 - 1
        return -pcon1

    def bio_con2_ca(self, n, u0):
        """Noisy constraint 2 after n intervals: x[2] <= 0.011*x[0],
        returned sign-flipped so >= 0 means feasible."""
        x = self.x0
        u = np.array(u0).reshape((self.nk, 2)) * (self.u_max - self.u_min) + self.u_min
        for i in range(n):
            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
            x = np.array(xd['xf'].T)[0]
        x += np.random.multivariate_normal([0.] * self.nd, np.array(self.Sigma_v))
        # Fix: use the same normalised residual as Bio_system_noiseless and
        # Bio_model (was the unnormalised x[2] - 0.011*x[0]).
        pcon2 = x[2] / (0.011 * x[0]) - 1
        return -pcon2
+
class Bio_system_noiseless:
    """Noise-free twin of Bio_system (same inhibition-kinetics plant).

    Sigma_v is an all-zero covariance; the noise draws are kept so that the
    global NumPy RNG state advances exactly as in the noisy variant.
    """

    def __init__(self, nk=12):
        self.nk, self.tf, self.x0, _, _ = self.specifications(nk)
        (self.xd, self.xa, self.u, _, self.ODEeq, self.Aeq, self.u_min,
         self.u_max, self.states, self.algebraics, self.inputs, self.nd,
         self.na, self.nu, self.nmp, self.modparval) = self.DAE_system()
        self.eval = self.integrator_model()
        # Zero measurement-noise covariance.
        self.Sigma_v = [400., 1e5, 1e-2] * diag(np.ones(self.nd)) * 0.

    def specifications(self, nk):
        """Problem parameters: (nk, tf, x0, linear solver, c_code flag)."""
        final_time = 240.
        initial_state = np.array([1., 150., 0.])
        linear_solver = 'mumps'  # alternative: 'ma97'
        use_c_code = False
        return nk, final_time, initial_state, linear_solver, use_c_code

    def DAE_system(self):
        """Symbolic plant ODE with L**2/k_i substrate-inhibition terms."""
        # Differential states; expose each symbol by name via globals().
        states = ['x', 'n', 'q']
        nd = len(states)
        xd = SX.sym('xd', nd)
        for idx, name in enumerate(states):
            globals()[name] = xd[idx]

        # No algebraic variables.
        algebraics = []
        na = len(algebraics)
        xa = SX.sym('xa', na)
        for idx, name in enumerate(algebraics):
            globals()[name] = xa[idx]

        # Control inputs: light intensity L and nitrate feed Fn.
        inputs = ['L', 'Fn']
        nu = len(inputs)
        u = SX.sym("u", nu)
        for idx, name in enumerate(inputs):
            globals()[name] = u[idx]

        # Model parameters as SX constants.
        modpar = ['u_m', 'k_s', 'k_i', 'K_N', 'u_d', 'Y_nx', 'k_m', 'k_sq',
                  'k_iq', 'k_d', 'K_Np']
        modparval = [0.0923 * 0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
                     2.544 * 0.62 * 1e-4, 23.51, 800.0, 0.281, 16.89]
        nmp = len(modpar)
        for idx, name in enumerate(modpar):
            globals()[name] = SX(modparval[idx])

        # ODE right-hand side.
        dx = u_m * L / (L + k_s + L ** 2. / k_i) * x * n / (n + K_N) - u_d * x
        dn = - Y_nx * u_m * L / (L + k_s + L ** 2. / k_i) * x * n / (n + K_N) + Fn
        dq = k_m * L / (L + k_sq + L ** 2. / k_iq) * x - k_d * q / (n + K_Np)
        ODEeq = [dx, dn, dq]

        # No algebraic equations.
        Aeq = []

        # Physical control bounds.
        u_min = np.array([120., 0.])
        u_max = np.array([400., 40.])

        return xd, xa, u, 0, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval

    def integrator_model(self):
        """IDAS integrator over one sampling interval (tf/nk)."""
        spec = self.DAE_system()
        xd, xa, u, ODEeq, Aeq = spec[0], spec[1], spec[2], spec[4], spec[5]
        dae_spec = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),
                    'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}
        step_opts = {'tf': self.tf / self.nk}  # one interval
        return integrator('F', 'idas', dae_spec, step_opts)

    def bio_obj_ca(self, u0):
        """Simulate all nk intervals and return -x[-1](tf) (noise term is a
        zero-covariance draw, kept for RNG parity)."""
        controls = np.array(u0).reshape((self.nk, 2))
        controls = controls * (self.u_max - self.u_min) + self.u_min
        state = self.x0
        for k in range(self.nk):
            step = self.eval(x0=vertcat(np.array(state)), p=vertcat(controls[k]))
            state = np.array(step['xf'].T)[0]
        return -state[-1] + np.random.multivariate_normal([0.] * self.nd, np.array(self.Sigma_v))[-1]

    def bio_con1_ca(self, n, u0):
        """Constraint 1 after n intervals: x[1] <= 800 (scaled residual),
        sign-flipped so >= 0 means feasible."""
        controls = np.array(u0).reshape((self.nk, 2))
        controls = controls * (self.u_max - self.u_min) + self.u_min
        state = self.x0
        for k in range(n):
            step = self.eval(x0=vertcat(np.array(state)), p=vertcat(controls[k]))
            state = np.array(step['xf'].T)[0]
        state[1] += np.random.multivariate_normal([0.] * self.nd, np.array(self.Sigma_v))[1]
        residual = state[1] / 800 - 1
        return -residual

    def bio_con2_ca(self, n, u0):
        """Constraint 2 after n intervals: x[2] <= 0.011*x[0] (ratio residual),
        sign-flipped so >= 0 means feasible."""
        controls = np.array(u0).reshape((self.nk, 2))
        controls = controls * (self.u_max - self.u_min) + self.u_min
        state = self.x0
        for k in range(n):
            step = self.eval(x0=vertcat(np.array(state)), p=vertcat(controls[k]))
            state = np.array(step['xf'].T)[0]
        state += np.random.multivariate_normal([0.] * self.nd, np.array(self.Sigma_v))
        residual = state[2] / (0.011 * state[0]) - 1
        return -residual
+
+
+class Bio_model:
+
+
+ def __init__(self, nk=12, n_between=50, empty =False):
+ self.nk, self.tf, self.x0, _, _ = self.specifications(nk)
+ self.xd, self.xa, self.u, _, self.ODEeq, self.Aeq, self.u_min, self.u_max,\
+ self.states, self.algebraics, self.inputs, self.nd, self.na, self.nu, \
+ self.nmp,self. modparval= self.DAE_system()
+ self.eval = self.integrator_model()
+ self.n_between=n_between
+ self.empty = empty
+ def specifications(self, nk):
+ ''' Specify Problem parameters '''
+ tf = 240. # final time
+ # nk = 12 # sampling points
+ x0 = np.array([1., 150., 0.])
+ Lsolver = 'mumps' # 'ma97' # Linear solver
+ c_code = False # c_code
+
+ return nk, tf, x0, Lsolver, c_code
+
+ def DAE_system(self):
+ # Define vectors with names of states
+ states = ['x', 'n', 'q']
+
+ nd = len(states)
+ xd = SX.sym('xd', nd)
+ for i in range(nd):
+ globals()[states[i]] = xd[i]
+
+ # Define vectors with names of algebraic variables
+ algebraics = []
+ na = len(algebraics)
+ xa = SX.sym('xa', na)
+ for i in range(na):
+ globals()[algebraics[i]] = xa[i]
+
+ # Define vectors with banes of input variables
+ inputs = ['L', 'Fn']
+ nu = len(inputs)
+ u = SX.sym("u", nu)
+ for i in range(nu):
+ globals()[inputs[i]] = u[i]
+
+ # Define model parameter names and values
+ modpar = ['u_m', 'k_s', 'k_i', 'K_N', 'u_d', 'Y_nx', 'k_m', 'k_sq',
+ 'k_iq', 'k_d', 'K_Np']
+ modparval = [0.0923 * 0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
+ 2.544 * 0.62 * 1e-4, 23.51, 800.0, 0.281, 16.89]
+
+ nmp = len(modpar)
+ for i in range(nmp):
+ globals()[modpar[i]] = SX(modparval[i])
+
+ # Additive measurement noise
+ # Sigma_v = [400.,1e5,1e-2]*diag(np.ones(nd))*1e-6
+
+ # Additive disturbance noise
+ # Sigma_w = [400.,1e5,1e-2]*diag(np.ones(nd))*1e-6
+
+ # Initial additive disturbance noise
+ # Sigma_w0 = [1.,150.**2,0.]*diag(np.ones(nd))*1e-3
+
+ # Declare ODE equations (use notation as defined above)
+
+ dx = u_m * L / (L + k_s) * x * n / (n + K_N) - u_d * x
+ dn = - Y_nx * u_m * L / (L + k_s) * x * n / (n + K_N) + Fn
+ dq = k_m * L / (L + k_sq) * x - k_d * q / (n + K_Np)
+
+ ODEeq = [dx, dn, dq]
+
+ # Declare algebraic equations
+ Aeq = []
+
+ # Define control bounds
+ u_min = np.array([120., 0.]) # lower bound of inputs
+ u_max = np.array([400., 40.]) # upper bound of inputs
+
+ # Define objective to be minimized
+ t = SX.sym('t')
+
+ return xd, xa, u, 0, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval
+
+    def integrator_model(self):
+        """
+        Construct a CasADi IDAS integrator for one control interval of the DAE model.
+
+        Also stores the raw ODE right-hand side as self.ODEeq, a CasADi
+        Function f(x0, p) -> xdot used by the RK4-based evaluators below.
+        inputs: NaN
+        outputs: F: Function([x, u, dt]--> [xf, obj])
+        """
+
+        xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, states, algebraics, inputs, nd, na, nu, nmp, modparval \
+            = self.DAE_system()
+        ODEeq_ = vertcat(*ODEeq)
+
+        # RHS as a standalone function; port names mimic the integrator (x0/p -> xdot)
+        self.ODEeq = Function('f', [xd, u], [vertcat(*ODEeq)], ['x0', 'p'], ['xdot'])
+
+        dae = {'x': vertcat(xd), 'z': vertcat(xa), 'p': vertcat(u),
+               'ode': vertcat(*ODEeq), 'alg': vertcat(*Aeq)}
+        opts = {'tf': self.tf / self.nk}  # interval length
+        F = integrator('F', 'idas', dae, opts)
+        # model = functools.partial(solver, np.zeros(np.shape(xa)))
+        return F
+
+    def bio_obj_ca(self, u0):
+        """
+        Economic objective: simulate all nk control intervals with the interval
+        integrator self.eval and return the negative final state x[-1]
+        (maximization of the product by minimizing this value).
+
+        u0: flattened control sequence scaled to [0, 1]; un-scaled with
+        self.u_min/self.u_max. Returns 0. when self.empty is set.
+        """
+        x = self.x0
+        u1 = np.array(u0).reshape((self.nk,2) )
+        u = u1 * (self.u_max - self.u_min) + self.u_min  # un-scale inputs to physical units
+
+        for i in range(self.nk):
+            # guard: warn (debug print) on negative states/inputs and clamp negative feed rates
+            if np.any(x<0):
+                print(2)
+            elif np.any(u[i]<0):
+                print(2)
+                for j in range(self.nk):
+                    if u[j,1]<0:
+                        u[j,1]= 0.
+
+            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
+            x = np.array(xd['xf'].T)[0]
+            # clip small negative states produced by the integrator
+            for j in range(self.nd):
+                if x[j]<0:
+                    x[j]=0
+
+        if self.empty:
+            return 0.
+        else:
+
+            return -x[-1]
+
+    def bio_con1_ca(self, n, u0):
+        """
+        Constraint 1 (x[1] <= 800) after the first n control intervals, simulated
+        with the interval integrator self.eval.
+
+        Returns -(x[1]/800 - 1), positive when feasible (constraints are set as
+        -g(x) >= 0); returns 0. when self.empty is set.
+        """
+        x = self.x0
+        u1 = np.array(u0).reshape((self.nk,2))
+        u = u1 * (self.u_max - self.u_min) + self.u_min  # un-scale inputs to physical units
+
+        for i in range(n):
+            # guard: warn (debug print) on negative states/inputs and clamp negative feed rates
+            if np.any(x<0):
+                print(2)
+            elif np.any(u[i]<0):
+                print(2)
+                for j in range(self.nk):
+                    if u[j,1]<0:
+                        u[j,1]= 0.
+
+
+            xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
+            x = np.array(xd['xf'].T)[0]
+            # clip small negative states produced by the integrator
+            for j in range(self.nd):
+                if x[j]<0:
+                    x[j]=0
+        pcon1 = x[1]/800-1  # + 5e-4*np.random.normal(1., 1)
+        if self.empty:
+            return 0.
+        else:
+            return -pcon1#.toarray()[0]
+
+ def bio_con2_ca(self, n, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+
+
+
+ for i in range(n):
+
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+ xd = self.eval(x0=vertcat(np.array(x)), p=vertcat(u[i]))
+ x = np.array(xd['xf'].T)[0]
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+ pcon1 = x[2]/(0.011 * x[0])-1 # + 5e-4*np.random.normal(1., 1)
+ return -pcon1#.toarray()[0]
+
+ def bio_obj_ca_RK4(self, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+ DT = self.tf/self.nk/self.n_between
+
+
+ for i in range(self.nk):
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+ f = self.ODEeq
+
+ for j in range(self.n_between):
+ k1 = f(x0=vertcat(np.array(x)), p=vertcat(u[i]))['xdot']
+ k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)),p=vertcat(u[i]))['xdot']
+ k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(u[i]))['xdot']
+ k4 = f(x0=vertcat(np.array(x + DT * k2)), p= vertcat(u[i]))['xdot']
+ x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+
+ # xd = self.eval(x0=vertcat(np.array(x1)), p=vertcat(u[i]))
+ # x1 = np.array(xd['xf'].T)[0]
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+
+ if self.empty:
+ return 0.
+ else:
+
+ return -x[-1].toarray()[0][0]
+
+ def bio_con1_ca_RK4(self, n, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+
+
+ DT = self.tf/self.nk/self.n_between
+
+ for i in range(n):
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+
+ f = self.ODEeq
+
+ for j in range(self.n_between):
+ k1 = f(x0=vertcat(np.array(x)), p=vertcat(u[i]))['xdot']
+ k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)),p=vertcat(u[i]))['xdot']
+ k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(u[i]))['xdot']
+ k4 = f(x0=vertcat(np.array(x + DT * k2)), p= vertcat(u[i]))['xdot']
+ x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+ pcon1 = x[1]/800 -1 # + 5e-4*np.random.normal(1., 1)
+ if self.empty:
+ return 0.
+ else:
+ return -pcon1.toarray()[0][0]
+
+ def bio_con2_ca_RK4(self, n, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+
+
+ DT = self.tf/self.nk/self.n_between
+
+ for i in range(n):
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+
+ f = self.ODEeq
+
+ for j in range(self.n_between):
+ k1 = f(x0=vertcat(np.array(x)), p=vertcat(u[i]))['xdot']
+ k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)),p=vertcat(u[i]))['xdot']
+ k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(u[i]))['xdot']
+ k4 = f(x0=vertcat(np.array(x + DT * k2)), p= vertcat(u[i]))['xdot']
+ x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+ pcon1 = x[2]/(0.011 * x[0])-1 # + 5e-4*np.random.normal(1., 1)#
+ if self.empty:
+ return 0.
+ else:
+ return -pcon1.toarray()[0][0]
+
+ def bio_model_ca(self):
+ M = self.n_between # RK4 steps per interval
+
+ X0 = SX.sym('X0', self.nd)
+ U = SX.sym('U', self.nu,1)
+ u = U * (self.u_max - self.u_min) + self.u_min
+ DT = self.tf/self.nk/M
+
+ f = self.ODEeq
+ X = X0
+ for j in range(M):
+ k1 = f(X, u)
+ k2 = f(X + DT / 2 * k1, u)
+ k3 = f(X + DT / 2 * k2, u)
+ k4 = f(X + DT * k2, u)
+ X = X + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+ F = Function('F', [X0, U], [X], ['x0', 'u'], ['xf'])
+
+ return F
+
+
+ def bio_obj_ca_f(self, x):
+ if not(self.empty):
+ return -x[-1]
+ else:
+ return 0.
+
+ def bio_con1_ca_f(self, x):
+ if not(self.empty):
+ pcon1 = x[1]/800 -1 # + 5e-4*np.random.normal(1., 1)
+ return -pcon1
+ else:
+ return 0.
+
+ def bio_con2_ca_f(self, x):
+ if not(self.empty):
+ pcon1 = x[2]/(0.011 * x[0])-1 # + 5e-4*np.random.normal(1., 1)
+ return -pcon1
+ else:
+ return 0.
+
+
+
+
+ def bio_obj_ca_RK4_empty(self, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+ DT = self.tf/self.nk/4
+
+
+ for i in range(self.nk):
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+ f = self.ODEeq
+
+ for j in range(4):
+ k1 = f(x0=vertcat(np.array(x)), p=vertcat(u[i]))['xdot']
+ k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)),p=vertcat(u[i]))['xdot']
+ k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(u[i]))['xdot']
+ k4 = f(x0=vertcat(np.array(x + DT * k2)), p= vertcat(u[i]))['xdot']
+ x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+
+ # xd = self.eval(x0=vertcat(np.array(x1)), p=vertcat(u[i]))
+ # x1 = np.array(xd['xf'].T)[0]
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+
+
+ return -0*x[-1].toarray()[0][0]
+
+ def bio_con1_ca_RK4_empty(self, n, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+
+
+ DT = self.tf/self.nk/4
+
+ for i in range(n):
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+
+ f = self.ODEeq
+
+ for j in range(4):
+ k1 = f(x0=vertcat(np.array(x)), p=vertcat(u[i]))['xdot']
+ k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)),p=vertcat(u[i]))['xdot']
+ k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(u[i]))['xdot']
+ k4 = f(x0=vertcat(np.array(x + DT * k2)), p= vertcat(u[i]))['xdot']
+ x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+ pcon1 = x[1]/800 -1 # + 5e-4*np.random.normal(1., 1)
+ return -0*pcon1.toarray()[0][0]
+
+ def bio_con2_ca_RK4_empty(self, n, u0):
+ x = self.x0
+ u1 = np.array(u0).reshape((self.nk,2) )
+ u = u1 * (self.u_max - self.u_min) + self.u_min
+
+
+ DT = self.tf/self.nk/4
+
+ for i in range(n):
+ if np.any(x<0):
+ print(2)
+ elif np.any(u[i]<0):
+ print(2)
+ for j in range(self.nk):
+ if u[j,1]<0:
+ u[j,1]= 0.
+
+
+ f = self.ODEeq
+
+ for j in range(4):
+ k1 = f(x0=vertcat(np.array(x)), p=vertcat(u[i]))['xdot']
+ k2 = f(x0=vertcat(np.array(x + DT / 2 * k1)),p=vertcat(u[i]))['xdot']
+ k3 = f(x0=vertcat(np.array(x + DT / 2 * k2)), p=vertcat(u[i]))['xdot']
+ k4 = f(x0=vertcat(np.array(x + DT * k2)), p= vertcat(u[i]))['xdot']
+ x = x + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+ for j in range(self.nd):
+ if x[j]<0:
+ x[j]=0
+ pcon1 = x[2]/(0.011 * x[0])-1 # + 5e-4*np.random.normal(1., 1)
+ return -0*pcon1.toarray()[0][0]
+
+ def bio_model_ca_empty(self):
+ M = self.n_between # RK4 steps per interval
+
+ X0 = SX.sym('X0', self.nd)
+ U = SX.sym('U', self.nu,1)
+ u = U * (self.u_max - self.u_min) + self.u_min
+ DT = self.tf/self.nk/M
+
+ f = self.ODEeq
+ X = X0
+ for j in range(M):
+ k1 = f(X, u)
+ k2 = f(X + DT / 2 * k1, u)
+ k3 = f(X + DT / 2 * k2, u)
+ k4 = f(X + DT * k2, u)
+ X = X + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+
+ F = Function('F', [X0, U], [X], ['x0', 'u'], ['xf'])
+
+ return F
+
+
+ def bio_obj_ca_f_empty(self, x):
+
+ return -0*x[-1]
+
+ def bio_con1_ca_f_empty(self, x):
+ pcon1 = x[1]/800 -1 # + 5e-4*np.random.normal(1., 1)
+ return -0*pcon1
+
+ def bio_con2_ca_f_empty(self, x):
+ pcon1 = x[2]/(0.011 * x[0])-1 # + 5e-4*np.random.normal(1., 1)
+ return -0*pcon1
\ No newline at end of file
diff --git a/sub_uts/utilities_2.py b/sub_uts/utilities_2.py
index 11e2f2a..b784ac2 100644
--- a/sub_uts/utilities_2.py
+++ b/sub_uts/utilities_2.py
@@ -25,7 +25,7 @@ class GP_model:
###########################
# --- initializing GP --- #
###########################
- def __init__(self, X, Y, kernel, multi_hyper, var_out=True, noise=None):
+ def __init__(self, X, Y, kernel, multi_hyper, var_out=True, noise=None, GP_casadi = False):
# GP variable definitions
self.X, self.Y, self.kernel = X, Y, kernel
@@ -33,7 +33,7 @@ def __init__(self, X, Y, kernel, multi_hyper, var_out=True, noise=None):
self.ny_dim = Y.shape[1]
self.multi_hyper = multi_hyper
self.var_out = var_out
-
+ self.GP_casadi = GP_casadi
# normalize data
self.X_mean, self.X_std = np.mean(X, axis=0), np.std(X, axis=0)
self.Y_mean, self.Y_std = np.mean(Y, axis=0), np.std(Y, axis=0)
@@ -41,7 +41,7 @@ def __init__(self, X, Y, kernel, multi_hyper, var_out=True, noise=None):
# determine hyperparameters
self.hypopt, self.invKopt = self.determine_hyperparameters(noise)
-
+ self.meanfcn, self.varfcn = self.GP_predictor()
#############################
# --- Covariance Matrix --- #
@@ -179,11 +179,64 @@ def determine_hyperparameters(self, noise=None):
########################
# --- GP inference --- #
########################
+ def GP_predictor(self):# , X,Y):
+ nd, invKopt, hypopt = self.ny_dim, self.invKopt, self.hypopt
+ Ynorm, Xnorm = SX(DM(self.Y_norm)), SX(DM(self.X_norm))
+ ndat = Xnorm.shape[0]
+ nX, covSEfcn = self.nx_dim, self.covSEard()
+ stdX, stdY, meanX, meanY = SX(self.X_std), SX(self.Y_std), SX(self.X_mean), SX(self.Y_mean)
+ # nk = 12
+ x = SX.sym('x', nX)
+ # nk = X.shape[0]
+ xnorm = (x - meanX) / stdX
+ # Xnorm2 = (X - meanX)/stdX
+ # Ynorm2 = (Y - meanY)/stdY
+
+ k = SX.zeros(ndat)
+ # k2 = SX.zeros(ndat+nk)
+ mean = SX.zeros(nd)
+ mean2 = SX.zeros(nd)
+ var = SX.zeros(nd)
+ # Xnorm2 = SX.sym('Xnorm2',ndat+nk,nX)
+ # invKY2 = SX.sym('invKY2',ndat+nk,nd)
+
+ for i in range(nd):
+ invK = SX(DM(invKopt[i]))
+ hyper = SX(DM(hypopt[:, i]))
+ ellopt, sf2opt = exp(2 * hyper[:nX]), exp(2 * hyper[nX])
+ for j in range(ndat):
+ k[j] = covSEfcn(xnorm, Xnorm[j, :], ellopt, sf2opt)
+ # for j in range(ndat+nk):
+ # k2[j] = covSEfcn(xnorm,Xnorm2[j,:],ellopt,sf2opt)
+
+ invKYnorm = mtimes(invK, Ynorm[:, i])
+ mean[i] = mtimes(k.T, invKYnorm)
+ # mean2[i] = mtimes(k2.T,invKY2[:,i])
+ var[i] = sf2opt - mtimes(mtimes(k.T, invK), k)
+
+ meanfcn = Function('meanfcn', [x], [mean * stdY + meanY])
+ # meanfcn2 = Function('meanfcn2',[x,Xnorm2,invKY2],[mean2*stdY + meanY])
+ varfcn = Function('varfcn', [x], [var * stdY ** 2])
+ # varfcnsd = Function('varfcnsd',[x],[var])
+
+ return meanfcn, varfcn # , meanfcn2, varfcnsd
+
+ def covSEard(self):
+ nx_dim = self.nx_dim
+ ell = SX.sym('ell', nx_dim)
+ sf2 = SX.sym('sf2')
+ x, z = SX.sym('x', nx_dim), SX.sym('z', nx_dim)
+ dist = sum1((x - z)**2 / ell)
+ covSEfcn = Function('covSEfcn',[x,z,ell,sf2],[sf2*exp(-.5*dist)])
+
+ return covSEfcn
+
def GP_inference_np(self, x):
'''
--- decription ---
'''
+
nx_dim = self.nx_dim
kernel, ny_dim = self.kernel, self.ny_dim
hypopt, Cov_mat = self.hypopt, self.Cov_mat
@@ -192,31 +245,37 @@ def GP_inference_np(self, x):
invKsample = self.invKopt
Xsample, Ysample = self.X_norm, self.Y_norm
var_out = self.var_out
- # Sigma_w = self.Sigma_w (if input noise)
-
- xnorm = (x - meanX) / stdX
- mean = np.zeros(ny_dim)
- var = np.zeros(ny_dim)
- # --- Loop over each output (GP) --- #
- for i in range(ny_dim):
- invK = invKsample[i]
- hyper = hypopt[:, i]
- ellopt, sf2opt = np.exp(2 * hyper[:nx_dim]), np.exp(2 * hyper[nx_dim])
-
- # --- determine covariance of each output --- #
- k = calc_cov_sample(xnorm, Xsample, ellopt, sf2opt)
- mean[i] = np.matmul(np.matmul(k.T, invK), Ysample[:, i])
- var[i] = max(0., sf2opt - np.matmul(np.matmul(k.T, invK), k)) # numerical error
- # var[i] = sf2opt + Sigma_w[i,i]/stdY[i]**2 - np.matmul(np.matmul(k.T,invK),k) (if input noise)
-
- # --- compute un-normalized mean --- #
- mean_sample = mean * stdY + meanY
- var_sample = var * stdY ** 2
-
- if var_out:
- return mean_sample, var_sample
+ if self.GP_casadi:
+ if var_out:
+ return self.meanfcn(x).toarray().flatten()[0], self.varfcn(x).toarray().flatten()[0]
+ else:
+ return self.meanfcn(x).toarray().flatten()[0]#.flatten()[0]
else:
- return mean_sample.flatten()[0]
+ # Sigma_w = self.Sigma_w (if input noise)
+
+ xnorm = (x - meanX) / stdX
+ mean = np.zeros(ny_dim)
+ var = np.zeros(ny_dim)
+ # --- Loop over each output (GP) --- #
+ for i in range(ny_dim):
+ invK = invKsample[i]
+ hyper = hypopt[:, i]
+ ellopt, sf2opt = np.exp(2 * hyper[:nx_dim]), np.exp(2 * hyper[nx_dim])
+
+ # --- determine covariance of each output --- #
+ k = calc_cov_sample(xnorm, Xsample, ellopt, sf2opt)
+ mean[i] = np.matmul(np.matmul(k.T, invK), Ysample[:, i])
+ var[i] = max(0., sf2opt - np.matmul(np.matmul(k.T, invK), k)) # numerical error
+ # var[i] = sf2opt + Sigma_w[i,i]/stdY[i]**2 - np.matmul(np.matmul(k.T,invK),k) (if input noise)
+
+ # --- compute un-normalized mean --- #
+ mean_sample = mean * stdY + meanY
+ var_sample = var * stdY ** 2
+
+ if var_out:
+ return mean_sample, var_sample
+ else:
+ return mean_sample.flatten()[0]
######################################
# Central finite differences 5 points
@@ -285,7 +344,7 @@ class ITR_GP_RTO:
def __init__(self, obj_model, obj_system, cons_model, cons_system, x0,
Delta0, Delta_max, eta0, eta1, gamma_red, gamma_incr,
n_iter, data, bounds, obj_setting=2, noise=None, multi_opt=5, multi_hyper=10, TR_scaling=True,
- TR_curvature=True, store_data=True, inner_TR=False, scale_inputs=False):
+ TR_curvature=True, store_data=True, inner_TR=False, scale_inputs=False,GP_casadi=True):
'''
data = ['int', bound_list=[[0,10],[-5,5],[3,8]], samples_number] <=> d = ['int', np.array([[-12, 8]]), 3]
data = ['data0', Xtrain]
@@ -294,6 +353,7 @@ def __init__(self, obj_model, obj_system, cons_model, cons_system, x0,
'''
# internal variable definitions
+ self.GP_casadi = GP_casadi
self.obj, self.noise = obj_setting, noise
self.obj_model, self.n_iter = obj_model, n_iter
self.multi_opt, self.multi_hyper = multi_opt, multi_hyper
@@ -430,7 +490,7 @@ def GP_obj_f(self, d, GP, xk):
obj_f = GP.GP_inference_np(xk + d)
return obj_model((xk + d).flatten()) + obj_f[0] - 3 * np.sqrt(obj_f[1])
elif obj == 3:
- fs = self.obj_max
+ fs = -self.obj_min#self.obj_max
obj_f = GP.GP_inference_np(xk + d)
mean = obj_model((xk + d).flatten()) + obj_f[0]
Delta = fs - mean
@@ -812,6 +872,15 @@ def Update_derivatives(self, xk, xnew, H_inv, H_norm):
return H_norm, H_inv
+
+ def find_min_so_far(self,funcs_system, X_opt):
+ # ynew[0,ii] = funcs[ii](np.array(xnew[:]).flatten())
+ min = -1.
+ for i in range(len(X_opt)):
+ y= funcs_system[0](np.array(X_opt[i]).flatten())
+ if y< min:
+ min =y
+ return min
######################################
# --- Real-Time Optimization alg --- #
######################################
@@ -871,6 +940,7 @@ def RTO_routine(self):
# renaming data
X_opt = np.copy(Xtrain)
y_opt = np.copy(ytrain)
+ self.obj_min = self.find_min_so_far(funcs_system, X_opt)
# --- TR lists --- #
TR_l = ['error'] * (n_iter + 1)
@@ -928,10 +998,11 @@ def RTO_routine(self):
# ynew[0,ii] = funcs[ii](np.array(xnew[:]).flatten())
ynew[0, ii] = funcs_system[ii](np.array(xnew[:]).flatten()) - funcs_model[ii](
np.array(xnew[:]).flatten())
-
+ print('New Objective: ', -funcs_system[0](np.array(xnew[:]).flatten()))
# adding new point to sample
X_opt = np.vstack((X_opt, xnew))
y_opt = np.hstack((y_opt, ynew.T))
+ self.obj_min = self.find_min_so_far(funcs_system, X_opt)
# --- Update TR --- #
self.Delta0, xnew, xk = Step_constraint(self.Delta0, xk, xnew, GP_obj, i_rto) # adjust TR
diff --git a/sub_uts/utilities_4.py b/sub_uts/utilities_4.py
new file mode 100644
index 0000000..9e30ed3
--- /dev/null
+++ b/sub_uts/utilities_4.py
@@ -0,0 +1,1273 @@
+# v2 includes shaping the TR with the curvature of the problem by a broyden update on derivatives
+# and a BFGS update on the Hessian, however the TR becomes very small in some parts, so the approach
+# does not seem to be too effective.
+
+import time
+import random
+import numpy as np
+import numpy.random as rnd
+from scipy.spatial.distance import cdist
+
+import sobol_seq
+from scipy.optimize import minimize
+from scipy.optimize import broyden1
+from scipy import linalg
+import scipy
+import matplotlib.pyplot as plt
+import functools
+from matplotlib.patches import Ellipse
+from scipy.stats import norm
+from casadi import *
+
+
+class GP_model:
+
+ ###########################
+ # --- initializing GP --- #
+ ###########################
+    def __init__(self, X, Y, kernel, multi_hyper, var_out=True, noise=None, GP_casadi = True):
+        """
+        Multi-output GP: one independent GP per column of Y.
+
+        X: (n, nx) inputs; Y: (n, ny) outputs; kernel: 'RBF'/'Matern32'/'Matern52';
+        multi_hyper: number of multistarts for hyperparameter fitting;
+        var_out: return variance alongside the mean in GP_inference_np;
+        noise: optional fixed noise variance(s) pinning sn2 during fitting;
+        GP_casadi: use precompiled CasADi predictors in GP_inference_np.
+        """
+        # GP variable definitions
+        self.X, self.Y, self.kernel = X, Y, kernel
+        self.n_point, self.nx_dim = X.shape[0], X.shape[1]
+        self.ny_dim = Y.shape[1]
+        self.multi_hyper = multi_hyper
+        self.var_out = var_out
+        self.GP_casadi = GP_casadi
+        # normalize data (zero mean, unit std per dimension)
+        self.X_mean, self.X_std = np.mean(X, axis=0), np.std(X, axis=0)
+        self.Y_mean, self.Y_std = np.mean(Y, axis=0), np.std(Y, axis=0)
+        self.X_norm, self.Y_norm = (X - self.X_mean) / self.X_std, (Y - self.Y_mean) / self.Y_std
+
+        # determine hyperparameters, then compile symbolic predictors once
+        self.hypopt, self.invKopt = self.determine_hyperparameters(noise)
+        self.meanfcn, self.varfcn = self.GP_predictor(kernel=kernel)
+ #############################
+
+ # --- Covariance Matrix --- #
+ #############################
+
+    def Cov_mat(self, kernel, X_norm, W, sf2):
+        '''
+        Calculates the covariance matrix of a dataset Xnorm.
+        W: squared length-scales (used as the 'V' weights of the scaled
+        euclidean distance); sf2: signal variance.
+        '''
+        dist = cdist(X_norm, X_norm, 'seuclidean', V=W) ** 2
+        r = np.sqrt(dist)
+        if kernel == 'RBF':
+            cov_matrix = sf2 * np.exp(-0.5 * dist)
+            return cov_matrix
+            # Note: cdist => sqrt(sum(u_i-v_i)^2/V[x_i])
+        elif kernel == 'Matern32':
+            cov_matrix = sf2 * (1 + 3**0.5 * r)*np.exp(-r*3**0.5)
+            return cov_matrix
+        elif kernel == 'Matern52':
+            cov_matrix = sf2 * (1 + 5**0.5 * r + 5/3 * r**2) * np.exp(-r*5**0.5)
+            return cov_matrix
+        else:
+            print('ERROR no kernel with name ', kernel)
+
+ ################################
+ # --- Covariance of sample --- #
+ ################################
+
+ def calc_cov_sample(self, xnorm, Xnorm, ell, sf2):
+ '''
+ Calculates the covariance of a single sample xnorm against the dataset Xnorm
+ --- decription ---
+ '''
+ # internal parameters
+ nx_dim = self.nx_dim
+ kernel = self.kernel
+ if kernel == 'RBF':
+ dist = cdist(Xnorm, xnorm.reshape(1,nx_dim), 'seuclidean', V=ell)**2
+ cov_matrix = sf2 * np.exp(-.5*dist)
+ elif kernel =='Matern32':
+ dist = cdist(Xnorm, xnorm.reshape(1,nx_dim), 'seuclidean', V=ell)
+ t_1 = 1 + 3**0.5*dist
+ t_2 = np.exp(-3**0.5*dist)
+ cov_matrix = sf2 * t_1 * t_2
+ elif kernel == 'Matern52':
+ dist = cdist(Xnorm, xnorm.reshape(1,nx_dim), 'seuclidean', V=ell)
+ t_1 = 1 + 5**0.5* dist + 5/3*dist**2
+ t_2 = np.exp(-5**2*dist)
+ cov_matrix = sf2 * t_1 * t_2
+ else:
+ print('ERROR no kernel with name ', kernel)
+ cov_matrix = 0.
+
+
+ return cov_matrix
+
+ ###################################
+
+ # --- negative log likelihood --- #
+ ###################################
+
+    def negative_loglikelihood(self, hyper, X, Y):
+        '''
+        Negative log marginal likelihood of one output GP for hyperparameters
+        hyper = [log ell_1..nx, log sf, log sn] (all inside an exponential).
+        '''
+        # internal parameters
+        n_point, nx_dim = self.n_point, self.nx_dim
+        kernel = self.kernel
+
+        W = np.exp(2 * hyper[:nx_dim])  # W <=> 1/lambda
+        sf2 = np.exp(2 * hyper[nx_dim])  # variance of the signal
+        sn2 = np.exp(2 * hyper[nx_dim + 1])  # variance of noise
+
+        K = self.Cov_mat(kernel, X, W, sf2)  # (nxn) covariance matrix (noise free)
+        K = K + (sn2 + 1e-8) * np.eye(n_point)  # (nxn) covariance matrix
+        K = (K + K.T) * 0.5  # ensure K is symmetric
+        L = np.linalg.cholesky(K)  # do a cholesky decomposition
+        logdetK = 2 * np.sum(
+            np.log(np.diag(L)))  # calculate the log of the determinant of K the 2* is due to the fact that L^2 = K
+        invLY = np.linalg.solve(L, Y)  # obtain L^{-1}*Y
+        alpha = np.linalg.solve(L.T, invLY)  # obtain (L.T L)^{-1}*Y = K^{-1}*Y
+        NLL = np.dot(Y.T, alpha) + logdetK  # construct the NLL
+
+        return NLL
+
+ ############################################################
+ # --- Minimizing the NLL (hyperparameter optimization) --- #
+ ############################################################
+
+    def determine_hyperparameters(self, noise=None):
+        '''
+        Fit hyperparameters by multistart NLL minimization (SLSQP); one GP per
+        output. If a fixed noise variance is given for output i, the sn2 bound
+        is pinned to (approximately) that value.
+        Returns (hypopt, invKopt): optimal log-hyperparameters per output and
+        the corresponding inverted covariance matrices.
+        '''
+        # internal parameters
+        X_norm, Y_norm = self.X_norm, self.Y_norm
+        nx_dim, n_point = self.nx_dim, self.n_point
+        kernel, ny_dim = self.kernel, self.ny_dim
+        Cov_mat = self.Cov_mat
+
+        lb = np.array([-5.] * (nx_dim + 1) + [-10.])  # lb on parameters (this is inside the exponential)
+        ub = np.array([8.] * (nx_dim + 1) + [-4.])  # ub on parameters (this is inside the exponential)
+
+
+
+        bounds = np.hstack((lb.reshape(nx_dim + 2, 1),
+                            ub.reshape(nx_dim + 2, 1)))
+        multi_start = self.multi_hyper  # multistart on hyperparameter optimization
+        multi_startvec = sobol_seq.i4_sobol_generate(nx_dim + 2, multi_start)
+
+        options = {'disp': False, 'maxiter': 10000}  # solver options
+        hypopt = np.zeros((nx_dim + 2, ny_dim))  # hyperparams w's + sf2+ sn2 (one for each GP i.e. output var)
+        localsol = [0.] * multi_start  # values for multistart
+        localval = np.zeros((multi_start))  # variables for multistart
+
+        invKopt = []
+        # --- loop over outputs (GPs) --- #
+        for i in range(ny_dim):
+            # pin sn2 (within +-0.001 in log space) when a fixed noise level is supplied
+            if not(np.array([noise]).reshape((-1,))[i]==None):
+                noise = np.array([noise]).reshape((-1,))
+                lb[-1] = np.log(noise[i]/self.Y_std[i]**2)*0.5-0.001
+                ub[-1] = np.log(noise[i]/self.Y_std[i]**2)*0.5+0.001
+                bounds = np.hstack((lb.reshape(nx_dim + 2, 1),
+                                    ub.reshape(nx_dim + 2, 1)))
+            # --- multistart loop --- #
+            for j in range(multi_start):
+                # print('multi_start hyper parameter optimization iteration = ',j,'  input = ',i)
+                hyp_init = lb + (ub - lb) * multi_startvec[j, :]
+                # --- hyper-parameter optimization --- #
+                res = minimize(self.negative_loglikelihood, hyp_init, args=(X_norm, Y_norm[:, i]) \
+                               , method='SLSQP', options=options, bounds=bounds, tol=1e-12)
+                localsol[j] = res.x
+                localval[j] = res.fun
+
+            # --- choosing best solution --- #
+            minindex = np.argmin(localval)
+            hypopt[:, i] = localsol[minindex]
+            ellopt = np.exp(2. * hypopt[:nx_dim, i])
+            sf2opt = np.exp(2. * hypopt[nx_dim, i])
+            sn2opt = np.exp(2. * hypopt[nx_dim + 1, i])  #+ 1e-8
+
+            # --- constructing optimal K --- #
+            Kopt = Cov_mat(kernel, X_norm, ellopt, sf2opt) + sn2opt * np.eye(n_point)
+            # --- inverting K --- #
+            invKopt += [np.linalg.solve(Kopt, np.eye(n_point))]
+
+        return hypopt, invKopt
+
+ ########################
+ # --- GP inference --- #
+ ########################
+    def GP_predictor(self, kernel='RBF'):# , X,Y):
+        """
+        Build symbolic CasADi predictors of the GP posterior.
+        Returns (meanfcn, varfcn): Functions x -> posterior mean / variance in
+        the original (un-normalized) units, one entry per output dimension.
+        """
+        nd, invKopt, hypopt = self.ny_dim, self.invKopt, self.hypopt
+        Ynorm, Xnorm = SX(DM(self.Y_norm)), SX(DM(self.X_norm))
+        ndat = Xnorm.shape[0]
+        nX, covSEfcn = self.nx_dim, self.covSEard(kernel=kernel)
+        stdX, stdY, meanX, meanY = SX(self.X_std), SX(self.Y_std), SX(self.X_mean), SX(self.Y_mean)
+        # nk = 12
+        x = SX.sym('x', nX)
+        # nk = X.shape[0]
+        xnorm = (x - meanX) / stdX  # normalize the query point like the training data
+        # Xnorm2 = (X - meanX)/stdX
+        # Ynorm2 = (Y - meanY)/stdY
+
+        k = SX.zeros(ndat)
+        # k2 = SX.zeros(ndat+nk)
+        mean = SX.zeros(nd)
+        mean2 = SX.zeros(nd)
+        var = SX.zeros(nd)
+        # Xnorm2 = SX.sym('Xnorm2',ndat+nk,nX)
+        # invKY2 = SX.sym('invKY2',ndat+nk,nd)
+
+        for i in range(nd):
+            invK = SX(DM(invKopt[i]))
+            hyper = SX(DM(hypopt[:, i]))
+            ellopt, sf2opt = exp(2 * hyper[:nX]), exp(2 * hyper[nX])
+            # covariance vector of the query against every training point
+            for j in range(ndat):
+                k[j] = covSEfcn(xnorm, Xnorm[j, :], ellopt, sf2opt)
+            # for j in range(ndat+nk):
+            #     k2[j] = covSEfcn(xnorm,Xnorm2[j,:],ellopt,sf2opt)
+
+            invKYnorm = mtimes(invK, Ynorm[:, i])
+            mean[i] = mtimes(k.T, invKYnorm)
+            # mean2[i] = mtimes(k2.T,invKY2[:,i])
+            var[i] = sf2opt - mtimes(mtimes(k.T, invK), k)  # prior variance minus explained part
+
+        meanfcn = Function('meanfcn', [x], [mean * stdY + meanY])
+        # meanfcn2 = Function('meanfcn2',[x,Xnorm2,invKY2],[mean2*stdY + meanY])
+        varfcn = Function('varfcn', [x], [var * stdY ** 2])
+        # varfcnsd = Function('varfcnsd',[x],[var])
+
+        return meanfcn, varfcn  # , meanfcn2, varfcnsd
+
+ def covSEard(self, kernel):
+ nx_dim = self.nx_dim
+ ell = SX.sym('ell', nx_dim)
+ sf2 = SX.sym('sf2')
+ x, z = SX.sym('x', nx_dim), SX.sym('z', nx_dim)
+ if kernel=='RBF':
+ dist = sum1((x - z)**2 / ell)
+ covSEfcn = Function('covSEfcn',[x,z,ell,sf2],[sf2*exp(-.5*dist)])
+ elif kernel =='Matern32':
+ dist = sqrt(sum1((x - z)**2 / ell))
+ t_1 = 1 + 3**0.5*dist
+ t_2 = np.exp(-3**0.5*dist)
+ cov_matrix = sf2 * t_1 * t_2
+ covSEfcn = Function('covSEfcn',[x,z,ell,sf2],[cov_matrix])
+ elif kernel == 'Matern52':
+ dist = sqrt(sum1((x - z)**2 / ell))
+ t_1 = 1 + 5**0.5* dist + 5/3*dist**2
+ t_2 = np.exp(-5**2*dist)
+ cov_matrix = sf2 * t_1 * t_2
+ covSEfcn = Function('covSEfcn',[x,z,ell,sf2],[cov_matrix])
+ else:
+ print('ERROR no kernel with name ', kernel)
+ covSEfcn = Function('covSEfcn',[x,z,ell,sf2],[0.])
+ return covSEfcn
+
+
+    def GP_inference_np(self, x):
+        '''
+        GP posterior at a single input x (original, un-normalized units).
+
+        If self.GP_casadi, evaluates the precompiled CasADi mean/variance
+        functions and returns scalars — note only element [0] of the flattened
+        output is returned (first output dimension). Otherwise loops over the
+        outputs in NumPy. Returns (mean, var) when self.var_out, else mean only.
+        '''
+
+        nx_dim = self.nx_dim
+        kernel, ny_dim = self.kernel, self.ny_dim
+        hypopt, Cov_mat = self.hypopt, self.Cov_mat
+        stdX, stdY, meanX, meanY = self.X_std, self.Y_std, self.X_mean, self.Y_mean
+        calc_cov_sample = self.calc_cov_sample
+        invKsample = self.invKopt
+        Xsample, Ysample = self.X_norm, self.Y_norm
+        var_out = self.var_out
+        if self.GP_casadi:
+            if var_out:
+                return self.meanfcn(x).toarray().flatten()[0], self.varfcn(x).toarray().flatten()[0]
+            else:
+                return self.meanfcn(x).toarray().flatten()[0]#.flatten()[0]
+        else:
+            # Sigma_w = self.Sigma_w (if input noise)
+
+            xnorm = (x - meanX) / stdX
+            mean = np.zeros(ny_dim)
+            var = np.zeros(ny_dim)
+            # --- Loop over each output (GP) --- #
+            for i in range(ny_dim):
+                invK = invKsample[i]
+                hyper = hypopt[:, i]
+                ellopt, sf2opt = np.exp(2 * hyper[:nx_dim]), np.exp(2 * hyper[nx_dim])
+
+                # --- determine covariance of each output --- #
+                k = calc_cov_sample(xnorm, Xsample, ellopt, sf2opt)
+                mean[i] = np.matmul(np.matmul(k.T, invK), Ysample[:, i])
+                var[i] = max(0., sf2opt - np.matmul(np.matmul(k.T, invK), k))  # clipped at 0: numerical error
+                # var[i] = sf2opt + Sigma_w[i,i]/stdY[i]**2 - np.matmul(np.matmul(k.T,invK),k) (if input noise)
+
+            # --- compute un-normalized mean --- #
+            mean_sample = mean * stdY + meanY
+            var_sample = var * stdY ** 2
+
+            if var_out:
+                return mean_sample, var_sample
+            else:
+                return mean_sample.flatten()[0]
+
+######################################
+# Central finite differences 5 points
+######################################
+
+def central_finite_diff5(f, x):
+ Delta = np.sqrt(np.finfo(float).eps) #step-size is taken as the square root of the machine precision
+ n = np.shape(x)[0]
+ x = x.reshape((n,1))
+ dX = np.zeros((n,1))
+ for j in range(n):
+ x_d_f2 = np.copy(x); x_d_f1 = np.copy(x)
+ x_d_b2 = np.copy(x); x_d_b1 = np.copy(x)
+
+ x_d_f2[j] = x_d_f2[j] + 2*Delta; x_d_b2[j] = x_d_b2[j] - 2*Delta
+ x_d_f1[j] = x_d_f1[j] + 1*Delta; x_d_b1[j] = x_d_b1[j] - 1*Delta
+
+ dX[j] = (f(x_d_b2.flatten()) - 8*f(x_d_b1.flatten()) + 8*f(x_d_f1.flatten()) - f(x_d_f2.flatten()))/(12*Delta)
+
+ return dX
+
+#########################################
+# Central second order finite differences
+#########################################
+
+def Second_diff_fxx(f, x):
+    '''
+    Calculating the Hessian via finite differences: https://en.wikipedia.org/wiki/Finite_difference
+    Returns the symmetric (n, n) Hessian approximation of scalar f at x.
+    NOTE(review): f is called here with (n, 1) column arrays, while
+    central_finite_diff5 flattens before calling — confirm f accepts both.
+    '''
+    Delta = 1e2*np.sqrt(np.finfo(float).eps) #step-size is taken as function of machine precision
+    n = np.shape(x)[0]
+    x = x.reshape((n,1))
+    Hxx = np.zeros((n,n))
+    for j in range(n):
+        # compute Fxx (diagonal elements) with the 3-point second-difference formula
+        x_d_f = np.copy(x)
+        x_d_b = np.copy(x)
+        x_d_f[j] = x_d_f[j] + Delta
+        x_d_b[j] = x_d_b[j] - Delta
+        Hxx[j,j] = (f(x_d_f) -2*f(x) + f(x_d_b))/Delta**2
+
+        for i in range(j+1,n):
+            # compute Fxy (off-diagonal elements) with the 4-point cross formula
+            # Fxy
+            x_d_fxfy = np.copy(x_d_f)
+            x_d_fxfy[i] = x_d_fxfy[i] + Delta
+            x_d_fxby = np.copy(x_d_f)
+            x_d_fxby[i] = x_d_fxby[i] - Delta
+            x_d_bxfy = np.copy(x_d_b)
+            x_d_bxfy[i] = x_d_bxfy[i] + Delta
+            x_d_bxby = np.copy(x_d_b)
+            x_d_bxby[i] = x_d_bxby[i] - Delta
+            Hxx[j,i] = (f(x_d_fxfy) - f(x_d_fxby) -
+                        f(x_d_bxfy) + f(x_d_bxby))/(4*Delta**2)
+            Hxx[i,j] = Hxx[j,i]  # symmetry
+
+    return Hxx
+
+
+class ITR_GP_RTO:
+
+ ###################################
+ # --- initializing ITR_GP_RTO --- #
+ ###################################
+
+ def __init__(self, obj_model, obj_system, cons_model, cons_system, x0,
+ Delta0, Delta_max, eta0, eta1, gamma_red, gamma_incr,
+ n_iter, data, bounds, obj_setting=2, noise=None, multi_opt=5, multi_hyper=10, TR_scaling=True,
+ TR_curvature=True, store_data=True, inner_TR=False, scale_inputs=False,GP_casadi=True,
+ model=None,kernel='Matern32'):
+ '''
+ data = ['int', bound_list=[[0,10],[-5,5],[3,8]], samples_number] <=> d = ['int', np.array([[-12, 8]]), 3]
+ data = ['data0', Xtrain]
+
+ Note 1: remember the data collected
+
+ '''
+ # internal variable definitions
+ self.model = model
+ self.GP_casadi = GP_casadi
+ self.obj, self.noise = obj_setting, noise
+ self.obj_model, self.n_iter = obj_model, n_iter
+ self.multi_opt, self.multi_hyper = multi_opt, multi_hyper
+ self.store_data, self.data, self.bounds = store_data, data, bounds
+ self.x0, self.obj_system, self.cons_model = x0, obj_system, cons_model
+ self.cons_system, self.TR_curvature = cons_system, TR_curvature
+ self.TR_scaling = TR_scaling
+ # TR adjustment variables
+ self.Delta_max, self.eta0, self.eta1 = Delta_max, eta0, eta1
+ self.gamma_red, self.gamma_incr = gamma_red, gamma_incr
+ self.inner_TR = inner_TR
+ self.scale_inputs = scale_inputs
+ self.kernel=kernel
+ # other definitions
+ if scale_inputs:
+ self.TRmat = np.linalg.inv(np.diag(bounds[:, 1] - bounds[:, 0]))
+ else:
+ self.TRmat = np.eye(len(x0))
+
+ self.ng = len(cons_model)
+ if noise ==None:
+ self.noise = [None]*self.ng
+ self.Delta0 = Delta0
+ # data creating
+ self.Xtrain, self.ytrain = self.data_handling()
+ self.ndim, self.ndat = self.Xtrain.shape[1], self.Xtrain.shape[0]
+ # alerts
+ if TR_curvature == True:
+ TR_scaling = True
+ print('note: remember constraints are set as positive, so they should be set as -g(x)')
+
+ #########################
+ # --- training data --- #
+ #########################
+
+ def data_handling(self):
+ '''
+ --- description ---
+ '''
+ data = self.data
+
+ # Training data
+ if data[0] == 'int':
+ print('- No preliminar data supplied, computing data by sobol sequence')
+ Xtrain = np.array([])
+ Xtrain, ytrain = self.compute_data(data, Xtrain)
+ return Xtrain, ytrain
+
+ elif data[0] == 'data0':
+ print('- preliminar data supplied, computing objective and constraint values')
+ Xtrain = data[1]
+ Xtrain, ytrain = self.compute_data(data, Xtrain)
+ return Xtrain, ytrain
+
+ else:
+ print('- error, data ragument ', data, ' is of wrong type; can be int or ')
+ return None
+
+ ##########################
+
+ # --- computing data --- #
+ ##########################
+
+    def compute_data(self, data, Xtrain):
+        '''
+        Evaluate the plant-model mismatch ytrain[ii, :] = system_ii(x) - model_ii(x)
+        for the objective (ii = 0) and every constraint, either at Sobol samples
+        generated from the bounds in data (when Xtrain is empty) or at the
+        supplied Xtrain rows. Returns (Xtrain, ytrain).
+        '''
+        # internal variable calls
+        obj_model, cons_model = self.obj_model, self.cons_model
+        data[1], cons_system = np.array(data[1]), self.cons_system
+        ng, obj_system = self.ng, self.obj_system
+
+        if Xtrain.shape == (0,):  # no data suplied
+            # data arrays
+            ndim = data[1].shape[0]
+            x_max, x_min = data[1][:, 1], data[1][:, 0]
+            ndata = data[2]
+            Xtrain = np.zeros((ndata, ndim))
+            ytrain = np.zeros((ng + 1, ndata))
+            funcs_model = [obj_model] + cons_model
+            funcs_system = [obj_system] + cons_system
+
+            for ii in range(ng + 1):
+                # computing data (note: the same Sobol samples are regenerated for
+                # each output ii, so Xtrain is identical across outputs)
+                fx = np.zeros(ndata)
+                xsmpl = sobol_seq.i4_sobol_generate(ndim, ndata)  # xsmpl.shape = (ndat,ndim)
+
+                # computing Xtrain
+                for i in range(ndata):
+                    xdat = x_min + xsmpl[i, :] * (x_max - x_min)
+                    Xtrain[i, :] = xdat
+                for i in range(ndata):
+                    fx[i] = funcs_system[ii](np.array(Xtrain[i, :])) - funcs_model[ii](np.array(Xtrain[i, :]))
+                # not meant for multi-output
+                ytrain[ii, :] = fx
+
+        else:  # data suplied
+            # data arrays
+            ndim = Xtrain.shape[1]
+            ndata = Xtrain.shape[0]
+            ytrain = np.zeros((ng + 1, ndata))
+            funcs_model = [obj_model] + cons_model
+            funcs_system = [obj_system] + cons_system
+
+            for ii in range(ng + 1):
+                fx = np.zeros(ndata)
+
+                for i in range(ndata):
+                    fx[i] = funcs_system[ii](np.array(Xtrain[i, :])) - funcs_model[ii](np.array(Xtrain[i, :]))
+                # not meant for multi-output
+                ytrain[ii, :] = fx
+
+        Xtrain = np.array(Xtrain)
+        ytrain = np.array(ytrain)
+
+        return Xtrain.reshape(ndata, ndim, order='F'), ytrain.reshape(ng + 1, ndata, order='F')
+
+ ##############################
+ # --- GP as obj function --- #
+ ##############################
+
+ def GP_obj_f(self, d, GP, xk):
+ '''
+ define exploration - explotation strategy
+ '''
+ obj = self.obj # setting on what the optimizer will do
+ obj_model = self.obj_model
+
+ # internal variable calls
+ if obj == 1:
+ obj_f = GP.GP_inference_np(xk + d)
+ return obj_model((xk + d).flatten()) + obj_f[0]
+
+ elif obj == 2:
+ obj_f = GP.GP_inference_np(xk + d)
+ return obj_model((xk + d).flatten()) + obj_f[0] - 3 * np.sqrt(obj_f[1])
+ elif obj == 3:
+ fs = self.obj_max
+ obj_f = GP.GP_inference_np(xk + d)
+ mean = obj_model((xk + d).flatten()) + obj_f[0]
+ Delta = fs - mean
+ sigma = np.sqrt(obj_f[1])
+ # Delta_p = np.max(mean(X) - fs)
+ if sigma == 0.:
+ Z = 0.
+ else:
+ Z = (Delta) / sigma
+ EI = -(sigma * norm.pdf(Z) + Delta * norm.cdf(Z))
+ return EI # -(GP.GP_inference_np(X)[0][0] + 3 * GP.GP_inference_np(X)[1][0])#
+
+ else:
+ print('error, objective for GP not specified')
+
+ ####################################
+ # --- System constraints check --- #
+ ####################################
+
+ def System_Cons_check(self, x):
+ '''
+ This checks if all the constraints of the system are satisfied
+ '''
+ # internal calls
+ cons_system = self.cons_system
+
+ cons = []
+ for con_i in range(len(cons_system)):
+ cons.append(cons_system[con_i](x))
+ cons = np.array(cons)
+ satisfact = cons > -1e-8
+ satisfact = satisfact.all()
+
+ return satisfact
+
+ ###################################
+ # --- Trust Region Constraint --- #
+ ###################################
+
+ def TR_con(self, d):
+ '''
+ TR constraint
+ '''
+ if self.TR_scaling:
+ d = d.flatten()
+ d = d / self.Broyd_norm.flatten()
+
+ return self.Delta0 ** 2 - d @ d.T
+
+ #############################################
+ # --- Curvature Trust Region Constraint --- #
+ #############################################
+
+ def Curvature_TR_con(self, H_norm_vec, d):
+ '''
+ TR constraint
+ '''
+ return self.Delta0 ** 2 - d * H_norm_vec @ d.T
+
+ ##############################
+ # --- Nearest PSD matrix --- #
+ ##############################
+
+ def Nearest_PSDM(self, A_mat):
+ '''
+ Computes the nearest Symmetric PSD matrix for the Frobenius norm.
+ Since Matrix is Symmetric, we could even pass eigenvalues and eigenvecs
+ '''
+ eps = 1e-4 # notice that there is no minimum, just an infimum. Positive definite matrices are not a closed set.
+ B_mat = 0.5 * (A_mat + A_mat.T) # not necessary if matrix is already symmetric
+ eigVals, Q = np.linalg.eig(B_mat) # eigendecomposition
+ eigVals[np.argwhere(eigVals < 0)] = eps # take eigVals+ = max(eigVals,0)
+ L_mat = np.diag(eigVals)
+ A_PSD = Q @ L_mat @ Q.T
+
+ return A_PSD
+
+ ###################################
+ # --- Mismatch Constraint --- #
+ ##################################
+
+ def mistmatch_con(self, xk, GP_con_i, cons_model_i, d):
+ '''
+ mistmatch constraint
+ '''
+ return cons_model_i((xk + d).flatten()) + GP_con_i.GP_inference_np((xk + d).flatten())
+
+ ##########################
+ # --- Inner TR shape --- #
+ ##########################
+
    def Inner_TR_shape(self, xk, GP_obj, GP_con):
        '''
        THIS IS WRONG, WE SHOULD ADD A THE MODEL INTO THE computation of this
        Shaping the inner TR constraint

        Builds Q_mat whose rows are the finite-difference gradients of the
        objective GP and each constraint GP at xk, each row divided by the
        GP's noise std sqrt(exp(2 * hypopt[ndim + 1]) * Y_std), then forms
        Q_mat.T @ Q_mat.

        NOTE(review): the result is discarded — the method returns None.
        GP_obj.var_out is switched off during differencing (mean-only
        inference) and restored afterwards.
        '''
        # inner function calls
        ndim, n_outputs, xk = self.ndim, self.n_outputs, xk.flatten()
        # creating empty matrix (one row per GP output)
        Q_mat = np.zeros((n_outputs, ndim))
        # obtaining dy/dx vector of the objective GP by 5-point central differences
        GP_obj.var_out = False
        Q_mat[0, :] = central_finite_diff5(GP_obj.GP_inference_np, xk).flatten()
        Q_mat[0, :] = Q_mat[0, :] / np.sqrt(np.exp(2 * GP_obj.hypopt[ndim + 1]) * GP_obj.Y_std)
        # same for each constraint GP
        for i_c in range(n_outputs - 1):
            Q_mat[i_c + 1, :] = central_finite_diff5(GP_con[i_c].GP_inference_np, xk).flatten()
            Q_mat[i_c + 1, :] = Q_mat[i_c + 1, :] / np.sqrt(
                np.exp(2 * GP_con[i_c].hypopt[ndim + 1]) * GP_con[i_c].Y_std)

        # V_mat = np.linalg.inv(Q_mat.T@Q_mat)
        Q_mat = Q_mat.T @ Q_mat
        GP_obj.var_out = True
+
+ ###################################
+ # --- Inner Region Constraint --- #
+ ###################################
+
+ def Inner_TR_con(self, inner_TR_rad, d):
+ '''
+ TR inner constraint
+ '''
+ print('======= this is not implemented =======')
+ return d @ d.T - inner_TR_rad ** 2
+
+ ##########################
+ # --- Broyden Update --- #
+ ##########################
+
+ def Broyden_update(self, y_new, y_past, x_new, x_past):
+ '''
+ Updating the gradient by a Broyden update
+ '''
+ Broyd = self.Broyd
+ x_vec = (x_new - x_past).T
+ y_vec = (y_new - y_past).T
+ Broyd = Broyd + (y_vec - Broyd.T @ x_vec) / (x_vec.T @ x_vec) * x_vec
+
+ return Broyd
+
+ #######################
+ # --- TR function --- #
+ #######################
+
+ def Adjust_TR(self, Delta0, xk, xnew, GP_obj, i_rto):
+ '''
+ Adjusts the TR depending on the rho ratio between xk and xnew
+ '''
+ Delta_max, eta0, eta1 = self.Delta_max, self.eta0, self.eta1
+ gamma_red, gamma_incr = self.gamma_red, self.gamma_incr
+ obj_system = self.obj_system
+
+ # --- compute rho --- #
+ plant_i = obj_system(np.array(xk).flatten())
+ plant_iplus = obj_system(np.array(xnew).flatten())
+ rho = (plant_i - plant_iplus) / (
+ (self.obj_model((xk).flatten()) + GP_obj.GP_inference_np(np.array(xk).flatten())[0]) -
+ (self.obj_model((xnew).flatten()) + GP_obj.GP_inference_np(np.array(xnew).flatten())[0]))
+
+ # --- Update TR --- #
+ # if plant_iplus < plant_i:
+ # if rho >= eta0:
+ # if rho >= eta1:
+ # Delta0 = min(Delta0 * gamma_incr, Delta_max) # here we should add gradient !!
+ # elif rho < eta1:
+ # Delta0 = Delta0 * gamma_red
+ # # Note: xk = xnew this is done later in the code
+ # if rho < eta0:
+ # # xnew = xk
+ # # self.backtrack_l[i_rto] = True
+ # # print('rho= eta0:
+ if rho >= eta1:
+ Delta0 = min(Delta0 * gamma_incr, Delta_max) # here we should add gradient !!
+ elif rho < eta1:
+ Delta0 = Delta0 * gamma_red
+ # Note: xk = xnew this is done later in the code
+ if rho < eta0:
+ xnew = xk
+ self.backtrack_l[i_rto] = True
+ print('rho S=T’T
+
+ unif_ell = T.H * unif_sph # Hypersphere to hyperellipsoid mapping
+
+ # Translation and scaling about the center
+ z_fa = (unif_ell * np.sqrt(Gamma_Threshold) + (z_hat * np.ones((1, m_FA))))
+ for i in range(200):
+ if self.TR_con(z_fa[:, i].reshape((1, -1))) >= 0:
+ d_init = z_fa[:, i]
+ break
+
+ return np.array(d_init).flatten()
+
+ ####################
+ # --- inner TR --- #
+ ####################
+
+ def Inner_TR_f(self, GP_con, GP_obj):
+ '''
+ This function samples randomly withing an ellipse self.Delta0
+ '''
+ ndim = self.ndim
+ sn2_l = []
+
+ # compute paraneter for GP_obj
+ sn2_l.append(np.exp(2 * GP_obj.hypopt[ndim + 1]) * GP_obj.Y_std)
+
+ for i in range(len(GP_con)):
+ sn2_l.append(np.exp(2 * GP_con[i].hypopt[ndim + 1]) * GP_con[i].Y_std)
+
+ print('max(sn2_l) = ', max(sn2_l)[0])
+
+ return max(sn2_l)[0]
+
+ ########################################
+ # --- Constrain Violation and step --- #
+ ########################################
+
    def Step_constraint(self, Delta0, xk, xnew, GP_obj, i_rto):
        '''
        Accept or reject the candidate step, then size the trust region.

        If the plant constraints are violated at xnew, the step is rejected
        (xnew is reset to xk), iteration i_rto is flagged in backtrack_l, and
        the current radius is kept.  Otherwise Adjust_TR decides acceptance
        and grows/shrinks the radius from the rho ratio.

        Returns:
            (Delta0, xnew, xk) — possibly with xnew reset to xk.
        '''
        Adjust_TR = self.Adjust_TR

        if not self.System_Cons_check(np.array(xnew).flatten()):
            # infeasible candidate: backtrack to the current iterate
            xnew = xk
            self.backtrack_l[i_rto] = True
            print('Constraint violated -- backtracking')
            Delta0 = Delta0 # radius intentionally unchanged here (shrink factor disabled)

            # if Delta0 < 0.1:
            #     Delta0 = 0.1

            return Delta0 , xnew, xk

        else:
            # feasible candidate: let the rho-ratio logic size the TR
            Delta0, xnew, xk = Adjust_TR(Delta0, xk, xnew, GP_obj, i_rto)
            # if Delta0 < 0.1:
            #     Delta0 = 0.1#Delta0
            return Delta0, xnew, xk
+
+ ########################################
+ # --- Constraint GP construction --- #
+ ########################################
+
+ def GPs_construction(self, xk, Xtrain, ytrain, ndatplus, H_norm=0.):
+ '''
+ Constructs a GP for every cosntraint
+ '''
+ # internal calls
+ ndat, multi_hyper, ng, ndim = self.ndat + ndatplus, self.multi_hyper, self.ng, self.ndim
+ cons_model, mistmatch_con = self.cons_model, self.mistmatch_con
+ TR_curvature, Curvature_TR_con = self.TR_curvature, self.Curvature_TR_con
+
+ # --- objective function GP --- #
+ self.obj_max = np.max(ytrain[0, :].reshape(ndat, 1))
+ if self.noise[0] == None:
+ GP_obj = GP_model(Xtrain, ytrain[0, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=True)
+ else:
+ GP_obj = GP_model(Xtrain, ytrain[0, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=True, noise = self.noise[0])
+ GP_con = [0] * ng # Gaussian processes that output mistmatch (functools)
+ GP_con_2 = [0] * ng # Constraints for the NLP
+
+ if TR_curvature:
+ GP_con_curv = [0] * (ndim) # TR ellipsoid (functools)
+ GP_con_f = [0] * (ng + ndim) # Constraint plus ellipsoids
+ else:
+ GP_con_f = [0] * (ng + 1)
+
+ for igp in range(ng):
+ if self.noise[0] == None:
+ GP_con[igp] = GP_model(Xtrain, ytrain[igp + 1, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=False)
+ else:
+ GP_con[igp] = GP_model(Xtrain, ytrain[igp + 1, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=False, noise = self.noise[igp+1])
+
+ GP_con_2[igp] = functools.partial(mistmatch_con, xk, GP_con[igp],
+ cons_model[igp]) # partially evaluating a function
+ GP_con_f[igp] = {'type': 'ineq', 'fun': GP_con_2[igp]}
+ if TR_curvature:
+ for jdim in range(ndim):
+ GP_con_curv[jdim] = functools.partial(Curvature_TR_con, H_norm[:, jdim].reshape(1, ndim))
+ GP_con_f[ng + jdim] = {'type': 'ineq', 'fun': GP_con_curv[jdim]}
+ else:
+ GP_con_f[igp + 1] = {'type': 'ineq', 'fun': self.TR_con}
+ if self.inner_TR:
+ inner_TR_val = Inner_TR_f(GP_con, GP_obj)
+ inner_TR_ff = functools.partial(self.Inner_TR_con, self.inner_TR_val)
+ GP_con_f.append({'type': 'ineq', 'fun': inner_TR_ff})
+
+ return GP_obj, GP_con_f, GP_con
+
+ #############################################################
+ # --- Update derivatives and Hessian (Broyden and BFGS) --- #
+ #############################################################
+
    def Update_derivatives(self, xk, xnew, H_inv, H_norm):
        '''
        H_norm is passed in case xk==xnew, then the old value of H_norm is passed back

        Broyden-updates the stored gradient estimate self.Broyd from the new
        plant objective evaluations, then BFGS-updates the inverse Hessian
        H_inv and refreshes self.Broyd_norm (unit-norm H_inv @ Broyd step
        direction).  With TR_curvature on, also recomputes the determinant-
        normalised Hessian H_norm for the ellipsoidal trust region.

        NOTE(review): the "moved" test compares sum(|xk|) with sum(|xnew|),
        so distinct points with equal L1 norms are treated as unchanged —
        confirm this is acceptable.

        Returns:
            (H_norm, H_inv), updated or passed through unchanged.
        '''
        # internal function calls
        TR_curvature, obj_system = self.TR_curvature, self.obj_system
        Broyden_update, Nearest_PSDM = self.Broyden_update, self.Nearest_PSDM

        # --- Broyden update --- #
        if np.sum(np.abs(xk)) != np.sum(np.abs(xnew)):
            y_iplus = obj_system(np.array(xnew[:]).flatten())
            y_i = obj_system(np.array(xk[:]).flatten())
            Broyd_past = np.copy(self.Broyd)
            self.Broyd = Broyden_update(y_iplus, y_i, xnew, xk)
            # --- BFGS for the inverse Hessian approx (Sherman-Morrison form) --- #
            skk = (xnew - xk).T
            ykk = self.Broyd - Broyd_past
            rhokk = 1. / (ykk.T @ skk)
            Imatrix = np.eye(skk.shape[0])
            H_inv = (Imatrix - rhokk * skk @ ykk.T) @ H_inv @ (Imatrix - rhokk * ykk @ skk.T) + rhokk * skk @ skk.T
            # BFGS preserves PSD only under a curvature condition; repair if violated
            if not np.all(np.linalg.eigvals(H_inv) >= 0):
                print('H_inv not PSD, making it PSD')
                H_inv = Nearest_PSDM(H_inv)
            # --- normalized quasi-Newton step direction --- #
            self.Broyd_norm = H_inv @ self.Broyd
            Broyd_2norm = np.linalg.norm(self.Broyd_norm)
            self.Broyd_norm = self.Broyd_norm / Broyd_2norm
            if TR_curvature == True: # if ellipse TR WITH rotation
                H_matrix = np.linalg.inv(H_inv)
                H_det = np.linalg.det(H_matrix)
                H_norm = H_matrix / H_det
                # print('PSD H_norm? ',np.all(np.linalg.eigvals(H_norm) >= 0))
                # print('PSD H_inv? ',np.all(np.linalg.eigvals(H_inv) >= 0))
        else:
            print('xk == xnew')

        return H_norm, H_inv
+
+ ########################################
+ # --- Constraint GP construction for casadi--- #
+ ########################################
+
+ def GPs_construction_ca(self, xk, Xtrain, ytrain, ndatplus, H_norm=0.):
+ '''
+ Constructs a GP for every cosntraint
+ '''
+ # internal calls
+ ndat, multi_hyper, ng, ndim = self.ndat + ndatplus, self.multi_hyper, self.ng, self.ndim
+ cons_model, mistmatch_con = self.cons_model, self.mistmatch_con
+ TR_curvature, Curvature_TR_con = self.TR_curvature, self.Curvature_TR_con
+
+ # --- objective function GP --- #
+ self.obj_max = np.max(ytrain[0, :].reshape(ndat, 1))
+ if self.noise[0] == None:
+ GP_obj = GP_model(Xtrain, ytrain[0, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=True, GP_casadi=True)
+ else:
+ GP_obj = GP_model(Xtrain, ytrain[0, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=True, noise = self.noise[0], GP_casadi=True)
+ GP_con = [0] * ng # Gaussian processes that output mistmatch (functools)
+ GP_con_2 = [0] * ng # Constraints for the NLP
+
+ if TR_curvature:
+ GP_con_curv = [0] * (ndim) # TR ellipsoid (functools)
+ GP_con_f = [0] * (ng + ndim) # Constraint plus ellipsoids
+ else:
+ GP_con_f = [0] * (ng + 1)
+
+ for igp in range(ng):
+ if self.noise[0] == None:
+ GP_con[igp] = GP_model(Xtrain, ytrain[igp + 1, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=False, GP_casadi=True)
+ else:
+ GP_con[igp] = GP_model(Xtrain, ytrain[igp + 1, :].reshape(ndat, 1), self.kernel,
+ multi_hyper=multi_hyper, var_out=False,
+ noise = self.noise[igp+1], GP_casadi=True)
+
+ GP_con_2[igp] = functools.partial(mistmatch_con, xk, GP_con[igp],
+ cons_model[igp]) # partially evaluating a function
+ GP_con_f[igp] = {'type': 'ineq', 'fun': GP_con_2[igp]}
+ if TR_curvature:
+ for jdim in range(ndim):
+ GP_con_curv[jdim] = functools.partial(Curvature_TR_con, H_norm[:, jdim].reshape(1, ndim))
+ GP_con_f[ng + jdim] = {'type': 'ineq', 'fun': GP_con_curv[jdim]}
+ else:
+ GP_con_f[igp + 1] = {'type': 'ineq', 'fun': self.TR_con}
+ if self.inner_TR:
+ inner_TR_val = Inner_TR_f(GP_con, GP_obj)
+ inner_TR_ff = functools.partial(self.Inner_TR_con, self.inner_TR_val)
+ GP_con_f.append({'type': 'ineq', 'fun': inner_TR_ff})
+
+ return GP_obj, GP_con_f, GP_con
+
+ ######################################
+ # --- Real-Time Optimization alg --- #
+ ######################################
+
    def construct_opt_ca(self, d_init, uk, GP_obj, GP_con, model):
        '''
        Assemble the CasADi NLP for one RTO step of the bio process.

        Multiple shooting over model.nk intervals: the decision vector w
        stacks states x_i and controls u_i; continuity x_{i+1} = f(x_i, u_i)
        is enforced through tight two-sided bounds on g.  Each path
        constraint gg[i] is corrected by the GP mean of its mismatch, and a
        ball trust region of radius self.Delta0 around uk is imposed on the
        controls.  The objective follows self.obj (1: model + GP mean,
        2: LCB with 3-sigma, 3: negative expected improvement vs
        self.obj_min).

        Returns:
            (problem, w0, lbw, ubw, lbg, ubg, trajectories, opts) ready for
            casadi.nlpsol; `trajectories` maps a solution w to the stacked
            controls/states/constraint terms for post-processing.
        '''
        u_init = d_init.reshape(uk.shape) + uk
        x0 = model.x0  # initial state supplied by the model
        f = model.bio_model_ca()
        x = MX.sym('x_0',3)
        w = []    # NLP decision variables
        w0 = []   # initial guess
        lbw = []  # lower variable bounds
        ubw = []  # upper variable bounds

        g = []    # NLP constraint expressions
        lbg = []
        ubg = []
        U = []    # control symbols, in order
        w += [x]

        lbw.extend([0]*model.nd)
        ubw.extend([1000]*model.nd)
        w0.extend(x0)


        # pin the first state to x0 via tight two-sided bounds
        g += [x - x0]
        lbg.extend([-0.00001]*model.nd)
        ubg.extend([0.00001]*model.nd)
        X = []   # propagated states
        gg = []  # model path-constraint expressions (2 per interval)
        xx = []  # GP mean corrections (kept for inspection)
        yy = []  # model objective terms (kept for inspection)
        zz = []  # GP objective corrections (kept for inspection)
        for i in range(model.nk):
            u = MX.sym('u_'+str(i), model.nu)
            w += [u]
            lbw.extend([0] * model.nu)
            ubw.extend([1] * model.nu)
            w0.extend(u_init[model.nu*i:model.nu*i+model.nu])
            U += [u]
            x1 = f(x,u)* (not(model.empty)).real
            x = MX.sym('x_'+str(i+1),model.nd)
            X += [x1]
            # shooting-gap constraint: f(x_i, u_i) - x_{i+1} ~ 0
            g += [x1-x]
            lbg.extend([-0.00001]*model.nd)
            ubg.extend([0.00001]*model.nd)

            w += [x]

            lbw.extend([0]*model.nd)
            ubw.extend([inf]*model.nd)
            w0.extend(x0)
            gg += [model.bio_con1_ca_f(x1)]
            gg += [model.bio_con2_ca_f(x1)]
        Sum_slack = 0
        for i in range(model.nk*model.nu):

            # GP surrogate for the i-th path-constraint mismatch
            mean, var = GP_con[i].GP_predictor(kernel=self.kernel)
            slack = MX.sym('s_'+str(i))
            # w += [slack]
            # w0.extend([0.])
            # lbw.extend([0])
            # ubw.extend([inf])
            # Sum_slack += (slack**2)
            # model constraint corrected by the GP mismatch mean, kept >= 0
            g += [mean(vertcat(*U))+ gg[i]]# + slack]
            xx+= [mean(vertcat(*U))]
            lbg.extend([0.])
            ubg.extend([inf])

        # ball trust region on the controls around uk: ||U - uk||^2 <= Delta0^2
        g += [sum1((vertcat(*U)-uk.reshape(-1,1))**2)-self.Delta0 ** 2]
        lbg.extend([-inf])
        ubg.extend([0.])
        mean_obj, var_obj = GP_obj.GP_predictor(kernel=self.kernel)
        if self.obj == 1:
            obj = model.bio_obj_ca_f(X[-1]) + mean_obj(vertcat(*U))
        elif self.obj == 2:
            obj = model.bio_obj_ca_f(X[-1]) + mean_obj(vertcat(*U)) \
                  - 3 * sqrt(var_obj(vertcat(*U)))  # + 1e4*Sum_slack
        elif self.obj == 3:
            # negative expected improvement w.r.t. the best plant value so far
            fs = self.obj_min
            obj_f = mean_obj(vertcat(*U))
            mean = model.bio_obj_ca_f(X[-1])+ obj_f
            Delta = fs - mean - 0.01
            sigma = sqrt(var_obj(vertcat(*U)))
            # Delta_p = np.max(mean(X) - fs)
            # if sigma == 0.:
            #     Z = 0.
            # else:
            Z = (Delta) / sigma
            # closed-form standard-normal pdf/cdf in CasADi symbols
            norm_pdf = exp(-Z**2/2)/sqrt(2*np.pi)
            norm_cdf = 0.5 * (1+ erf(Z/sqrt(2)))
            obj = -(sigma * norm_pdf + Delta * norm_cdf)
        yy += [model.bio_obj_ca_f(X[-1])]
        zz += [mean_obj(vertcat(*U))]
        opts = {}
        opts["expand"] = True
        opts["ipopt.print_level"] = 0
        opts["ipopt.max_iter"] = 1000
        opts["ipopt.tol"] = 1e-8
        opts["calc_lam_p"] = False
        opts["calc_multipliers"] = False
        opts["ipopt.print_timing_statistics"] = "no"
        opts["print_time"] = False
        problem = {'f': obj, 'x': vertcat(*w), 'g': vertcat(*g)}
        trajectories = Function('trajectories', [vertcat(*w)]
                                , [horzcat(*U), horzcat(*X), horzcat(*gg), horzcat(*xx), horzcat(*yy), horzcat(*zz)],
                                ['w'], ['x', 'u' ,'gg', 'gpc', 'objm', 'gpobj'])

        return problem, w0, lbw, ubw, lbg, ubg, trajectories, opts
+
+
+
+ def find_min_so_far(self,funcs_system, X_opt):
+ # ynew[0,ii] = funcs[ii](np.array(xnew[:]).flatten())
+ min = np.inf#-1.
+ for i in range(len(X_opt)):
+ y= funcs_system[0](np.array(X_opt[i]).flatten())
+ if y< min:
+ min =y
+ return min
+ ######################################
+ # --- Real-Time Optimization alg --- #
+ ######################################
+
    def RTO_routine(self):
        '''
        Main real-time-optimization loop.

        Starting from x0, repeats for n_iter iterations: fit GP surrogates to
        the plant-model mismatch, solve the trust-region NLP by multistart
        (CasADi/ipopt via construct_opt_ca), evaluate the plant at the
        candidate, then adjust the trust region (Step_constraint) and, when
        TR_scaling is on, the Broyden/BFGS derivative estimates.

        Returns:
            (X_opt, y_opt, TR_l, xnew, backtrack_l), with TR_l_angle inserted
            before xnew when TR_curvature is on.
        '''
        # internal variable calls
        obj_model, cons_model = self.obj_model, self.cons_model
        store_data, Xtrain, ytrain = self.store_data, self.Xtrain, self.ytrain
        multi_hyper, n_iter = self.multi_hyper, self.n_iter
        ndim, ndat, multi_opt = self.ndim, self.ndat, self.multi_opt
        GP_obj_f, bounds, ng = self.GP_obj_f, self.bounds, self.ng
        multi_opt, Delta0, x0 = self.multi_opt, self.Delta0, self.x0
        obj_system, cons_system = self.obj_system, self.cons_system
        Adjust_TR, Step_constraint = self.Adjust_TR, self.Step_constraint
        TR_curvature, mistmatch_con = self.TR_curvature, self.mistmatch_con
        TR_scaling, Broyden_update = self.TR_scaling, self.Broyden_update
        Curvature_TR_con, Ball_sampling = self.Curvature_TR_con, self.Ball_sampling
        inner_TR, Inner_TR_f, = self.inner_TR, self.Inner_TR_f
        Inner_TR_con, GPs_construction = self.Inner_TR_con, self.GPs_construction
        Update_derivatives = self.Update_derivatives
        self.n_outputs, Inner_TR_shape = len(cons_model) + 1, self.Inner_TR_shape

        # variable definitions
        funcs_model = [obj_model] + cons_model
        funcs_system = [obj_system] + cons_system
        self.backtrack_l = [False] * n_iter  # per-iteration rejection flags

        # Initialize B~dF/dx and H~dF^2/dx^2 as gradient and derivatives from the model
        if TR_scaling:
            self.Broyd = central_finite_diff5(obj_model, x0)
            H_matrix = Second_diff_fxx(obj_model, x0)
            H_inv = np.linalg.inv(H_matrix)
            step_dir = H_inv @ self.Broyd
            step_dir_norm = np.linalg.norm(step_dir)
            self.Broyd_norm = step_dir / step_dir_norm
            if TR_curvature:  # if ellipse TR WITH rotation
                H_det = np.linalg.det(H_matrix)
                H_norm = H_matrix / H_det

        # --- building GP models from existing data --- #
        # evaluating initial point: mismatch = plant minus model at x0
        xnew = x0
        Xtrain = np.vstack((Xtrain, xnew))
        ynew = np.zeros((1, ng + 1))
        for ii in range(ng + 1):
            ynew[0, ii] = funcs_system[ii](np.array(xnew[:]).flatten()) - funcs_model[ii](np.array(xnew[:]).flatten())
        ytrain = np.hstack((ytrain, ynew.T))
        # --- building GP models for first time --- #
        if TR_scaling and TR_curvature:
            GP_obj, GP_con_f, GP_con = GPs_construction(xnew, Xtrain, ytrain, 1, H_norm)
        else:
            GP_obj, GP_con_f, GP_con = GPs_construction(xnew, Xtrain, ytrain, 1)

        # renaming data
        X_opt = np.copy(Xtrain)
        y_opt = np.copy(ytrain)
        self.obj_min = self.find_min_so_far(funcs_system, X_opt)

        # --- TR lists (radius, and ellipse angle when applicable, per iteration) --- #
        TR_l = ['error'] * (n_iter + 1)
        if TR_curvature:
            TR_l_angle = ['error'] * (n_iter + 1)
            w, v = np.linalg.eig(H_inv)
            max_idx = np.argmax(w)
            TR_l_angle[0] = np.arctan(v[max_idx][1] / v[max_idx][0]) * 180. / np.pi
            TR_l[0] = (2. * self.Delta0 / np.sqrt(np.abs(np.diag(H_norm)))).tolist()
        elif TR_scaling:
            TR_l[0] = (2. * self.Delta0 * self.Broyd_norm).tolist()
        else:
            TR_l[0] = self.Delta0

        # --- rto -- iterations --- #
        options = {'disp': False, 'maxiter': 10000}  # solver options
        lb, ub = bounds[:, 0], bounds[:, 1]
        for i_rto in range(n_iter):
            print('============================ RTO iteration ', i_rto)

            # --- optimization -- multistart --- #
            localsol = [0.] * multi_opt      # candidate solutions per restart
            localval = np.zeros((multi_opt)) # candidate objective values per restart
            xk = xnew
            TRb = (bounds - xk.reshape(ndim, 1, order='F'))
            # TODO: sampling on ellipse, not only Ball
            for j in range(multi_opt):
                if TR_curvature or self.scale_inputs:
                    # d_init = Ellipse_sampling(ndim)
                    d_init = self.Ellipse_sampling(ndim)
                else:
                    d_init = self.Ball_sampling(ndim)  # random sampling in a ball

                # GP optimization (scipy SLSQP path kept for reference)
                # res = minimize(GP_obj_f, d_init, args=(GP_obj, xk), method='SLSQP',
                #                options=options, bounds=(TRb), constraints=GP_con_f, tol=1e-12)
                problem, w0, lbw, ubw, lbg, ubg,trajectories, opts = self.construct_opt_ca(d_init, xk.reshape((-1,1)), GP_obj, GP_con, self.model)
                solver = nlpsol('solver', 'ipopt', problem, opts)  # ipopt with opts from construct_opt_ca
                # Function to get x and u trajectories from w

                sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)

                u, _, _, _, _, _ = trajectories(sol['x'])

                uu = np.array(u).reshape(self.ndim, 1, order='F')

                res = uu.reshape((-1,))# - xk.reshape((-1,))
                localsol[j] = res
                # only count converged restarts; others are marked infeasible
                if solver.stats()['return_status'] == 'Solve_Succeeded' or solver.stats()['return_status'] == 'Solved To Acceptable Level' :
                    localval[j] = sol['f']
                else:
                    localval[j] = np.inf
            if np.min(localval) == np.inf:
                print('warming, no feasible solution found')

                # no restart converged: perturb the current iterate instead
                xnew = xk*(1+0.01*np.random.randn())
            else:
                # selecting best point
                minindex = np.argmin(localval)  # choosing best solution
                xnew = localsol[minindex]# + xk  # selecting best solution

            # re-evaluate best point (could be done more efficiently - no re-evaluation)
            xnew = np.array([xnew]).reshape(1, ndim)
            ynew = np.zeros((1, ng + 1))
            for ii in range(ng + 1):
                # ynew[0,ii] = funcs[ii](np.array(xnew[:]).flatten())
                ynew[0, ii] = funcs_system[ii](np.array(xnew[:]).flatten()) - funcs_model[ii](
                    np.array(xnew[:]).flatten())
            print('New Objective: ', -funcs_system[0](np.array(xnew[:]).flatten()), 'Delta: ', self.Delta0)
            # adding new point to sample
            X_opt = np.vstack((X_opt, xnew))
            y_opt = np.hstack((y_opt, ynew.T))
            self.obj_min = self.find_min_so_far(funcs_system, X_opt)
            # --- Update TR --- #
            self.Delta0, xnew, xk = Step_constraint(self.Delta0, xk, xnew, GP_obj, i_rto)  # adjust TR

            # --- Update derivatives and Hessian (Broyden and BFGS) --- #
            if TR_scaling:
                H_norm, H_inv = Update_derivatives(xk, xnew, H_inv, H_norm)

            # --- re-training GP on all collected data --- #
            if TR_scaling and TR_curvature:
                GP_obj, GP_con_f, GP_con = GPs_construction(xnew, X_opt, y_opt, 2 + i_rto, H_norm)
            else:
                GP_obj, GP_con_f, GP_con = GPs_construction(xnew, X_opt, y_opt, 2 + i_rto)

            # --- TR listing --- #
            if not TR_scaling:  # if normal TR
                TR_l[i_rto + 1] = self.Delta0
                if inner_TR:
                    # NOTE(review): inner_TR_val is never assigned in this method
                    # (GPs_construction computes it internally) — this line would
                    # raise NameError if reached; confirm intent.
                    print('ratio of inner to outer TR = ', self.Delta0 / (inner_TR_val + 1e-8))
            else:
                if TR_curvature == True:  # if ellipse TR WITH rotation
                    TR_l[i_rto + 1] = (2. * self.Delta0 / np.sqrt(np.abs(np.diag(H_norm)))).tolist()
                    # NOTE(review): H_matrix is the one from initialization, not the
                    # BFGS-updated Hessian — the logged angle may be stale; verify.
                    w, v = np.linalg.eig(H_matrix)
                    max_idx = np.argmax(w)
                    TR_l_angle[i_rto + 1] = np.arctan(v[max_idx][1] / v[max_idx][0]) * 180. / np.pi
                else:
                    TR_l[i_rto + 1] = (2. * self.Delta0 * self.Broyd_norm).tolist()

        # Inner_TR_shape(xk, GP_obj, GP_con)
        # --- output data --- #
        if not TR_curvature:
            return X_opt, y_opt, TR_l, xnew, self.backtrack_l
        else:
            return X_opt, y_opt, TR_l, TR_l_angle, xnew, self.backtrack_l
\ No newline at end of file