I have found the reply to this question very useful and am trying to replicate it on a toy scheduling problem I constructed. The problem represents a purify-store-use process flow with the following priorities:
- First, maximise consumption of pure product from storage
- Second, if pure product usage constraints are reached, store the product.
- Lastly, if the maximum accumulation in the store (defined by storage high limits) is reached, use the raw/unpurified material.
I have observed that in certain scenarios (initial conditions and objective function coefficients), the solution is one where during the last step the manipulated variable(s) are moved without regard for the violation of storage limits that would have been observed at the next step outside the prediction horizon. I realise that fixing the last two points of the independent variables in an integrating process does not imply that the dependent variable prediction will be inside limits for steps outside the prediction horizon unless the derivative of the integrating independent variable for the last step is set to 0. That said, I still encountered an issue when I connect the last two nodes for m.mv_Pure_Use
. I am trading off inventory building against production via the following two statements:
- Line 88 :
m.mv_Pure_Use.COST=-10
and - Line 95 :
m.Minimize(store_acc_cost*m.Store_acc)
What is problematic for me is that:
- without the
Connections
made in lines 65-66, the final value for the objective function is -8117
but with the Connections
the objective function goes to -1.E+15
albeit still with a reasonable solution. I expected the objective function value to have the same order of magnitude irrespective of the connections made. - When I change the value of
store_acc_cost
from -8
to -7, the solver reports an Unbounded solution
. At a value of -6
it reports a solution again.
# -*- coding: utf-8 -*-
"""
Toy purify-store-use scheduling problem solved as a GEKKO dynamic
control problem (IMODE=6) over a 10-day horizon.

Intended priorities (encoded through COST weights and Minimize terms):
1. Maximise consumption of pure product from storage.
2. If pure-product usage constraints are reached, store the product.
3. If the storage high limit is reached, use the raw/unpurified material.

Created on Tue Apr 11 09:40:06 2023
@author: strydohj
"""
from gekko import GEKKO
import numpy as np
import json
import pandas as pd
m=GEKKO(remote=False)  # build and solve locally, not on the public APMonitor server
duration=10 #schedule duration: number of daily time points
m.time=np.linspace(0,9,10) #10 day schedule, one point per day (days 0..9)
#set up vectors to instantiate Parameters with.
rundown_schedule=np.full(duration,100) #init an upstream rundown schedule, tons/h, but schedule in days
raw_process_max =np.full(duration,100) #init raw process max sched, tons/h but schedule in days
pure_use_max =np.full(duration,150) #init purification max sched, tons/h,but schedule in days
pure_use_max[3]=89 #reduced pure-use limit on day 3 forces the store/raw-use logic to engage
#model Constants
m.c_Yield=m.Const(value=0.90,name='yield') #fraction of purified raw recovered as pure product
#define model Parameters
m.p_Raw_Rundown =m.Param(value=rundown_schedule,name='rundown_schedule') #upstream supply, tons/h
m.p_Raw_Process_Max =m.Param(value=raw_process_max,name='raw_process_max') #purification feed limit, tons/h
# NOTE(review): this name contains a space; APMonitor names are normally
# lowercase identifiers without spaces — verify it round-trips in results.json.
m.p_Pure_Use_Max =m.Param(value=pure_use_max,name='Purification Maximum')
#define MV and CV's
m.mv_Raw_Flare =m.MV(value=2,lb=0,ub=100,name='raw_flare') #tons/h
m.mv_Raw_Purify =m.MV(value=90,lb=0, ub=100,name='raw_purify') #tons/h
m.mv_Raw_Use =m.MV(value=8,lb=0, ub=150,name='raw_use') #tons/h
m.cv_Store =m.CV(value=100,name='store') #storage inventory, tons
m.mv_Pure_Use =m.MV(value=81,lb=0,ub=110,name='pure_use') #tons/h
m.Store_acc =m.SV(0,name='Store_acc') #net accumulation rate into storage, tons/h
#Intermediate calculations
m.Pure_Product=m.Intermediate(m.mv_Raw_Purify*m.c_Yield,name='Pure_Product') #pure product made from purified raw, tons/h
#Setup Equations
#raw material balance: rundown is split between purification, direct use and flare
m.Equation(m.mv_Raw_Purify+m.mv_Raw_Use+m.mv_Raw_Flare==m.p_Raw_Rundown)
#storage integrator: m.time is in days, rates are tons/h, hence the *24
m.Equation(m.cv_Store.dt()==(m.mv_Raw_Purify*m.c_Yield-m.mv_Pure_Use)*24)
m.Equation(m.mv_Raw_Purify<=m.p_Raw_Process_Max) #time-varying purification feed limit
m.Equation(m.mv_Pure_Use<=m.p_Pure_Use_Max) #time-varying pure-use limit
m.Equation(m.Store_acc==m.mv_Raw_Purify*m.c_Yield-m.mv_Pure_Use) #net accumulation rate, tons/h
#require zero net accumulation at the final point so inventory is flat at the horizon end
m.fix_final(m.Store_acc,0)
#--------SETTING UP GLOBAL OPTIONS
m.options.NODES=3 #collocation nodes per time interval
m.options.SOLVER=1 # 1=APOPT, 2=BPOPT, 3=IPOPT
m.options.CV_TYPE=1 #1 = linear (l1-norm) dead band trajectory
m.options.REQCTRLMODE=3 #3= CONTROL (optimize MVs, not just simulate)
m.options.IMODE=6 #dynamic control
m.options.MAX_ITER=50
m.options.MV_DCOST_SLOPE=1 #Increase MV Movement Cost over time
m.options.CV_WGT_SLOPE=1 #Increase CV Error Penalty over time
#Create prediction horizon - keep mv fixed for last two time points.
#m.Connection(m.mv_Pure_Use,m.mv_Pure_Use,8,9) #connect end point nodes
#m.Connection(m.mv_Pure_Use,m.mv_Pure_Use,8,9,1,2) #connect internal nodes
mv_list=[m.mv_Pure_Use,m.mv_Raw_Purify,m.mv_Raw_Flare,m.mv_Raw_Use] #MVs the optimizer may move
for mv in mv_list:
    mv.STATUS=1 #let the optimizer adjust this MV
    mv.DCOST=0.01 #Movement cost (penalty per unit of MV change)
#CV Options
m.cv_Store.STATUS=1 #control this cv
m.cv_Store.TAU=1 #Time constant for reference trajectory
m.cv_Store.TR_INIT=0 #0=fixed dead-band trajectory (1 would re-center at cold start)
m.cv_Store.TR_OPEN=1 #Opening shape (ratio) of trajectory
m.cv_Store.WSPLO=200 #penalty for dipping below SPLO
m.cv_Store.WSPHI=200 #penalty for exceeding SPHI
m.cv_Store.SPLO=20 #storage low limit, tons
m.cv_Store.SPHI=140 #storage high limit, tons
#set up specific COST variables
m.mv_Pure_Use.COST=-10 #First priority: negative cost -> maximise pure use, up to constraints
m.mv_Raw_Purify.COST=0
m.cv_Store.COST=0
m.mv_Raw_Use.COST=0
m.mv_Raw_Flare.COST=0
store_acc_cost=-8 #reward (negative cost) per ton/h of net accumulation; see question text
m.Minimize(store_acc_cost*m.Store_acc) #additional objective on accumulation (second priority)
m.solve() #writes results.json into the run directory m.path
#load the full solution (all collocation nodes) from the run directory
with open(m.path+'//results.json') as f:
    results = json.load(f)
result_df=pd.DataFrame(results)
#plot storage against its trajectory limits, then the four flow MVs
#(pandas .plot() requires matplotlib to be installed)
result_df[['store','store.tr_hi','store.tr_lo']].plot()
result_df[['raw_flare','raw_use','raw_purify','pure_use']].plot()
print('Total Accumulated:\t',result_df['store_acc'].sum())
print('Total Used:\t',result_df['pure_use'].sum())
#print(m.Store)