I'm attempting to use PySCIPopt to solve a traditional "Ax = b plus side constraints" type of problem. I have many values of b, and I need to run the optimizer on each of them. How can I reuse the model setup? Second question: what is the equivalent of a norm
in PySCIPopt? In other words, what is the right way to drive Ax - b as close to zero as possible? See the ??? marks below.
import numpy as np
from pyscipopt import Model, quicksum
def make_program():
    """Build the PySCIPopt model once so it can be re-solved for many b vectors.

    PySCIPopt has no CVXPY-style ``norm`` or ``Parameter``.  To minimize
    ||Ax - b||_1 we introduce one auxiliary variable t_j per row of A and
    the two linear constraints

        (A x)_j - t_j <= b_j        and        -(A x)_j - t_j <= -b_j

    which together force t_j >= |(A x)_j - b_j|; minimizing sum(t) then
    minimizes the L1 norm of the residual.  The b-dependent part lives only
    in the right-hand sides, so a new b is installed later with
    ``model.chgRhs`` on the stored constraint handles (after
    ``model.freeTransform()``), without rebuilding the model.

    Returns:
        (cons_up, cons_lo): lists of constraint handles whose RHS encode b
            (these play the role of the "parameter" b).
        x: list of the continuous decision variables.
        model: the configured pyscipopt Model.
    """
    A = load_master_matrix()  # TODO: load the constant master matrix (NumPy 2-D array)
    n_rows, n_cols = A.shape

    model = Model('Match_to_Master')

    # x: continuous decision variables; y: binary indicators that gate x
    # through the big-M constraint x_i <= 4*y_i (4 is also x's upper bound).
    x = []
    y = []
    for i in range(n_cols):
        x.append(model.addVar(vtype='C', lb=0.0, ub=4.0, name='x(%s)' % i))
        y.append(model.addVar(vtype='B', name='y(%s)' % i))
        model.addCons(x[i] <= y[i] * 4)

    # At most one active variable per group of 20 columns.
    for i in range(0, n_cols - 20, 20):
        model.addCons(quicksum(y[i:i + 20]) <= 1)

    # Residual variables t_j >= 0 and the two one-sided constraints per row.
    # RHS starts at 0 (i.e. b = 0); run_program overwrites it per column.
    t = [model.addVar(vtype='C', lb=0.0, name='t(%s)' % j) for j in range(n_rows)]
    cons_up = []
    cons_lo = []
    for j in range(n_rows):
        row = quicksum(A[j, i] * x[i] for i in range(n_cols))
        cons_up.append(model.addCons(row - t[j] <= 0.0, name='up(%s)' % j))
        cons_lo.append(model.addCons(-row - t[j] <= 0.0, name='lo(%s)' % j))

    # Minimizing sum(t) minimizes ||Ax - b||_1 (the linear stand-in for norm).
    model.setObjective(quicksum(t), sense='minimize')

    return (cons_up, cons_lo), x, model
def run_program(data, thresh=0.2):
    """Solve the master-matching program once per column of the analysis matrix.

    The model is built a single time; for each column (each new b) we only
    call ``model.freeTransform()`` to return SCIP to the problem stage and
    then update the right-hand sides of the stored residual constraints with
    ``model.chgRhs`` — this is the PySCIPopt equivalent of a CVXPY Parameter.

    Args:
        data: source for the analysis matrix B (columns are b vectors).
        thresh: solution values below this are treated as zero.
    """
    (cons_up, cons_lo), x, model = make_program()
    B = load_analysis_matrix(data)  # TODO: load matrix for analysis from data

    for c, column in enumerate(B.T):
        # Back to the problem stage so constraints may be modified.
        # NOTE: freeTransform discards the previous solve's solutions; to
        # warm-start, capture the old x values first and pass them back in
        # via model.createSol / model.trySol before optimizing.
        model.freeTransform()
        for j, bj in enumerate(column):
            model.chgRhs(cons_up[j], float(bj))    #  (A x)_j - t_j <=  b_j
            model.chgRhs(cons_lo[j], float(-bj))   # -(A x)_j - t_j <= -b_j

        model.optimize()

        # Extract the solution into a NumPy vector; SCIP variables have no
        # ``.value`` attribute — use model.getVal.
        sol = np.array([model.getVal(var) for var in x])
        sol[sol < thresh] = 0.0

        for i in range(0, sol.size - 20, 20):
            # 0.2 is the original hard-coded hit threshold (kept distinct
            # from ``thresh``, which only zeroes small entries).
            if np.sum(sol[i:i + 20]) > 0.2:
                print(' hit at ', str(i // 20), ' for column ', str(c))