The following are Python code examples showing how to use gurobipy.Model(), drawn from open-source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module gurobipy, or try the search function.
import gurobipy

def fit(array, convex=1):
    """Fit a smooth line to the given time-series data"""
    N = len(array)
    m = gurobipy.Model()
    fv = m.addVars(N)
    if convex == 1:
        # non-increasing values with non-negative second differences (convex shape)
        m.addConstrs(fv[i] <= fv[i-1] for i in range(1, N))
        m.addConstrs(fv[i] + fv[i-2] >= 2*fv[i-1] for i in range(2, N))
    else:
        # non-decreasing values with non-positive second differences (concave shape)
        m.addConstrs(fv[i] >= fv[i-1] for i in range(1, N))
        m.addConstrs(fv[i] + fv[i-2] <= 2*fv[i-1] for i in range(2, N))
    # minimize ||f - array||^2, dropping the constant term that does not affect the optimum
    m.setObjective(
        gurobipy.quicksum([fv[i] * fv[i] for i in range(N)])
        - 2 * gurobipy.LinExpr(array, fv.values()))
    m.Params.outputFlag = 0
    m.optimize()
    return [fv[i].X for i in range(N)]
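A minimal usage sketch for fit(); the noisy input series below is made up for illustration, and a working Gurobi installation and license are assumed:

noisy = [10.0, 8.3, 6.9, 5.9, 5.2, 4.8, 4.6, 4.5]
smooth = fit(noisy, convex=1)              # decreasing, convex fit
print([round(v, 2) for v in smooth])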
def actualSolve(self, lp):
    """
    Solve a well formulated lp problem

    creates a cplex model, variables and constraints and attaches
    them to the lp model which it then solves
    """
    self.buildSolverModel(lp)
    # set the initial solution
    log.debug("Solve the Model using cplex")
    self.callSolver(lp)
    # get the solution information
    solutionStatus = self.findSolutionValues(lp)
    for var in lp.variables():
        var.modified = False
    for constraint in lp.constraints.values():
        constraint.modified = False
    return solutionStatus
def actualResolve(self, lp, callback=None):
    """
    Solve a well formulated lp problem

    uses the old solver and modifies the rhs of the modified constraints
    """
    log.debug("Resolve the Model using gurobi")
    for constraint in lp.constraints.values():
        if constraint.modified:
            constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS,
                                                -constraint.constant)
    lp.solverModel.update()
    self.callSolver(lp, callback=callback)
    # get the solution information
    solutionStatus = self.findSolutionValues(lp)
    for var in lp.variables():
        var.modified = False
    for constraint in lp.constraints.values():
        constraint.modified = False
    return solutionStatus
def actualSolve(self, lp, callback=None):
    """
    Solve a well formulated lp problem

    creates a gurobi model, variables and constraints and attaches
    them to the lp model which it then solves
    """
    self.buildSolverModel(lp)
    # set the initial solution
    log.debug("Solve the Model using gurobi")
    self.callSolver(lp, callback=callback)
    # get the solution information
    solutionStatus = self.findSolutionValues(lp)
    for var in lp.variables():
        var.modified = False
    for constraint in lp.constraints.values():
        constraint.modified = False
    return solutionStatus
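These actualSolve/actualResolve methods are not called directly; assuming they belong to PuLP's GUROBI solver wrapper, they are reached through LpProblem.solve(), roughly as in this sketch:

from pulp import LpProblem, LpVariable, LpMaximize, GUROBI

prob = LpProblem("toy", LpMaximize)
x = LpVariable("x", 0, 4)
y = LpVariable("y", 0, 3)
prob += 3 * x + 2 * y            # objective
prob += x + y <= 5               # constraint
prob.solve(GUROBI(msg=False))    # dispatches to an actualSolve() like the one above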
Example #5
def actualResolve(self, lp, callback=None):
    """
    Solve a well formulated lp problem

    uses the old solver and modifies the rhs of the modified constraints
    """
    log.debug("Resolve the Model using gurobi")
    for constraint in lp.constraints.values():
        if constraint.modified:
            constraint.solverConstraint.setAttr(gurobipy.GRB.Attr.RHS,
                                                -constraint.constant)
    lp.solverModel.update()
    self.callSolver(lp, callback=callback)
    # get the solution information
    solutionStatus = self.findSolutionValues(lp)
    for var in lp.variables():
        var.modified = False
    for constraint in lp.constraints.values():
        constraint.modified = False
    return solutionStatus
Example #6
def actualSolve(self, lp, callback=None):
    """
    Solve a well formulated lp problem

    creates a glpk model, variables and constraints and attaches
    them to the lp model which it then solves
    """
    self.buildSolverModel(lp)
    # set the initial solution
    log.debug("Solve the Model using glpk")
    self.callSolver(lp, callback=callback)
    # get the solution information
    solutionStatus = self.findSolutionValues(lp)
    for var in lp.variables():
        var.modified = False
    for constraint in lp.constraints.values():
        constraint.modified = False
    return solutionStatus
def actualSolve(self, lp, callback=None):
    """
    Solve a well formulated lp problem

    creates a gurobi model, variables and constraints and attaches
    them to the lp model which it then solves
    """
    self.buildSolverModel(lp)
    # set the initial solution
    log.debug("Solve the Model using gurobi")
    self.callSolver(lp, callback=callback)
    # get the solution information
    solution_status = self.findSolutionValues(lp)
    for var in lp.variables():
        var.modified = False
    for constraint in lp.constraints.values():
        constraint.modified = False
    return solution_status
Example #8
def query(self, X_train, Y_train, labeled_idx, amount):
    # train an autoencoder once and cache its embedding as the representation:
    if self.autoencoder is None:
        self.autoencoder = get_autoencoder_model(input_shape=(28, 28, 1))
        self.autoencoder.compile(optimizer=optimizers.Adam(lr=0.0003), loss='binary_crossentropy')
        self.autoencoder.fit(X_train, X_train,
                             epochs=200,
                             batch_size=256,
                             shuffle=True,
                             verbose=2)
        encoder = Model(self.autoencoder.input, self.autoencoder.get_layer('embedding').input)
        self.embedding = encoder.predict(X_train.reshape((-1, 28, 28, 1)), batch_size=1024)

    # subsample from the unlabeled set:
    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
    unlabeled_idx = np.random.choice(unlabeled_idx, np.min([labeled_idx.shape[0]*10, unlabeled_idx.size]), replace=False)

    # iteratively sub-sample using the discriminative sampling routine:
    labeled_so_far = 0
    sub_sample_size = int(amount / self.sub_batches)
    while labeled_so_far < amount:
        if labeled_so_far + sub_sample_size > amount:
            sub_sample_size = amount - labeled_so_far

        model = train_discriminative_model(self.embedding[labeled_idx], self.embedding[unlabeled_idx], self.embedding[0].shape, gpu=self.gpu)
        predictions = model.predict(self.embedding[unlabeled_idx])
        selected_indices = np.argpartition(predictions[:, 1], -sub_sample_size)[-sub_sample_size:]
        labeled_idx = np.hstack((labeled_idx, unlabeled_idx[selected_indices]))

        labeled_so_far += sub_sample_size
        unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
        unlabeled_idx = np.random.choice(unlabeled_idx, np.min([labeled_idx.shape[0]*10, unlabeled_idx.size]), replace=False)

        # delete the model to free GPU memory:
        del model
        gc.collect()
    return labeled_idx
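All of the query() examples in this listing share the same calling convention. A hedged usage sketch, where sampler, X_train, Y_train and labeled_idx are assumed to already exist:

# sampler: an instance of the sampling class that defines query() above
labeled_idx = sampler.query(X_train, Y_train, labeled_idx, amount=100)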
def query(self, X_train, Y_train, labeled_idx, amount):
    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)

    # use the learned representation for the k-greedy-center algorithm:
    representation_model = Model(inputs=self.model.input, outputs=self.model.get_layer('softmax').input)
    representation = representation_model.predict(X_train, verbose=0)
    new_indices = self.greedy_k_center(representation[labeled_idx, :], representation[unlabeled_idx, :], amount)
    return np.hstack((labeled_idx, unlabeled_idx[new_indices]))
Example #10
def query(self, X_train, Y_train, labeled_idx, amount):
    # subsample from the unlabeled set:
    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
    unlabeled_idx = np.random.choice(unlabeled_idx, np.min([labeled_idx.shape[0]*10, unlabeled_idx.size]), replace=False)

    embedding_model = Model(inputs=self.model.input,
                            outputs=self.model.get_layer('softmax').input)
    representation = embedding_model.predict(X_train, batch_size=256).reshape((X_train.shape[0], -1, 1))

    # iteratively sub-sample using the discriminative sampling routine:
    labeled_so_far = 0
    sub_sample_size = int(amount / self.sub_batches)
    while labeled_so_far < amount:
        if labeled_so_far + sub_sample_size > amount:
            sub_sample_size = amount - labeled_so_far

        model = train_discriminative_model(representation[labeled_idx], representation[unlabeled_idx], representation[0].shape, gpu=self.gpu)
        predictions = model.predict(representation[unlabeled_idx])
        predictions -= 1  # for numerical stability
        predictions = np.exp(predictions / self.temperature)
        predictions[:, 1] /= np.sum(predictions[:, 1])
        selected_indices = np.random.choice(unlabeled_idx, sub_sample_size, replace=False, p=predictions[:, 1])
        labeled_idx = np.hstack((labeled_idx, selected_indices))

        labeled_so_far += sub_sample_size
        unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
        unlabeled_idx = np.random.choice(unlabeled_idx, np.min([labeled_idx.shape[0]*10, unlabeled_idx.size]), replace=False)

        # delete the model to free GPU memory:
        del model
        gc.collect()
    del embedding_model
    return labeled_idx
def __init__(self, input_data, input_params):
    self.input_data = input_data
    self.input_params = input_params
    self.model = grb.Model('prod_planning')
    self._create_decision_variables()
    self._create_main_constraints()
    self._set_objective_function()

# ================== Decision variables ==================
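The helper methods invoked by the constructor are not shown in this excerpt. Purely as an illustration of the pattern (the index set and variable name below are invented, not the project's actual code), _create_decision_variables would add decision variables to self.model along these lines:

def _create_decision_variables(self):
    # illustrative only: one non-negative production quantity per planning period
    periods = range(len(self.input_data))
    self.production = self.model.addVars(periods, lb=0, name="production")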
Example #12
def query(self, X_train, Y_train, labeled_idx, amount):
    # subsample from the unlabeled set:
    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
    unlabeled_idx = np.random.choice(unlabeled_idx, np.min([labeled_idx.shape[0]*10, unlabeled_idx.size]), replace=False)

    embedding_model = Model(inputs=self.model.input,
                            outputs=self.model.get_layer('softmax').input)
    representation = embedding_model.predict(X_train, batch_size=128).reshape((X_train.shape[0], -1, 1))

    # iteratively sub-sample using the discriminative sampling routine:
    labeled_so_far = 0
    sub_sample_size = int(amount / self.sub_batches)
    while labeled_so_far < amount:
        if labeled_so_far + sub_sample_size > amount:
            sub_sample_size = amount - labeled_so_far

        model = train_discriminative_model(representation[labeled_idx], representation[unlabeled_idx], representation[0].shape, gpu=self.gpu)
        predictions = model.predict(representation[unlabeled_idx])
        selected_indices = np.argpartition(predictions[:, 1], -sub_sample_size)[-sub_sample_size:]
        labeled_idx = np.hstack((labeled_idx, unlabeled_idx[selected_indices]))

        labeled_so_far += sub_sample_size
        unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)
        unlabeled_idx = np.random.choice(unlabeled_idx, np.min([labeled_idx.shape[0]*10, unlabeled_idx.size]), replace=False)

        # delete the model to free GPU memory:
        del model
        gc.collect()
    del embedding_model
    gc.collect()
    return labeled_idx
def solve(self, start=0, flag_rolling=0, **kwargs):
    """Call extensive solver to solve the discretized problem. It will first
    construct the extensive model and then call Gurobi solver to solve it.

    Parameters
    ----------
    **kwargs: optional
        Gurobipy attributes to specify on extensive model.
    """
    # extensive solver is able to solve MSLP with CTG or without CTG
    self.MSP._check_individual_stage_models()
    self.MSP._check_multistage_model()

    construction_start_time = time.time()

    self.extensive_model = gurobipy.Model()
    self.extensive_model.modelsense = self.MSP.sense
    self.start = start

    for k, v in kwargs.items():
        setattr(self.extensive_model.Params, k, v)
    self._construct_extensive(flag_rolling)

    construction_end_time = time.time()
    self.construction_time = construction_end_time - construction_start_time
    solving_start_time = time.time()

    self.extensive_model.optimize()

    solving_end_time = time.time()
    self.solving_time = solving_end_time - solving_start_time
    self.total_time = self.construction_time + self.solving_time

    return self.extensive_model.objVal
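Because the keyword arguments are copied onto the Gurobi Params object with setattr, any gurobipy parameter can be forwarded directly. A small usage sketch, assuming an already-constructed instance of the class above named ext:

# ext is assumed to be an instance of the class defining solve() above
obj_val = ext.solve(OutputFlag=0, MIPGap=0.01)   # forwarded to extensive_model.Params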
Example #14
def __init__(self, name, sense):
    self.gurobi_model = gurobipy.Model(name)
    self.sense = sense
    if sense == LpMaximize:
        self.gurobi_model.setAttr("ModelSense", -1)
    self.varables = {}
    self.objective = None
    self.status = None
def solve_tsp(distance_matrix):  # function name assumed for this listing
    """
    Solves tsp problem.

    :param distance_matrix: symmetric matrix of distances, where the i,j element is the distance between object i and j
    :return: matrix containing {0, 1}, 1 for each transition that is included in the tsp solution
    """
    n = len(distance_matrix)
    m = Model()
    m.setParam("OutputFlag", False)
    m.setParam("Threads", 1)

    # Create variables
    vars = {}
    for i in range(n):
        for j in range(i + 1):
            vars[i, j] = m.addVar(
                obj=0.0 if i == j else distance_matrix[i][j], vtype=GRB.BINARY, name="e" + str(i) + "_" + str(j)
            )
            vars[j, i] = vars[i, j]
    m.update()

    # Add degree-2 constraint, and forbid loops
    for i in range(n):
        m.addConstr(quicksum(vars[i, j] for j in range(n)) == 2)
        vars[i, i].ub = 0
    m.update()

    # Optimize model
    m._vars = vars
    m.params.LazyConstraints = 1

    def subtour_fn(model, where):
        return subtourelim(n, model, where)

    m.optimize(subtour_fn)
    solution = m.getAttr("x", vars)
    selected = [(i, j) for i in range(n) for j in range(n) if solution[i, j] > 0.5]
    result = np.zeros_like(distance_matrix)
    for (i, j) in selected:
        result[i][j] = 1
    return result
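A usage sketch with a tiny symmetric instance; solve_tsp is the name assumed above, and subtourelim (the lazy subtour-elimination callback) must be defined elsewhere in the module:

import numpy as np

D = np.array([[0, 2, 9, 10],
              [2, 0, 6, 4],
              [9, 6, 0, 3],
              [10, 4, 3, 0]])
transitions = solve_tsp(D)   # 0/1 matrix marking the edges used in the tour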
Example #16
def __init__(self, name=""):
    self._model = gurobipy.Model(env=gurobipy.Env(), name=name)
    # each and every instance must have state variables, local copy variables
    self.states = []
    self.local_copies = []
    # (discretized) uncertainties
    # stage-wise independent discrete uncertainties
    self.uncertainty_rhs = {}
    self.uncertainty_coef = {}
    self.uncertainty_obj = {}
    # indices of stage-dependent uncertainties
    self.uncertainty_rhs_dependent = {}
    self.uncertainty_coef_dependent = {}
    self.uncertainty_obj_dependent = {}
    # true uncertainties
    # stage-wise independent true continuous uncertainties
    self.uncertainty_rhs_continuous = {}
    self.uncertainty_coef_continuous = {}
    self.uncertainty_obj_continuous = {}
    self.uncertainty_mix_continuous = {}
    # stage-wise independent true discrete uncertainties
    self.uncertainty_rhs_discrete = {}
    self.uncertainty_coef_discrete = {}
    self.uncertainty_obj_discrete = {}
    # cutting planes approximation of recourse variable alpha
    self.alpha = None
    self.cuts = []
    # linking constraints
    self.link_constrs = []
    # number of discrete uncertainties
    self.n_samples = 1
    # number of state variables
    self.n_states = 0
    # probability measure for discrete uncertainties
    self.probability = None
    # type of true problem: continuous/discrete
    self._type = None
    # flag to indicate discretization of true problem
    self._flag_discrete = 0
    # collection of all specified dim indices of Markovian uncertainties
    self.Markovian_dim_index = []
    # risk measure
    self.measure = Expectation
Example #17
def mip_model(self, representation, labeled_idx, budget, delta, outlier_count, greedy_indices=None):
    import gurobipy as gurobi
    model = gurobi.Model("Core Set Selection")

    # set up the variables:
    points = {}
    outliers = {}
    for i in range(representation.shape[0]):
        if i in labeled_idx:
            points[i] = model.addVar(ub=1.0, lb=1.0, vtype="B", name="points_{}".format(i))
        else:
            points[i] = model.addVar(vtype="B", name="points_{}".format(i))
    for i in range(representation.shape[0]):
        outliers[i] = model.addVar(vtype="B", name="outliers_{}".format(i))
        outliers[i].start = 0

    # initialize the solution to be the greedy solution:
    if greedy_indices is not None:
        for i in greedy_indices:
            points[i].start = 1.0

    # set the outlier budget:
    model.addConstr(sum(outliers[i] for i in outliers) <= outlier_count, "budget")

    # build the graph and set the constraints:
    model.addConstr(sum(points[i] for i in range(representation.shape[0])) == budget, "budget")
    neighbors = {}
    graph = {}
    print("Updating Neighborhoods In MIP Model...")
    for i in range(0, representation.shape[0], 1000):
        print("At Point " + str(i))
        if i + 1000 > representation.shape[0]:
            distances = self.get_distance_matrix(representation[i:], representation)
            amount = representation.shape[0] - i
        else:
            distances = self.get_distance_matrix(representation[i:i+1000], representation)
            amount = 1000
        distances = np.reshape(distances, (amount, -1))
        for j in range(i, i + amount):
            graph[j] = [(idx, distances[j-i, idx]) for idx in np.reshape(np.where(distances[j-i, :] <= delta), (-1))]
            neighbors[j] = [points[idx] for idx in np.reshape(np.where(distances[j-i, :] <= delta), (-1))]
            neighbors[j].append(outliers[j])
            model.addConstr(sum(neighbors[j]) >= 1, "coverage+outliers")

    model.__data = points, outliers
    model.Params.MIPFocus = 1
    model.params.TIME_LIMIT = 180
    return model, graph
Example #18
def query(self, X_train, Y_train, labeled_idx, amount):
    import gurobipy as gurobi

    unlabeled_idx = get_unlabeled_idx(X_train, labeled_idx)

    # use the learned representation for the k-greedy-center algorithm:
    representation_model = Model(inputs=self.model.input, outputs=self.model.get_layer('softmax').input)
    representation = representation_model.predict(X_train, batch_size=128, verbose=0)
    print("Calculating Greedy K-Center Solution...")
    new_indices, max_delta = self.greedy_k_center(representation[labeled_idx], representation[unlabeled_idx], amount)
    new_indices = unlabeled_idx[new_indices]
    outlier_count = int(X_train.shape[0] / 10000)
    # outlier_count = 250
    submipnodes = 20000

    # iteratively solve the MIP optimization problem:
    eps = 0.01
    upper_bound = max_delta
    lower_bound = max_delta / 2.0
    print("Building MIP Model...")
    model, graph = self.mip_model(representation, labeled_idx, len(labeled_idx) + amount, upper_bound, outlier_count, greedy_indices=new_indices)
    model.Params.SubMIPNodes = submipnodes
    points, outliers = model.__data
    model.optimize()
    indices = [i for i in graph if points[i].X == 1]
    current_delta = upper_bound
    while upper_bound - lower_bound > eps:
        print("upper bound is {ub}, lower bound is {lb}".format(ub=upper_bound, lb=lower_bound))
        if model.getAttr(gurobi.GRB.Attr.Status) in [gurobi.GRB.INFEASIBLE, gurobi.GRB.TIME_LIMIT]:
            print("Optimization Failed - Infeasible!")

            lower_bound = max(current_delta, self.get_graph_min(representation, current_delta))
            current_delta = (upper_bound + lower_bound) / 2.0

            del model
            gc.collect()
            model, graph = self.mip_model(representation, labeled_idx, len(labeled_idx) + amount, current_delta, outlier_count, greedy_indices=indices)
            points, outliers = model.__data
            model.Params.SubMIPNodes = submipnodes

        else:
            print("Optimization Succeeded!")
            upper_bound = min(current_delta, self.get_graph_max(representation, current_delta))
            current_delta = (upper_bound + lower_bound) / 2.0
            indices = [i for i in graph if points[i].X == 1]

            del model
            gc.collect()
            model, graph = self.mip_model(representation, labeled_idx, len(labeled_idx) + amount, current_delta, outlier_count, greedy_indices=indices)
            points, outliers = model.__data
            model.Params.SubMIPNodes = submipnodes

        if upper_bound - lower_bound > eps:
            model.optimize()

    return np.array(indices)
Example #19
def build_model(instance):  # function name assumed for this listing
    """Creates a simple MIP model for a set cover instance.

    Creates one binary decision variable s_i for each set. The objective
    function is simply the sum over the c_i * s_i. The constraints
    are that each item is captured by at least one set that is taken.

    Args:
        instance: The set cover instance as created by read().
    Returns:
        A pair of the Gurobi MIP model and the mapping from the sets
        in the instance to the corresponding Gurobi variables.
    """
    name, nitems, sets = instance
    model = grb.Model(name)

    # One variable for each set. Also remember which sets cover each item.
    covered_by = [[] for i in range(nitems)]
    vars = []
    for i, set in enumerate(sets):
        cost, covers = set
        vars.append(model.addVar(obj=cost, vtype=grb.GRB.BINARY, name="s_{0}".format(i)))
        for item in covers:
            covered_by[item].append(vars[i])
    model.update()

    # Constraint: Each item covered at least once.
    for item in range(nitems):
        model.addConstr(grb.quicksum(covered_by[item]) >= 1)

    # We want to minimize. Objective coefficients already fixed during variable creation.
    model.setAttr("ModelSense", grb.GRB.MINIMIZE)

    # Tuning parameters derived from sc_330_0
    model.read("mip.prm")
    model.setParam("Threads", 3)
    model.setParam("MIPGap", 0.001)  # 0.1% usually suffices
    return model, vars
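A hedged usage sketch; build_model is the name assumed above, read() and the instance file come from the surrounding project, and the file name here is only a placeholder:

instance = read("sc_330_0.txt")            # hypothetical instance file
model, set_vars = build_model(instance)
model.optimize()
chosen = [i for i, v in enumerate(set_vars) if v.X > 0.5]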
Example #21
def _mmp_solve(w1_ij, x_ij_keys, n, w2_ij=None):
    """A helper function that solves a weighted maximum matching problem."""
    m = Model("MBSA")

    if __debug__:
        log(DEBUG, "")
        log(DEBUG, "Solving a weighted maximum matching problem with " +
            "%d savings weights." % len(w1_ij))

    # build model
    x_ij = m.addVars(x_ij_keys, obj=w1_ij, vtype=GRB.BINARY, name='x')
    _mmp_add_cnts_sum(m, x_ij, x_ij_keys, w1_ij, n)
    m._vars = x_ij
    m.modelSense = GRB.MAXIMIZE
    m.update()

    # disable output
    m.setParam('OutputFlag', 0)
    m.setParam('TimeLimit', MAX_MIP_SOLVER_RUNTIME)
    m.setParam('Threads', MIP_SOLVER_THREADS)
    #m.write("out.lp")
    m.optimize()

    # restore SIGINT callback handler which is changed by gurobipy
    signal(SIGINT, default_int_handler)

    if __debug__:
        log(DEBUG-1, "Gurobi runtime = %.2f" % m.Runtime)

    if m.Status == GRB.OPTIMAL:
        if w2_ij is None:
            max_wt, max_merge = max((w1_ij[k], x_ij_keys[k])
                                    for k, v in enumerate(m.X) if v)
        else:
            max_wt, _, max_merge = max((w1_ij[k], w2_ij[k], x_ij_keys[k])
                                       for k, v in enumerate(m.X) if v)
        return max_wt, max_merge[0], max_merge[1]
    elif m.Status == GRB.TIME_LIMIT:
        raise GurobiError(10023, "Gurobi timeout reached when attempting to solve GAP")
    elif m.Status == GRB.INTERRUPTED:
        raise KeyboardInterrupt()
    return None
Example #22
def buildSolverModel(self, lp):
    """
    Takes the pulp lp model and translates it into a gurobi model
    """
    log.debug("create the gurobi model")
    lp.solverModel = gurobipy.Model(lp.name)
    log.debug("set the sense of the problem")
    if lp.sense == LpMaximize:
        lp.solverModel.setAttr("ModelSense", -1)
    if self.timeLimit:
        lp.solverModel.setParam("TimeLimit", self.timeLimit)
    if self.epgap:
        lp.solverModel.setParam("MIPGap", self.epgap)
    log.debug("add the variables to the problem")
    for var in lp.variables():
        lowBound = var.lowBound
        if lowBound is None:
            lowBound = -gurobipy.GRB.INFINITY
        upBound = var.upBound
        if upBound is None:
            upBound = gurobipy.GRB.INFINITY
        obj = lp.objective.get(var, 0.0)
        varType = gurobipy.GRB.CONTINUOUS
        if var.cat == LpInteger and self.mip:
            varType = gurobipy.GRB.INTEGER
        var.solverVar = lp.solverModel.addVar(lowBound, upBound, vtype=varType, obj=obj, name=var.name)
    lp.solverModel.update()
    log.debug("add the Constraints to the problem")
    for name, constraint in lp.constraints.items():
        # build the expression
        expr = gurobipy.LinExpr(list(constraint.values()), [v.solverVar for v in constraint.keys()])
        if constraint.sense == LpConstraintLE:
            relation = gurobipy.GRB.LESS_EQUAL
        elif constraint.sense == LpConstraintGE:
            relation = gurobipy.GRB.GREATER_EQUAL
        elif constraint.sense == LpConstraintEQ:
            relation = gurobipy.GRB.EQUAL
        else:
            raise PulpSolverError('Detected an invalid constraint type')
        constraint.solverConstraint = lp.solverModel.addConstr(expr, relation, -constraint.constant, name)
    lp.solverModel.update()
Example #23
def buildSolverModel(self, lp):
    """
    Takes the pulp lp model and translates it into a gurobi model
    """
    log.debug("create the gurobi model")
    lp.solverModel = gurobipy.Model(lp.name)
    log.debug("set the sense of the problem")
    if lp.sense == LpMaximize:
        lp.solverModel.setAttr("ModelSense", -1)
    if self.timeLimit:
        lp.solverModel.setParam("TimeLimit", self.timeLimit)
    if self.epgap:
        lp.solverModel.setParam("MIPGap", self.epgap)
    log.debug("add the variables to the problem")
    for var in lp.variables():
        lowBound = var.lowBound
        if lowBound is None:
            lowBound = -gurobipy.GRB.INFINITY
        upBound = var.upBound
        if upBound is None:
            upBound = gurobipy.GRB.INFINITY
        obj = lp.objective.get(var, 0.0)
        varType = gurobipy.GRB.CONTINUOUS
        if var.cat == LpInteger and self.mip:
            varType = gurobipy.GRB.INTEGER
        var.solverVar = lp.solverModel.addVar(lowBound, upBound,
                                              vtype=varType,
                                              obj=obj, name=var.name)
    lp.solverModel.update()
    log.debug("add the Constraints to the problem")
    for name, constraint in lp.constraints.items():
        # build the expression
        expr = gurobipy.LinExpr(list(constraint.values()),
                                [v.solverVar for v in constraint.keys()])
        if constraint.sense == LpConstraintLE:
            relation = gurobipy.GRB.LESS_EQUAL
        elif constraint.sense == LpConstraintGE:
            relation = gurobipy.GRB.GREATER_EQUAL
        elif constraint.sense == LpConstraintEQ:
            relation = gurobipy.GRB.EQUAL
        else:
            raise PulpSolverError('Detected an invalid constraint type')
        constraint.solverConstraint = lp.solverModel.addConstr(expr,
                                                               relation, -constraint.constant, name)
    lp.solverModel.update()
Example #24
def __init__(self, g, budget, eps_noise,
             seed_s=None,
             integral=True,
             imposed_schedule: ImposedSchedule = ImposedSchedule.FULL_SCHEDULE,
             solve_r=True,
             write_model_file: Optional[PathLike] = None,
             gurobi_params: Dict[str, Any] = None):
    # (the g, budget and eps_noise parameters are assumptions inferred from the assignments below)
    self.GRB_CONSTRAINED_PRESOLVE_TIME_LIMIT = 300  # todo (paras): read this from gurobi_params
    self.gurobi_params = gurobi_params
    self.num_threads = self.gurobi_params.get("Threads", 1)
    self.model_file = write_model_file
    self.seed_s = seed_s
    self.integral = integral
    self.imposed_schedule = imposed_schedule
    self.solve_r = solve_r
    self.eps_noise = eps_noise
    self.budget = budget
    self.g = g
    self.solve_time = None

    if not self.integral:
        assert not self.solve_r, "Can't solve for R if producing a fractional solution"

    self.init_constraints = []  # used for seeding the model

    self.m = Model("checkpointmip_gc_{}_{}".format(self.g.size, self.budget))
    if gurobi_params is not None:
        for k, v in gurobi_params.items():
            setattr(self.m.Params, k, v)

    T = self.g.size
    self.ram_gcd = self.g.ram_gcd(self.budget)
    if self.integral:
        self.R = self.m.addVars(T, T, name="R", vtype=GRB.BINARY)
        self.S = self.m.addVars(T, T, name="S", vtype=GRB.BINARY)
        self.Free_E = self.m.addVars(T, len(self.g.edge_list), name="FREE_E", vtype=GRB.BINARY)
    else:
        self.R = self.m.addVars(T, T, name="R", vtype=GRB.CONTINUOUS, lb=0.0, ub=1.0)
        self.S = self.m.addVars(T, T, name="S", vtype=GRB.CONTINUOUS, lb=0.0, ub=1.0)
        self.Free_E = self.m.addVars(T, len(self.g.edge_list), name="FREE_E", vtype=GRB.CONTINUOUS, lb=0.0, ub=1.0)
    gcd = float(budget) / self.ram_gcd
    self.U = self.m.addVars(T, T, name="U", lb=0.0, ub=gcd)
    for x in range(T):
        for y in range(T):
            self.m.addLConstr(self.U[x, y], GRB.GREATER_EQUAL, 0)
            self.m.addLConstr(self.U[x, y], GRB.LESS_EQUAL, float(budget) / self.ram_gcd)
def solve(self):
    T = self.g.size
    with Timer("Gurobi model optimization", extra_data={"T": str(T), "budget": str(self.budget)}):
        if self.seed_s is not None:
            self.m.Params.TimeLimit = self.GRB_CONSTRAINED_PRESOLVE_TIME_LIMIT
            self.m.optimize()
            if self.m.status == GRB.INFEASIBLE:
                print("Infeasible ILP seed at budget {:.2E}".format(self.budget))
            self.m.remove(self.init_constraints)
            self.m.Params.TimeLimit = self.gurobi_params.get("TimeLimit", 0)
            self.m.message("\n\nRestarting solve\n\n")
        with Timer("ILPSolve") as solve_ilp:
            self.m.optimize()
        self.solve_time = solve_ilp.elapsed

    infeasible = self.m.status == GRB.INFEASIBLE
    if infeasible:
        raise ValueError("Infeasible model, check constraints carefully. Insufficient memory?")

    if self.m.solCount < 1:
        raise ValueError("Model status is {} (not infeasible), but solCount is {}".format(self.m.status, self.m.solCount))

    Rout = np.zeros((T, T), dtype=checkmate.core.utils.solver_common.SOLVER_DTYPE if self.integral else np.float)
    Sout = np.zeros((T, T), dtype=checkmate.core.utils.solver_common.SOLVER_DTYPE if self.integral else np.float)
    Uout = np.zeros((T, T), dtype=checkmate.core.utils.solver_common.SOLVER_DTYPE if self.integral else np.float)
    Free_Eout = np.zeros((T, len(self.g.edge_list)), dtype=checkmate.core.utils.solver_common.SOLVER_DTYPE)
    solver_dtype_cast = int if self.integral else float
    try:
        for t in range(T):
            for i in range(T):
                try:
                    Rout[t][i] = solver_dtype_cast(self.R[t, i].X)
                except (AttributeError, TypeError):
                    Rout[t][i] = solver_dtype_cast(self.R[t, i])
                try:
                    Sout[t][i] = solver_dtype_cast(self.S[t, i])
                except (AttributeError, TypeError):
                    Sout[t][i] = solver_dtype_cast(self.S[t, i].X)
                try:
                    Uout[t][i] = self.U[t, i].X * self.ram_gcd
                except (AttributeError, TypeError):
                    Uout[t][i] = self.U[t, i] * self.ram_gcd
            for e in range(len(self.g.edge_list)):
                try:
                    Free_Eout[t][e] = solver_dtype_cast(self.Free_E[t, e].X)
                except (AttributeError, TypeError):
                    Free_Eout[t][e] = solver_dtype_cast(self.Free_E[t, e])
    except AttributeError as e:
        logging.exception(e)
        return None, None, None, None

    # prune R using closed-form solver
    if self.solve_r and self.integral:
        Rout = solve_r_opt(self.g, Sout)
    return Rout, Sout, Uout, Free_Eout