diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index ff4cb6233..6758104f2 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -13,8 +13,8 @@ concurrency: cancel-in-progress: true defaults: - run: - shell: bash -l {0} + run: + shell: bash -l {0} jobs: ruff: @@ -33,7 +33,7 @@ jobs: - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -49,12 +49,10 @@ jobs: cd mpisppy/tests python test_ef_ph.py - regression: name: Basic regression tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 @@ -98,11 +96,8 @@ jobs: runall_persistent: name: run_all.py persistent solver runs-on: ubuntu-latest + needs: [ruff] timeout-minutes: 15 - # this takes the most time, so we'll start it - # without waiting on the linting and other checks - # needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 @@ -134,10 +129,6 @@ jobs: name: run_all.py direct solver runs-on: ubuntu-latest timeout-minutes: 15 - # this takes the most time, so we'll start it - # without waiting on the linting and other checks - # needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 @@ -168,51 +159,63 @@ jobs: schur-complement: name: schur-complement runs-on: ubuntu-latest - needs: [ruff] strategy: matrix: - python-version: [3.9] + python-version: [3.11] steps: - - uses: actions/checkout@v3 - - name: setup conda - uses: conda-incubator/setup-miniconda@v2 - with: - python-version: ${{ matrix.python-version }} - channels: anaconda, conda-forge - activate-environment: test_env - auto-activate-base: false - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install nose pybind11 - conda install conda-forge::libstdcxx-ng - conda install openmpi pymumps --no-update-deps - pip install mpi4py pandas - pip install git+https://github.com/pyutilib/pyutilib.git - git clone https://github.com/pyomo/pyomo.git - cd pyomo/ - pip install -e . - pyomo download-extensions - pyomo build-extensions - cd ../ - pip install git+https://github.com/parapint/parapint.git - pip install -e . - - name: Test with nose - run: | - nosetests -v mpisppy/tests/test_sc.py - mpirun -np 3 -oversubscribe nosetests mpisppy/tests/test_sc.py + - uses: actions/checkout@v3 + + - name: setup conda + uses: conda-incubator/setup-miniconda@v2 + with: + python-version: ${{ matrix.python-version }} + channels: anaconda, conda-forge + activate-environment: test_env + auto-activate-base: false + + - name: Install dependencies + shell: bash -l {0} + run: | + python -m pip install --upgrade pip + # Test tooling + pip install pytest pytest-cov + # Core build/test deps + pip install pybind11 + conda install -y conda-forge::libstdcxx-ng + conda install -y openmpi pymumps --no-update-deps + pip install mpi4py pandas + # pyutilib / pyomo / parapint + pip install git+https://github.com/pyutilib/pyutilib.git + git clone https://github.com/pyomo/pyomo.git + cd pyomo/ + pip install -e . + pyomo download-extensions + pyomo build-extensions + cd ../ + pip install git+https://github.com/parapint/parapint.git + # Install this repo (editable) + pip install -e . 
+ + - name: Test with pytest (serial) + shell: bash -l {0} + run: | + pytest -q -vv mpisppy/tests/test_sc.py + + - name: Test with pytest under MPI + shell: bash -l {0} + run: | + mpirun -np 3 -oversubscribe python -m pytest -q -vv mpisppy/tests/test_sc.py straight-tests: name: straight_tests.py runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -232,13 +235,12 @@ jobs: name: admm wrapper tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -262,13 +264,12 @@ jobs: name: aph tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -290,13 +291,12 @@ jobs: name: pickled bundles tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -313,12 +313,10 @@ jobs: cd mpisppy/tests python test_pickle_bundle.py - mps: name: MPS tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 @@ -341,18 +339,16 @@ jobs: cd mpisppy/tests python test_mps.py - confidence-intervals: name: confidence intervals tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -380,7 +376,6 @@ jobs: runs-on: ubuntu-latest needs: [ruff] timeout-minutes: 15 - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 @@ -411,7 +406,6 @@ jobs: name: gradient and rho tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v3 @@ -439,13 +433,12 @@ jobs: test-headers: name: header test runs-on: ubuntu-latest - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -466,13 +459,12 @@ jobs: name: pysp tests runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -500,13 +492,12 @@ jobs: name: tests on some cylinders runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: activate-environment: test_env - python-version: 3.9 + python-version: 3.11 auto-activate-base: false - name: Install dependencies run: | @@ -527,7 +518,6 @@ jobs: name: tests on agnostic runs-on: ubuntu-latest needs: [ruff] - steps: - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 @@ -543,6 +533,7 @@ jobs: 
python -m pip install amplpy --upgrade python -m amplpy.modules install highs cbc gurobi python -m pip install gamspy + python -m pip install mip # license? - name: setup the program @@ -560,3 +551,12 @@ run: | cd mpisppy/agnostic/examples python afew_agnostic.py + + - name: run loose AMPL + timeout-minutes: 10 + run: | + cd examples/loose_agnostic/AMPL + bash farmer_example.bash + # GAMS is not tested because I don't want to deal with the license + #cd ../GAMS + #bash farmer_example.bash diff --git a/doc/src/agnostic.rst b/doc/src/agnostic.rst index ad27f245d..63e000cb5 100644 --- a/doc/src/agnostic.rst +++ b/doc/src/agnostic.rst @@ -16,18 +16,38 @@ Code for creating a Pyomo model from an mps file is in ``mpisppy.utils.mps_reader.py``, but you can also just use ``generic_cylinders.py`` and give it the module ``mpisppy.utils.mps_module`` (you will need to specify -that path to this module) and the ``--mps-files-directory`` +the path to this module) and the ``--mps-files-directory`` option. Note that at the time of this writing, the number of scenarios is obtained by counting the mps files in the directory given. -The file ``examples.sizes.mps_demo.bash`` has two commands. The second illustrates -how to instruction ``MPI-SPPY`` to read mps/json file pairs for each scenario from a -directory. The first command illustrates how to use ``MPI-SPPY`` to write -them in the first place (but if ``MPI-SPPY`` can get your scenarios, there -is probably no reason to write them and then read them again!). This -functionality is intended to be used by users of other AMLs or other -scenario-based stochastic programming applications. +The file ``examples.loose_agnostic.AMPL.farmer_example.bash`` has three +commands. The second illustrates how to instruct ``MPI-SPPY`` to read +mps/json file pairs for each scenario from a directory. The first runs +an `AMPLpy` program that creates the scenario files. This program is +in ``examples.loose_agnostic.AMPL.farmer_writer.py`` and, apart from +the `scenario_creator` function, is pretty general for two-stage +problems. You should be able to copy the program and +write a `scenario_creator` function for your two-stage problem. +The third command runs a script that illustrates how to map column +names created by the MPS writer back to AMPL variable names. + +The file ``examples.loose_agnostic.GAMS.farmer_example.bash`` has +three commands that mimic the commands for AMPL. The GAMS bash script +is not part of the automated tests because I don't want to deal with +the license. + +A somewhat strange example is in the file +``examples.sizes.mps_demo.bash``, which has two commands. The second +illustrates how to instruct ``MPI-SPPY`` to read mps/json file pairs +for each scenario from a directory. The first command illustrates how +to use ``MPI-SPPY`` to write them in the first place (but if +``MPI-SPPY`` can get your scenarios, there is probably no reason to +write them and then read them again!). This functionality is intended +to be used by users of other AMLs or other scenario-based stochastic +programming applications. + +There is low-level support for `.lp` files instead of `.mps` files.
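+
+As a minimal sketch of the read-back path (the path below assumes the
+``_fromAMPL`` directory written by the AMPL example), a single scenario
+can also be loaded into Pyomo directly::
+
+   from mpisppy.utils import mps_reader
+   model = mps_reader.read_mps_and_create_pyomo_model("_fromAMPL/scen0.mps")
+   model.pprint()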
JSON file format ---------------- diff --git a/examples/loose_agnostic/AMPL/colmap.py b/examples/loose_agnostic/AMPL/colmap.py new file mode 100644 index 000000000..232c5665b --- /dev/null +++ b/examples/loose_agnostic/AMPL/colmap.py @@ -0,0 +1,124 @@ +############################################################################### +# mpi-sppy: MPI-based Stochastic Programming in PYthon +# +# Copyright (c) 2025, Lawrence Livermore National Security, LLC, Alliance for +# Sustainable Energy, LLC, The Regents of the University of California, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for +# full copyright and license information. +############################################################################### +#!/usr/bin/env python3 +import argparse +import csv +import sys +from pathlib import Path + +def load_colnames(col_path): + """Return a 1-based list of AMPL variable names in column order.""" + names = [] + with open(col_path, "r", encoding="utf-8") as f: + for line in f: + s = line.strip() + if not s or s.startswith("#"): + continue + names.append(s) + if not names: + raise ValueError(f"No variable names found in {col_path}") + return names # index 0 -> C0001 (index+1) + +def parse_c_label(label): + """ + Convert 'C0001' or 'C1' (case-insensitive) to its 1-based integer index: 1. + Returns None if it can't parse. + """ + if not label: + return None + s = label.strip() + if len(s) < 2 or (s[0] not in "Cc"): + return None + digits = s[1:] + if not digits.isdigit(): + return None + return int(digits) + +def build_index_to_name(colnames): + """Map 1-based column index -> AMPL var name.""" + return {i + 1: name for i, name in enumerate(colnames)} + +def main(): + ap = argparse.ArgumentParser( + description="Map (Cxxxx,value) CSV to (AMPL var name,value) using .col order." 
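+        # (illustrative: if line 3 of the .col file is "area['sugarbeets']",
+        #  an input row "C0003,12.5" is written out as "area['sugarbeets'],12.5")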
+ ) + ap.add_argument("col_file", help="Path to .col file (one AMPL var name per line, in column order)") + ap.add_argument("input_csv", help="CSV with rows like: C0001,183.33") + ap.add_argument("output_csv", help="Output CSV with rows: AMPL_var_name,value") + ap.add_argument("--strict", action="store_true", + help="Error out if an input CSV C-label does not exist in the .col mapping.") + args = ap.parse_args() + + try: + colnames = load_colnames(args.col_file) + except Exception as e: + print(f"Error reading .col: {e}", file=sys.stderr) + sys.exit(1) + + idx_to_name = build_index_to_name(colnames) + missing = 0 + converted = 0 + + # Read input CSV and write output CSV + in_path = Path(args.input_csv) + out_path = Path(args.output_csv) + + try: + with open(in_path, "r", encoding="utf-8", newline="") as fin, \ + open(out_path, "w", encoding="utf-8", newline="") as fout: + + reader = csv.reader(fin) + writer = csv.writer(fout) + # Header + writer.writerow(["varname", "value"]) + + for rownum, row in enumerate(reader, start=1): + if not row: + continue + if len(row) < 2: + print(f"Warning: line {rownum} in {in_path} has fewer than 2 columns; skipping.", + file=sys.stderr) + continue + + c_label = row[0].strip() + value = row[1].strip() + + idx = parse_c_label(c_label) + if idx is None: + print(f"Warning: line {rownum}: cannot parse C-label '{c_label}'; skipping.", + file=sys.stderr) + missing += 1 + if args.strict: + sys.exit(2) + continue + + name = idx_to_name.get(idx) + if name is None: + print(f"Warning: line {rownum}: C-index {idx} not found in .col (max={len(colnames)}); skipping.", + file=sys.stderr) + missing += 1 + if args.strict: + sys.exit(2) + continue + + writer.writerow([name, value]) + converted += 1 + + except FileNotFoundError as e: + print(f"File not found: {e.filename}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error processing files: {e}", file=sys.stderr) + sys.exit(1) + + # Summary to stderr so the CSV stays clean + print(f"Done. Wrote {converted} rows to {out_path}. Skipped {missing}.", file=sys.stderr) + +if __name__ == "__main__": + main() diff --git a/examples/loose_agnostic/AMPL/farmer.mod b/examples/loose_agnostic/AMPL/farmer.mod new file mode 100644 index 000000000..3d75f7c5d --- /dev/null +++ b/examples/loose_agnostic/AMPL/farmer.mod @@ -0,0 +1,111 @@ +# The farmer's problem in AMPL +# +# Reference: +# John R. Birge and Francois Louveaux. Introduction to Stochastic Programming. +# +# AMPL coding by Victor Zverovich; ## modified by dlw; now *minimization* + +##function expectation; +##function random; + +##suffix stage IN; + +set Crops; + +##set Scen; +##param P{Scen}; # probabilities + +param TotalArea; # acre +param PlantingCost{Crops}; # $/acre +param SellingPrice{Crops}; # $/T +param ExcessSellingPrice; # $/T +param PurchasePrice{Crops}; # $/T +param MinRequirement{Crops}; # T +param BeetsQuota; # T + +# Area in acres devoted to crop c. +var area{c in Crops} >= 0; + +# Tons of crop c sold (at favourable price) under scenario s. +var sell{c in Crops} >= 0, suffix stage 2; + +# Tons of sugar beets sold in excess of the quota under scenario s. +var sell_excess >= 0, suffix stage 2; + +# Tons of crop c bought under scenario s +var buy{c in Crops} >= 0, suffix stage 2; + +# The random variable (parameter) representing the yield of crop c. +##var RandomYield{c in Crops}; +param RandomYield{c in Crops}; + +# Realizations of the yield of crop c.
+##param Yield{c in Crops, s in Scen}; # T/acre + +##maximize profit: +## expectation( +## ExcessSellingPrice * sell_excess + +## sum{c in Crops} (SellingPrice[c] * sell[c] - +## PurchasePrice[c] * buy[c])) - +## sum{c in Crops} PlantingCost[c] * area[c]; + +minimize minus_profit: + - ExcessSellingPrice * sell_excess - + sum{c in Crops} (SellingPrice[c] * sell[c] - + PurchasePrice[c] * buy[c]) + + sum{c in Crops} (PlantingCost[c] * area[c]); + +s.t. totalArea: sum {c in Crops} area[c] <= TotalArea; + +s.t. requirement{c in Crops}: + RandomYield[c] * area[c] - sell[c] + buy[c] >= MinRequirement[c]; + +s.t. quota: sell['beets'] <= BeetsQuota; + +s.t. sellBeets: + sell['beets'] + sell_excess <= RandomYield['beets'] * area['beets']; + +##yield: random({c in Crops} (RandomYield[c], {s in Scen} Yield[c, s])); + +data; + +set Crops := wheat corn beets; +#set Scen := below average above; + +param TotalArea := 500; + +##param Yield: +## below average above := +## wheat 2.0 2.5 3.0 +## corn 2.4 3.0 3.6 +## beets 16.0 20.0 24.0; + +# Average Scenario +param RandomYield := + wheat 2.5 + corn 3.0 + beets 20.0; + +param PlantingCost := + wheat 150 + corn 230 + beets 260; + +param SellingPrice := + wheat 170 + corn 150 + beets 36; + +param ExcessSellingPrice := 10; + +param PurchasePrice := + wheat 238 + corn 210 + beets 100; + +param MinRequirement := + wheat 200 + corn 240 + beets 0; + +param BeetsQuota := 6000; diff --git a/examples/loose_agnostic/AMPL/farmer_example.bash b/examples/loose_agnostic/AMPL/farmer_example.bash new file mode 100644 index 000000000..cddb3429b --- /dev/null +++ b/examples/loose_agnostic/AMPL/farmer_example.bash @@ -0,0 +1,45 @@ +#!/bin/bash +# run the example where an AMPLpy script writes scenarios to be read by mpi-sppy + +set -e + +ODIR="_fromAMPL" +SOLVER="cplex_direct" +SOLBASE="farmer_solution_output" + +empty_or_create_dir() { + local d=$1 + + # Refuse dangerous/meaningless targets + if [[ -z "$d" || "$d" == "/" || "$d" == "." ]]; then + echo "Refusing to operate on empty path, /, or ." >&2 + return 1 + fi + + # If the path exists but isn't a directory, bail + if [[ -e "$d" && ! -d "$d" ]]; then + echo "Refusing: '$d' exists and is not a directory." >&2 + return 1 + fi + + if [[ -d "$d" ]]; then + # Empty contents but keep the directory node (preserves perms/ACLs) + find "$d" -mindepth 1 -exec rm -rf -- {} + + else + mkdir -p -- "$d" + fi +} + +empty_or_create_dir $ODIR +echo "Create the files" +python farmer_writer.py --output-directory=$ODIR + +echo "Use the files (just an interface demo)" +# This is perhaps too clever by about half: the module is the mps_module and its scenario_creator +# function assumes that mps-files-directory has been set on the command line. +# You can have any generic cylinders commands you like. 
+# Note that we don't use a lower bound (so only the trivial bound will be there) +mpiexec -np 2 python -m mpi4py ../../../mpisppy/generic_cylinders.py --module-name ../../../mpisppy/utils/mps_module --mps-files-directory $ODIR --solver-name ${SOLVER} --max-iterations 2 --default-rho 1 --solution-base-name $SOLBASE --xhatshuffle + +echo "write the nonant values with AMPL names to nonant_output.csv" +python colmap.py ${ODIR}/scen0.col ${SOLBASE}.csv nonant_output.csv --strict diff --git a/examples/loose_agnostic/AMPL/farmer_writer.py b/examples/loose_agnostic/AMPL/farmer_writer.py new file mode 100644 index 000000000..d983e817b --- /dev/null +++ b/examples/loose_agnostic/AMPL/farmer_writer.py @@ -0,0 +1,322 @@ +############################################################################### +# mpi-sppy: MPI-based Stochastic Programming in PYthon +# +# Copyright (c) 2025, Lawrence Livermore National Security, LLC, Alliance for +# Sustainable Energy, LLC, The Regents of the University of California, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for +# full copyright and license information. +############################################################################### +# example to write files from AMPL that allow loose coupling with mpi-sppy +# This is a fixed-format MPS file example +# Note that AMPL provides col and row files to get back to nice names. +# (See farmer_free_writer for a free format writer that can't be read as of Oct 2025) + +import os +import sys +import re +import json +from pathlib import Path +from typing import Iterable + +from mpisppy.utils import config +import pyomo.environ as pyo +import mpisppy.utils.sputils as sputils +import numpy as np +from mpisppy import MPI # for debugging + +from amplpy import AMPL, add_to_path +add_to_path(r"full path to the AMPL installation directory") + + +fullcomm = MPI.COMM_WORLD +global_rank = fullcomm.Get_rank() + +# If you need random numbers, use this random stream: +farmerstream = np.random.RandomState() # pylint: disable=no-member + +def scenario_creator( + scenario_name, + ampl_file_name, + use_integer=False, + sense=pyo.minimize, + crops_multiplier=1, + num_scens=None, + seedoffset=0, +): + """ Create a scenario for the (scalable) farmer example + + Args: + scenario_name (str): + Name of the scenario to construct. + ampl_file_name (str): + The name of the ampl model file (with AMPL in it) + (This adds flexibility that maybe we don't need; it could be hardwired) + use_integer (bool, optional): + If True, restricts variables to be integer. Default is False. + sense (int, optional): + Model sense (minimization or maximization). Must be either + pyo.minimize or pyo.maximize. Default is pyo.minimize. + crops_multiplier (int, optional): + Factor to control scaling. There will be three times this many + crops. Default is 1. + num_scens (int, optional): + Number of scenarios. We use it to compute _mpisppy_probability. + Default is None. 
+ seedoffset (int): used by confidence interval code + + NOTE: for ampl, the names will be tuples name, index + + Returns: + ampl_model (AMPL object): the AMPL model + prob (float or "uniform"): the scenario probability + nonant_var_data_list (list of AMPL variables): the nonants + obj_fct (AMPL Objective function): the objective function + """ + assert crops_multiplier == 1, "for AMPL, just getting started with 3 crops" + + ampl = AMPL() + ampl.read(ampl_file_name) + + # scenario specific data applied + scennum = sputils.extract_num(scenario_name) + assert scennum < 3, "three scenarios hardwired for now" + y = ampl.get_parameter("RandomYield") + if scennum == 0: # below + y.set_values({"wheat": 2.0, "corn": 2.4, "beets": 16.0}) + elif scennum == 2: # above + y.set_values({"wheat": 3.0, "corn": 3.6, "beets": 24.0}) + + areaVarDatas = list(ampl.get_variable("area").instances()) + + try: + obj_fct = ampl.get_objective("minus_profit") + except Exception: + print("big troubles!!; we can't find the objective function") + raise + return ampl, "uniform", areaVarDatas, obj_fct + +# this function is fairly general +def _nonant_names_from_mps( + mps_path: str, + nonants: Iterable, + col_map_path: str | None = None, +): + """ + Given an MPS path and a list of nonants (AMPL var instances or strings), + return the list of MPS column ids (C0001, C0002, ...) corresponding to them, + by matching each nonant's *index tuple* to the AMPL-generated .col lines. + + We do NOT require knowing the base variable name; we match by the bracketed + index (e.g., ['wheat'] or ['a','b']) that appears in the .col file. + """ + + def _read_name_list(path: Path): + out = [] + with path.open("r", encoding="utf-8") as f: + for line in f: + s = line.strip() + if s: + out.append(s) + return out + + # Normalize a bracketed index string like: + # "['wheat']" -> "'wheat'" + # "[ 'a' , 'b' ]" -> "'a','b'" + # We compare *only* the inner content (without the surrounding []), with: + # - single quotes + # - no spaces + def _normalize_bracket_inner(bracket_str: str) -> str: + s = bracket_str.replace('"', "'") + # extract inside [...] + m = re.search(r"\[([^\]]+)\]", s) + if not m: + return "" + inner = m.group(1) + # remove spaces around commas + inner = re.sub(r"\s*,\s*", ",", inner) + # remove stray spaces + inner = re.sub(r"\s+", "", inner) + return inner + + # Try to get "area['wheat']" etc. from an amplpy instance; fall back to str(x) + def _extract_index_key_from_nonant(x) -> str | None: + # 1) If the object has a name() or name attribute with brackets, use it + try: + nm = getattr(x, "name", None) + if callable(nm): + nm = nm() + if isinstance(nm, str) and "[" in nm and "]" in nm: + key = _normalize_bracket_inner(nm) + if key: + return key + except Exception: + pass + + # 2) Parse from str(x). For amplpy VariableInstance, str(x) often looks like: + # "(('wheat',), )" + sx = str(x).replace('"', "'") + # Grab the first parenthesized tuple of indices inside the leading "( ... 
, C0001 + key = _normalize_bracket_inner(raw) + if key: # only store lines that actually have brackets + # If duplicates existed (rare), keep the first occurrence + index_to_cid.setdefault(key, f"C{idx:04d}") + + # Map requested index keys to Cxxxx + mps_ids: list[str] = [] + missing = [] + for key in target_keys: + cid = index_to_cid.get(key) + if cid: + mps_ids.append(cid) + else: + missing.append(key) + + if missing: + # Helpful debug: show a preview of what we saw + preview = [ell for ell in col_lines[:10]] + raise ValueError( + "Some nonants were not found in .col. " + f"Missing keys (normalized inside []): {missing}. " + f"First .col lines: {preview}" + ) + + return mps_ids + + +def write_mps_file(ampl: AMPL, stub: str, name_maps: bool = True): + """Write .mps (and .row/.col if name_maps).""" + if name_maps: + ampl.eval('option auxfiles rc;') + # AMPL requires: write m; (no space, no quotes) + ampl.eval(f'write m{stub};') + + +def check_empty_dir(dirname: str) -> bool: + """Require that dirname exists and is an empty directory.""" + if not os.path.isdir(dirname): + print(f"Error: '{dirname}' is not a valid directory path.", file=sys.stderr) + return False + if os.listdir(dirname): + print(f"Error: Directory '{dirname}' is not empty.", file=sys.stderr) + return False + return True + + +if __name__ == "__main__": + num_scens = 3 + ampl_file_name = "farmer.mod" + + cfg = config.Config() + cfg.add_to_config( + "output_directory", + description="The directory where scenario files will be written", + domain=str, + default=None, + argparse_args={"required": True}, + ) + cfg.parse_command_line("farmer_writer.py") + + dirname = cfg.output_directory + if not check_empty_dir(dirname): + raise RuntimeError(f"{dirname} must exist and be empty") + + namebase = os.path.join(dirname, "scen") + + for s in range(num_scens): + # scenario_name should contain the scenario number for extract_num(); + # we keep the simple "scen{s}" (digits at the end are what matters). 
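+        # (illustrative: sputils.extract_num("scen2") -> 2; only the trailing digits are used)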
+ scenario_name = f"scen{s}" + ampl, prob, nonants, obj_fct = scenario_creator( + scenario_name, ampl_file_name, num_scens=num_scens + ) + print(f"we have the ampl model for scenario {s}") + + # Use a path STUB (no extension) so AMPL writes .mps/.row/.col correctly + stub = f"{namebase}{s}" + write_mps_file(ampl, stub, name_maps=True) + + mps = f"{stub}.mps" + row = f"{stub}.row" + col = f"{stub}.col" + + print(f" wrote {mps}, {row}, and {col}.") + + # --- Write {stub}_nonants.json --- + # Scenario probability + if prob == "uniform": + scenProb = 1.0 / num_scens + else: + scenProb = float(prob) + + nonant_names = _nonant_names_from_mps(mps, nonants, col) + + data = { + "scenarioData": { + "name": f"scen{s}", + "scenProb": scenProb, + }, + "treeData": { + "globalNodeCount": 1, + "nodes": { + "ROOT": { + "serialNumber": 0, + "condProb": 1.0, + "nonAnts": nonant_names, + } + }, + }, + } + + with open(f"{stub}_nonants.json", "w", encoding="utf-8") as jf: + json.dump(data, jf, indent=2) + print(f" wrote {stub}_nonants.json") + + # --- Write {stub}_rho.csv --- + default_rho = 1.0 # or whatever value you want to use globally + rho_filename = f"{stub}_rho.csv" + with open(rho_filename, "w", encoding="utf-8") as csvf: + csvf.write("varname,rho\n") + for name in nonant_names: + csvf.write(f"{name},{default_rho}\n") + print(f" wrote {rho_filename}") diff --git a/examples/loose_agnostic/GAMS/colmap.py b/examples/loose_agnostic/GAMS/colmap.py new file mode 100644 index 000000000..d022999e0 --- /dev/null +++ b/examples/loose_agnostic/GAMS/colmap.py @@ -0,0 +1,179 @@ +############################################################################### +# mpi-sppy: MPI-based Stochastic Programming in PYthon +# +# Copyright (c) 2025, Lawrence Livermore National Security, LLC, Alliance for +# Sustainable Energy, LLC, The Regents of the University of California, et al. +# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for +# full copyright and license information. +############################################################################### +#!/usr/bin/env python3 +""" +Map (xN,value) CSV to (original GAMS var name,value) using GAMS CONVERT dict.txt. + +Example: + dict.txt contains lines like: + Variables 1 to 4 + x1 area(wheat) + x2 area(corn) + x3 area(sugarbeets) + x4 z + + Input CSV: + x1,0.0 + x2,0.0 + x3,0.0 + + Output CSV: + varname,value + area(wheat),0.0 + area(corn),0.0 + area(sugarbeets),0.0 +""" + +import argparse +import csv +import re +import sys +from pathlib import Path + + +def load_dict_mapping(dict_path: str) -> dict[str, str]: + """ + Parse GAMS CONVERT dict.txt and return a mapping: + scalar_name_lower -> original_name + Only entries from the Variables section(s) are returned. + We intentionally ignore equations (which are usually 'e1', 'e2', ...). 
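+    For the dict.txt sketched above, the result would be
+    {"x1": "area(wheat)", "x2": "area(corn)", ...} (keys lower-cased).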
+ """ + pat_entry = re.compile(r"\s*([A-Za-z]\d+)\s+(.*\S)\s*$") + mapping: dict[str, str] = {} + in_variables_block = False + + with open(dict_path, "r", encoding="utf-8", errors="ignore") as f: + for line in f: + s = line.strip() + + # Detect start of a Variables block + if re.match(r"^Variables\s+\d+\s+to\s+\d+\s*$", s, flags=re.IGNORECASE): + in_variables_block = True + continue + + # Detect start of a different section -> leave variables mode + if re.match(r"^(Equations|Rows|RHS|Bounds|Nonzero counts|Equation counts|Variable counts)\b", + s, flags=re.IGNORECASE): + in_variables_block = False + + if not in_variables_block: + continue + + m = pat_entry.match(line) + if not m: + continue + + scalar, original = m.group(1), m.group(2) + # Skip equations just in case (they'd typically be 'eN') + if scalar[0].lower() == "e": + continue + + mapping[scalar.lower()] = original + + if not mapping: + raise ValueError(f"No variable mappings found in {dict_path}") + return mapping + + +def parse_scalar_label(label: str) -> str | None: + """ + Normalize labels like 'x1', 'X0001', 'b12' to a lower-cased compact form: 'x1', 'b12'. + Returns None if it can't parse. + """ + if not label: + return None + s = label.strip() + m = re.match(r"^([A-Za-z]+)0*([0-9]+)$", s) + if not m: + return None + prefix = m.group(1).lower() + idx = m.group(2).lstrip("0") + if idx == "": + idx = "0" + return f"{prefix}{idx}" + + +def main(): + ap = argparse.ArgumentParser( + description="Map (xN,value) CSV to (original GAMS var name,value) using dict.txt." + ) + ap.add_argument("dict_txt", help="Path to GAMS CONVERT dict.txt") + ap.add_argument("input_csv", help="CSV with rows like: x1,183.33") + ap.add_argument("output_csv", help="Output CSV with rows: varname,value") + ap.add_argument("--strict", action="store_true", + help="Error out if an input CSV label is missing in the dictionary.") + args = ap.parse_args() + + try: + mapping = load_dict_mapping(args.dict_txt) + except Exception as e: + print(f"Error reading dict.txt: {e}", file=sys.stderr) + sys.exit(1) + + missing = 0 + converted = 0 + + in_path = Path(args.input_csv) + out_path = Path(args.output_csv) + + try: + with open(in_path, "r", encoding="utf-8", newline="") as fin, \ + open(out_path, "w", encoding="utf-8", newline="") as fout: + + reader = csv.reader(fin) + writer = csv.writer(fout) + writer.writerow(["varname", "value"]) + + for rownum, row in enumerate(reader, start=1): + if not row: + continue + if len(row) < 2: + print(f"Warning: line {rownum} in {in_path} has fewer than 2 columns; skipping.", + file=sys.stderr) + continue + + raw_label = row[0] + value = row[1] + + key = parse_scalar_label(raw_label) + if key is None: + print(f"Warning: line {rownum}: cannot parse label '{raw_label}'; skipping.", + file=sys.stderr) + missing += 1 + if args.strict: + sys.exit(2) + continue + + name = mapping.get(key) + if name is None: + # Try literal lowercase (in case dict kept zero padding, rare) + name = mapping.get(raw_label.strip().lower()) + if name is None: + print(f"Warning: line {rownum}: label '{raw_label}' not in dict; skipping.", + file=sys.stderr) + missing += 1 + if args.strict: + sys.exit(2) + continue + + writer.writerow([name, value]) + converted += 1 + + except FileNotFoundError as e: + print(f"File not found: {e.filename}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error processing files: {e}", file=sys.stderr) + sys.exit(1) + + print(f"Done. Wrote {converted} rows to {out_path}. 
Skipped {missing}.", file=sys.stderr) + + +if __name__ == "__main__": + main() diff --git a/examples/loose_agnostic/GAMS/farmer_average.gms b/examples/loose_agnostic/GAMS/farmer_average.gms new file mode 100644 index 000000000..10af14a2c --- /dev/null +++ b/examples/loose_agnostic/GAMS/farmer_average.gms @@ -0,0 +1,91 @@ +$title The Farmer s Problem formulated for GAMS/DECIS (FARM,SEQ=199) + +$onText +This model helps a farmer to decide how to allocate +his or her land. The yields are uncertain. + + +Birge, R, and Louveaux, F V, Introduction to Stochastic Programming. +Springer, 1997. + +Keywords: linear programming, stochastic programming, agricultural cultivation, + farming, cropping +$offText + +*$if not set decisalg $set decisalg decism + +Set + crop / wheat, corn, sugarbeets / + cropr(crop) 'crops required for feeding cattle' / wheat, corn / + cropx / wheat + corn + beets1 'up to 6000 ton' + beets2 'in excess of 6000 ton' /; + +Parameter + yield(crop) 'tons per acre' / wheat 2.5 + corn 3 + sugarbeets 20 / + plantcost(crop) 'dollars per acre' / wheat 150 + corn 230 + sugarbeets 260 / + sellprice(cropx) 'dollars per ton' / wheat 170 + corn 150 + beets1 36 + beets2 10 / + purchprice(cropr) 'dollars per ton' / wheat 238 + corn 210 / + minreq(cropr) 'minimum requirements in ton' / wheat 200 + corn 240 /; + +Scalar + land 'available land' / 500 / + maxbeets1 'max allowed' / 6000 /; + +*-------------------------------------------------------------------------- +* First a non-stochastic version +*-------------------------------------------------------------------------- +Variable + x(crop) 'acres of land' + w(cropx) 'crops sold' + y(cropr) 'crops purchased' + yld(crop) 'yield' + profit 'objective variable'; + +Positive Variable x, w, y; + +Equation + profitdef 'objective function' + landuse 'capacity' + req(cropr) 'crop requirements for cattle feed' + ylddef 'calc yields' + beets 'total beet production'; + +$onText +The YLD variable and YLDDEF equation isolate the stochastic +YIELD parameter into one equation, making the DECIS setup +somewhat easier than if we would substitute YLD out of +the model. +$offText + +profitdef.. profit =e= - sum(crop, plantcost(crop)*x(crop)) + - sum(cropr, purchprice(cropr)*y(cropr)) + + sum(cropx, sellprice(cropx)*w(cropx)); + +landuse.. sum(crop, x(crop)) =l= land; + +ylddef(crop).. yld(crop) =e= yield(crop)*x(crop); + +req(cropr).. yld(cropr) + y(cropr) - sum(sameas(cropx,cropr),w(cropx)) =g= minreq(cropr); + +beets.. w('beets1') + w('beets2') =l= yld('sugarbeets'); + +x.up(crop) = land; +w.up('beets1') = maxbeets1; +$onText +__InsertPH__here_Model_defined_three_lines_later +$offText + +Model simple / profitdef, landuse, req, beets, ylddef /; + +solve simple using lp maximizing profit; \ No newline at end of file diff --git a/examples/loose_agnostic/GAMS/farmer_example.bash b/examples/loose_agnostic/GAMS/farmer_example.bash new file mode 100644 index 000000000..0f005894e --- /dev/null +++ b/examples/loose_agnostic/GAMS/farmer_example.bash @@ -0,0 +1,49 @@ +#!/bin/bash +# run the example where a Python script drives GAMS (CONVERT) to write scenarios to be read by mpi-sppy + +set -e + +ODIR="_fromGAMS" +SOLVER="gurobi" +SOLBASE="farmer_solution_output" + +empty_or_create_dir() { + local d=$1 + + # Refuse dangerous/meaningless targets + if [[ -z "$d" || "$d" == "/" || "$d" == "." ]]; then + echo "Refusing to operate on empty path, /, or ." >&2 + return 1 + fi + + # If the path exists but isn't a directory, bail + if [[ -e "$d" && !
-d "$d" ]]; then + echo "Refusing: '$d' exists and is not a directory." >&2 + return 1 + fi + + if [[ -d "$d" ]]; then + # Empty contents but keep the directory node (preserves perms/ACLs) + find "$d" -mindepth 1 -exec rm -rf -- {} + + else + mkdir -p -- "$d" + fi +} + +# Note that farmer_average.gms has the "base-case" +# Note that using "x(" is based on looking at _fromGAMS/scen0_files/dict.txt +empty_or_create_dir $ODIR +echo "Create the files" +python farmer_writer.py --gms-file farmer_average.gms --nonant-prefix "x(" --output-directory=$ODIR + +echo "Use the files (just an interface demo)" +# This is perhaps too clever by about half: the module is the mps_module and its scenario_creator +# function assumes that mps-files-directory has been set on the command line. +# You can have any generic cylinders commands you like. +# Note that we don't use a lower bound (so only the trivial bound will be there) +###python ../../../mpisppy/generic_cylinders.py --module-name ../../../mpisppy/utils/mps_module --mps-files-directory $ODIR --solver-name ${SOLVER} --max-iterations 2 --default-rho 1 --solution-base-name $SOLBASE +mpiexec -np 2 python -m mpi4py ../../../mpisppy/generic_cylinders.py --module-name ../../../mpisppy/utils/mps_module --mps-files-directory $ODIR --solver-name ${SOLVER} --max-iterations 2 --default-rho 1 --solution-base-name $SOLBASE --xhatshuffle + +echo "write the nonant values with GAMS names to nonant_output.csv" +python colmap.py ${ODIR}/scen0_files/dict.txt farmer_solution_output.csv nonant_output.csv --strict + diff --git a/examples/loose_agnostic/GAMS/farmer_writer.py b/examples/loose_agnostic/GAMS/farmer_writer.py new file mode 100644 index 000000000..87ffa61b2 --- /dev/null +++ b/examples/loose_agnostic/GAMS/farmer_writer.py @@ -0,0 +1,485 @@ +#!/usr/bin/env python3 +############################################################################### +# mpi-sppy: MPI-based Stochastic Programming in PYthon +############################################################################### + +import os +import sys +import json +import re +import subprocess +from pathlib import Path +from shutil import which + +from mpisppy.utils import config + +# ---------------- Helpers ---------------- +def check_empty_dir(dirname: str) -> bool: + if not os.path.isdir(dirname): + print(f"Error: '{dirname}' is not a valid directory path.", file=sys.stderr) + return False + if os.listdir(dirname): + print(f"Error: Directory '{dirname}' is not empty.", file=sys.stderr) + return False + return True + +def _find_case_insensitive(sdir: Path, *names: str) -> Path | None: + """Search top-level, then recursively (case-insensitive) for any of the given names.""" + lower_top = {p.name.lower(): p for p in sdir.iterdir() if p.is_file()} + for nm in names: + p = lower_top.get(nm.lower()) + if p: + return p + # recursive fallback + cands = [] + wanted = {n.lower() for n in names} + for p in sdir.rglob("*"): + if p.is_file() and p.name.lower() in wanted: + cands.append(p) + return sorted(cands)[0] if cands else None + + +def _pick_from_filelist(scen_dir: Path) -> Path | None: + """ + If CONVERT produced a file list, parse it and return the first plausible + model artifact (*.mps or *.lp). Search recursively for listed basenames. 
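+    Returns the chosen artifact Path, or None if no usable file list is found.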
+ """ + # file list names we accept (documented default is 'files.txt') + for name in ("files.txt", "filelist.txt", "file.txt"): + fl = next((p for p in scen_dir.rglob("*") if p.is_file() and p.name.lower() == name), None) + if fl: + break + else: + return None + + try: + lines = [ln.strip() for ln in fl.read_text(encoding="utf-8", errors="ignore").splitlines()] + except Exception: + return None + + prio_exts = (".mps", ".lp") + prio_names = ("fixed.mps", "cplex.mps", "model.mps", "cplex.lp", "model.lp") + + files = [] + for ln in lines: + if ln and not ln.startswith("*"): + files.append(Path(ln).name) # only the basename + + # name preference + for want in prio_names: + for f in files: + if f.lower() == want: + hits = list(scen_dir.rglob(f)) or list(scen_dir.rglob(f.upper())) or list(scen_dir.rglob(f.capitalize())) + if hits: + return sorted(hits)[0] + + # extension preference + for ext in prio_exts: + for f in files: + if f.lower().endswith(ext): + hits = list(scen_dir.rglob(f)) or list(scen_dir.rglob(f.upper())) or list(scen_dir.rglob(f.capitalize())) + if hits: + return sorted(hits)[0] + + # single-entry fallback + uniq = sorted(set(files)) + if len(uniq) == 1: + hits = list(scen_dir.rglob(uniq[0])) or list(scen_dir.rglob(uniq[0].upper())) or list(scen_dir.rglob(uniq[0].capitalize())) + if hits: + return sorted(hits)[0] + + return None + + +def _parse_gams_dict_for_nonants(dict_path: Path, + nonant_prefixes=("area(", "area[")): + mps_cols, orig_names = [], [] + with dict_path.open("r", encoding="utf-8", errors="ignore") as f: + for line in f: + m = re.match(r"\s*([exbi]\d+)\s+(.*\S)\s*$", line) + if not m: + continue + scalar_name, original = m.group(1), m.group(2) + if any(original.startswith(p) for p in nonant_prefixes): + mps_cols.append(scalar_name) + orig_names.append(original) + if not mps_cols: + raise RuntimeError( + f"No nonant variables found in {dict_path}. " + f"Looked for prefixes: {nonant_prefixes}." + ) + return mps_cols, orig_names + + +def _write_convert_opt(where: Path): + """Minimal options compatible with your CONVERT build.""" + (where / "convert.opt").write_text( + "FixedMPS 1\n" + "Dict dict.txt\n", + encoding="utf-8", + ) + + +def _resolve_gams_bin(cli_value: str | None) -> str: + if cli_value: + p = Path(cli_value).expanduser().resolve() + if p.exists() and os.access(p, os.X_OK): + return str(p) + raise RuntimeError(f"--gams_bin points to a non-executable: {p}") + + w = which("gams") + if w: + return w + + for envvar in ("GAMS", "GAMS_SYS_DIR"): + base = os.environ.get(envvar) + if base: + cand = Path(base) / "gams" + if cand.exists() and os.access(cand, os.X_OK): + return str(cand) + + raise RuntimeError( + "Could not locate the GAMS executable.\n" + "Install GAMS and ensure `gams` is on PATH, or pass --gams_bin /full/path/to/gams." + ) + + +def _run_gams_convert(gams_bin: str, gms_file: Path, workdir: Path): + """ + Run GAMS in workdir; the scenario .gms sets `option lp=convert;`. + We pass `optfile=1` so the solver reads convert.opt. 
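+    (Illustrative resulting command: gams scenX.gms lo=3 o=convert.log optfile=1)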
+ """ + gms_arg = gms_file.name # just 'scenX.gms' + cmd = [gams_bin, gms_arg, "lo=3", "o=convert.log", "optfile=1"] + res = subprocess.run(cmd, cwd=str(workdir), + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + if res.returncode != 0: + log_path = workdir / "convert.log" + log_tail = "" + if log_path.exists(): + try: + with log_path.open("r", encoding="utf-8", errors="ignore") as lf: + lines = lf.readlines() + log_tail = "\n--- convert.log (last 200 lines) ---\n" + "".join(lines[-200:]) + except Exception: + pass + raise RuntimeError( + f"GAMS CONVERT failed (rc={res.returncode}).\n" + f"Command: {' '.join(cmd)}\nCWD: {workdir}\n" + f"--- stdout/stderr ---\n{res.stdout}{log_tail}" + ) + + +def _detect_model_name(gms_path_or_text) -> str | None: + if isinstance(gms_path_or_text, Path): + txt = gms_path_or_text.read_text(encoding="utf-8", errors="ignore") + else: + txt = gms_path_or_text + m = re.search(r"(?im)^\s*Model\s+([A-Za-z_]\w*)\s*/", txt) + return m.group(1) if m else None + + +def _inject_before_solve(gms_text: str, to_insert: str) -> str: + m = re.search(r"(?im)^\s*solve\b", gms_text) + if not m: + return gms_text.rstrip() + "\n" + to_insert + "\n" + idx = m.start() + return gms_text[:idx] + to_insert + "\n" + gms_text[idx:] + + +def _find_dict_file(scen_dir: Path) -> Path | None: + # top-level, case-insensitive + top = {p.name.lower(): p for p in scen_dir.iterdir() if p.is_file()} + for name in ("dict.txt", "gamsdict.txt"): + if name in top: + return top[name] + # recursive fallback, case-insensitive + candidates = [] + for p in scen_dir.rglob("*"): + if p.is_file() and p.name.lower() in ("dict.txt", "gamsdict.txt"): + candidates.append(p) + return sorted(candidates)[0] if candidates else None + + +def _pick_converted_file(scen_dir: Path, model_name: str) -> tuple[Path | None, list[str]]: + """ + Case-insensitive, recursive artifact search. + Preference: + 1) fixed.mps + 2) .mps + 3) model.mps + 4) single *.mps anywhere + 5) .lp + 6) model.lp + 7) single *.lp anywhere + """ + present = [str(p.relative_to(scen_dir)) for p in sorted(scen_dir.rglob("*"))] + + def find_exact_ci(relname: str) -> Path | None: + target_lower = relname.lower() + for p in scen_dir.iterdir(): + if p.name.lower() == target_lower: + return p + return None + + for name in ("fixed.mps", f"{model_name}.mps", "model.mps"): + p = find_exact_ci(name) + if p: + return p, present + + mps_list = [p for p in scen_dir.rglob("*") if p.is_file() and p.suffix.lower() == ".mps"] + if len(mps_list) == 1: + return mps_list[0], present + + for name in (f"{model_name}.lp", "model.lp"): + p = find_exact_ci(name) + if p: + return p, present + + lp_list = [p for p in scen_dir.rglob("*") if p.is_file() and p.suffix.lower() == ".lp"] + if len(lp_list) == 1: + return lp_list[0], present + + return None, present + + +def _convert_write_mps_and_dict_from_gms(gams_bin: str, src_gms: Path, outdir: Path, stub: str): + """ + Run GAMS/CONVERT and copy the produced model artifact to .mps. + Works with older CONVERT that writes FixedMPS to a file literally named '1'. 
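+    Returns (mps_path, dict_path): the copied {stub}.mps and the located dict.txt.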
+ """ + scen_dir = src_gms.parent + scen_dir.mkdir(parents=True, exist_ok=True) + + # Minimal, compatible convert.opt + _write_convert_opt(scen_dir) + + # Run GAMS + _run_gams_convert(gams_bin, src_gms, scen_dir) + + # Find dict.txt (case-insensitive, recursive) + def _find_dict_anywhere(root: Path) -> Path | None: + top = {p.name.lower(): p for p in root.iterdir() if p.is_file()} + for nm in ("dict.txt", "gamsdict.txt"): + if nm in top: + return top[nm] + hits = [p for p in root.rglob("*") if p.is_file() and p.name.lower() in ("dict.txt", "gamsdict.txt")] + return sorted(hits)[0] if hits else None + + dict_txt = _find_dict_anywhere(scen_dir) + if dict_txt is None: + present_top = [p.name for p in sorted(scen_dir.iterdir())] + raise RuntimeError( + "Dictionary file not found in scenario directory.\n" + f"Looked for dict.txt/gamsdict.txt under: {scen_dir}\n" + f"Top-level contents: {present_top}" + ) + + # Primary search: *.mps / *.lp anywhere under scen_dir (case-insensitive) + def _pick_any_artifact(root: Path) -> Path | None: + preferred = ("fixed.mps", "model.mps", "cplex.mps", "model.lp", "cplex.lp") + # preferred names at top-level + for p in root.glob("*"): + if p.is_file() and p.name.lower() in preferred: + return p + # any single .mps / .lp recursively + mps = [p for p in root.rglob("*") if p.is_file() and p.suffix.lower() == ".mps"] + if len(mps) == 1: + return mps[0] + lp = [p for p in root.rglob("*") if p.is_file() and p.suffix.lower() == ".lp"] + if len(lp) == 1: + return lp[0] + # if multiple, pick by preferred names + for nm in preferred: + cand = [p for p in root.rglob("*") if p.is_file() and p.name.lower() == nm] + if cand: + return sorted(cand)[0] + return None + + chosen = _pick_any_artifact(scen_dir) + + # Fallback for older CONVERT: it writes the FixedMPS to a file named '1' + if chosen is None: + # top-level numeric file + num_files = [p for p in scen_dir.iterdir() if p.is_file() and p.name.isdigit()] + if not num_files: + # recursive numeric file (rare, but try) + num_files = [p for p in scen_dir.rglob("*") if p.is_file() and p.name.isdigit()] + if num_files: + # choose the largest non-empty numeric file as the artifact + num_files = [p for p in num_files if p.stat().st_size > 0] + if num_files: + chosen = max(num_files, key=lambda p: p.stat().st_size) + + if chosen is None: + # show helpful diagnostics (tail of convert.log and listing) + log_tail = "" + log_path = scen_dir / "convert.log" + if log_path.exists(): + try: + lines = log_path.read_text(encoding="utf-8", errors="ignore").splitlines() + log_tail = "\n".join(lines[-200:]) + except Exception: + pass + listing = [str(p.relative_to(scen_dir)) for p in sorted(scen_dir.rglob("*"))] + raise RuntimeError( + "No MPS/LP produced by CONVERT (including numeric-file fallback).\n" + f"Scenario dir listing ({scen_dir}): {listing}\n" + + ("--- convert.log (last 200 lines) ---\n" + log_tail if log_tail else "") + ) + + # Copy to .mps (even if source has no extension) + target_mps = outdir / f"{stub}.mps" + with chosen.open("r", encoding="utf-8", errors="ignore") as fin, \ + target_mps.open("w", encoding="utf-8") as fout: + for line in fin: + if line.strip(): + fout.write(line) + + return target_mps, dict_txt + + +def _patch_yield_block(gms_text: str, wheat: float, corn: float, beets: float) -> str: + pattern = re.compile(r"(yield\s*\(\s*crop\s*\)[^/]*?/)([\s\S]*?)(/)", re.IGNORECASE) + records = ( + f" wheat {wheat}\n" + f" corn {corn}\n" + f" sugarbeets {beets} " + ) + def repl(m): + return m.group(1) + records + 
m.group(3) + new_text, nsub = pattern.subn(repl, gms_text, count=1) + if nsub != 1: + raise RuntimeError("Could not locate/replace yield(crop) record list in the .gms file.") + return new_text + + +def _make_scenario_gms(base_gms: Path, dest_gms: Path, scennum: int): + """ + Build a per-scenario .gms: + - Patch yields + - Embed convert.opt via $onecho + - Inject .optfile = 1; right before the first 'solve' + - Prepend 'option lp=convert;' + """ + if scennum == 0: + data = (2.0, 2.4, 16.0) + elif scennum == 1: + data = (2.5, 3.0, 20.0) + else: + data = (3.0, 3.6, 24.0) + + original = base_gms.read_text(encoding="utf-8") + patched = _patch_yield_block(original, *data) + + model_name = _detect_model_name(patched) or "simple" + + # Embed convert.opt *and* set optfile=1 right before SOLVE + embedded_opt = ( + "$onecho > convert.opt\n" + "KeepNames 1\n" + "MPS 1\n" + "FixedMPS 1\n" + "MPSName fixed.mps\n" + "Dict dict.txt\n" + "GamsDict dict.txt\n" + "$offecho\n" + f"{model_name}.optfile = 1;\n" + ) + patched = _inject_before_solve(patched, embedded_opt) + + # Use CONVERT as the solver; optfile is set per-model below + header = "option lp=convert;\n" + dest_gms.write_text(header + patched, encoding="utf-8") + + +# ---------------- Main driver ---------------- +def main(): + num_scens = 3 + + cfg = config.Config() + cfg.add_to_config( + "gms_file", + description="Path to the source GAMS model file (e.g., farmer_average.gms)", + domain=str, + default=None, + argparse_args={"required": True}, + ) + cfg.add_to_config( + "output_directory", + description="Directory where scenario files will be written", + domain=str, + default=None, + argparse_args={"required": True}, + ) + cfg.add_to_config( + "nonant_prefix", + description="Prefix of nonant variables (default area())", + domain=str, + default="area(", + ) + cfg.add_to_config( + "gams_bin", + description="Name/path of GAMS executable", + domain=str, + default=None, + ) + cfg.parse_command_line("farmer_gams_writer_from_gms.py") + + gms_file = Path(cfg.gms_file).resolve() + if not gms_file.exists(): + raise RuntimeError(f"GAMS file not found: {gms_file}") + + dirname = cfg.output_directory + if not check_empty_dir(dirname): + raise RuntimeError(f"{dirname} must exist and be empty") + outdir = Path(dirname) + + gams_bin = _resolve_gams_bin(cfg.gams_bin) + + default_rho = 1.0 + NONANT_PREFIXES = (cfg.nonant_prefix, cfg.nonant_prefix.replace("(", "[")) + + for s in range(num_scens): + scenario_name = f"scen{s}" + print(f"preparing scenario {s}") + + scen_dir = outdir / f"{scenario_name}_files" + scen_dir.mkdir(parents=True, exist_ok=True) + scen_gms = scen_dir / f"{scenario_name}.gms" + _make_scenario_gms(gms_file, scen_gms, s) + + mps_path, dict_path = _convert_write_mps_and_dict_from_gms( + gams_bin=gams_bin, src_gms=scen_gms, outdir=outdir, stub=scenario_name + ) + print(f" wrote {mps_path} and {dict_path}") + + nonant_cols, _ = _parse_gams_dict_for_nonants(dict_path, NONANT_PREFIXES) + + scenProb = 1.0 / num_scens + data = { + "scenarioData": {"name": scenario_name, "scenProb": scenProb}, + "treeData": { + "globalNodeCount": 1, + "nodes": { + "ROOT": { + "serialNumber": 0, + "condProb": 1.0, + "nonAnts": nonant_cols, + } + }, + }, + } + (outdir / f"{scenario_name}_nonants.json").write_text(json.dumps(data, indent=2), encoding="utf-8") + print(f" wrote {outdir / f'{scenario_name}_nonants.json'}") + + rho_path = outdir / f"{scenario_name}_rho.csv" + with rho_path.open("w", encoding="utf-8") as csvf: + csvf.write("varname,rho\n") + for name in 
nonant_cols: + csvf.write(f"{name},{default_rho}\n") + print(f" wrote {rho_path}") + + +if __name__ == "__main__": + main() diff --git a/examples/loose_agnostic/Readme.txt b/examples/loose_agnostic/Readme.txt new file mode 100644 index 000000000..3e2b9cd4b --- /dev/null +++ b/examples/loose_agnostic/Readme.txt @@ -0,0 +1,5 @@ +These examples were written mostly by ChatGPT. There is no need to follow their pattern; all +that matters is the creation of the scenario mps and json files. + +I don't think ChatGPT really knew what it was doing with GAMS. There is a lot of code that +looks around for files. The GAMS bash script is not part of the automated tests because I don't want to deal with the license. diff --git a/mpisppy/agnostic/examples/farmer.mod b/mpisppy/agnostic/examples/farmer.mod index 5a5f147e9..3d75f7c5d 100644 --- a/mpisppy/agnostic/examples/farmer.mod +++ b/mpisppy/agnostic/examples/farmer.mod @@ -3,7 +3,7 @@ # Reference: # John R. Birge and Francois Louveaux. Introduction to Stochastic Programming. # -# AMPL coding by Victor Zverovich; ## modifed by dlw; now *minization* +# AMPL coding by Victor Zverovich; ## modified by dlw; now *minimization* ##function expectation; ##function random; diff --git a/mpisppy/tests/examples/scen0_densenames.mps b/mpisppy/tests/examples/scen0_densenames.mps new file mode 100644 index 000000000..aa773507c --- /dev/null +++ b/mpisppy/tests/examples/scen0_densenames.mps @@ -0,0 +1,41 @@ +NAME scen0 +ROWS + L R0001 + G R0002 + G R0003 + G R0004 + L R0005 + N R0006 +COLUMNS + C0001 R0001 1 + C0001 R0002 2 + C0001 R0006 150 + C0002 R0001 1 + C0002 R0003 2.4 + C0002 R0006 230 + C0003 R0001 1 + C0003 R0004 16 + C0003 R0005 -16 + C0003 R0006 260 + C0004 R0002 -1 + C0004 R0006 -170 + C0005 R0003 -1 + C0005 R0006 -150 + C0006 R0004 -1 + C0006 R0005 1 + C0006 R0006 -36 + C0007 R0005 1 + C0007 R0006 -10 + C0008 R0002 1 + C0008 R0006 238 + C0009 R0003 1 + C0009 R0006 210 + C0010 R0004 1 + C0010 R0006 100 +RHS + B R0001 500 + B R0002 200 + B R0003 240 +BOUNDS + UP BOUND C0006 6000 +ENDATA diff --git a/mpisppy/tests/test_mps.py b/mpisppy/tests/test_mps.py index ce818f985..f01a59567 100644 --- a/mpisppy/tests/test_mps.py +++ b/mpisppy/tests/test_mps.py @@ -8,6 +8,7 @@ ############################################################################### # test mps utilities import unittest +from mip import OptimizationStatus import mpisppy.utils.mps_reader as mps_reader from mpisppy.tests.utils import get_solver import pyomo.environ as pyo @@ -33,9 +34,23 @@ def _reader_body(self, fname): m.read(fname) - m.optimize() # returns a status, btw - coin_obj = m.objective_value + + status = m.optimize() + # Optional: m.verbose = 1 # if you want CBC logging next time + if status not in (OptimizationStatus.OPTIMAL, OptimizationStatus.FEASIBLE): + # Drop helpful breadcrumbs + m.write("cbc_readback.lp") # what CBC thinks it read + m.write("cbc_solution.sol") # if any partial solution exists + self.fail(f"CBC status={status.name}, num_solutions={m.num_solutions}. " + f"Objective is {m.objective_value}. " + f'Wrote "cbc_readback.lp" for inspection.') + coin_obj = m.objective_value + + + print(f"{fname=}, {pyomo_obj=}") self.assertAlmostEqual(coin_obj, pyomo_obj, places=3, delta=None, msg=None) + def test_mps_reader_scen0_densenames(self): + self._reader_body("examples/scen0_densenames.mps") + def test_mps_reader_test1(self): self._reader_body("examples/test1.mps") diff --git a/mpisppy/utils/mps_module.py b/mpisppy/utils/mps_module.py index 68c532c47..1b5cbbc7a 100644 --- a/mpisppy/utils/mps_module.py +++ b/mpisppy/utils/mps_module.py @@ -104,9 +104,9 @@ def scenario_names_creator(num_scens, start=None): except Exception as e: raise RuntimeError(f'mps files in {mps_files_directory} must end with an integer' f'found file {mps_files[0]} (error was: {e})') - - print("WARNING: one-based senario names might cause trouble" - f" found {first} for dir {mps_files_directory}") + if first != 0: + print("WARNING: non-zero-based scenario names might cause trouble" + f" found {first=} for dir {mps_files_directory}") assert start+num_scens <= len(mps_files),\ f"Trying to create scenarios names with {start=}, {num_scens=} but {len(mps_files)=}" retval = [fn[:-4] for fn in mps_files[start:start+num_scens]] diff --git a/mpisppy/utils/mps_reader.py b/mpisppy/utils/mps_reader.py index 28c1a2655..f2cc0c36d 100644 --- a/mpisppy/utils/mps_reader.py +++ b/mpisppy/utils/mps_reader.py @@ -8,8 +8,67 @@ ############################################################################### # IMPORTANT: parens in variable names will become underscore (_) import mip # from coin-or (pip install mip) +from mip.exceptions import ParameterNotAvailable import pyomo.environ as pyo +# the following giant function is provided because CBC seems to have +# trouble parsing free format MPS files. +def _read_obj_terms_from_mps(mps_path: str): + """Return list of (var_name, coeff) tuples by parsing the MPS file directly.""" + obj_row = None + obj_terms = [] + section = None + + with open(mps_path, "r") as f: + for raw in f: + line = raw.strip() + if not line or line.startswith("*"): + continue + tok0 = line.split()[0] + + if tok0 in ("NAME",): + continue + if tok0 == "ROWS": + section = "ROWS" + continue + if tok0 == "COLUMNS": + section = "COLUMNS" + continue + if tok0 in ("RHS", "RANGES", "BOUNDS", "ENDATA"): + section = None + if tok0 != "COLUMNS": + # Once we reach RHS, we're done collecting objective terms + if tok0 in ("RHS", "ENDATA"): + break + continue + + if section == "ROWS": + parts = line.split() + # Row type N marks the objective row + if parts[0] == "N": + obj_row = parts[1] + elif section == "COLUMNS": + parts = line.split() + # Skip integer markers if they appear + if parts[0] == "'MARKER'": + continue + col = parts[0] + rest = parts[1:] + # Free MPS permits one or two (row, val) pairs per line + # i.e., col row1 val1 [row2 val2] + if len(rest) < 2: + continue + # Walk pairs + for i in range(0, len(rest), 2): + if i + 1 >= len(rest): + break + row, val = rest[i], rest[i + 1] + if obj_row is not None and row == obj_row: + obj_terms.append((col, float(val))) + return obj_terms + + + def read_mps_and_create_pyomo_model(mps_path): """ Reads an MPS file using mip and converts it into a Pyomo ConcreteModel.
@@ -71,7 +130,15 @@ def _domain_lookup(v): setattr(model, c.name, pyomoC) # objective function - obj_expr = sum(coeff * varDict[v] for v, coeff in m.objective.expr.items()) + try: + obj_items = list(m.objective.expr.items()) # usual path + obj_expr = sum(coeff * varDict[vname] for vname, coeff in obj_items if vname in varDict) + except ParameterNotAvailable: + # CBC didn't expose objective coefficients; fall back to parsing the file + obj_items = _read_obj_terms_from_mps(mps_path) + if not obj_items: + raise RuntimeError("Could not retrieve objective coefficients from CBC or MPS file.") + obj_expr = sum(coeff * varDict[vname] for vname, coeff in obj_items if vname in varDict) if m.sense == mip.MINIMIZE: model.objective = pyo.Objective(expr=obj_expr, sense=pyo.minimize) else:
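A quick way to sanity-check the fallback parser added above; the tiny MPS body and temp-file path here are illustrative, so treat this as a sketch rather than part of the test suite:

    import tempfile
    from mpisppy.utils.mps_reader import _read_obj_terms_from_mps

    # a tiny fixed-format MPS body whose objective row is R0006
    mps_text = (
        "NAME tiny\n"
        "ROWS\n"
        " L R0001\n"
        " N R0006\n"
        "COLUMNS\n"
        "    C0001 R0001 1 R0006 150\n"
        "    C0002 R0006 -36\n"
        "RHS\n"
        "    B R0001 500\n"
        "ENDATA\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".mps", delete=False) as f:
        f.write(mps_text)
    # each objective term is a (column name, coefficient) pair taken from the N row
    print(_read_obj_terms_from_mps(f.name))  # expected: [('C0001', 150.0), ('C0002', -36.0)]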