Repository: coin-or/CyLP
Branch: master
Commit: cada9af898ea
Files: 281
Total size: 62.8 MB
Directory structure:
gitextract_vql885vl/
├── .coin-or/
│ └── projDesc.xml
├── .github/
│ └── workflows/
│ ├── ci-cvxpy.yml
│ ├── cibuildwheel.yml
│ └── release.yml
├── .gitignore
├── AUTHORS
├── LICENSE
├── MANIFEST.in
├── README.rst
├── cylp/
│ ├── VERSION
│ ├── __init__.py
│ ├── cpp/
│ │ ├── .gitignore
│ │ ├── CbcCompareUser.cpp
│ │ ├── CbcCompareUser.hpp
│ │ ├── CyClpSimplex_api.h
│ │ ├── ICbc.cpp
│ │ ├── ICbc.hpp
│ │ ├── ICbcModel.cpp
│ │ ├── ICbcModel.hpp
│ │ ├── ICbcNode.cpp
│ │ ├── ICbcNode.hpp
│ │ ├── ICglCutGeneratorBase.cpp
│ │ ├── ICglCutGeneratorBase.h
│ │ ├── IClpDualRowPivotBase.cpp
│ │ ├── IClpDualRowPivotBase.h
│ │ ├── IClpPackedMatrix.cpp
│ │ ├── IClpPackedMatrix.hpp
│ │ ├── IClpPrimalColumnPivot.cpp
│ │ ├── IClpPrimalColumnPivot.h
│ │ ├── IClpPrimalColumnPivotBase.cpp
│ │ ├── IClpPrimalColumnPivotBase.h
│ │ ├── IClpSimplex.cpp
│ │ ├── IClpSimplex.hpp
│ │ ├── IClpSimplexPrimal.cpp
│ │ ├── IClpSimplexPrimal.hpp
│ │ ├── IClpSimplexPrimal_Wolfe.cpp
│ │ ├── IClpSimplexPrimal_Wolfe.hpp
│ │ ├── ICoinIndexedVector.cpp
│ │ ├── ICoinIndexedVector.hpp
│ │ ├── ICoinMP.cpp
│ │ ├── ICoinMP.hpp
│ │ ├── ICoinModel.hpp
│ │ ├── ICoinMpsIO.cpp
│ │ ├── ICoinMpsIO.hpp
│ │ ├── ICoinPackedMatrix.cpp
│ │ ├── ICoinPackedMatrix.hpp
│ │ ├── IOsiCuts.cpp
│ │ └── IOsiCuts.hpp
│ ├── cy/
│ │ ├── .gitignore
│ │ ├── CyCbcModel.pxd
│ │ ├── CyCbcModel.pyx
│ │ ├── CyCbcNode.pxd
│ │ ├── CyCbcNode.pyx
│ │ ├── CyCgl.pxd
│ │ ├── CyCgl.pyx
│ │ ├── CyCglCutGeneratorBase.pxd
│ │ ├── CyCglCutGeneratorBase.pyx
│ │ ├── CyCglTreeInfo.pxd
│ │ ├── CyCglTreeInfo.pyx
│ │ ├── CyClpDualRowPivotBase.pxd
│ │ ├── CyClpDualRowPivotBase.pyx
│ │ ├── CyClpPrimalColumnPivotBase.pxd
│ │ ├── CyClpPrimalColumnPivotBase.pyx
│ │ ├── CyClpPrimalColumnSteepest.pyx
│ │ ├── CyClpSimplex.pxd
│ │ ├── CyClpSimplex.pyx
│ │ ├── CyClpSimplex_api.h
│ │ ├── CyCoinIndexedVector.pxd
│ │ ├── CyCoinIndexedVector.pyx
│ │ ├── CyCoinModel.pxd
│ │ ├── CyCoinModel.pyx
│ │ ├── CyCoinMpsIO.pxd
│ │ ├── CyCoinMpsIO.pyx
│ │ ├── CyCoinPackedMatrix.pxd
│ │ ├── CyCoinPackedMatrix.pyx
│ │ ├── CyCutGeneratorPythonBase.pxd
│ │ ├── CyCutGeneratorPythonBase.pyx
│ │ ├── CyDantzigPivot.pxd
│ │ ├── CyDantzigPivot.pyx
│ │ ├── CyDualPivotPythonBase.pxd
│ │ ├── CyDualPivotPythonBase.pyx
│ │ ├── CyOsiCuts.pxd
│ │ ├── CyOsiCuts.pyx
│ │ ├── CyOsiSolverInterface.pxd
│ │ ├── CyOsiSolverInterface.pyx
│ │ ├── CyPEPivot.pxd
│ │ ├── CyPEPivot.pyx
│ │ ├── CyPivotPythonBase.pxd
│ │ ├── CyPivotPythonBase.pyx
│ │ ├── CySolve.py
│ │ ├── CyTest.pyx
│ │ ├── CyWolfePivot.pxd
│ │ ├── CyWolfePivot.pyx
│ │ ├── __init__.py
│ │ └── createCythonInterface.py
│ ├── doc/
│ │ ├── .gitignore
│ │ ├── Makefile
│ │ ├── mathjax.py
│ │ └── source/
│ │ ├── conf.py
│ │ ├── index.rst
│ │ └── modules/
│ │ ├── CyCbcModel.rst
│ │ ├── CyClpSimplex.rst
│ │ ├── CyCoinIndexedVector.rst
│ │ ├── CyCoinModel.rst
│ │ ├── CyCoinMpsIO.rst
│ │ ├── CyCoinPackedMatrix.rst
│ │ ├── CyPivotPythonBase.rst
│ │ ├── DantzigPivot.rst
│ │ ├── DualDantzigPivot.rst
│ │ ├── LIFOPivot.rst
│ │ ├── MostFrequentPivot.rst
│ │ ├── PivotPythonBase.rst
│ │ ├── PositiveEdgePivot.rst
│ │ ├── cy.rst
│ │ ├── modeling.rst
│ │ ├── pivots.rst
│ │ ├── py.rst
│ │ └── sparseUtil.rst
│ ├── input/
│ │ ├── aug3dcqp.qps
│ │ ├── cvxqp1_s.qps
│ │ ├── cvxqp2_s.qps
│ │ ├── fileDownloader.py
│ │ ├── h.qps
│ │ ├── hs268.qps
│ │ ├── hs268_2.qps
│ │ ├── hs268_p.qps
│ │ ├── hs35.qps
│ │ ├── hs35_2.qps
│ │ ├── netlib/
│ │ │ ├── 25fv47.mps
│ │ │ ├── 80bau3b.mps
│ │ │ ├── adlittle.mps
│ │ │ ├── afiro.mps
│ │ │ ├── agg.mps
│ │ │ ├── agg2.mps
│ │ │ ├── agg3.mps
│ │ │ ├── ascii
│ │ │ ├── bandm.mps
│ │ │ ├── beaconfd.mps
│ │ │ ├── blend.mps
│ │ │ ├── bnl1.mps
│ │ │ ├── bnl2.mps
│ │ │ ├── boeing1.mps
│ │ │ ├── boeing2.mps
│ │ │ ├── bore3d.mps
│ │ │ ├── brandy.mps
│ │ │ ├── capri.mps
│ │ │ ├── changes
│ │ │ ├── cycle.mps
│ │ │ ├── czprob.mps
│ │ │ ├── d2q06c.mps
│ │ │ ├── d6cube.mps
│ │ │ ├── degen2.mps
│ │ │ ├── degen3.mps
│ │ │ ├── dfl001.mps
│ │ │ ├── e226.mps
│ │ │ ├── etamacro.mps
│ │ │ ├── fffff800.mps
│ │ │ ├── fileDownloader.py
│ │ │ ├── finnis.mps
│ │ │ ├── fit1d.mps
│ │ │ ├── fit1p.mps
│ │ │ ├── fit2d.mps
│ │ │ ├── fit2p.mps
│ │ │ ├── forplan.mps
│ │ │ ├── ganges.mps
│ │ │ ├── gfrd-pnc.mps
│ │ │ ├── greenbea.mps
│ │ │ ├── greenbeb.mps
│ │ │ ├── grow15.mps
│ │ │ ├── grow22.mps
│ │ │ ├── grow7.mps
│ │ │ ├── israel.mps
│ │ │ ├── kb2.mps
│ │ │ ├── lotfi.mps
│ │ │ ├── maros
│ │ │ ├── maros-r7.mps
│ │ │ ├── maros.mps
│ │ │ ├── minos
│ │ │ ├── modszk1.mps
│ │ │ ├── nesm.mps
│ │ │ ├── perold.mps
│ │ │ ├── pilot.ja.mps
│ │ │ ├── pilot.mps
│ │ │ ├── pilot.we.mps
│ │ │ ├── pilot4.mps
│ │ │ ├── pilot87.mps
│ │ │ ├── pilotnov.mps
│ │ │ ├── readme
│ │ │ ├── recipe.mps
│ │ │ ├── sc105.mps
│ │ │ ├── sc205.mps
│ │ │ ├── sc50a.mps
│ │ │ ├── sc50b.mps
│ │ │ ├── scagr25.mps
│ │ │ ├── scagr7.mps
│ │ │ ├── scfxm1.mps
│ │ │ ├── scfxm2.mps
│ │ │ ├── scfxm3.mps
│ │ │ ├── scorpion.mps
│ │ │ ├── scrs8.mps
│ │ │ ├── scsd1.mps
│ │ │ ├── scsd6.mps
│ │ │ ├── scsd8.mps
│ │ │ ├── sctap1.mps
│ │ │ ├── sctap2.mps
│ │ │ ├── sctap3.mps
│ │ │ ├── seba.mps
│ │ │ ├── share1b.mps
│ │ │ ├── share2b.mps
│ │ │ ├── shell.mps
│ │ │ ├── ship04l.mps
│ │ │ ├── ship04s.mps
│ │ │ ├── ship08l.mps
│ │ │ ├── ship08s.mps
│ │ │ ├── ship12l.mps
│ │ │ ├── ship12s.mps
│ │ │ ├── sierra.mps
│ │ │ ├── stair.mps
│ │ │ ├── standata.mps
│ │ │ ├── standgub.mps
│ │ │ ├── standmps.mps
│ │ │ ├── stocfor1.mps
│ │ │ ├── stocfor2.mps
│ │ │ ├── stocfor3.old
│ │ │ ├── truss
│ │ │ ├── tuff.mps
│ │ │ ├── vtp.base.mps
│ │ │ ├── wood1p.mps
│ │ │ └── woodw.mps
│ │ ├── nsct1.mps
│ │ ├── p0033.mps
│ │ └── pp.qps
│ ├── py/
│ │ ├── Constants.py
│ │ ├── PySolve.py
│ │ ├── QP/
│ │ │ ├── GQP.py
│ │ │ ├── QP.py
│ │ │ ├── QPGen.py
│ │ │ ├── QPSReader.py
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ ├── mip/
│ │ │ ├── CyLPCutGenerator.py
│ │ │ ├── GomoryCutGenerator.py
│ │ │ ├── NodeCompareBase.py
│ │ │ ├── SimpleNodeCompare.py
│ │ │ └── __init__.py
│ │ ├── modeling/
│ │ │ ├── CyLPModel.py
│ │ │ ├── __init__.py
│ │ │ └── test_modeling.py
│ │ ├── pivots/
│ │ │ ├── DantzigPivot.py
│ │ │ ├── DualDantzigPivot.py
│ │ │ ├── DualPivotPythonBase.py
│ │ │ ├── LIFOPivot.py
│ │ │ ├── MostFrequentPivot.py
│ │ │ ├── PivotPythonBase.py
│ │ │ ├── PositiveEdgePivot.py
│ │ │ ├── PositiveEdgeWolfePivot.py
│ │ │ ├── WolfePivot.py
│ │ │ ├── WolfePivotPE.py
│ │ │ └── __init__.py
│ │ ├── test_PySolve.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── readSetcovering.py
│ │ ├── sparseUtil.py
│ │ └── util.py
│ └── tests/
│ ├── __init__.py
│ ├── test_CyClpSimplex.py
│ ├── test_CyClpSimplex_CyLPModel.py
│ ├── test_CyCoinIndexedVector.py
│ ├── test_CyCoinModel.py
│ ├── test_CyCoinMpsIO.py
│ ├── test_CySolve.py
│ ├── test_IO.py
│ ├── test_IndexFactory.py
│ ├── test_MIP.py
│ ├── test_QP.py
│ ├── test_argWeightedMax.py
│ ├── test_sparseUtil.py
│ └── test_warmStart.py
├── fixBinaries.py
├── pyproject.toml
└── setup.py
================================================
FILE CONTENTS
================================================
================================================
FILE: .coin-or/projDesc.xml
================================================
CyLP
CyLP
CyLP is a Python interface to COIN-OR's Linear and mixed-integer program solvers (CLP, CBC, and CGL).
CyLP's unique feature is that one can use it to alter the solution process of the solvers from within Python.
For example, one may define cut generators, branch-and-bound strategies, and primal/dual Simplex pivot rules completely in Python.
A Python interface to Cbc, Clp, and Cgl.
Mehdi Towhidi, mehdi dot towhidi at kronos dot com
https://github.com/coin-or/CyLP
Common Public License 1.0
https://opensource.org/licenses/cpl1.0.php
Cbc
Clp
Cgl
Numpy
http://www.numpy.org
Required
Scipy
http://www.scipy.org
Required
Python
Abandoned
5
Interfaces
Optimization deterministic linear continuous
Optimization deterministic linear discrete
http://mpy.github.io/CyLPdoc
https://github.com/coin-or/CyLP/releases
https://pypi.org/project/cylp/
================================================
FILE: .github/workflows/ci-cvxpy.yml
================================================
name: Integration testing with CVXPY
on:
pull_request:
types: [reopened, opened, synchronize]
push:
workflow_dispatch:
concurrency:
# Cancel previous runs of this workflow for the same branch
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
test:
strategy:
fail-fast: false
matrix:
os: [macos-latest] # Add ubuntu-latest when cvxpy pytest issue is solved
numpy: [numpy] # We used to also test version 1.26.0, but now cvxpy requires numpy 2
runs-on: ${{ matrix.os }}
steps:
- uses: actions/setup-python@v5
with:
python-version: '3.12'
- name: Install CBC
run: |
brew install coin-or-tools/coinor/cbc
echo "PKG_CONFIG_PATH=$(brew --prefix)/opt/cbc/lib/pkgconfig:$(brew --prefix)/opt/clp/lib/pkgconfig:$(brew --prefix)/opt/cgl/lib/pkgconfig:$(brew --prefix)/opt/osi/lib/pkgconfig:$(brew --prefix)/opt/coinutils/lib/pkgconfig:$PKG_CONFIG_PATH" >> $GITHUB_ENV
- name: Job context
run: |
echo "::group::macos context"
system_profiler SPSoftwareDataType
echo "::endgroup::"
python -V
echo "::group::brew cbc info"
brew info coin-or-tools/coinor/cbc
echo "::endgroup::"
- uses: actions/checkout@v2
with:
path: cylp
- name: Install CyLP
run: |
python3 -m pip install --break-system-packages ./cylp
- name: Check out CVXPY
uses: actions/checkout@v2
with:
repository: cvxpy/cvxpy
path: cvxpy
- name: Install CVXPY
run: |
python3 -m pip install --break-system-packages ./cvxpy
- name: Test CVXPY
run: |
python3 -m pip install --break-system-packages pytest hypothesis ${{ matrix.numpy }}
python -m pytest --pyargs cvxpy.tests
================================================
FILE: .github/workflows/cibuildwheel.yml
================================================
name: Build and upload to PyPI
# taken from https://github.com/pypa/cibuildwheel/blob/main/examples/github-deploy.yml
# Build on every branch push, tag push, and pull request change:
# on: [push, pull_request]
# Alternatively, to publish when a (published) GitHub Release is created, use the following:
on:
#push:
pull_request:
release:
types:
- published
concurrency:
# Cancel previous runs of this workflow for the same branch
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_wheels:
name: Build wheels on ${{ matrix.os }}, arch ${{ matrix.arch }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
arch: x86_64
- os: ubuntu-latest
arch: i686
- os: ubuntu-24.04-arm
arch: aarch64
- os: macos-15-intel
arch: x86_64
- os: macos-latest
arch: arm64
env:
CIBW_ARCHS: ${{ matrix.arch }}
steps:
- uses: actions/checkout@v4
- name: Build wheels
uses: pypa/cibuildwheel@v3.2.1
- uses: actions/upload-artifact@v4
with:
name: cibuildwheel-${{ matrix.os }}-${{ matrix.arch }}
path: ./wheelhouse/*.whl
build_sdist:
name: Build source distribution
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build sdist
run: pipx run build --sdist
- uses: actions/upload-artifact@v4
with:
name: cibuildwheel-sdist
path: dist/*.tar.gz
upload_pypi:
needs: [build_wheels, build_sdist]
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/cylp
permissions:
id-token: write
# upload to PyPI on every tag starting with 'v'
# if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v')
# alternatively, to publish when a GitHub Release is created, use the following rule:
if: github.event_name == 'release' && github.event.action == 'published'
steps:
- uses: actions/download-artifact@v4
with:
pattern: "cibuildwheel-*"
path: dist
merge-multiple: true
- uses: pypa/gh-action-pypi-publish@release/v1
upload_release_assets:
needs: [build_wheels, build_sdist]
runs-on: ubuntu-latest
if: ${{ github.event_name == 'release'}}
steps:
- name: Upload artifacts to release
uses: alexellis/upload-assets@0.4.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
asset_paths: '["./wheelhouse/*.whl"]'
================================================
FILE: .github/workflows/release.yml
================================================
---
name: Create Release
on:
push:
tags:
- 'v*'
jobs:
create_release:
runs-on: ubuntu-latest
steps:
- uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.TKRALPHS_RELEASE }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
================================================
FILE: .gitignore
================================================
*.swp
*~
.DS_Store
*.o
*.so
*.pyc
*.pyd
build/
MANIFEST
CyLP.egg*
cylp.egg-info
setup.cfg
dist/
wheelhouse/
================================================
FILE: AUTHORS
================================================
Mehdi Towhidi (mehdi.towhidi@gerad.ca)
Dominique Orban (dominique.orban@gerad.ca)
================================================
FILE: LICENSE
================================================
Eclipse Public License - v 2.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
LICENSE (“AGREEMENT”). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
“Contribution” means:
a) in the case of the initial Contributor, the initial content Distributed
under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from and
are Distributed by that particular Contributor. A Contribution
“originates” from a Contributor if it was added to the Program by such
Contributor itself or anyone acting on such Contributor's behalf.
Contributions do not include changes or additions to the Program that
are not Modified Works. “Contributor” means any person or entity that
Distributes the Program.
“Licensed Patents” mean patent claims licensable by a Contributor which are
necessarily infringed by the use or sale of its Contribution alone or when
combined with the Program.
“Program” means the Contributions Distributed in accordance with this
Agreement.
“Recipient” means anyone who receives the Program under this Agreement or
any Secondary License (as applicable), including Contributors.
“Derivative Works” shall mean any work, whether in Source Code or other
form, that is based on (or derived from) the Program and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship.
“Modified Works” shall mean any work in Source Code or other form that
results from an addition to, deletion from, or modification of the contents
of the Program, including, for purposes of clarity any new file in Source
Code form that contains any contents of the Program. Modified Works shall
not include works that contain only declarations, interfaces, types,
classes, structures, or files of the Program solely in each case in order
to link to, bind by name, or subclass the Program or Modified Works
thereof.
“Distribute” means the acts of a) distributing or b) making available in
any manner that enables the transfer of a copy.
“Source Code” means the form of a Program preferred for making
modifications, including but not limited to software source code,
documentation source, and configuration files.
“Secondary License” means either the GNU General Public License, Version
2.0, or any later versions of that license, including any exceptions or
additional permissions as identified by the initial Contributor.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free copyright license to
reproduce, prepare Derivative Works of, publicly display, publicly perform,
Distribute and sublicense the Contribution of such Contributor, if any, and
such Derivative Works.
b) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free patent license under
Licensed Patents to make, use, sell, offer to sell, import and otherwise
transfer the Contribution of such Contributor, if any, in Source Code or
other form. This patent license shall apply to the combination of the
Contribution and the Program if, at the time the Contribution is added by
the Contributor, such addition of the Contribution causes such combination
to be covered by the Licensed Patents. The patent license shall not apply
to any other combinations which include the Contribution. No hardware per
se is licensed hereunder.
c) Recipient understands that although each Contributor grants the licenses
to its Contributions set forth herein, no assurances are provided by any
Contributor that the Program does not infringe the patent or other
intellectual property rights of any other entity. Each Contributor
disclaims any liability to Recipient for claims brought by any other entity
based on infringement of intellectual property rights or otherwise. As a
condition to exercising the rights and licenses granted hereunder, each
Recipient hereby assumes sole responsibility to secure any other
intellectual property rights needed, if any. For example, if a third party
patent license is required to allow Recipient to Distribute the Program, it
is Recipient's responsibility to acquire that license before distributing
the Program.
d) Each Contributor represents that to its knowledge it has sufficient
copyright rights in its Contribution, if any, to grant the copyright
license set forth in this Agreement.
e) Notwithstanding the terms of any Secondary License, no Contributor makes
additional grants to any Recipient (other than those set forth in this
Agreement) as a result of such Recipient's receipt of the Program under the
terms of a Secondary License (if permitted under the terms of Section 3).
3. REQUIREMENTS
3.1 If a Contributor Distributes the Program in any form, then:
a) the Program must also be made available as Source Code, in accordance
with section 3.2, and the Contributor must accompany the Program with a
statement that the Source Code for the Program is available under this
Agreement, and informs Recipients how to obtain it in a reasonable manner
on or through a medium customarily used for software exchange; and
b) the Contributor may Distribute the Program under a license different
than this Agreement, provided that such license:
i) effectively disclaims on behalf of all other Contributors all
warranties and conditions, express and implied, including warranties or
conditions of title and non-infringement, and implied warranties or
conditions of merchantability and fitness for a particular purpose;
ii) effectively excludes on behalf of all other Contributors all
liability for damages, including direct, indirect, special, incidental
and consequential damages, such as lost profits;
iii) does not attempt to limit or alter the recipients' rights in the
Source Code under section 3.2; and
iv) requires any subsequent distribution of the Program by any party to
be under a license that satisfies the requirements of this section 3.
3.2 When the Program is Distributed as Source Code:
a) it must be made available under this Agreement, or if the Program (i)
is combined with other material in a separate file or files made available
under a Secondary License, and (ii) the initial Contributor attached to
the Source Code the notice described in Exhibit A of this Agreement, then
the Program may be made available under the terms of such Secondary
Licenses, and
b) a copy of this Agreement must be included with each copy of the
Program.
3.3 Contributors may not remove or alter any copyright, patent, trademark,
attribution notices, disclaimers of warranty, or limitations of liability
(‘notices’) contained within the Program from any copy of the Program which
they Distribute, provided that Contributors may add their own appropriate
notices.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities with
respect to end users, business partners and the like. While this license is
intended to facilitate the commercial use of the Program, the Contributor who
includes the Program in a commercial product offering should do so in a manner
which does not create potential liability for other Contributors. Therefore,
if a Contributor includes the Program in a commercial product offering, such
Contributor (“Commercial Contributor”) hereby agrees to defend and indemnify
every other Contributor (“Indemnified Contributor”) against any losses,
damages and costs (collectively “Losses”) arising from claims, lawsuits and
other legal actions brought by a third party against the Indemnified
Contributor to the extent caused by the acts or omissions of such Commercial
Contributor in connection with its distribution of the Program in a commercial
product offering. The obligations in this section do not apply to any claims
or Losses relating to any actual or alleged intellectual property
infringement. In order to qualify, an Indemnified Contributor must: a)
promptly notify the Commercial Contributor in writing of such claim, and b)
allow the Commercial Contributor to control, and cooperate with the Commercial
Contributor in, the defense and any related settlement negotiations. The
Indemnified Contributor may participate in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial product
offering, Product X. That Contributor is then a Commercial Contributor. If
that Commercial Contributor then makes performance claims, or offers
warranties related to Product X, those performance claims and warranties are
such Commercial Contributor's responsibility alone. Under this section, the
Commercial Contributor would have to defend claims against the other
Contributors related to those performance claims and warranties, and if a
court requires any other Contributor to pay any damages as a result, the
Commercial Contributor must pay those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT PERMITTED
BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN “AS IS” BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING,
WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely
responsible for determining the appropriateness of using and distributing the
Program and assumes all risks associated with its exercise of rights under
this Agreement, including but not limited to the risks and costs of program
errors, compliance with applicable laws, damage to or loss of data, programs
or equipment, and unavailability or interruption of operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT PERMITTED
BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY
LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of the
remainder of the terms of this Agreement, and without further action by the
parties hereto, such provision shall be reformed to the minimum extent
necessary to make such provision valid and enforceable.
If Recipient institutes patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Program itself
(excluding combinations of the Program with other software or hardware)
infringes such Recipient's patent(s), then such Recipient's rights granted
under Section 2(b) shall terminate as of the date such litigation is filed.
All Recipient's rights under this Agreement shall terminate if it fails to
comply with any of the material terms or conditions of this Agreement and does
not cure such failure in a reasonable period of time after becoming aware of
such noncompliance. If all Recipient's rights under this Agreement terminate,
Recipient agrees to cease use and distribution of the Program as soon as
reasonably practicable. However, Recipient's obligations under this Agreement
and any licenses granted by Recipient relating to the Program shall continue
and survive.
Everyone is permitted to copy and distribute copies of this Agreement, but in
order to avoid inconsistency the Agreement is copyrighted and may only be
modified in the following manner. The Agreement Steward reserves the right to
publish new versions (including revisions) of this Agreement from time to
time. No one other than the Agreement Steward has the right to modify this
Agreement. The Eclipse Foundation is the initial Agreement Steward. The
Eclipse Foundation may assign the responsibility to serve as the Agreement
Steward to a suitable separate entity. Each new version of the Agreement will
be given a distinguishing version number. The Program (including
Contributions) may always be Distributed subject to the version of the
Agreement under which it was received. In addition, after a new version of the
Agreement is published, Contributor may elect to Distribute the Program
(including its Contributions) under the new version.
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives
no rights or licenses to the intellectual property of any Contributor under
this Agreement, whether expressly, by implication, estoppel or otherwise. All
rights in the Program not expressly granted under this Agreement are reserved.
Nothing in this Agreement is intended to be enforceable by any entity that is
not a Contributor or Recipient. No third-party beneficiary rights are created
under this Agreement.
================================================
FILE: MANIFEST.in
================================================
include README.rst
include AUTHORS
include LICENSE
include cylp/VERSION
recursive-include cylp/cpp *.hpp *.h *.cpp
================================================
FILE: README.rst
================================================
CyLP
====
CyLP is a Python interface to COIN-OR’s Linear and mixed-integer program solvers
(CLP, CBC, and CGL). CyLP’s unique feature is that you can use it to alter the
solution process of the solvers from within Python. For example, you may
define cut generators, branch-and-bound strategies, and primal/dual Simplex
pivot rules completely in Python.
You may read your LP from an mps file or use CyLP’s easy modeling
facility. Please find examples in the `documentation
<http://coin-or.github.io/CyLP/>`_.
Docker
======
If you're comfortable with Docker, you can get started right away with the container
available on Dockerhub that comes with CyLP pre-installed.
https://hub.docker.com/repository/docker/coinor/cylp
Otherwise, read on.
Prerequisites and installation
==============================
On Windows: Installation as a binary wheel
------------------------------------------
On Windows, a binary wheel is available and it is not necessary to install Cbc.
Just do::
$ python -m pip install cylp
On Linux/macOS: Installation as a binary wheel
---------------------------------------------------------
Binary wheels are available for Linux and some versions of OS X
for some versions of Python. To see if there is a wheel available
for your platform, you can browse
https://pypi.org/project/cylp/#files
or just try::
$ python -m pip install cylp
In case this fails, it is most likely that there is no wheel for your platform.
In particular, there are no wheels for MacOS running on Apple Silicon.
If you are on Linux, this can probably be addressed by switching to
a supported Python version with, e.g., conda::
$ conda create -n cylp python=3.9
$ conda activate cylp
If all else fails, it is easy to install from source, but Cbc must be
installed first, as detailed below. The easiest route for this is to use
conda.
On Linux/macOS with conda: Installation from source
---------------------------------------------------
To install from source, you will need to install binaries for Cbc or also build Cbc from source.
The version should be 2.10 (recommended) or earlier
(current master branch of Cbc will not work with this version of CyLP).
The following commands will create and activate a new conda environment with all
these prerequisites installed::
$ conda create -n cylp coin-or-cbc cython numpy pkg-config scipy -c conda-forge
$ conda activate cylp
Now you can install CyLP from PyPI::
$ pip install --no-build-isolation cylp
(The option `--no-build-isolation` ensures that `cylp` uses the Python packages
installed by conda in the build phase.)
Alternatively, if you have cloned CyLP from GitHub::
$ pip install --no-build-isolation .
On Linux/macOS with pip: Installation from source
-------------------------------------------------
You will need to install binaries for Cbc. The version should be 2.10 (recommended) or earlier
(current master branch of Cbc will not work with this version of CyLP).
You can install Cbc either by installing with your system's package manager, by downloading pre-built binaries,
or by building it yourself from source using `coinbrew <https://github.com/coin-or/coinbrew>`_.
1. To install Cbc in Linux, the easiest way is to use a package manager. Install
`coinor-libcbc-dev` on Ubuntu/Debian or `coin-or-Cbc-devel` on Fedora, or the
`corresponding package on your distribution
<https://repology.org/project/cbc/versions>`_.
#. On macOS, it is easiest to install Cbc with homebrew:
``$ brew install cbc pkg-config``
You should no longer need to build Cbc from source on any platform unless, for some reason, none of the
above recipes applies to you. If you do need to build from source, please go to the `Cbc <https://github.com/coin-or/Cbc>`_
project page and follow the instructions there. After building and installing, make sure to
either set the `COIN_INSTALL_DIR` variable to point to the installation or set `PKG_CONFIG_PATH` to point to
the directory where the `.pc` files are installed. You may also need to set either `LD_LIBRARY_PATH` (Linux)
or `DYLD_LIBRARY_PATH` (macOS).
Next, build and install CyLP::
$ python -m pip install cylp
This will build and install CyLP along with its runtime dependencies
(`install-requires`), NumPy and `SciPy <http://www.scipy.org>`_.
Testing your installation
=========================
Optional step:
If you want to run the doctests (i.e. ``make doctest`` in the ``doc`` directory)
you should also define::
$ export CYLP_SOURCE_DIR=/Path/to/cylp
Now you can use CyLP in your python code. For example::
>>> from cylp.cy import CyClpSimplex
>>> s = CyClpSimplex()
>>> s.readMps('../input/netlib/adlittle.mps')
0
>>> s.initialSolve()
'optimal'
>>> round(s.objectiveValue, 3)
225494.963
Or simply go to CyLP and run::
$ python -m unittest discover
to run all CyLP unit tests (this is currently broken).
Modeling Example
================
Here is an example of how to model with CyLP's modeling facility::
import numpy as np
from cylp.cy import CyClpSimplex
from cylp.py.modeling.CyLPModel import CyLPArray
s = CyClpSimplex()
# Add variables
x = s.addVariable('x', 3)
y = s.addVariable('y', 2)
# Create coefficients and bounds
A = np.matrix([[1., 2., 0],[1., 0, 1.]])
B = np.matrix([[1., 0, 0], [0, 0, 1.]])
D = np.matrix([[1., 2.],[0, 1]])
a = CyLPArray([5, 2.5])
b = CyLPArray([4.2, 3])
x_u= CyLPArray([2., 3.5])
# Add constraints
s += A * x <= a
s += 2 <= B * x + D * y <= b
s += y >= 0
s += 1.1 <= x[1:3] <= x_u
# Set the objective function
c = CyLPArray([1., -2., 3.])
s.objective = c * x + 2 * y.sum()
# Solve using primal Simplex
s.primal()
print(s.primalVariableSolution['x'])
This is the expected output::
Clp0006I 0 Obj 1.1 Primal inf 2.8999998 (2) Dual inf 5.01e+10 (5) w.o. free dual inf (4)
Clp0006I 5 Obj 1.3
Clp0000I Optimal - objective value 1.3
[ 0.2 2. 1.1]
Documentation
=============
You may access CyLP's documentation:
1. *Online* : Please visit http://coin-or.github.io/CyLP/
2. *Offline* : To install CyLP's documentation in your repository, you need
Sphinx (https://www.sphinx-doc.org/). You can generate the documentation by
going to cylp/doc and run ``make html`` or ``make latex`` and access the
documentation under cylp/doc/build. You can also run ``make doctest`` to
perform all the doctest.
Who uses CyLP
=============
The following software packages make use of CyLP:
#. `CVXPY `_, a Python-embedded modeling language for
convex optimization problems, uses CyLP for interfacing to CBC, which is one
of the `supported mixed-integer solvers
`_.
CyLP has been used in a wide range of practical and research fields. Some of the users include:
#. `PyArt `_, The Python ARM Radar Toolkit,
used by Atmospheric Radiation Measurement (U.S. Department of energy).
#. Meteorological Institute University of Bonn.
#. Sherbrooke university hospital (Centre hospitalier universitaire de Sherbrooke): CyLP is used for nurse scheduling.
#. Maisonneuve-Rosemont hospital (L'hôpital HMR): CyLP is used for physician scheduling with preferences.
#. Lehigh University: CyLP is used to teach mixed-integer cuts.
#. IBM T. J. Watson research center
#. Saarland University, Germany
================================================
FILE: cylp/VERSION
================================================
0.93.0
================================================
FILE: cylp/__init__.py
================================================
# Package initializer: expose the version string read from the bundled
# VERSION file that sits next to this module.
import os
from os.path import realpath, join

currentDir = os.path.dirname(realpath(__file__))
versionPath = join(currentDir, 'VERSION')
with open(versionPath) as versionFile:
    __version__ = versionFile.read().strip()
================================================
FILE: cylp/cpp/.gitignore
================================================
back/
================================================
FILE: cylp/cpp/CbcCompareUser.cpp
================================================
// Copyright (C) 2004, International Business Machines
// Corporation and others. All Rights Reserved.
#if defined(_MSC_VER)
// Turn off compiler warning about long names
# pragma warning(disable:4786)
#endif
#include
#include
#include
//#define CBC_DEBUG
#include "CbcMessage.hpp"
#include "CbcModel.hpp"
#include "CbcTree.hpp"
#include "CbcCompareUser.hpp"
#include "CoinError.hpp"
#include "CoinHelperFunctions.hpp"
/** Default constructor.
 *
 *  Wires this comparison object to a Python-side implementation: `obj` is
 *  the Python object, and the three function pointers are C trampolines
 *  that forward test / newSolution / every1000Nodes calls to it.
 */
CbcCompareUser::CbcCompareUser(PyObject* obj, runTest_t runTest,
runNewSolution_t runNewSolution,
runEvery1000Nodes_t runEvery1000Nodes)
: CbcCompareBase(),
weight_(-1.0),
saveWeight_(0.0),
numberSolutions_(0),
count_(0),
treeSize_(0),
obj(obj),
runTest(runTest),
runNewSolution(runNewSolution),
runEvery1000Nodes(runEvery1000Nodes)
{
test_=this;  // CbcCompareBase dispatches through test_; point it at ourselves
}
// Constructor with weight
//CbcCompareUser::CbcCompareUser (double weight)
// : CbcCompareBase(),
// weight_(weight) ,
// saveWeight_(0.0),
// numberSolutions_(0),
// count_(0),
// treeSize_(0)
//{
// test_=this;
//}
// Copy constructor: duplicates the CbcCompareBase state plus all of the
// search-state members and the Python-callback wiring of rhs.
CbcCompareUser::CbcCompareUser ( const CbcCompareUser & rhs)
:CbcCompareBase(rhs)
{
// scalar search-state members
weight_ = rhs.weight_;
saveWeight_ = rhs.saveWeight_;
numberSolutions_ = rhs.numberSolutions_;
count_ = rhs.count_;
treeSize_ = rhs.treeSize_;
// Python object and its callback trampolines
obj = rhs.obj;
runTest = rhs.runTest;
runNewSolution = rhs.runNewSolution;
runEvery1000Nodes = rhs.runEvery1000Nodes;
}
// Clone: heap-allocate a copy of this comparison object; the caller
// (normally CbcModel) takes ownership.
CbcCompareBase *
CbcCompareUser::clone() const
{
CbcCompareUser* copy = new CbcCompareUser(*this);
return copy;
}
// Assignment operator: copies base-class state plus all members.
// Self-assignment is a harmless no-op, handled by the guard below.
CbcCompareUser &
CbcCompareUser::operator=( const CbcCompareUser& rhs)
{
if (this == &rhs)
return *this;
CbcCompareBase::operator=(rhs);
weight_ = rhs.weight_;
saveWeight_ = rhs.saveWeight_;
numberSolutions_ = rhs.numberSolutions_;
count_ = rhs.count_;
treeSize_ = rhs.treeSize_;
obj = rhs.obj;
runTest = rhs.runTest;
runNewSolution = rhs.runNewSolution;
runEvery1000Nodes = rhs.runEvery1000Nodes;
return *this;
}
// Destructor.
// Nothing to release: `obj` is never incref'd/decref'd in this class
// (presumably the Cython side keeps it alive) -- TODO(review): confirm.
CbcCompareUser::~CbcCompareUser ()
{
}
// Returns true if y better than x.
// Delegates the node comparison to the Python-side callback.
bool
CbcCompareUser::test (CbcNode * x, CbcNode * y)
{
// ICbcNode derives from CbcNode, so static_cast is the correct (and
// greppable) downcast; the old C-style casts are replaced.
return this->runTest(this->obj, static_cast<ICbcNode*>(x),
static_cast<ICbcNode*>(y));
}
// This allows method to change behavior as it is called after each solution.
// Forwards the new-solution event to the Python-side callback.
bool
CbcCompareUser::newSolution(CbcModel * model,
double objectiveAtContinuous,
int numberInfeasibilitiesAtContinuous)
{
// ICbcModel derives from CbcModel; static_cast replaces the C-style cast.
return this->runNewSolution(this->obj, static_cast<ICbcModel*>(model),
objectiveAtContinuous,
numberInfeasibilitiesAtContinuous);
}
// This allows method to change behavior.
// Forwards the periodic (every-1000-nodes) event to the Python-side
// callback; a true return asks Cbc to re-sort the tree.
bool
CbcCompareUser::every1000Nodes(CbcModel * model, int numberNodes)
{
// ICbcModel derives from CbcModel; static_cast replaces the C-style cast.
return this->runEvery1000Nodes(this->obj,
static_cast<ICbcModel*>(model),
numberNodes);
}
// Returns true if wants code to do scan with alternate criterion.
// This implementation never requests an alternate scan.
bool
CbcCompareUser::fullScan() const
{
return false;
}
// This is alternate test function.
// Unreachable by design: fullScan() returns false, so Cbc never calls this;
// abort() guards against that assumption being violated.
bool
CbcCompareUser::alternateTest (CbcNode * x, CbcNode * y)
{
// not used
abort();
return false;
}
================================================
FILE: cylp/cpp/CbcCompareUser.hpp
================================================
// Copyright (C) 2002, International Business Machines
// Corporation and others. All Rights Reserved.
#ifndef CbcCompareUser_H
#define CbcCompareUser_H
//#define NPY_NO_DEPRECATED_API
#include "CbcNode.hpp"
#include "CbcCompareBase.hpp"
#include "ICbcModel.hpp"
#include "ICbcNode.hpp"
#include "Python.h"
#include
//class ICbcModel;
/* This is an example of a more complex rule with data
It is default after first solution
If weight is 0.0 then it is computed to hit first solution
less 2%
*/
// Node-comparison strategy that forwards every decision (test, newSolution,
// every1000Nodes) to a Python-side object through C callback trampolines.
class CbcCompareUser : public CbcCompareBase {
public:
// Default Constructor
CbcCompareUser (PyObject* obj, runTest_t runTest,
runNewSolution_t runNewSolution,
runEvery1000Nodes_t runEvery1000Nodes) ;
// Constructor with weight
//CbcCompareUser (double weight);
// Copy constructor
CbcCompareUser ( const CbcCompareUser &rhs);
// Assignment operator
CbcCompareUser & operator=( const CbcCompareUser& rhs);
/// Clone
virtual CbcCompareBase * clone() const;
~CbcCompareUser() ;
/* This returns true if weighted value of node y is less than
weighted value of node x */
// Returns true if y better than x
virtual bool test (CbcNode * x, CbcNode * y) ;
/// This is alternate test function (unused; aborts if called)
virtual bool alternateTest (CbcNode * x, CbcNode * y);
// This allows method to change behavior as it is called
// after each solution
virtual bool newSolution(CbcModel * model,
double objectiveAtContinuous,
int numberInfeasibilitiesAtContinuous) ;
/// Returns true if wants code to do scan with alternate criterion
virtual bool fullScan() const;
// This allows method to change behavior
// Return true if want tree re-sorted
virtual bool every1000Nodes(CbcModel * model,int numberNodes);
/* if weight == -1.0 then depth first (before solution)
if -2.0 then do breadth first just for first 1000 nodes
*/
inline double getWeight() const
{ return weight_;}
inline void setWeight(double weight)
{ weight_ = weight;}
protected:
// Weight for each infeasibility
double weight_;
// Weight for each infeasibility - computed from solution
double saveWeight_;
// Number of solutions
int numberSolutions_;
// count
mutable int count_;
// Tree size (at last check)
int treeSize_;
// C trampolines into the Python object below
runTest_t runTest;
runEvery1000Nodes_t runEvery1000Nodes;
runNewSolution_t runNewSolution;
// Python-side comparison object (not owned; no incref/decref here)
PyObject* obj;
};
#endif
================================================
FILE: cylp/cpp/CyClpSimplex_api.h
================================================
#ifndef __PYX_HAVE_API__CyClpSimplex
#define __PYX_HAVE_API__CyClpSimplex
#include "Python.h"
static void (*__pyx_f_12CyClpSimplex_CyPostPrimalRow)(IClpSimplex *) = 0;
#define CyPostPrimalRow __pyx_f_12CyClpSimplex_CyPostPrimalRow
static int (*__pyx_f_12CyClpSimplex_CyPivotIsAcceptable)(IClpSimplex *) = 0;
#define CyPivotIsAcceptable __pyx_f_12CyClpSimplex_CyPivotIsAcceptable
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Cython-generated helper: import a Python module by name.
 * Returns a new reference on success, or 0 on failure (with a Python
 * exception set by the C-API call that failed). */
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = PyUnicode_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);  /* the name string is no longer needed */
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
#ifndef __PYX_HAVE_RT_ImportFunction
#define __PYX_HAVE_RT_ImportFunction
/* Cython-generated helper: fetch exported C function `funcname` from
 * `module`'s __pyx_capi__ capsule dictionary, verifying its signature
 * string `sig`.  On success stores the pointer in *f and returns 0;
 * on failure returns -1 with a Python exception set. */
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
/* union legally converts the capsule's void* back to a function pointer */
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString(module, (char *)"__pyx_capi__");
if (!d)
goto bad;
/* borrowed reference -- no decref needed for cobj */
cobj = PyDict_GetItemString(d, funcname);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%s does not export expected C function %s",
PyModule_GetName(module), funcname);
goto bad;
}
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C function %s.%s has wrong signature (expected %s, got %s)",
PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
goto bad;
}
tmp.p = PyCapsule_GetPointer(cobj, sig);
*f = tmp.fp;
if (!(*f))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
/* Bind the two C-API functions exported by the CyClpSimplex Cython module
 * (CyPostPrimalRow, CyPivotIsAcceptable) into this translation unit's
 * function pointers.  Returns 0 on success, -1 on failure (exception set). */
static int import_CyClpSimplex(void) {
PyObject *module = 0;
module = __Pyx_ImportModule("CyClpSimplex");
if (!module) goto bad;
if (__Pyx_ImportFunction(module, "CyPostPrimalRow", (void (**)(void))&__pyx_f_12CyClpSimplex_CyPostPrimalRow, "void (IClpSimplex *)") < 0) goto bad;
if (__Pyx_ImportFunction(module, "CyPivotIsAcceptable", (void (**)(void))&__pyx_f_12CyClpSimplex_CyPivotIsAcceptable, "int (IClpSimplex *)") < 0) goto bad;
Py_DECREF(module); module = 0;
return 0;
bad:
Py_XDECREF(module);
return -1;
}
#endif /* !__PYX_HAVE_API__CyClpSimplex */
================================================
FILE: cylp/cpp/ICbc.cpp
================================================
#include "ICbc.hpp"
// Solve `clpModel` as a MIP with Cbc, installing a user node-comparison
// object that forwards decisions to the given Python callbacks.
// The caller takes ownership of the returned ICbcModel.
ICbcModel* CbcSolveMIP(IClpSimplex* clpModel, PyObject* obj,
runTest_t runTest, runNewSolution_t runNewSolution,
runEvery1000Nodes_t runEvery1000Nodes){
OsiClpSolverInterface solver(clpModel);
// Branch-and-bound starts from the LP relaxation, so solve that first.
solver.initialSolve();
ICbcModel* mip = new ICbcModel(solver);
CbcCompareUser comparison(obj, runTest, runNewSolution, runEvery1000Nodes);
mip->setNodeComparison(comparison);
mip->branchAndBound();
return mip;
}
// Solve `clpModel` as a MIP with Cbc using Cbc's default node comparison.
// The caller takes ownership of the returned ICbcModel.
ICbcModel* CbcSolveMIP(IClpSimplex* clpModel){
OsiClpSolverInterface solver(clpModel);
// Solve the LP relaxation, then branch and bound.
solver.initialSolve();
ICbcModel* mip = new ICbcModel(solver);
mip->branchAndBound();
return mip;
}
================================================
FILE: cylp/cpp/ICbc.hpp
================================================
//#define NPY_NO_DEPRECATED_API
#include "CbcConfig.h"
// For Branch and bound
#include "OsiSolverInterface.hpp"
#include "ICbcModel.hpp"
#include "OsiClpSolverInterface.hpp"
#include "ClpPresolve.hpp"
//#include "CbcCompareUser.hpp"
#include "CglProbing.hpp"
#include "IClpSimplex.hpp"
#include "CbcCompareUser.hpp"
ICbcModel* CbcSolveMIP(IClpSimplex* model,
PyObject* obj,
runTest_t runTest,
runNewSolution_t runNewSolution,
runEvery1000Nodes_t runEvery1000Nodes);
ICbcModel* CbcSolveMIP(IClpSimplex* model);
================================================
FILE: cylp/cpp/ICbcModel.cpp
================================================
#include "ICbcModel.hpp"
#include "CbcCompareUser.hpp"
#include "CbcSolver.hpp"
// Return the primal column solution as a 1-D NumPy double array.
// NOTE(review): PyArray_SimpleNewFromData wraps the solver's buffer without
// copying, so the array is only valid while this model/solver is alive --
// confirm callers copy when they need to outlive it.
PyObject* ICbcModel::getPrimalVariableSolution(){
_import_array();  // make sure the NumPy C API is initialised in this TU
npy_intp dims = this->solver()->getNumCols();
double* d = (double*)(this->solver()->getColSolution());
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, d );
return Arr;
}
// Construct from an OsiClpSolverInterface (CbcModel copies the solver) and
// initialise the NumPy C API for this translation unit.
ICbcModel::ICbcModel(OsiClpSolverInterface& osiint):CbcModel(osiint){
_import_array();
}
// Install a user node-comparison object built from the Python callbacks.
// The comparison is passed by reference from the stack; CbcModel presumably
// copies it (same pattern as CbcSolveMIP in ICbc.cpp).
void ICbcModel::setNodeCompare(PyObject* obj,
runTest_t runTest, runNewSolution_t runNewSolution,
runEvery1000Nodes_t runEvery1000Nodes){
CbcCompareUser comparison(obj, runTest, runNewSolution, runEvery1000Nodes);
setNodeComparison(comparison);
}
// Run the Cbc stand-alone driver on this model (equivalent to the command
// line "-solve -quit") and return the driver's status code.
// The log level is saved and restored around CbcMain0, which appears to
// reset it.  (The unused `returnCode` local was removed.)
int ICbcModel::cbcMain(){
int logLevel = this->logLevel();
const char* argv[] = {"ICbcModel", "-solve","-quit"};
CbcMain0(*this);
this->setLogLevel(logLevel);  // restore: CbcMain0 clobbers the log level
return CbcMain1(3, argv, *this);
//const char* argv = "-solve -quit";
//CbcSolverUsefulData solverData;
//CbcMain0(*this, solverData);
//this->setLogLevel(logLevel);
//return CbcMain1(3, argv, *this, NULL, solverData);
}
================================================
FILE: cylp/cpp/ICbcModel.hpp
================================================
#ifndef ICbcModel_H
#define ICbcModel_H
//#define NPY_NO_DEPRECATED_API
//#include "ClpModel.hpp"
#include "ClpPackedMatrix.hpp"
#include "Python.h"
#include
#include "CoinFinite.hpp"
#include "CoinPragma.hpp"
#include "CbcModel.hpp"
#include "Python.h"
#include
#include "OsiClpSolverInterface.hpp"
#include "ICbcNode.hpp"
//#include "CbcSolver.hpp"
//#include "CbcCompareUser.hpp"
class ICbcModel;
typedef int (*runTest_t)(void *instance, ICbcNode * x, ICbcNode * y);
typedef bool (*runNewSolution_t)(void *instance,ICbcModel * model,
double objectiveAtContinuous,
int numberInfeasibilitiesAtContinuous);
typedef int (*runEvery1000Nodes_t)(void *instance,
ICbcModel * model,int numberNodes);
// CbcModel subclass exposed to Cython: adds NumPy access to the primal
// solution, wiring for a Python node-comparison object, and a command-line
// driver entry point.
class ICbcModel : public CbcModel{
public:
ICbcModel(OsiClpSolverInterface&);
// Primal column solution as a 1-D NumPy array (no copy; see the .cpp).
PyObject * getPrimalVariableSolution();
// Install a CbcCompareUser built from the given Python callbacks.
void setNodeCompare(PyObject* obj,
runTest_t runTest, runNewSolution_t runNewSolution,
runEvery1000Nodes_t runEvery1000Nodes);
// Run the Cbc driver ("-solve -quit"); returns its status code.
int cbcMain();
};
#endif
================================================
FILE: cylp/cpp/ICbcNode.cpp
================================================
#include "ICbcNode.hpp"
// Deterministic tie-break between this node and y: the node created later
// (larger node number) wins.
bool ICbcNode::breakTie(ICbcNode* y){
ICbcNode* self = this;
assert(self);
assert(y);
return self->nodeNumber() > y->nodeNumber();
}
================================================
FILE: cylp/cpp/ICbcNode.hpp
================================================
#ifndef ICbcNode_H
#define ICbcNode_H
//#define NPY_NO_DEPRECATED_API
//#include "ClpModel.hpp"
#include "ClpPackedMatrix.hpp"
#include "Python.h"
#include
#include "CoinFinite.hpp"
#include "CoinPragma.hpp"
#include "CbcNode.hpp"
#include "Python.h"
#include
//#include "OsiClpSolverInterface.hpp"
// CbcNode subclass exposed to Cython; adds a deterministic tie-break helper.
class ICbcNode : public CbcNode{
public:
// True if this node should be preferred over y on a tie (larger nodeNumber).
bool breakTie(ICbcNode* y);
};
#endif
================================================
FILE: cylp/cpp/ICglCutGeneratorBase.cpp
================================================
#include "ICglCutGeneratorBase.h"
// Generate cuts for `si`, appending them to `cs`, by delegating to the
// Python-side callback.  If the callback wiring is missing, report the
// broken state and generate nothing.
void
CppCglCutGeneratorBase::generateCuts(const OsiSolverInterface & si, OsiCuts & cs,
const CglTreeInfo info)
{
if (this->obj && this->runGenerateCuts) {
this->runGenerateCuts(this->obj, &si, &cs, info);
return;
}
// Diagnostics now go to stderr, consistent with clone() (was std::cout).
std::cerr << "** generateCuts: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runGenerateCuts << "]\n";
}
// Clone via the Python-side callback.  Returns NULL (and reports to stderr)
// when the callback wiring is missing.
CglCutGenerator * CppCglCutGeneratorBase::clone() const {
if (!this->obj || !this->runCglClone) {
std::cerr << "** clone: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runCglClone << "]\n";
return NULL;
}
return this->runCglClone(this->obj);
}
// Main constructor: stores the Python-side generator object and the two
// C trampolines used by generateCuts()/clone().
// (Members are initialised in declaration order: obj, runCglClone,
// runGenerateCuts.)
CppCglCutGeneratorBase::CppCglCutGeneratorBase(PyObject *obj, runGenerateCuts_t runGenerateCuts,
runCglClone_t runCglClone) :
obj(obj),
runCglClone(runCglClone),
runGenerateCuts(runGenerateCuts)
{
}
// Destructor: nothing owned here; `obj` is not decref'd (presumably kept
// alive by the Cython side) -- TODO(review): confirm.
CppCglCutGeneratorBase::~CppCglCutGeneratorBase()
{
}
// "Copy" constructor from a generic CglCutGenerator.  The source is not a
// CppCglCutGeneratorBase, so the Python wiring cannot be taken from it.
// BUG FIX: the old initialiser list read `obj(obj)` etc., i.e. it
// initialised each member from itself, leaving indeterminate values.
// Initialise them to NULL instead so the validity checks in
// generateCuts()/clone() behave correctly.
CppCglCutGeneratorBase::CppCglCutGeneratorBase(const CglCutGenerator & source):
CglCutGenerator(source),
obj(NULL),
runCglClone(NULL),
runGenerateCuts(NULL)
{
}
// Default constructor.
// BUG FIX: as in the copy-from-base constructor, the members used to be
// initialised from themselves (indeterminate).  NULL makes the state checks
// in generateCuts()/clone() meaningful.
CppCglCutGeneratorBase::CppCglCutGeneratorBase():
CglCutGenerator(),
obj(NULL),
runCglClone(NULL),
runGenerateCuts(NULL)
{
}
================================================
FILE: cylp/cpp/ICglCutGeneratorBase.h
================================================
#include "Python.h"
#include
using namespace std;
#include "CglCutGenerator.hpp"
//#include "CoinIndexedVector.hpp"
//#include "IClpSimplex.hpp"
//#include "ClpSimplex.hpp"
//#include "ClpFactorization.hpp"
#include "OsiSolverInterface.hpp"
typedef CglCutGenerator* (*runCglClone_t)(void *instance);
typedef void (*runGenerateCuts_t)(void *instance,
const OsiSolverInterface *si, OsiCuts *cs, const CglTreeInfo info);
// CglCutGenerator subclass that forwards generateCuts()/clone() to a
// Python-side implementation through C callback trampolines.
class CppCglCutGeneratorBase : public CglCutGenerator
{
public:
PyObject *obj;                      // Python-side generator (not owned)
runCglClone_t runCglClone;          // trampoline for clone()
runGenerateCuts_t runGenerateCuts;  // trampoline for generateCuts()
//IClpSimplex model_;
CppCglCutGeneratorBase(PyObject *obj, runGenerateCuts_t ,
runCglClone_t );
virtual ~CppCglCutGeneratorBase();
CppCglCutGeneratorBase(const CglCutGenerator & source);
CppCglCutGeneratorBase();
virtual CglCutGenerator * clone() const;
virtual void generateCuts(const OsiSolverInterface & si, OsiCuts & cs,
const CglTreeInfo info = CglTreeInfo());
};
================================================
FILE: cylp/cpp/IClpDualRowPivotBase.cpp
================================================
#include "IClpDualRowPivotBase.h"
#include "ICoinIndexedVector.hpp"
// Choose the leaving row by delegating to the Python-side callback.
// Returns -100 (and reports to stderr) when the wiring is missing.
int
CppClpDualRowPivotBase::pivotRow()
{
if (!this->obj || !this->runPivotRow) {
std::cerr << "** pivotRow: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runPivotRow << "]\n";
return -100;
}
return this->runPivotRow(this->obj);
}
// Clone via the Python-side callback.  Returns NULL (and reports to stderr)
// when the wiring is missing.
ClpDualRowPivot * CppClpDualRowPivotBase::clone(bool copyData) const {
if (!this->obj || !this->runDualPivotClone) {
std::cerr << "** clone: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runDualPivotClone << "]\n";
return NULL;
}
return this->runDualPivotClone(this->obj,copyData);
}
// Update the dual pivot weights by delegating to the Python-side callback.
// Returns -1 (and reports to stderr) when the wiring is missing.
double CppClpDualRowPivotBase::updateWeights(CoinIndexedVector * input,
CoinIndexedVector * spare,
CoinIndexedVector * spare2,
CoinIndexedVector * updatedColumn) {
if (this->obj && this->runUpdateWeights) {
return this->runUpdateWeights(this->obj, input, spare, spare2, updatedColumn);
}
// BUG FIX: the diagnostic used to be mislabelled "** clone:".
std::cerr << "** updateWeights: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runUpdateWeights << "]\n";
return -1;
}
// Apply `primalUpdate` (scaled by `primalRatio`) to the primal solution via
// the Python-side callback; the callback receives a pointer through which it
// can adjust `objectiveChange`.
void CppClpDualRowPivotBase::updatePrimalSolution(
CoinIndexedVector * primalUpdate,
double primalRatio,
double & objectiveChange){
if (this->obj && this->runUpdatePrimalSolution) {
this->runUpdatePrimalSolution(this->obj, primalUpdate,
primalRatio, &objectiveChange);
return;
}
// BUG FIX: the diagnostic used to be mislabelled "** clone:".
std::cerr << "** updatePrimalSolution: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runUpdatePrimalSolution << "]\n";
}
// Constructor: stores the Python-side pivot object and the four C
// trampolines used by pivotRow/clone/updateWeights/updatePrimalSolution.
CppClpDualRowPivotBase::CppClpDualRowPivotBase(PyObject *obj,
runPivotRow_t runPivotRow,
runDualPivotClone_t runDualPivotClone,
runUpdateWeights_t runUpdateWeights,
runUpdatePrimalSolution_t runUpdatePrimalSolution) :
obj(obj),
runPivotRow(runPivotRow),
runDualPivotClone(runDualPivotClone),
runUpdateWeights(runUpdateWeights),
runUpdatePrimalSolution(runUpdatePrimalSolution)
{
}
// Destructor: nothing owned here; `obj` is not decref'd (presumably kept
// alive by the Cython side) -- TODO(review): confirm.
CppClpDualRowPivotBase::~CppClpDualRowPivotBase()
{
}
// Attach the simplex model; stored in the base class's ClpSimplex* member.
// (The template argument of the cast had been lost; restored here.)
void CppClpDualRowPivotBase::setModel(IClpSimplex* m)
{
ClpSimplex* s = static_cast<ClpSimplex*>(m);
model_ = s;
}
// Return the attached model, downcast back to the IClpSimplex interface.
// (The template argument of the cast had been lost; restored here.)
IClpSimplex* CppClpDualRowPivotBase::model()
{
return static_cast<IClpSimplex*> (model_);
}
================================================
FILE: cylp/cpp/IClpDualRowPivotBase.h
================================================
#include "Python.h"
#include
using namespace std;
#include "ClpDualRowPivot.hpp"
#include "CoinIndexedVector.hpp"
#include "IClpSimplex.hpp"
//#include "ClpSimplex.hpp"
#include "ClpFactorization.hpp"
typedef int (*runPivotRow_t)(void *instance);
typedef ClpDualRowPivot* (*runDualPivotClone_t)(void *instance, bool copyData);
typedef double (*runUpdateWeights_t)(void *instance,
CoinIndexedVector * input,
CoinIndexedVector * spare,
CoinIndexedVector * spare2,
CoinIndexedVector * updatedColumn);
typedef void (*runUpdatePrimalSolution_t)(void *instance,
CoinIndexedVector * input,
double theta,
double * changeInObjective);
// ClpDualRowPivot subclass that forwards pivotRow/clone/updateWeights/
// updatePrimalSolution to a Python-side implementation via C trampolines.
class CppClpDualRowPivotBase : public ClpDualRowPivot
{
public:
PyObject *obj;                                       // Python-side pivot object (not owned)
runPivotRow_t runPivotRow;                           // trampoline for pivotRow()
runDualPivotClone_t runDualPivotClone;               // trampoline for clone()
runUpdateWeights_t runUpdateWeights;                 // trampoline for updateWeights()
runUpdatePrimalSolution_t runUpdatePrimalSolution;   // trampoline for updatePrimalSolution()
//IClpSimplex model_;
CppClpDualRowPivotBase(PyObject *obj, runPivotRow_t ,
runDualPivotClone_t , runUpdateWeights_t, runUpdatePrimalSolution_t );
virtual ~CppClpDualRowPivotBase();
virtual ClpDualRowPivot * clone(bool copyData = true) const;
//virtual void saveWeights(IClpSimplex * model,int mode);
virtual double updateWeights(CoinIndexedVector * input,
CoinIndexedVector * spare,
CoinIndexedVector * spare2,
CoinIndexedVector * updatedColumn);
virtual void updatePrimalSolution(CoinIndexedVector * input,
double theta,
double& changeInObjective);
virtual int pivotRow();
// Model accessors bridging the base-class ClpSimplex* to IClpSimplex*.
void setModel(IClpSimplex* m);
IClpSimplex* model();
};
================================================
FILE: cylp/cpp/IClpPackedMatrix.cpp
================================================
#include "IClpPackedMatrix.hpp"
// Compute a transpose-times product over a subset of columns (`which`),
// with optional row/column scaling, into y.
// NOTE(review): the body below is visibly corrupted -- loop headers and
// comparisons have been truncated (e.g. "for (int jColumn=0;jColumn
// model->getNumCols()){"), so this cannot compile as written.  Recover the
// original implementation from upstream CyLP before relying on this file.
void
IClpPackedMatrix::transposeTimesSubsetAll( IClpSimplex* model, int number,
const long long int * which,
const double * COIN_RESTRICT x, double * COIN_RESTRICT y,
const double * COIN_RESTRICT rowScale,
const double * COIN_RESTRICT columnScale,
double * COIN_RESTRICT spare) const
{
// get matrix data pointers
const int * row = matrix_->getIndices();
const CoinBigIndex * columnStart = matrix_->getVectorStarts();
const double * elementByColumn = matrix_->getElements();
if (!spare||!rowScale) {
if (rowScale) {
for (int jColumn=0;jColumn model->getNumCols()){
int jRow = iColumn - model->getNumCols();
value = x[jRow]* -1 *rowScale[jRow];
}
else{
for (j=start;j model->getNumCols()){
int jRow = iColumn - model->getNumCols();
value = x[jRow]* -1;
}
else{
for (j=start;jgetNumRows(); for (iRow=0;iRow
for (iRow=0;iRow
#include "CoinFinite.hpp"
#include "CoinPragma.hpp"
#include "IClpSimplex.hpp"
// ClpPackedMatrix subclass adding a subset transpose-times product used by
// the CyLP positive-edge / custom-pivot machinery.
class IClpPackedMatrix : public ClpPackedMatrix{
public:
IClpPackedMatrix();
// y = A^T x restricted to `number` columns listed in `which`, with optional
// row/column scaling and scratch space `spare`.
void transposeTimesSubsetAll(IClpSimplex* model, int number,
const long long int * which,
const double * COIN_RESTRICT x, double * y,
const double * rowScale,
const double * columnScale,
double * spare) const;
};
#endif
================================================
FILE: cylp/cpp/IClpPrimalColumnPivot.cpp
================================================
#include "IClpPrimalColumnPivot.h"
// Base-class stub: must be overridden.  Prints a reminder and returns an
// invalid column index (-100) so a missing override is noticed quickly.
int CppClpPrimalColumnPivotBase::pivotColumn(CoinIndexedVector * updates,
CoinIndexedVector * spareRow1,
CoinIndexedVector * spareRow2,
CoinIndexedVector * spareColumn1,
CoinIndexedVector * spareColumn2)
{
std::cout << "PivotColumn should be implemented in a derived class\n";
return -100;
}
// Constructor: stores the Python-side object and a single generic callback.
CppClpPrimalColumnPivotBase::CppClpPrimalColumnPivotBase(PyObject *obj, RunFct fct) :
obj(obj),
fct(fct)
{
}
// Destructor: nothing owned; `obj` is not decref'd here.
CppClpPrimalColumnPivotBase::~CppClpPrimalColumnPivotBase()
{
}
// Base-class stub: must be overridden; only prints a reminder.
void CppClpPrimalColumnPivotBase::saveWeights(ClpSimplex * model,int mode)
{
std::cout << "saveWeight should be implemented in a derived class\n";
}
// Base-class stub: must be overridden; prints a reminder and returns null.
CppClpPrimalColumnPivotBase* CppClpPrimalColumnPivotBase::clone(bool copyData) const
{
std::cout << "clone should be implemented in a derived class\n";
return 0;
}
================================================
FILE: cylp/cpp/IClpPrimalColumnPivot.h
================================================
#include "ClpPrimalColumnPivot.hpp"
#include "CoinIndexedVector.hpp"
#include "Python.h"
#include
typedef void (*RunFct)(void *instance);
// Stub base for a Python-driven primal column pivot rule.
// NOTE(review): this header declares the SAME class name as
// IClpPrimalColumnPivotBase.h but with a different interface -- the two
// headers must never be included in the same translation unit (ODR);
// confirm which one is actually in use.
class CppClpPrimalColumnPivotBase : public ClpPrimalColumnPivot
{
public:
PyObject *obj;   // Python-side object (not owned)
RunFct fct;      // generic callback
CppClpPrimalColumnPivotBase(PyObject *obj, RunFct fct);
virtual ~CppClpPrimalColumnPivotBase();
virtual int pivotColumn(CoinIndexedVector * updates,
CoinIndexedVector * spareRow1,
CoinIndexedVector * spareRow2,
CoinIndexedVector * spareColumn1,
CoinIndexedVector * spareColumn2);
virtual void saveWeights(ClpSimplex * model,int mode);
virtual CppClpPrimalColumnPivotBase* clone(bool copyData = true) const;
};
================================================
FILE: cylp/cpp/IClpPrimalColumnPivotBase.cpp
================================================
#include "IClpPrimalColumnPivotBase.h"
#include "ICoinIndexedVector.hpp"
// Choose the entering column by delegating to the Python-side callback.
// Returns -100 (and reports to stderr) when the wiring is missing.
int
CppClpPrimalColumnPivotBase::pivotColumn(CoinIndexedVector* updates, CoinIndexedVector* spareRow1,
CoinIndexedVector* spareRow2, CoinIndexedVector* spareColumn1,
CoinIndexedVector* spareColumn2 )
{
if (!this->obj || !this->runPivotColumn) {
std::cerr << "** pivotColumn: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runPivotColumn << "]\n";
return -100;
}
return this->runPivotColumn(this->obj, updates, spareRow1, spareRow2, spareColumn1, spareColumn2);
}
// Clone via the Python-side callback.  Returns NULL (and reports to stderr)
// when the wiring is missing.
ClpPrimalColumnPivot * CppClpPrimalColumnPivotBase::clone(bool copyData) const {
if (!this->obj || !this->runClone) {
std::cerr << "** clone: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runClone << "]\n";
return NULL;
}
return this->runClone(this->obj,copyData);
}
// Forward saveWeights to the Python-side callback, presenting the model
// through the IClpSimplex interface.
// (The template argument of the cast had been lost; restored here.)
void CppClpPrimalColumnPivotBase::saveWeights(ClpSimplex * model,int mode)
{
IClpSimplex* m = static_cast<IClpSimplex*>(model);
if (this->obj && this->runSaveWeights) {
this->runSaveWeights(this->obj,m, mode);
return;
}
std::cerr << "** saveWeights: invalid cy-state: obj [" << this->obj << "] fct: ["
<< this->runSaveWeights << "]\n";
}
// Constructor: stores the Python-side pivot object and the three C
// trampolines used by pivotColumn/clone/saveWeights.
CppClpPrimalColumnPivotBase::CppClpPrimalColumnPivotBase(PyObject *obj, runPivotColumn_t runPivotColumn,
runClone_t runClone, runSaveWeights_t runSaveWeights) :
obj(obj),
runPivotColumn(runPivotColumn),
runClone(runClone),
runSaveWeights(runSaveWeights)
{
}
// Destructor: nothing owned here; `obj` is not decref'd (presumably kept
// alive by the Cython side) -- TODO(review): confirm.
CppClpPrimalColumnPivotBase::~CppClpPrimalColumnPivotBase()
{
}
// Attach the simplex model; stored in the base class's ClpSimplex* member.
// (The template argument of the cast had been lost; restored here.)
void CppClpPrimalColumnPivotBase::setModel(IClpSimplex* m)
{
ClpSimplex* s = static_cast<ClpSimplex*>(m);
model_ = s;
}
// Return the attached model, downcast back to the IClpSimplex interface.
// (The template argument of the cast had been lost; restored here.)
IClpSimplex* CppClpPrimalColumnPivotBase::model()
{
return static_cast<IClpSimplex*> (model_);
}
================================================
FILE: cylp/cpp/IClpPrimalColumnPivotBase.h
================================================
#include "Python.h"
#include
using namespace std;
#include "ClpPrimalColumnPivot.hpp"
#include "CoinIndexedVector.hpp"
#include "IClpSimplex.hpp"
//#include "ClpSimplex.hpp"
#include "ClpFactorization.hpp"
typedef int (*runPivotColumn_t)(void *instance, CoinIndexedVector*,CoinIndexedVector*
,CoinIndexedVector*,CoinIndexedVector*,CoinIndexedVector*);
typedef ClpPrimalColumnPivot* (*runClone_t)(void *instance, bool copyData);
typedef void (*runSaveWeights_t)(void *instance, IClpSimplex * model,int mode);
// ClpPrimalColumnPivot subclass that forwards pivotColumn/clone/saveWeights
// to a Python-side implementation via C trampolines.
class CppClpPrimalColumnPivotBase : public ClpPrimalColumnPivot
{
public:
PyObject *obj;                     // Python-side pivot object (not owned)
runPivotColumn_t runPivotColumn;   // trampoline for pivotColumn()
runClone_t runClone;               // trampoline for clone()
runSaveWeights_t runSaveWeights;   // trampoline for saveWeights()
//IClpSimplex model_;
CppClpPrimalColumnPivotBase(PyObject *obj, runPivotColumn_t ,
runClone_t , runSaveWeights_t );
virtual ~CppClpPrimalColumnPivotBase();
virtual ClpPrimalColumnPivot * clone(bool copyData = true) const;
//virtual void saveWeights(IClpSimplex * model,int mode);
virtual void saveWeights(ClpSimplex * model,int mode);
virtual int pivotColumn(CoinIndexedVector * updates,
CoinIndexedVector * spareRow1,
CoinIndexedVector * spareRow2,
CoinIndexedVector * spareColumn1,
CoinIndexedVector * spareColumn2);
// Model accessors bridging the base-class ClpSimplex* to IClpSimplex*.
void setModel(IClpSimplex* m);
IClpSimplex* model();
};
================================================
FILE: cylp/cpp/IClpSimplex.cpp
================================================
#include "IClpSimplex.hpp"
#include "ClpSimplexDual.hpp"
//#include "IClpSimplexPrimal.hpp"
#include "IClpSimplexPrimal.hpp"
#include "ClpSimplexPrimal.hpp"
#include "IClpPackedMatrix.hpp"
#include "OsiClpSolverInterface.hpp"
#include
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include
int IClpSimplex::argWeightedMax(PyObject* arr, PyObject* arr_ind, PyObject* w, PyObject* w_ind){
//_import_array();
npy_intp w_ind_len = PyArray_DIM(reinterpret_cast(w_ind), 0);
if (w_ind_len == 0)
return -1; //return PyArray_ArgMax(reinterpret_cast(arr));
int wIsNum = false;
int wholeArray = false;
double w_num_val;
if (PyLong_Check(w)){
wIsNum = true;
w_num_val = PyLong_AsDouble(w);
}else if (PyFloat_Check(w)){
wIsNum = true;
w_num_val = PyFloat_AsDouble(w);
}else if (!PyArray_Check(w)){
PyErr_SetString(PyExc_ValueError,
"weights should be a number or a numpy array.");
return -1;
}
if (PyLong_Check(arr_ind) || PyFloat_Check(arr_ind)){
wholeArray = true;
}else if (!PyArray_Check(arr_ind)){
PyErr_SetString(PyExc_ValueError,
"arr_ind should be a number(meaning 1..len(arr) or a numpy array.");
return -1;
}
if (!PyArray_Check(arr) || !PyArray_Check(w_ind)){
PyErr_SetString(PyExc_ValueError,
"arr and w_ind should be numpy arrays.");
return -1;
}
PyObject* arr_it = PyArray_IterNew(arr);
npy_intp arr_len = PyArray_DIM(reinterpret_cast(arr), 0);
if (arr_len == 0)
return 0;
double maxVal;// = *(double*)PyArray_ITER_DATA(arr_it);
int maxInd;// = 0;
double curVal;
double curInd;
//int w_ind_val;
//double w_val;
//consider 4 cases:
//1- whole array, weight is a single number
//2- whole array, weights array
//3- arr_ind, weights number
//4- arr_ind, weights array
if (wholeArray){
maxVal = *(double*)PyArray_ITER_DATA(arr_it);
maxInd = 0;
PyObject* w_ind_it= PyArray_IterNew(w_ind);
int w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
if (wIsNum){
if (w_ind_val == 0){
maxVal *= w_num_val;
PyArray_ITER_NEXT(w_ind_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
}
PyArray_ITER_NEXT(arr_it);
for (int i = 1 ; i < arr_len ; i++){
curVal = *(double*)PyArray_ITER_DATA(arr_it);
if (w_ind_val == i){
curVal *= w_num_val;
PyArray_ITER_NEXT(w_ind_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
}
if (curVal > maxVal){
maxVal = curVal;
maxInd = i;
}
PyArray_ITER_NEXT(arr_it);
}
}
else{ //look in whole array, weights array
npy_intp w_len = PyArray_DIM(reinterpret_cast(w), 0);
npy_intp w_ind_len = PyArray_DIM(reinterpret_cast(w_ind), 0);
if (w_ind_len != w_len){
PyErr_SetString(PyExc_ValueError,
"If w is a numpy array, w_ind should be a numpy array of the same size.");
return -1;
}
PyObject* w_it= PyArray_IterNew(w);
double w_val = *(double*)PyArray_ITER_DATA(w_it);
if (w_ind_val == 0){
maxVal *= w_val; //*(double *)PyArray_GETITEM(w, PyArray_GETPTR1(w, 0));
PyArray_ITER_NEXT(w_ind_it);
PyArray_ITER_NEXT(w_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
w_val = *(double*)PyArray_ITER_DATA(w_it);
}
PyArray_ITER_NEXT(arr_it);
for (int i = 1 ; i < arr_len ; i++){
curVal = *(double*)PyArray_ITER_DATA(arr_it);
if (w_ind_val == i){
curVal *= w_val;//*(double*)PyArray_GETITEM(w, PyArray_GETPTR1(w, i));
PyArray_ITER_NEXT(w_ind_it);
PyArray_ITER_NEXT(w_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
w_val = *(double*)PyArray_ITER_DATA(w_it);
}
if (curVal > maxVal){
maxVal = curVal;
maxInd = i;
}
PyArray_ITER_NEXT(arr_it);
}
}
}
else{ //only indices specified in arr_ind
npy_intp arr_ind_len = PyArray_DIM(reinterpret_cast(arr_ind), 0);
npy_intp arr_len = PyArray_DIM(reinterpret_cast(arr), 0);
if (arr_ind_len != arr_len){
PyErr_SetString(PyExc_ValueError,
"If a_ind is a numpy array, arr should be a numpy array of the same size.");
return -1;
}
PyObject* arr_ind_it = PyArray_IterNew(arr_ind);
int arr_ind_val = *(int*)PyArray_ITER_DATA(arr_ind_it);
PyObject* arr_it = PyArray_IterNew(arr);
double arr_val = *(double*)PyArray_ITER_DATA(arr_it);
maxVal = arr_val;
maxInd = 0 ; //arr_ind_val;
//std::cout << "maxVal = " << maxVal << "\n";
//std::cout << "maxInd = " << maxInd << "\n";
if (wIsNum){
PyObject* w_ind_it = PyArray_IterNew(w_ind);
int w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
while (PyArray_ITER_NOTDONE(w_ind_it) && arr_ind_val > w_ind_val){
PyArray_ITER_NEXT(w_ind_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
}
if (arr_ind_val == w_ind_val){
maxVal *= w_num_val;
PyArray_ITER_NEXT(w_ind_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
}
for (int i = 1 ; i < arr_ind_len ; i++){
PyArray_ITER_NEXT(arr_ind_it);
PyArray_ITER_NEXT(arr_it);
arr_ind_val = *(int*)PyArray_ITER_DATA(arr_ind_it);
arr_val = *(double*)PyArray_ITER_DATA(arr_it); //*(double*)PyArray_GETITEM(arr, PyArray_GETPTR1(arr, arr_ind_val));
while (PyArray_ITER_NOTDONE(w_ind_it) && arr_ind_val > w_ind_val){
PyArray_ITER_NEXT(w_ind_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
}
if (arr_ind_val == w_ind_val)
arr_val *= w_num_val;
if (arr_val > maxVal){
maxVal = arr_val;
maxInd = i;
}
}
}
else{ //just elements specified in arr_ind, weight's an array
npy_intp arr_ind_len = PyArray_DIM(reinterpret_cast(arr_ind), 0);
npy_intp arr_len = PyArray_DIM(reinterpret_cast(arr), 0);
if (arr_ind_len != arr_len){
PyErr_SetString(PyExc_ValueError,
"If a_ind is a numpy array, arr should be a numpy array of the same size.");
return -1;
}
npy_intp w_len = PyArray_DIM(reinterpret_cast(w), 0);
npy_intp w_ind_len = PyArray_DIM(reinterpret_cast(w_ind), 0);
if (w_ind_len != w_len){
PyErr_SetString(PyExc_ValueError,
"If w is a numpy array, w_ind should be a numpy array of the same size.");
return -1;
}
PyObject* w_ind_it = PyArray_IterNew(w_ind);
int w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
PyObject* w_it = PyArray_IterNew(w);
double w_val = *(double*)PyArray_ITER_DATA(w_it);
while (PyArray_ITER_NOTDONE(w_ind_it) && arr_ind_val > w_ind_val){
PyArray_ITER_NEXT(w_ind_it);
PyArray_ITER_NEXT(w_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
w_val = *(double*)PyArray_ITER_DATA(w_it);
}
if (arr_ind_val == w_ind_val){
maxVal *= w_val;
PyArray_ITER_NEXT(w_ind_it);
PyArray_ITER_NEXT(w_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
w_val = *(double*)PyArray_ITER_DATA(w_it);
}
for (int i = 1 ; i < arr_ind_len ; i++){
PyArray_ITER_NEXT(arr_ind_it);
PyArray_ITER_NEXT(arr_it);
arr_ind_val = *(int*)PyArray_ITER_DATA(arr_ind_it);
arr_val = *(double*)PyArray_ITER_DATA(arr_it); //*(double*)PyArray_GETITEM(arr, PyArray_GETPTR1(arr, arr_ind_val));
while (PyArray_ITER_NOTDONE(w_ind_it) && arr_ind_val > w_ind_val){
PyArray_ITER_NEXT(w_ind_it);
PyArray_ITER_NEXT(w_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
w_val = *(double*)PyArray_ITER_DATA(w_it);
}
if (arr_ind_val == w_ind_val){
arr_val *= w_val;
PyArray_ITER_NEXT(w_ind_it);
PyArray_ITER_NEXT(w_it);
w_ind_val = *(int*)PyArray_ITER_DATA(w_ind_it);
w_val = *(double*)PyArray_ITER_DATA(w_it);
}
if (arr_val > maxVal){
maxVal = arr_val;
maxInd = i;
}
}
}
}
return maxInd;
}
int IClpSimplex::argWeightedMax(PyObject* arr, PyObject* whr, double weight){
    //_import_array();
    // Argmax of the double array 'arr' after scaling by 'weight' the entries
    // whose indices are listed in 'whr' (int32, assumed sorted ascending).
    // Returns the winning index, or -1 with a ValueError set on bad input.
    if (!PyArray_Check(arr) || !PyArray_Check(whr)){
        PyErr_SetString(PyExc_ValueError,
                "Arguments of argWeightedMax should be numpy arrays.");
        return -1;
    }
    // The template argument of this cast was lost in extraction; PyArray_DIM
    // takes a PyArrayObject*.
    npy_intp arr_len = PyArray_DIM(reinterpret_cast<PyArrayObject*>(arr), 0);
    if (arr_len == 0)
        return 0;
    PyObject* arr_it = PyArray_IterNew(arr);
    PyObject* whr_it = PyArray_IterNew(whr);
    // Guard every dereference of whr_it: reading ITER_DATA of an empty or
    // exhausted iterator is an out-of-bounds access.  -1 never matches a
    // loop index, so it acts as an inert sentinel.
    int curWhere = PyArray_ITER_NOTDONE(whr_it) ?
                   *(int*)PyArray_ITER_DATA(whr_it) : -1;
    double maxVal = *(double*)PyArray_ITER_DATA(arr_it);
    int maxInd = 0;
    if (curWhere == 0){
        maxVal *= weight;
        PyArray_ITER_NEXT(whr_it);
        curWhere = PyArray_ITER_NOTDONE(whr_it) ?
                   *(int*)PyArray_ITER_DATA(whr_it) : -1;
    }
    PyArray_ITER_NEXT(arr_it);
    for (int i = 1 ; i < arr_len ; i++){
        double curVal = *(double*)PyArray_ITER_DATA(arr_it);
        if (curWhere == i){
            curVal *= weight;
            PyArray_ITER_NEXT(whr_it);
            curWhere = PyArray_ITER_NOTDONE(whr_it) ?
                       *(int*)PyArray_ITER_DATA(whr_it) : -1;
        }
        if (curVal > maxVal){
            maxVal = curVal;
            maxInd = i;
        }
        PyArray_ITER_NEXT(arr_it);
    }
    // PyArray_IterNew returns new references; the original leaked both.
    Py_DECREF(arr_it);
    Py_DECREF(whr_it);
    return maxInd;
}
// --- single-variable status queries ---------------------------------------
// 'ind' indexes the combined variable space used by ClpSimplex::getStatus:
// structural columns first, then row slacks.
bool IClpSimplex::varIsFree(int ind){
return getStatus(ind) == ClpSimplex::isFree;
}
// True when variable 'ind' is in the current basis.
bool IClpSimplex::varBasic(int ind){
return getStatus(ind) == ClpSimplex::basic;
}
// True when variable 'ind' sits at its upper bound.
bool IClpSimplex::varAtUpperBound(int ind){
return getStatus(ind) == ClpSimplex::atUpperBound;
}
// True when variable 'ind' sits at its lower bound.
bool IClpSimplex::varAtLowerBound(int ind){
return getStatus(ind) == ClpSimplex::atLowerBound;
}
// True when variable 'ind' is superbasic (nonbasic, off both bounds).
bool IClpSimplex::varSuperBasic(int ind){
return getStatus(ind) == ClpSimplex::superBasic;
}
// True when variable 'ind' is fixed.
bool IClpSimplex::varIsFixed(int ind){
return getStatus(ind) == ClpSimplex::isFixed;
}
// --- numpy views over internal Clp buffers ---------------------------------
// Every get* below wraps an internal buffer with PyArray_SimpleNewFromData,
// so the returned ndarray is a VIEW, not a copy.  NOTE(review): no base
// object is attached to these views, so they dangle if the model is freed
// while the array is still alive -- callers must keep the model alive.
PyObject* IClpSimplex::getStatusArray(){
// One status byte per variable: columns first, then row slacks.
npy_intp dims = getNumCols() + getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_UINT8, this->status_ );
return Arr;
}
// Reduced costs for all columns and rows (Clp's dj region).
PyObject* IClpSimplex::getReducedCosts(){
npy_intp dims = getNumCols() + getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->djRegion() );
return Arr;
}
// Overwrite the dj region; 'rc' must hold numCols+numRows doubles.
void IClpSimplex::setReducedCosts(double* rc){
int dim = getNumCols() + getNumRows();
for (int i = 0; i < dim; i++) {
dj_[i] = rc[i];
}
}
// View of the QP complementarity pairing (set via setComplementarityList).
PyObject* IClpSimplex::getComplementarityList(){
npy_intp dims = getNumCols() + getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_INT32, QP_ComplementarityList );
return Arr;
}
// For each row, the index of the variable currently basic in it.
PyObject* IClpSimplex::getPivotVariable(){
npy_intp dims = getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_INT32, this->pivotVariable() );
return Arr;
}
// Row activities (primal values of the row slacks), length numRows.
PyObject* IClpSimplex::getPrimalRowSolution(){
npy_intp dims = getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->primalRowSolution() );
return Arr;
}
// Primal values of the structural variables, length numCols.
PyObject* IClpSimplex::getPrimalColumnSolution(){
npy_intp dims = getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData(1, &dims, NPY_DOUBLE, this->primalColumnSolution() );
return Arr;
}
// Same buffer as above but exposed over the full columns+rows length.
PyObject* IClpSimplex::getPrimalColumnSolutionAll(){
npy_intp dims = getNumCols() + getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData(1, &dims, NPY_DOUBLE, this->primalColumnSolution() );
return Arr;
}
// Clp's internal (possibly scaled) solution region, columns+rows.
PyObject* IClpSimplex::getSolutionRegion(){
npy_intp dims = getNumCols() + getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData(1, &dims, NPY_DOUBLE, this->solutionRegion() );
return Arr;
}
// Clp's internal cost region, columns+rows.
PyObject* IClpSimplex::getCostRegion(){
npy_intp dims = getNumCols() + getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData(1, &dims, NPY_DOUBLE, this->costRegion() );
return Arr;
}
// Dual values (one per row).
PyObject* IClpSimplex::getDualRowSolution(){
npy_intp dims = getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->dualRowSolution() );
return Arr;
}
// Reduced costs of the structural variables only.
PyObject* IClpSimplex::getDualColumnSolution(){
npy_intp dims = getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->dualColumnSolution() );
return Arr;
}
// Linear objective coefficients, length numCols.
PyObject* IClpSimplex::getObjective(){
npy_intp dims = getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->objective() );
return Arr;
}
// Row lower bounds, length numRows.
PyObject* IClpSimplex::getRowLower(){
npy_intp dims = getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->rowLower() );
return Arr;
}
// Row upper bounds, length numRows.
PyObject* IClpSimplex::getRowUpper(){
npy_intp dims = getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->rowUpper() );
return Arr;
}
// Upper bounds over the whole (columns+rows) region.
PyObject* IClpSimplex::getUpper(){
npy_intp dims = getNumRows() + getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->upperRegion() );
return Arr;
}
// Lower bounds over the whole (columns+rows) region.
PyObject* IClpSimplex::getLower(){
npy_intp dims = getNumRows() + getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->lowerRegion() );
return Arr;
}
// Column lower bounds, length numCols.
PyObject* IClpSimplex::getColLower(){
npy_intp dims = getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->columnLower() );
return Arr;
}
// Column upper bounds, length numCols.
PyObject* IClpSimplex::getColUpper(){
npy_intp dims = getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, this->columnUpper() );
return Arr;
}
// Column scale factors; NOTE(review): columnScale_ may be NULL when the
// model is unscaled -- confirm callers check scaling first.
PyObject* IClpSimplex::getColumnScale(){
npy_intp dims = getNumCols();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, columnScale_);
return Arr;
}
// Row scale factors; same NULL caveat as getColumnScale.
PyObject* IClpSimplex::getRowScale(){
npy_intp dims = getNumRows();
PyObject *Arr = PyArray_SimpleNewFromData( 1, &dims, NPY_DOUBLE, rowScale_ );
return Arr;
}
// Integrality flags per column; returns a freshly zeroed array (a real
// copy, unlike the other getters) when no integer information exists.
PyObject* IClpSimplex::getIntegerInformation(){
npy_intp dims = getNumCols();
PyObject* Arr;
if (this->integerInformation())
Arr = PyArray_SimpleNewFromData(1, &dims, NPY_INT8, this->integerInformation());
else
Arr = PyArray_ZEROS(1, &dims, NPY_INT8, 0);
return Arr;
}
std::vector IClpSimplex::getVariableNames(){
if (lengthNames_)
return columnNames_;
return std::vector ();
}
void IClpSimplex::setVariableName(int varInd, char* name){
if (varInd >= getNumCols())
return;
if (lengthNames_ == 0){
unsigned int maxLength=0;
int iRow;
rowNames_ = std::vector ();
columnNames_ = std::vector ();
rowNames_.reserve(numberRows_);
for (iRow=0;iRow maxLength)
maxLength = rowName.length();
rowNames_.push_back(rowName);
}
columnNames_.reserve(numberColumns_);
int iColumn;
for (iColumn=0;iColumn maxLength)
maxLength = colName.length();
columnNames_.push_back(colName);
}
//
// int iColumn;
// columnNames_.reserve(numberColumns_);
// for (iColumn=0;iColumn (strlen(name)));
// columnNames_.push_back(name);
// }
lengthNames_=static_cast (maxLength);
// columnNames_.resize(getNumCols());
//std::cout << columnNamesAsChar()[10] << "<$$$$$$$$$$$$$\n";
}
std::string st(name);
columnNames_[varInd] = st;
}
void IClpSimplex::setConstraintName(int constInd, char* name){
if (constInd >= getNumRows())
return;
if (lengthNames_ == 0){
unsigned int maxLength=0;
int iRow;
rowNames_ = std::vector ();
columnNames_ = std::vector ();
rowNames_.reserve(numberRows_);
for (iRow=0;iRow maxLength)
maxLength = rowName.length();
rowNames_.push_back(rowName);
}
columnNames_.reserve(numberColumns_);
int iColumn;
for (iColumn=0;iColumn maxLength)
maxLength = colName.length();
columnNames_.push_back(colName);
}
lengthNames_=static_cast (maxLength);
}
std::string st(name);
rowNames_[constInd] = st;
}
// Lazily allocate the scratch index buffer used by filterVars().
// Guard against repeated calls: the original unconditionally re-allocated,
// leaking the previous buffer.
void IClpSimplex::createTempArray(){
    if (tempArrayExists)
        return;
    tempIntArray = new int[getNumCols() + getNumRows()];
    tempArrayExists = true;
}
// Construct an empty model wired to the Cython callbacks.  'obj_arg' is the
// Python-side object handed back to every callback invocation.
IClpSimplex::IClpSimplex(PyObject *obj_arg, runIsPivotAcceptable_t runIsPivotAcceptable_arg,
varSelCriteria_t runVarSelCriteria ):ClpSimplex()
{
_import_array(); // initialize the numpy C API for this translation unit
tempArrayExists = false;
obj = obj_arg;
runIsPivotAcceptable = runIsPivotAcceptable_arg;
varSelCriteria = runVarSelCriteria;
customPrimal = 0;
createStatus();
pinfo = ClpPresolve();
// Scratch/QP buffers are allocated lazily in primal().
tempRow = NULL;
tempRow_vector = NULL;
QP_BanList = NULL;
QP_ComplementarityList = NULL;
}
// Toggle whether primal() routes through IClpSimplexPrimal (CyLP's custom
// primal) instead of plain ClpSimplexPrimal.
void IClpSimplex::useCustomPrimal(int u)
{
customPrimal = u;
}
// Current value of the custom-primal flag.
int IClpSimplex::getUseCustomPrimal()
{
return customPrimal;
}
// Install the QP complementarity pairing array.
// NOTE(review): ownership is unclear -- the destructor deletes this
// pointer, so the caller must hand over heap memory; confirm at call sites.
void IClpSimplex::setComplementarityList(int * cl)
{
QP_ComplementarityList = cl;
}
// Raw access to the complementarity pairing array (may be NULL).
int* IClpSimplex::ComplementarityList()
{
return QP_ComplementarityList;
}
// Set column/row basis status via a temporary OsiClpSolverInterface wrapper
// (Osi encodes status as ints; the wrapper translates to Clp's encoding).
void IClpSimplex::setBasisStatus(const int* cstat, const int* rstat){
OsiClpSolverInterface osi(this, false);
osi.setBasisStatus(cstat, rstat);
return;
}
// Cap the simplex iteration count.
void IClpSimplex::setMaxNumIteration(int m){
setIntParam(ClpMaxNumIteration, m);
}
// Read the basis status in Osi's int encoding into cstat/rstat.
void IClpSimplex::getBasisStatus(int* cstat, int* rstat){
OsiClpSolverInterface osi(this, false);
osi.getBasisStatus(cstat, rstat);
return;
}
// Build a sub-model over a column subset of 'wholeModel' (delegates to the
// corresponding ClpSimplex constructor).  Callback pointers are NOT copied
// here; only the scratch state is reset.
IClpSimplex::IClpSimplex (ClpSimplex * wholeModel,
int numberColumns, const int * whichColumns):
ClpSimplex(wholeModel, numberColumns, whichColumns)
{
_import_array(); // initialize the numpy C API for this translation unit
tempArrayExists = false;
tempRow_vector = NULL;
tempRow = NULL;
QP_ComplementarityList = NULL;
QP_BanList = NULL;//new int[nvars];
pinfo = ClpPresolve();
}
// Release lazily-allocated scratch buffers.
// QP_ComplementarityList, QP_BanList and tempRow are allocated with new[]
// (see primal()), so they must be released with delete[]; the original used
// scalar delete, which is undefined behaviour.  tempIntArray (allocated in
// createTempArray) was never freed at all.
// NOTE(review): QP_ComplementarityList may also come from
// setComplementarityList(); deleting it assumes the caller transferred
// ownership -- confirm at the Cython call sites.
IClpSimplex::~IClpSimplex(){
    delete [] QP_ComplementarityList;   // delete[] on NULL is a no-op
    delete [] QP_BanList;
    delete [] tempRow;
    delete tempRow_vector;              // single CoinIndexedVector object
    if (tempArrayExists)
        delete [] tempIntArray;         // only valid after createTempArray()
}
// Thin forwarder to the matrix's dualExpanded (exposes the protected Clp
// hook to the Cython layer).
void IClpSimplex::dualExpanded(ClpSimplex * model,CoinIndexedVector * array,
double * other,int mode){
this->clpMatrix()->dualExpanded(model,array,other,mode);
}
// Forward to the Python-side pivot-acceptance callback.  When either the
// Python object handle or the function pointer is missing, report the
// broken state on stderr and return -1.
int IClpSimplex::isPivotAcceptable()
{
    if (!this->obj || !this->runIsPivotAcceptable) {
        std::cerr << "** pivotRow: invalid cy-state: obj [" << this->obj << "] fct: ["
                  << this->runIsPivotAcceptable << "]\n";
        return -1;
    }
    return this->runIsPivotAcceptable(this->obj);
}
// Install the Python-side variable-selection callback used by checkVar().
void IClpSimplex::setCriteria(varSelCriteria_t vsc){
varSelCriteria = vsc;
}
// Ask the Python-side selection callback whether variable 'varInd' is an
// acceptable candidate; returns the callback's verdict, or -1 when the
// callback state is not wired up.
int IClpSimplex::checkVar(int varInd){
    if (this->obj && this->varSelCriteria) {
        return this->varSelCriteria(this->obj, varInd);
    }
    // The original diagnostic said "pivotRow" -- a copy-paste from
    // isPivotAcceptable(); name the method that actually failed.
    std::cerr << "** checkVar: invalid cy-state: obj [" << this->obj << "] fct: ["
              << this->varSelCriteria << "]\n";
    return -1;
}
// Wrap this LP in a new ICbcModel (for branch and bound).
// NOTE(review): returns an owning raw pointer; the Cython side must free it.
ICbcModel* IClpSimplex::getICbcModel(){
// ?
matrix_->setDimensions(numberRows_, numberColumns_);
OsiClpSolverInterface solver1(this);
ICbcModel* model = new ICbcModel(solver1);
return model;
}
// Write the model in LP format via a temporary Osi wrapper.
void IClpSimplex::writeLp(const char *filename,
const char *extension,
double epsilon,
int numberAcross,
int decimals,
double objSense,
bool useRowNames)
{
matrix_->setDimensions(numberRows_, numberColumns_);
OsiClpSolverInterface solver1(this);
solver1.writeLp(filename, extension, epsilon, numberAcross, decimals, objSense, useRowNames);
return ;
}
//Get a column of the tableau
void
IClpSimplex::getBInvACol(int col, double* vec)
{
if (!rowArray_[0]) {
printf("ClpSimplexPrimal or ClpSimplexDual should have been called with correct startFinishOption\n");
abort();
}
CoinIndexedVector * rowArray0 = rowArray(0);
CoinIndexedVector * rowArray1 = rowArray(1);
rowArray0->clear();
rowArray1->clear();
// get column of matrix
#ifndef NDEBUG
int n = numberColumns_+numberRows_;
if (col<0||col>=n) {
//indexError(col,"getBInvACol");
}
#endif
if (!rowScale_) {
if (colinsert(col-numberColumns_,1.0);
}
} else {
if (colgetNumElements();
int * index = rowArray1->getIndices();
double * array = rowArray1->denseVector();
for (int i=0;iinsert(col-numberColumns_,rowScale_[col-numberColumns_]);
}
}
factorization_->updateColumn(rowArray0,rowArray1,false);
// But swap if pivot variable was slack as clp stores slack as -1.0
double * array = rowArray1->denseVector();
if (!rowScale_) {
for (int i=0;iclear();
}
//Fetches the ncol th column into colArray
void IClpSimplex::getACol(int ncol, CoinIndexedVector * colArray){
//CoinIndexedVector * colArray = temp_rowArray[1];
colArray->clear();
// get column of matrix
#ifndef NDEBUG
//int n = numberColumns_+numberRows_;
//if (ncol<0||ncol>=n) {
// indexError(ncol,"getBInvACol");
//}
#endif
if (!rowScale()) {
if (ncolinsert(ncol- numberColumns(),1.0);
}
} else {
if (ncolgetNumElements();
int * index = colArray->getIndices();
double * array = colArray->denseVector();
for (int i=0;iinsert(ncol- numberColumns(),rowScale()[ncol-numberColumns()]);
}
}
}
// Effective right-hand side of each row, adjusted for slack activity.
// NOTE(review): the extraction corrupted this region -- the tail of
// getRightHandSide, all of extractSenseRhsRange, and the signature of
// convertBoundToSense (declared in IClpSimplex.hpp as
// convertBoundToSense(const double lower, const double upper, double&
// right)) were swallowed between the two visible fragments below.  The
// remaining text cannot compile as-is; restore from upstream CyLP.
void IClpSimplex::getRightHandSide(double* righthandside)
{
int nr=numberRows();
extractSenseRhsRange(righthandside);
int* basis_index = pivotVariable();
//FIXME: change these lines to be like getColSoution and getRowActivity in OsiClp
const double *solution = solutionRegion(1);
const double *row_act = solutionRegion(0);
//FIXME: This must be fixed. The first line causes seg fault
//So I'm allocating and deleting in this function
//double *slack_val = tempRow;
double *slack_val = new double[nr];
// NOTE(review): garbled from here -- the loop over rows and the start of
// convertBoundToSense are missing.  The code below is the surviving body
// of convertBoundToSense: pick 'right' from the tighter finite bound
// (sense characters are commented out upstream).
for(int i=0; i -inf) {
if (upper < inf) {
right = upper;
//if (upper==lower) {
//sense = 'E';
//} else {
//sense = 'R';
//range = upper - lower;
//}
} else {
//sense = 'G';
right = lower;
}
} else {
if (upper < inf) {
//sense = 'L';
right = upper;
} else {
//sense = 'N';
right = 0.0;
}
}
}
// Multiply 'vec' (as a row vector) by B^-1 via the factorization's
// transpose update.  Uses tempRow_vector as the spare region.
// NOTE(review): tempRow_vector is only allocated in primal(); calling this
// beforehand passes NULL as the spare -- confirm intended usage.
void IClpSimplex::vectorTimesB_1(CoinIndexedVector* vec){
factorization_->updateColumnTranspose(tempRow_vector, vec);
}
void IClpSimplex::transposeTimesSubset(int number, int* which, double* pi, double* y){
reinterpret_cast(matrix_)->transposeTimesSubset(number,
which, pi, y, rowScale(), columnScale(), NULL);
}
void IClpSimplex::transposeTimes(const ClpSimplex * model, double scalar,
const CoinIndexedVector * x,
CoinIndexedVector * y,
CoinIndexedVector * z){
reinterpret_cast(matrix_)->transposeTimes(
model, scalar, x, y, z);
}
void IClpSimplex::transposeTimesSubsetAll(int number, long long int* which, double* pi, double* y){
reinterpret_cast(matrix_)->transposeTimesSubsetAll(this, number,
which, pi, y, rowScale(), columnScale(), NULL);
}
// Copy constructor.
// Copies the Clp model (optionally rescaling) and adopts the given callback
// state; all scratch buffers start unallocated.
IClpSimplex::IClpSimplex(const ClpSimplex &rhs,PyObject *obj,
runIsPivotAcceptable_t runIsPivotAcceptable,
varSelCriteria_t varSelCriteria,
int useCustomPrimal, int scalingMode ) :
ClpSimplex(rhs,scalingMode),
obj(obj),
runIsPivotAcceptable(runIsPivotAcceptable),
varSelCriteria(varSelCriteria),
customPrimal(useCustomPrimal),
tempArrayExists(false),
tempRow(NULL),
tempRow_vector(NULL),
QP_BanList(NULL),
QP_ComplementarityList(NULL)
{
}
// Presolve 'si' and wrap the presolved model in a new IClpSimplex carrying
// the same callback state.  Returns NULL when presolve declares the problem
// infeasible/unbounded.
// NOTE(review): 's' (owned by this->pinfo) is copied into 'ret' and never
// freed here; postSolve() below relies on pinfo still holding it -- the
// ownership dance deserves a second look upstream.
IClpSimplex* IClpSimplex::preSolve(IClpSimplex* si,
double feasibilityTolerance,
bool keepIntegers,
int numberPasses,
bool dropNames,
bool doRowObjective)
{
//ClpPresolve pinfo;
ClpSimplex* s = pinfo.presolvedModel(*si,feasibilityTolerance,
keepIntegers, numberPasses, dropNames, doRowObjective);
if (s)
{
IClpSimplex* ret = new IClpSimplex(*s, si->obj, si->runIsPivotAcceptable, si->varSelCriteria, si->customPrimal);
//ret->pinfo = pinfo;
//pinfo.postsolve();
return ret;
}
return NULL;
}
// Map the presolved solution back onto the original model.
void IClpSimplex::postSolve(bool updateStatus){
pinfo.postsolve(updateStatus);
}
// Presolve 'si', solve the reduced model with dual simplex, postsolve, then
// clean up by re-running dual on *this*.  Returns the reduced model's dual()
// status, or -2000 when presolve fails.
// NOTE(review): the presolve runs on 'si' but the final checkSolution()/
// dual() run on 'this' -- the calling convention (si == this?) should be
// confirmed against the Cython wrapper.
int IClpSimplex::dualWithPresolve(IClpSimplex* si,
double feasibilityTolerance,
bool keepIntegers,
int numberPasses,
bool dropNames,
bool doRowObjective)
{
ClpPresolve pinfoTemp;
ClpSimplex* s = pinfoTemp.presolvedModel(*si,feasibilityTolerance,
keepIntegers, numberPasses, dropNames, doRowObjective);
if (s)
{
int ret = s->dual();
pinfoTemp.postsolve();
delete s;
checkSolution();
dual();
return ret;
}
return -2000;
}
// Same as dualWithPresolve but uses the primal simplex throughout.
int IClpSimplex::primalWithPresolve(IClpSimplex* si,
double feasibilityTolerance,
bool keepIntegers,
int numberPasses,
bool dropNames,
bool doRowObjective)
{
ClpPresolve pinfoTemp;
ClpSimplex* s = pinfoTemp.presolvedModel(*si,feasibilityTolerance,
keepIntegers, numberPasses, dropNames, doRowObjective);
if (s)
{
int ret = s->primal();
pinfoTemp.postsolve();
delete s;
checkSolution();
primal();
return ret;
}
return -2000;
}
int IClpSimplex::initialSolve(int preSolveType){
ClpSolve options;
options.setPresolveType(static_cast(preSolveType));
return ClpSimplex::initialSolve(options);
}
// Primal simplex entry point.  Lazily allocates the QP/scratch buffers,
// optionally runs the parallel values-pass decomposition (ifValuesPass >=
// 12), dispatches to either ClpSimplexPrimal or CyLP's IClpSimplexPrimal
// depending on customPrimal, and cleans up a status-10 result with dual.
// NOTE(review): large portions of the decomposition branch below were
// corrupted during extraction (loop headers and whole statements swallowed
// between '<' and '>'); the code between the CoinAssert and the dispatch
// cannot compile as-is and must be restored from upstream CyLP/Clp.
int IClpSimplex::primal (int ifValuesPass , int startFinishOptions)
{
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
if (tempRow == NULL)
tempRow = new double[numberRows()];
if (tempRow_vector == NULL)
tempRow_vector = new CoinIndexedVector();
if (QP_BanList == NULL)
QP_BanList = new int[numberColumns() + numberRows()];
//FIXME: This is a crazy 1000 here.
//But whatever you do to fix this try it on adlittle, degen2
tempRow_vector->reserve(numberRows() + numberColumns() + numberExtraRows() + 1000);
//tempRow_vector->reserve(numberRows() + +numberColumns() + numberExtraRows());
//double savedPivotTolerance = factorization_->pivotTolerance();
#ifndef SLIM_CLP
// See if nonlinear
if (objective_->type()>1&&objective_->activated())
return reducedGradient();
#endif
CoinAssert ((ifValuesPass>=0&&ifValuesPass<3)||
(ifValuesPass>=12&&ifValuesPass<100)||
(ifValuesPass>=112&&ifValuesPass<200));
if (ifValuesPass>=12) {
int numberProblems = (ifValuesPass-10)%100;
ifValuesPass = (ifValuesPass<100) ? 1 : 2;
// Go parallel to do solve
// Only if all slack basis
int i;
// NOTE(review): garbled from here to the dispatch below.
for ( i=0;igetIndices();
const CoinBigIndex * columnStart = matrix_->getVectorStarts();
const int * columnLength = matrix_->getVectorLengths();
const double * element = matrix_->getElements();
for (int iColumn=0;iColumn(numberRows_))<1.0) {
// Could do better if can decompose
// correction to get feasible
double scaleFactor = 1.0/numberProblems;
double * correction = new double [numberRows_];
for (int iRow=0;iRowrowUpper_[iRow])
value = rowUpper_[iRow]-value;
else if (value-1.0e30)
rowLower_[iRow]=value;
}
model[i] = new ClpSimplex(this,numberRows_,whichRows,
endColumn-startColumn,whichColumns);
//#define FEB_TRY
#ifdef FEB_TRY
model[i]->setPerturbation(perturbation_);
#endif
startColumn=endColumn;
}
memcpy(rowLower_,saveLower,numberRows_*sizeof(double));
memcpy(rowUpper_,saveUpper,numberRows_*sizeof(double));
delete [] saveLower;
delete [] saveUpper;
delete [] correction;
// solve (in parallel)
for (int i=0;iprimal(1/*ifValuesPass*/);
}
startColumn=0;
int numberBasic=0;
// use whichRows as counter
for (int iRow=0;iRowrowLower_[iRow])
startValue++;
if (rowUpper_[iRow]>1.0e30)
startValue++;
if (rowLower_[iRow]<-1.0e30)
startValue++;
whichRows[iRow]=1000*startValue;
}
for (int i=0;igetColumnStatus(iColumn-startColumn);
setColumnStatus(iColumn,status);
if (status==basic)
numberBasic++;
}
for (int iRow=0;iRowgetRowStatus(iRow)==basic)
whichRows[iRow]++;
}
delete model[i];
startColumn=endColumn;
}
delete [] model;
for (int iRow=0;iRownumberRows_) {
double * away = new double [numberBasic];
numberBasic=0;
// NOTE(review): the casts below lost their <IClpSimplexPrimal*> /
// <ClpSimplexPrimal*> template arguments in extraction.
for (int iColumn=0;iColumn (this)->primal(ifValuesPass,startFinishOptions);
}
else {
//std::cout << "IClpSimplex L280: casting to IClpSimplexPrimal\n";
returnCode = reinterpret_cast (this)->primal(ifValuesPass,startFinishOptions);
}
//int lastAlgorithm=1;
if (problemStatus_==10) {
//lastAlgorithm=-1;
//printf("Cleaning up with dual\n");
int savePerturbation = perturbation_;
perturbation_=100;
bool denseFactorization = initialDenseFactorization();
// It will be safe to allow dense
setInitialDenseFactorization(true);
// check which algorithms allowed
int dummy;
baseIteration_=numberIterations_;
if ((matrix_->generalExpanded(this,4,dummy)&2)!=0&&(specialOptions_&8192)==0) {
double saveBound = dualBound_;
// upperOut_ has largest away from bound
dualBound_=std::min(std::max(2.0*upperOut_,1.0e8),dualBound_);
returnCode = reinterpret_cast (this)->dual(0,startFinishOptions);
dualBound_=saveBound;
} else {
if (!customPrimal){
std::cout << "IClpSimplex: continue with ClpSimplexPrimal \n";
returnCode = reinterpret_cast (this)->primal(0,startFinishOptions);
}else {
std::cout << "IClpSimplex: casting to IClpSimplexPrimal\n";
returnCode = reinterpret_cast (this)->primal(0,startFinishOptions);
}
}
baseIteration_=0;
setInitialDenseFactorization(denseFactorization);
perturbation_=savePerturbation;
if (problemStatus_==10)
problemStatus_=0;
}
//factorization_->pivotTolerance(savedPivotTolerance);
onStopped(); // set secondary status if stopped
//if (problemStatus_==1&&lastAlgorithm==1)
//returnCode=10; // so will do primal after postsolve
//delete tempRow;
//delete tempRow_vector;
//delete QP_BanList;
return returnCode;
}
// Dot product computed over the nonzero pattern of pv1: for each index
// stored in pv1, multiply the matching entries of pv1 and pv2 and
// accumulate.  Entries of pv2 outside pv1's pattern are ignored.
double cdot(CoinIndexedVector* pv1, CoinIndexedVector* pv2){
    const int nElem = pv1->getNumElements();
    const int* pattern = pv1->getIndices();
    double acc = 0;
    int k = 0;
    while (k < nElem){
        const int j = pattern[k];
        acc += (*pv1)[j] * (*pv2)[j];
        ++k;
    }
    return acc;
}
// Filter candidate variable indices through the Python-side selection
// callback (checkVar), keeping a candidate only when its reduced-cost
// magnitude is at least the best seen so far.  Returns an NPY_INT array
// that VIEWS tempIntArray (no copy) -- valid only while this model lives.
PyObject* IClpSimplex::filterVars(PyObject* inds){
    if (!PyArray_Check(inds)){
        PyErr_SetString(PyExc_ValueError,
                "filterVars: inds should be a numpy array.");
        return NULL;
    }
    // Allocate the backing buffer before handing out any view of it: the
    // original could return a view of an uninitialized pointer when 'inds'
    // was empty and createTempArray() had never run.
    if (tempArrayExists == false)
        createTempArray();
    // The template argument of this cast was lost in extraction;
    // PyArray_DIM takes a PyArrayObject*.
    npy_intp inds_len = PyArray_DIM(reinterpret_cast<PyArrayObject*>(inds), 0);
    if (inds_len == 0){
        npy_intp dims = 0;
        return PyArray_SimpleNewFromData( 1, &dims, NPY_INT, tempIntArray );
    }
    int ind_count = 0;
    PyObject* inds_it= PyArray_IterNew(inds);
    double bestRc = 0;
    double* rc = djRegion();
    while (PyArray_ITER_NOTDONE(inds_it)){
        int i = *(int*)PyArray_ITER_DATA(inds_it);
        if (fabs(rc[i]) < bestRc){
            PyArray_ITER_NEXT(inds_it);
            continue;
        }
        if (checkVar(i)){
            tempIntArray[ind_count++] = i;
            bestRc = fabs(rc[i]);
        }
        PyArray_ITER_NEXT(inds_it);
    }
    Py_DECREF(inds_it);  // PyArray_IterNew returns a new reference
    npy_intp dims = ind_count;
    return PyArray_SimpleNewFromData( 1, &dims, NPY_INT, tempIntArray );
}
================================================
FILE: cylp/cpp/IClpSimplex.hpp
================================================
#ifndef IClpSimplex_H
#define IClpSimplex_H
//#define NPY_NO_DEPRECATED_API
//#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
//#include "ClpModel.hpp"
#include "ClpSimplex.hpp"
#include "ClpPresolve.hpp"
#include "ClpLinearObjective.hpp"
#include "CoinIndexedVector.hpp"
#include "ClpFactorization.hpp"
#include "Python.h"
#include <vector>
#include <string>
#include "ICbcModel.hpp"
#include "ClpParameters.hpp"
#include "ICoinPackedMatrix.hpp"
//#include "ClpSimplexPrimal.hpp"
typedef int (*runIsPivotAcceptable_t)(void *instance);
typedef int (*varSelCriteria_t)(void *instance, int varInd);
class IClpSimplex : public ClpSimplex{
public:
ClpPresolve pinfo;
IClpSimplex(const ClpSimplex &rhs,PyObject *obj,
runIsPivotAcceptable_t runPivotRow,
varSelCriteria_t RunVarSelCriteria,
int useCustomPrimal, int scalingMode=-1 );
IClpSimplex(PyObject *obj, runIsPivotAcceptable_t runPivotRow,
varSelCriteria_t RunVarSelCriteria );
int initialSolve(int preSolveType=ClpSolve::presolveOn);
// For initializing instances that are build using ClpSimplex consructor (NOT IClpSimplex)
void setCriteria(varSelCriteria_t vsc);
int argWeightedMax(PyObject* arr, PyObject* whr, double weight);
int argWeightedMax(PyObject* arr, PyObject* arr_ind, PyObject* w, PyObject* w_ind);
PyObject *obj;
runIsPivotAcceptable_t runIsPivotAcceptable;
int isPivotAcceptable();
varSelCriteria_t varSelCriteria;
int checkVar(int varInd);
PyObject* filterVars(PyObject* inds);
int* tempIntArray;
bool tempArrayExists;
void createTempArray(void);
int* QP_ComplementarityList;
int* QP_BanList;
int QP_ExistsBannedVariable;
void setComplementarityList(int *);
int* ComplementarityList();
bool varIsFree(int ind);
bool varBasic(int ind);
bool varAtUpperBound(int ind);
bool varAtLowerBound(int ind);
bool varSuperBasic(int ind);
bool varIsFixed(int ind);
PyObject * getReducedCosts();
void setReducedCosts(double*);
PyObject * getStatusArray();
PyObject * getComplementarityList();
PyObject * getPivotVariable();
PyObject * getPrimalRowSolution();
PyObject * getPrimalColumnSolution();
PyObject * getPrimalColumnSolutionAll();
PyObject * getSolutionRegion();
PyObject * getCostRegion();
PyObject * getDualRowSolution();
PyObject * getDualColumnSolution();
PyObject * getObjective();
PyObject * getRowLower();
PyObject * getRowUpper();
PyObject * getColLower();
PyObject * getColUpper();
PyObject * getColumnScale();
PyObject * getRowScale();
PyObject* getLower();
PyObject* getUpper();
PyObject* getIntegerInformation();
void getBInvACol(int col, double* vec);
void getACol(int ncol, CoinIndexedVector * colArray);
int updateColumnFT(CoinIndexedVector * spare,
CoinIndexedVector * updatedColumn)
{
return this->factorization()->updateColumnFT(spare, updatedColumn);
}
int updateColumnTranspose (CoinIndexedVector * regionSparse,
CoinIndexedVector * regionSparse2){
return this->factorization()->updateColumnTranspose(regionSparse, regionSparse2);
}
int customPrimal;
void useCustomPrimal(int);
int getUseCustomPrimal();
void setPrimalColumnPivotAlgorithm(ClpPrimalColumnPivot *choice){ClpSimplex::setPrimalColumnPivotAlgorithm(*choice);}
void setDualRowPivotAlgorithm(ClpDualRowPivot *choice){ClpSimplex::setDualRowPivotAlgorithm(*choice);}
void loadQuadraticObjective(const CoinPackedMatrix* matrix){ClpModel::loadQuadraticObjective(*matrix);}
ICoinPackedMatrix* getMatrix(){return static_cast(ClpModel::matrix());}
int loadProblem (CoinModel * modelObject,bool tryPlusMinusOne=false){return ClpSimplex::loadProblem(*modelObject, tryPlusMinusOne);}
//double* infeasibilityRay();
void loadProblem (const CoinPackedMatrix* matrix,
const double* collb, const double* colub,
const double* obj,
const double* rowlb, const double* rowub,
const double * rowObjective=NULL){
ClpSimplex::loadProblem(*matrix, collb, colub, obj, rowlb, rowub, rowObjective);}
int primal(int ifValuesPass=0, int startFinishOptions=0);
IClpSimplex (ClpSimplex * wholeModel,
int numberColumns, const int * whichColumns);
~IClpSimplex();
void dualExpanded(ClpSimplex * model,CoinIndexedVector * array, double * other,int mode);
void convertBoundToSense(const double lower, const double upper,
double& right);
void extractSenseRhsRange(double* rhs_);
void getRightHandSide(double* righthandside);
double* tempRow;
CoinIndexedVector* tempRow_vector;
void vectorTimesB_1(CoinIndexedVector*);
void transposeTimesSubsetAll(int number, long long int* which, double* pi, double* y);
void transposeTimesSubset(int number, int* which, double* pi, double* y);
void transposeTimes(const ClpSimplex * model, double scalar,
const CoinIndexedVector * x,
CoinIndexedVector * y,
CoinIndexedVector * z);
IClpSimplex* preSolve(IClpSimplex* si,
double feasibilityTolerance=0.0,
bool keepIntegers=true,
int numberPasses=5,
bool dropNames=false,
bool doRowObjective=false);
void postSolve(bool updateStatus=true);
int dualWithPresolve(IClpSimplex* si,
double feasibilityTolerance=0.0,
bool keepIntegers=true,
int numberPasses=5,
bool dropNames=false,
bool doRowObjective=false);
int primalWithPresolve(IClpSimplex* si,
double feasibilityTolerance=0.0,
bool keepIntegers=true,
int numberPasses=5,
bool dropNames=false,
bool doRowObjective=false);
void setComplement(int var1, int var2){QP_ComplementarityList[var1] = var2; QP_ComplementarityList[var2] = var1;}
inline double getCoinInfinity(){return COIN_DBL_MAX;}
inline void setColumnUpperArray(double *cu){columnUpper_ = cu;}
inline void setColumnLowerArray(double *cl){columnLower_ = cl;}
inline void setRowUpperArray(double *ru){rowUpper_ = ru;}
inline void setRowLowerArray(double *rl){rowLower_ = rl;}
inline void setColumnUpperSubset(int n, int *indicesOfIndices, int *indices, double* values){
for (int i = 0 ; i < n ; i++)
setColumnUpper(indices[indicesOfIndices[i]], values[indicesOfIndices[i]]);
}
inline void setColumnLowerSubset(int n, int *indicesOfIndices, int *indices, double* values){
for (int i = 0 ; i < n ; i++)
setColumnLower(indices[indicesOfIndices[i]], values[indicesOfIndices[i]]);
}
inline void setColumnUpperFirstElements(int n, double* values){
for (int i = 0 ; i < n ; i++)
setColumnUpper(i, values[i]);
}
inline void setColumnLowerFirstElements(int n, double* values){
for (int i = 0 ; i < n ; i++)
setColumnLower(i, values[i]);
}
inline void setObjectiveArray(double *o, int numberColumns)
{
if (objective_)
delete objective_;
objective_ = new ClpLinearObjective(o, numberColumns);
}
void setVariableName(int varInd, char* name);
void setConstraintName(int constInd, char* name);
std::vector getVariableNames();
/// Partial pricing
int partialPrice(int start, int end, int* numberWanted)
{
int bestVarInd;
this->clpMatrix()->partialPricing(this,
static_cast(start),
static_cast(end),
bestVarInd,
*numberWanted);
return bestVarInd;
}
ICbcModel* getICbcModel();
void writeLp(const char *filename,
const char *extension = "lp",
double epsilon = 1e-5,
int numberAcross = 10,
int decimals = 5,
double objSense = 0.0,
bool useRowNames = true);
void setBasisStatus(const int* cstat, const int* rstat);
void getBasisStatus(int* cstat, int* rstat);
void setMaxNumIteration(int m);
};
double cdot(CoinIndexedVector* v1, CoinIndexedVector* v2);
#endif
================================================
FILE: cylp/cpp/IClpSimplexPrimal.cpp
================================================
// Copyright (C) 2002, International Business Machines
// Corporation and others. All Rights Reserved.
/* Notes on implementation of primal simplex algorithm.
When primal feasible(A):
If dual feasible, we are optimal. Otherwise choose an infeasible
basic variable to enter basis from a bound (B). We now need to find an
outgoing variable which will leave problem primal feasible so we get
the column of the tableau corresponding to the incoming variable
(with the correct sign depending if variable will go up or down).
We now perform a ratio test to determine which outgoing variable will
preserve primal feasibility (C). If no variable found then problem
is unbounded (in primal sense). If there is a variable, we then
perform pivot and repeat. Trivial?
-------------------------------------------
A) How do we get primal feasible? All variables have fake costs
outside their feasible region so it is trivial to declare problem
feasible. OSL did not have a phase 1/phase 2 approach but
instead effectively put an extra cost on infeasible basic variables
I am taking the same approach here, although it is generalized
to allow for non-linear costs and dual information.
In OSL, this weight was changed heuristically, here at present
it is only increased if problem looks finished. If problem is
feasible I check for unboundedness. If not unbounded we
could play with going into dual. As long as weights increase
any algorithm would be finite.
B) Which incoming variable to choose is a virtual base class.
For difficult problems steepest edge is preferred while for
very easy (large) problems we will need partial scan.
C) Sounds easy, but this is hardest part of algorithm.
1) Instead of stopping at first choice, we may be able
to allow that variable to go through bound and if objective
still improving choose again. These mini iterations can
increase speed by orders of magnitude but we may need to
go to more of a bucket choice of variable rather than looking
at them one by one (for speed).
2) Accuracy. Basic infeasibilities may be less than
tolerance. Pivoting on these makes objective go backwards.
OSL modified cost so a zero move was made, Gill et al
modified so a strictly positive move was made.
The two problems are that re-factorizations can
change infeasibilities above and below tolerances and that when
finished we need to reset costs and try again.
3) Degeneracy. Gill et al helps but may not be enough. We
may need more. Also it can improve speed a lot if we perturb
the rhs and bounds significantly.
References:
Forrest and Goldfarb, Steepest-edge simplex algorithms for
linear programming - Mathematical Programming 1992
Forrest and Tomlin, Implementing the simplex method for
the Optimization Subroutine Library - IBM Systems Journal 1992
Gill, Murray, Saunders, Wright A Practical Anti-Cycling
Procedure for Linear and Nonlinear Programming SOL report 1988
TODO:
a) Better recovery procedures. At present I never check on forward
progress. There is checkpoint/restart with reducing
re-factorization frequency, but this is only on singular
factorizations.
b) Fast methods for large easy problems (and also the option for
the code to automatically choose which method).
c) We need to be able to stop in various ways for OSI - this
is fairly easy.
*/
#include "CoinPragma.hpp"
#include
#include "CoinHelperFunctions.hpp"
#include "IClpSimplexPrimal.hpp"
#include "ClpFactorization.hpp"
#include "ClpNonLinearCost.hpp"
#include "CoinPackedMatrix.hpp"
#include "CoinIndexedVector.hpp"
#include "ClpPrimalColumnPivot.hpp"
#include "ClpMessage.hpp"
#include "ClpEventHandler.hpp"
#include
#include
#include
#include
#include
#include "CyClpSimplex_api.h"
// primal
// Primal simplex driver (Clp's ClpSimplexPrimal::primal adapted for CyLP's
// IClpSimplex). Returns problemStatus_: 0 optimal, 1 infeasible, 2 unbounded,
// 3 hit maximum iterations, 5 stopped by the event handler.
// NOTE(review): this extracted copy is visibly garbled — several lines were
// fused where '<<'/'<' characters were stripped (e.g. the CLP_SIMPLEX_STATUS
// message around the handler block, the sprint weight loop, and the inner
// while-loop header appears to be missing entirely, leaving unbalanced
// braces). Compare against upstream ClpSimplexPrimal.cpp before editing.
int IClpSimplexPrimal::primal (int ifValuesPass , int startFinishOptions)
{
/*
Method
It tries to be a single phase approach with a weight of 1.0 being
given to getting optimal and a weight of infeasibilityCost_ being
given to getting primal feasible. In this version I have tried to
be clever in a stupid way. The idea of fake bounds in dual
seems to work so the primal analogue would be that of getting
bounds on reduced costs (by a presolve approach) and using
these for being above or below feasible region. I decided to waste
memory and keep these explicitly. This allows for non-linear
costs!
The code is designed to take advantage of sparsity so arrays are
seldom zeroed out from scratch or gone over in their entirety.
The only exception is a full scan to find incoming variable for
Dantzig row choice. For steepest edge we keep an updated list
of dual infeasibilities (actually squares).
On easy problems we don't need full scan - just
pick first reasonable.
One problem is how to tackle degeneracy and accuracy. At present
I am using the modification of costs which I put in OSL and which was
extended by Gill et al. I am still not sure of the exact details.
The flow of primal is three while loops as follows:
while (not finished) {
while (not clean solution) {
Factorize and/or clean up solution by changing bounds so
primal feasible. If looks finished check fake primal bounds.
Repeat until status is iterating (-1) or finished (0,1,2)
}
while (status==-1) {
Iterate until no pivot in or out or time to re-factorize.
Flow is:
choose pivot column (incoming variable). if none then
we are primal feasible so looks as if done but we need to
break and check bounds etc.
Get pivot column in tableau
Choose outgoing row. If we don't find one then we look
primal unbounded so break and check bounds etc. (Also the
pivot tolerance is larger after any iterations so that may be
reason)
If we do find outgoing row, we may have to adjust costs to
keep going forwards (anti-degeneracy). Check pivot will be stable
and if unstable throw away iteration and break to re-factorize.
If minor error re-factorize after iteration.
Update everything (this may involve changing bounds on
variables to stay primal feasible.
}
}
At present we never check we are going forwards. I overdid that in
OSL so will try and make a last resort.
Needs partial scan pivot in option.
May need other anti-degeneracy measures, especially if we try and use
loose tolerances as a way to solve in fewer iterations.
I like idea of dynamic scaling. This gives opportunity to decouple
different implications of scaling for accuracy, iteration count and
feasibility tolerance.
*/
// CyLP experiment: disabled QP complementarity bookkeeping (kept for reference).
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
//int listSize = numberRows() + numberColumns();
//QP_ComplementarityList = new int[listSize];
//for (int i = 0 ; i < listSize ; i++)
// QP_ComplementarityList[i] = i;
//QP_ComplementarityList[0] = 2;
//QP_ComplementarityList[2] = 0;
//QP_BanList = new int[listSize];
//for (int i = 0; i < listSize ; i++)
// QP_BanList[i] = 0;
//QP_ExistsBannedVariable = 0;
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// +1 marks this solve as primal (dual uses -1).
algorithm_ = +1;
moreSpecialOptions_ &= ~16; // clear check replaceColumn accuracy
// save data
ClpDataSave data = saveData();
matrix_->refresh(this); // make sure matrix okay
// Save so can see if doing after dual
int initialStatus=problemStatus_;
int initialIterations = numberIterations_;
int initialNegDjs=-1;
// initialize - maybe values pass and algorithm_ is +1
#if 0
// if so - put in any superbasic costed slacks
if (ifValuesPass&&specialOptions_<0x01000000) {
// Get column copy
const CoinPackedMatrix * columnCopy = matrix();
const int * row = columnCopy->getIndices();
const CoinBigIndex * columnStart = columnCopy->getVectorStarts();
const int * columnLength = columnCopy->getVectorLengths();
//const double * element = columnCopy->getElements();
int n=0;
// NOTE(review): the next line is fused/truncated by extraction — the
// loop header and its interior tests were collapsed together.
for (int iColumn = 0;iColumnprimalTolerance_&&
fabs(value-columnUpper_[iColumn])>primalTolerance_) {
int iRow = row[columnStart[iColumn]];
if (getRowStatus(iRow)==basic) {
setRowStatus(iRow,superBasic);
setColumnStatus(iColumn,basic);
n++;
}
}
}
}
printf("%d costed slacks put in basis\n",n);
}
#endif
// startup() returns nonzero on failure to build work regions; skip solve then.
if (!startup(ifValuesPass,startFinishOptions)) {
// Set average theta
nonLinearCost_->setAverageTheta(1.0e3);
int lastCleaned=0; // last time objective or bounds cleaned up
// Say no pivot has occurred (for steepest edge and updates)
pivotRow_=-2;
// This says whether to restore things etc
int factorType=0;
if (problemStatus_<0&&perturbation_<100&&!ifValuesPass) {
perturb(0);
// Can't get here if values pass
assert (!ifValuesPass);
gutsOfSolution(NULL,NULL);
if (handler_->logLevel()>2) {
// NOTE(review): the following lines are fused — the original streamed
// iteration count, objective and infeasibility counts via '<<', and the
// outer while loop header was likely lost here too.
handler_->message(CLP_SIMPLEX_STATUS,messages_)
<printing(sumPrimalInfeasibilities_>0.0)
<printing(sumDualInfeasibilities_>0.0)
<printing(numberDualInfeasibilitiesWithoutFree_
message()<clear();
}
for (iColumn=0;iColumn<2;iColumn++) {
columnArray_[iColumn]->clear();
}
// give matrix (and model costs and bounds a chance to be
// refreshed (normally null)
matrix_->refresh(this);
// If getting nowhere - why not give it a kick
#if 1
if (perturbation_<101&&numberIterations_>2*(numberRows_+numberColumns_)&&(specialOptions_&4)==0
&&initialStatus!=10) {
perturb(1);
matrix_->rhsOffset(this,true,false);
}
#endif
// If we have done no iterations - special
if (lastGoodIteration_==numberIterations_&&factorType)
factorType=3;
if (saveModel) {
// Doing sprint
if (sequenceIn_<0||numberIterations_>=stopSprint) {
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
// NOTE(review): fused line — a comparison against stopSprint/sprintPass
// appears to have been stripped out of this condition.
if (sequenceIn_<0&&numberIterations_100)
primalColumnPivot_->switchOffSprint();
//lastSprintIteration=numberIterations_;
printf("End small model\n");
}
}
// may factorize, checks if problem finished
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
// status 10 means we got here from dual for a cleanup pass.
if (initialStatus==10) {
// cleanup phase
if(initialIterations != numberIterations_) {
if (numberDualInfeasibilities_>10000&&numberDualInfeasibilities_>10*initialNegDjs) {
// getting worse - try perturbing
if (perturbation_<101&&(specialOptions_&4)==0) {
perturb(1);
matrix_->rhsOffset(this,true,false);
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
}
}
} else {
// save number of negative djs
if (!numberPrimalInfeasibilities_)
initialNegDjs=numberDualInfeasibilities_;
// make sure weight won't be changed
if (infeasibilityCost_==1.0e10)
infeasibilityCost_=1.000001e10;
}
}
// See if sprint says redo because of problems
if (numberDualInfeasibilities_==-776) {
// Need new set of variables
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
//lastSprintIteration=numberIterations_;
printf("End small model after\n");
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
}
int numberSprintIterations=0;
int numberSprintColumns = primalColumnPivot_->numberSprintColumns(numberSprintIterations);
if (problemStatus_==777) {
// problems so do one pass with normal
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
// Skip factorization
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,false,saveModel);
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
} else if (problemStatus_<0&&!saveModel&&numberSprintColumns&&firstFree_<0) {
// Build a small "sprint" model over a promising column subset.
int numberSort=0;
int numberFixed=0;
int numberBasic=0;
reasonableSprintIteration = numberIterations_ + 100;
int * whichColumns = new int[numberColumns_];
double * weight = new double[numberColumns_];
int numberNegative=0;
double sumNegative = 0.0;
// now massage weight so all basic in plus good djs
// NOTE(review): fused line — the loop header, dj computation and the
// bound test were collapsed together by extraction.
for (iColumn=0;iColumn-1.0e50) {
numberNegative++;
sumNegative -= dj;
}
weight[iColumn]=dj;
whichColumns[iColumn] = iColumn;
}
handler_->message(CLP_SPRINT,messages_)
<lastObjectiveValue-1.0e-7&&sprintPass>5) {
// switch off
printf("Switching off sprint\n");
primalColumnPivot_->switchOffSprint();
} else {
lastObjectiveValue = objectiveValue()*optimizationDirection_;
// sort
CoinSort_2(weight,weight+numberColumns_,whichColumns);
numberSort = std::min(numberColumns_-numberFixed,numberBasic+numberSprintColumns);
// Sort to make consistent ?
std::sort(whichColumns,whichColumns+numberSort);
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// FIXME: uncomment
saveModel = new IClpSimplex(this,numberSort,whichColumns);
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
delete [] whichColumns;
delete [] weight;
// Skip factorization
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,false,saveModel);
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,saveModel);
stopSprint = numberIterations_+numberSprintIterations;
printf("Sprint with %d columns for %d iterations\n",
numberSprintColumns,numberSprintIterations);
}
}
// Say good factorization
factorType=1;
// Say no pivot has occurred (for steepest edge and updates)
pivotRow_=-2;
// exit if victory declared
if (problemStatus_>=0)
break;
// test for maximum iterations
if (hitMaximumIterations()||(ifValuesPass==2&&firstFree_<0)) {
problemStatus_=3;
break;
}
if (firstFree_<0) {
if (ifValuesPass) {
// end of values pass
ifValuesPass=0;
int status = eventHandler_->event(ClpEventHandler::endOfValuesPass);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfValuesPass;
break;
}
//#define FEB_TRY
#ifdef FEB_TRY
if (perturbation_<100)
perturb(0);
#endif
}
}
// Check event
{
int status = eventHandler_->event(ClpEventHandler::endOfFactorization);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfFactorization;
break;
}
}
// Iterate
whileIterating(ifValuesPass ? 1 : 0);
}
}
// if infeasible get real values
//printf("XXXXY final cost %g\n",infeasibilityCost_);
progress_.initialWeight_=0.0;
if (problemStatus_==1&&secondaryStatus_!=6) {
// Recompute true (unweighted) infeasibilities and feasible duals.
infeasibilityCost_=0.0;
createRim(1+4);
delete nonLinearCost_;
nonLinearCost_ = new ClpNonLinearCost(this);
nonLinearCost_->checkInfeasibilities(0.0);
sumPrimalInfeasibilities_=nonLinearCost_->sumInfeasibilities();
numberPrimalInfeasibilities_= nonLinearCost_->numberInfeasibilities();
// and get good feasible duals
computeDuals(NULL);
}
// clean up
unflag();
finish(startFinishOptions);
restoreData(data);
return problemStatus_;
}
/*
Reasons to come out:
-1 iterations etc
-2 inaccuracy
-3 slight inaccuracy (and done iterations)
-4 end of values pass and done iterations
+0 looks optimal (might be infeasible - but we will investigate)
+2 looks unbounded
+3 max iterations
*/
// Run primal iterations until re-factorization is needed or the solve ends.
// Return codes (per the comment block preceding this function in the file):
// -1 iterations etc, -2 inaccuracy, -3 slight inaccuracy (and done
// iterations), -4 end of values pass, 0 looks optimal, 2 looks unbounded,
// 3 max iterations.
// NOTE(review): extraction damage — several lines below are fused where
// '<<'/'<' characters were stripped (debug loops, the values-pass message,
// the pivot-column branch header). Keep code byte-identical; compare with
// upstream ClpSimplexPrimal.cpp before editing logic.
int
IClpSimplexPrimal::whileIterating(int valuesOption)
{
// Say if values pass
int ifValuesPass=(firstFree_>=0) ? 1 : 0;
int returnCode=-1;
int superBasicType=1;
if (valuesOption>1)
superBasicType=3;
// status stays at -1 while iterating, >=0 finished, -2 to invert
// status -3 to go to top without an invert
while (problemStatus_==-1) {
//#define CLP_DEBUG 1
#ifdef CLP_DEBUG
{
int i;
// not [1] as has information
for (i=0;i<4;i++) {
if (i!=1)
rowArray_[i]->checkClear();
}
for (i=0;i<2;i++) {
columnArray_[i]->checkClear();
}
}
#endif
#if 0
{
int iPivot;
double * array = rowArray_[3]->denseVector();
int * index = rowArray_[3]->getIndices();
int i;
// NOTE(review): fused line — the debug loop body was collapsed into
// the loop header by extraction.
for (iPivot=0;iPivotupdateColumn(rowArray_[2],rowArray_[3]);
int number = rowArray_[3]->getNumElements();
for (i=0;iclear();
}
}
#endif
#if 0
nonLinearCost_->checkInfeasibilities(primalTolerance_);
printf("suminf %g number %d\n",nonLinearCost_->sumInfeasibilities(),
nonLinearCost_->numberInfeasibilities());
#endif
#if CLP_DEBUG>2
// very expensive
// Recompute the solution from scratch and compare with incremental values.
if (numberIterations_>0&&numberIterations_<100&&!ifValuesPass) {
handler_->setLogLevel(63);
double saveValue = objectiveValue_;
double * saveRow1 = new double[numberRows_];
double * saveRow2 = new double[numberRows_];
CoinMemcpyN(rowReducedCost_,numberRows_,saveRow1);
CoinMemcpyN(rowActivityWork_,numberRows_,saveRow2);
double * saveColumn1 = new double[numberColumns_];
double * saveColumn2 = new double[numberColumns_];
CoinMemcpyN(reducedCostWork_,numberColumns_,saveColumn1);
CoinMemcpyN(columnActivityWork_,numberColumns_,saveColumn2);
gutsOfSolution(NULL,NULL,false);
printf("xxx %d old obj %g, recomputed %g, sum primal inf %g\n",
numberIterations_,
saveValue,objectiveValue_,sumPrimalInfeasibilities_);
CoinMemcpyN(saveRow1,numberRows_,rowReducedCost_);
CoinMemcpyN(saveRow2,numberRows_,rowActivityWork_);
CoinMemcpyN(saveColumn1,numberColumns_,reducedCostWork_);
CoinMemcpyN(saveColumn2,numberColumns_,columnActivityWork_);
delete [] saveRow1;
delete [] saveRow2;
delete [] saveColumn1;
delete [] saveColumn2;
objectiveValue_=saveValue;
}
#endif
if (!ifValuesPass) {
// choose column to come in
// can use pivotRow_ to update weights
// pass in list of cost changes so can do row updates (rowArray_[1])
// NOTE rowArray_[0] is used by computeDuals which is a
// slow way of getting duals but might be used
primalColumn(rowArray_[1],rowArray_[2],rowArray_[3],
columnArray_[0],columnArray_[1]);
} else {
// in values pass
int sequenceIn=nextSuperBasic(superBasicType,columnArray_[0]);
if (valuesOption>1)
superBasicType=2;
if (sequenceIn<0) {
// end of values pass - initialize weights etc
// NOTE(review): fused line — the CLP_END_VALUES_PASS message stream
// and the primalColumnPivot_->saveWeights call were collapsed together.
handler_->message(CLP_END_VALUES_PASS,messages_)
<saveWeights(this,5);
problemStatus_=-2; // factorize now
pivotRow_=-1; // say no weights update
returnCode=-4;
// Clean up
int i;
// NOTE(review): fused line — the cleanup loop and the subsequent
// "else sequenceIn_=sequenceIn" branch were collapsed by extraction.
for (i=0;iclear();
if (sequenceIn_>=0) {
// we found a pivot column
assert (!flagged(sequenceIn_));
#ifdef CLP_DEBUG
if ((handler_->logLevel()&32)) {
char x = isColumn(sequenceIn_) ? 'C' :'R';
std::cout<<"pivot column "<<
// NOTE(review): fused region — the debug ftran check below lost
// several line breaks and comparison operators to extraction.
x<=0&&checkSequencecheckClear();
rowArray_[3]->checkClear();
double * array = rowArray_[3]->denseVector();
int * index = rowArray_[3]->getIndices();
unpackPacked(rowArray_[3],checkSequence);
factorization_->updateColumnForDebug(rowArray_[2],rowArray_[3]);
int number = rowArray_[3]->getNumElements();
double dualIn = cost_[checkSequence];
int i;
for (i=0;iclear();
if (numberIterations_>2000)
exit(1);
}
}
#endif
// do second half of iteration
returnCode = pivotResult(ifValuesPass);
if (returnCode<-1&&returnCode>-5) {
problemStatus_=-2; //
} else if (returnCode==-5) {
if ((moreSpecialOptions_&16)==0&&factorization_->pivots()) {
moreSpecialOptions_ |= 16;
problemStatus_=-2;
}
// otherwise something flagged - continue;
} else if (returnCode==2) {
problemStatus_=-5; // looks unbounded
} else if (returnCode==4) {
problemStatus_=-2; // looks unbounded but has iterated
} else if (returnCode!=-1) {
assert(returnCode==3);
if (problemStatus_!=5)
problemStatus_=3;
}
} else {
// no pivot column
#ifdef CLP_DEBUG
if (handler_->logLevel()&32)
printf("** no column pivot\n");
#endif
if (nonLinearCost_->numberInfeasibilities())
problemStatus_=-4; // might be infeasible
// Force to re-factorize early next time
int numberPivots = factorization_->pivots();
forceFactorization_=std::min(forceFactorization_,(numberPivots+1)>>1);
returnCode=0;
break;
}
}
if (valuesOption>1)
columnArray_[0]->setNumElements(0);
return returnCode;
}
/* Checks if finished. Updates status */
void
IClpSimplexPrimal::statusOfProblemInPrimal(int & lastCleaned,int type,
ClpSimplexProgress * progress,
bool doFactorization,
int ifValuesPass,
IClpSimplex * originalModel)
{
int dummy; // for use in generalExpanded
int saveFirstFree=firstFree_;
// number of pivots done
int numberPivots = factorization_->pivots();
if (type==2) {
// trouble - restore solution
CoinMemcpyN(saveStatus_,numberColumns_+numberRows_,status_);
CoinMemcpyN(savedSolution_+numberColumns_ ,
numberRows_,rowActivityWork_);
CoinMemcpyN(savedSolution_ ,
numberColumns_,columnActivityWork_);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
forceFactorization_=1; // a bit drastic but ..
pivotRow_=-1; // say no weights update
changeMade_++; // say change made
}
int saveThreshold = factorization_->sparseThreshold();
int tentativeStatus = problemStatus_;
int numberThrownOut=1; // to loop round on bad factorization in values pass
double lastSumInfeasibility=COIN_DBL_MAX;
if (numberIterations_)
lastSumInfeasibility=nonLinearCost_->sumInfeasibilities();
int nPass=0;
while (numberThrownOut) {
int nSlackBasic=0;
if (nPass) {
for (int i=0;i-3||problemStatus_==-4) {
// factorize
// later on we will need to recover from singularities
// also we could skip if first time
// do weights
// This may save pivotRow_ for use
if (doFactorization)
primalColumnPivot_->saveWeights(this,1);
if ((type&&doFactorization)||nSlackBasic==numberRows_) {
// is factorization okay?
int factorStatus = internalFactorize(1);
if (factorStatus) {
if (solveType_==2+8) {
// say odd
problemStatus_=5;
return;
}
if (type!=1||largestPrimalError_>1.0e3
||largestDualError_>1.0e3) {
// switch off dense
int saveDense = factorization_->denseThreshold();
factorization_->setDenseThreshold(0);
// Go to safe
factorization_->pivotTolerance(0.99);
// make sure will do safe factorization
pivotVariable_[0]=-1;
internalFactorize(2);
factorization_->setDenseThreshold(saveDense);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
} else {
// no - restore previous basis
// Keep any flagged variables
int i;
for (i=0;i=0&&getStatus(sequenceIn_)!=basic) {
setFlagged(sequenceIn_);
} else if (sequenceOut_>=0&&getStatus(sequenceOut_)!=basic) {
setFlagged(sequenceOut_);
}
double newTolerance = std::max(0.5 + 0.499*randomNumberGenerator_.randomDouble(),factorization_->pivotTolerance());
factorization_->pivotTolerance(newTolerance);
} else {
// Go to safe
factorization_->pivotTolerance(0.99);
}
CoinMemcpyN(savedSolution_+numberColumns_ ,
numberRows_,rowActivityWork_);
CoinMemcpyN(savedSolution_ ,
numberColumns_,columnActivityWork_);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
matrix_->generalExpanded(this,5,dummy);
forceFactorization_=1; // a bit drastic but ..
type = 2;
if (internalFactorize(2)!=0) {
largestPrimalError_=1.0e4; // force other type
}
}
changeMade_++; // say change made
}
}
if (problemStatus_!=-4)
problemStatus_=-3;
}
// at this stage status is -3 or -5 if looks unbounded
// get primal and dual solutions
// put back original costs and then check
// createRim(4); // costs do not change
// May need to do more if column generation
dummy=4;
matrix_->generalExpanded(this,9,dummy);
numberThrownOut=gutsOfSolution(NULL,NULL,(firstFree_>=0));
double sumInfeasibility = nonLinearCost_->sumInfeasibilities();
if (numberThrownOut||
(sumInfeasibility>1.0e7&&sumInfeasibility>100.0*lastSumInfeasibility
&&factorization_->pivotTolerance()<0.11)||(largestPrimalError_>1.0e10&&largestDualError_>1.0e10)) {
problemStatus_=tentativeStatus;
doFactorization=true;
if (numberPivots) {
// go back
numberThrownOut=-1;
// trouble - restore solution
CoinMemcpyN(saveStatus_,numberColumns_+numberRows_,status_);
CoinMemcpyN(savedSolution_+numberColumns_ ,
numberRows_,rowActivityWork_);
CoinMemcpyN(savedSolution_ ,
numberColumns_,columnActivityWork_);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
forceFactorization_=1; // a bit drastic but ..
// Go to safe
factorization_->pivotTolerance(0.99);
pivotRow_=-1; // say no weights update
changeMade_++; // say change made
if (numberPivots==1) {
// throw out something
if (sequenceIn_>=0&&getStatus(sequenceIn_)!=basic) {
setFlagged(sequenceIn_);
} else if (sequenceOut_>=0&&getStatus(sequenceOut_)!=basic) {
setFlagged(sequenceOut_);
}
}
numberPivots=0;
}
}
}
// Double check reduced costs if no action
if (progress->lastIterationNumber(0)==numberIterations_) {
if (primalColumnPivot_->looksOptimal()) {
numberDualInfeasibilities_ = 0;
sumDualInfeasibilities_ = 0.0;
}
}
// If in primal and small dj give up
if ((specialOptions_&1024)!=0&&!numberPrimalInfeasibilities_&&numberDualInfeasibilities_) {
double average = sumDualInfeasibilities_/(static_cast (numberDualInfeasibilities_));
if (numberIterations_>300&&average<1.0e-4) {
numberDualInfeasibilities_ = 0;
sumDualInfeasibilities_ = 0.0;
}
}
// Check if looping
int loop;
if (type!=2&&!ifValuesPass)
loop = progress->looping();
else
loop=-1;
if (loop>=0) {
if (!problemStatus_) {
// declaring victory
numberPrimalInfeasibilities_ = 0;
sumPrimalInfeasibilities_ = 0.0;
} else {
problemStatus_ = loop; //exit if in loop
problemStatus_ = 10; // instead - try other algorithm
numberPrimalInfeasibilities_ = nonLinearCost_->numberInfeasibilities();
}
problemStatus_ = 10; // instead - try other algorithm
return ;
} else if (loop<-1) {
// Is it time for drastic measures
if (nonLinearCost_->numberInfeasibilities()&&progress->badTimes()>5&&
progress->oddState()<10&&progress->oddState()>=0) {
progress->newOddState();
nonLinearCost_->zapCosts();
}
// something may have changed
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
// If progress then reset costs
if (loop==-1&&!nonLinearCost_->numberInfeasibilities()&&progress->oddState()<0) {
createRim(4,false); // costs back
delete nonLinearCost_;
nonLinearCost_ = new ClpNonLinearCost(this);
progress->endOddState();
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
// Flag to say whether to go to dual to clean up
bool goToDual=false;
// really for free variables in
//if((progressFlag_&2)!=0)
//problemStatus_=-1;;
progressFlag_ = 0; //reset progress flag
handler_->message(CLP_SIMPLEX_STATUS,messages_)
<feasibleReportCost();
handler_->printing(nonLinearCost_->numberInfeasibilities()>0)
<sumInfeasibilities()<numberInfeasibilities();
handler_->printing(sumDualInfeasibilities_>0.0)
<printing(numberDualInfeasibilitiesWithoutFree_
message()<checkInfeasibilities(primalTolerance_);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
nonLinearCost_->checkInfeasibilities(primalTolerance_);
}
if (nonLinearCost_->numberInfeasibilities()>0&&!progress->initialWeight_&&!ifValuesPass&&infeasibilityCost_==1.0e10) {
// first time infeasible - start up weight computation
double * oldDj = dj_;
double * oldCost = cost_;
int numberRows2 = numberRows_+numberExtraRows_;
int numberTotal = numberRows2+numberColumns_;
dj_ = new double[numberTotal];
cost_ = new double[numberTotal];
reducedCostWork_ = dj_;
rowReducedCost_ = dj_+numberColumns_;
objectiveWork_ = cost_;
rowObjectiveWork_ = cost_+numberColumns_;
double direction = optimizationDirection_*objectiveScale_;
const double * obj = objective();
memset(rowObjectiveWork_,0,numberRows_*sizeof(double));
int iSequence;
if (columnScale_)
for (iSequence=0;iSequenceprimalTolerance_) {
// Check if "free"
if (distanceDown>primalTolerance_) {
// free
if (value>dualTolerance_) {
numberFreeSame++;
} else if(value<-dualTolerance_) {
numberFreeDifferent++;
dj_[n++] = feasibleDj/infeasibleDj;
} else {
numberFreeZero++;
}
} else {
// should not be negative
if (value>dualTolerance_) {
numberSame++;
} else if(value<-dualTolerance_) {
numberDifferent++;
dj_[n++] = feasibleDj/infeasibleDj;
} else {
numberZero++;
}
}
} else if (distanceDown>primalTolerance_) {
// should not be positive
if (value>dualTolerance_) {
numberSame++;
} else if(value<-dualTolerance_) {
numberDifferent++;
dj_[n++] = feasibleDj/infeasibleDj;
} else {
numberZero++;
}
}
}
progress->initialWeight_=-1.0;
}
//printf("XXXX %d same, %d different, %d zero, -- free %d %d %d\n",
// numberSame,numberDifferent,numberZero,
// numberFreeSame,numberFreeDifferent,numberFreeZero);
// we want most to be same
if (n) {
double most = 0.95;
std::sort(dj_,dj_+n);
int which = static_cast ((1.0-most)*static_cast (n));
double take = -dj_[which]*infeasibilityCost_;
//printf("XXXXZ inf cost %g take %g (range %g %g)\n",infeasibilityCost_,take,-dj_[0]*infeasibilityCost_,-dj_[n-1]*infeasibilityCost_);
take = -dj_[0]*infeasibilityCost_;
infeasibilityCost_ = std::min(std::max(1000.0*take,1.0e8),1.0000001e10);;
//printf("XXXX increasing weight to %g\n",infeasibilityCost_);
}
delete [] dj_;
delete [] cost_;
dj_= oldDj;
cost_ = oldCost;
reducedCostWork_ = dj_;
rowReducedCost_ = dj_+numberColumns_;
objectiveWork_ = cost_;
rowObjectiveWork_ = cost_+numberColumns_;
if (n)
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
double trueInfeasibility =nonLinearCost_->sumInfeasibilities();
if (!nonLinearCost_->numberInfeasibilities()&&infeasibilityCost_==1.0e10&&!ifValuesPass&&true) {
// relax if default
infeasibilityCost_ = std::min(std::max(100.0*sumDualInfeasibilities_,1.0e8),1.00000001e10);
// reset looping criterion
progress->reset();
trueInfeasibility = 1.123456e10;
}
if (trueInfeasibility>1.0) {
// If infeasibility going up may change weights
double testValue = trueInfeasibility-1.0e-4*(10.0+trueInfeasibility);
double lastInf = progress->lastInfeasibility(1);
double lastInf3 = progress->lastInfeasibility(3);
double thisObj = progress->lastObjective(0);
double thisInf = progress->lastInfeasibility(0);
thisObj += infeasibilityCost_*2.0*thisInf;
double lastObj = progress->lastObjective(1);
lastObj += infeasibilityCost_*2.0*lastInf;
double lastObj3 = progress->lastObjective(3);
lastObj3 += infeasibilityCost_*2.0*lastInf3;
if (lastObjlogLevel()==63)
printf("lastobj %g this %g force %d ",lastObj,thisObj,forceFactorization_);
int maxFactor = factorization_->maximumPivots();
if (maxFactor>10) {
if (forceFactorization_<0)
forceFactorization_= maxFactor;
forceFactorization_ = std::max(1,(forceFactorization_>>2));
if (handler_->logLevel()==63)
printf("Reducing factorization frequency to %d\n",forceFactorization_);
}
} else if (lastObj3logLevel()==63)
printf("lastobj3 %g this3 %g `force %d ",lastObj3,thisObj,forceFactorization_);
int maxFactor = factorization_->maximumPivots();
if (maxFactor>10) {
if (forceFactorization_<0)
forceFactorization_= maxFactor;
forceFactorization_ = std::max(1,(forceFactorization_*2)/3);
if (handler_->logLevel()==63)
printf("Reducing factorization frequency to %d\n",forceFactorization_);
}
} else if(lastInfreset();
if (handler_->logLevel()==63)
printf("increasing weight to %g\n",infeasibilityCost_);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
}
}
// we may wish to say it is optimal even if infeasible
bool alwaysOptimal = (specialOptions_&1)!=0;
// give code benefit of doubt
if (sumOfRelaxedDualInfeasibilities_ == 0.0&&
sumOfRelaxedPrimalInfeasibilities_ == 0.0) {
// say optimal (with these bounds etc)
numberDualInfeasibilities_ = 0;
sumDualInfeasibilities_ = 0.0;
numberPrimalInfeasibilities_ = 0;
sumPrimalInfeasibilities_ = 0.0;
// But check if in sprint
if (originalModel) {
// Carry on and re-do
numberDualInfeasibilities_ = -776;
}
// But if real primal infeasibilities nonzero carry on
if (nonLinearCost_->numberInfeasibilities()) {
// most likely to happen if infeasible
double relaxedToleranceP=primalTolerance_;
// we can't really trust infeasibilities if there is primal error
double error = std::min(1.0e-2,largestPrimalError_);
// allow tolerance at least slightly bigger than standard
relaxedToleranceP = relaxedToleranceP + error;
int ninfeas = nonLinearCost_->numberInfeasibilities();
double sum = nonLinearCost_->sumInfeasibilities();
double average = sum/ static_cast (ninfeas);
#ifdef COIN_DEVELOP
if (handler_->logLevel()>0)
printf("nonLinearCost says infeasible %d summing to %g\n",
ninfeas,sum);
#endif
if (average>relaxedToleranceP) {
sumOfRelaxedPrimalInfeasibilities_ = sum;
numberPrimalInfeasibilities_ = ninfeas;
sumPrimalInfeasibilities_ = sum;
#ifdef COIN_DEVELOP
bool unflagged =
#endif
unflag();
#ifdef COIN_DEVELOP
if (unflagged&&handler_->logLevel()>0)
printf(" - but flagged variables\n");
#endif
}
}
}
// had ||(type==3&&problemStatus_!=-5) -- ??? why ????
if ((dualFeasible()||problemStatus_==-4)&&!ifValuesPass) {
// see if extra helps
if (nonLinearCost_->numberInfeasibilities()&&
(nonLinearCost_->sumInfeasibilities()>1.0e-3||sumOfRelaxedPrimalInfeasibilities_)
&&!alwaysOptimal) {
//may need infeasiblity cost changed
// we can see if we can construct a ray
// make up a new objective
double saveWeight = infeasibilityCost_;
// save nonlinear cost as we are going to switch off costs
ClpNonLinearCost * nonLinear = nonLinearCost_;
// do twice to make sure Primal solution has settled
// put non-basics to bounds in case tolerance moved
// put back original costs
createRim(4);
nonLinearCost_->checkInfeasibilities(0.0);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
infeasibilityCost_=1.0e100;
// put back original costs
createRim(4);
nonLinearCost_->checkInfeasibilities(primalTolerance_);
// may have fixed infeasibilities - double check
if (nonLinearCost_->numberInfeasibilities()==0) {
// carry on
problemStatus_ = -1;
infeasibilityCost_=saveWeight;
nonLinearCost_->checkInfeasibilities(primalTolerance_);
} else {
nonLinearCost_=NULL;
// scale
int i;
for (i=0;i=1.0e18||
numberDualInfeasibilities_==0)&&perturbation_==101) {
goToDual=unPerturb(); // stop any further perturbation
if (nonLinearCost_->sumInfeasibilities()>1.0e-1)
goToDual=false;
nonLinearCost_->checkInfeasibilities(primalTolerance_);
numberDualInfeasibilities_=1; // carry on
problemStatus_=-1;
} else if (numberDualInfeasibilities_==0&&largestDualError_>1.0e-2) {
goToDual=true;
factorization_->pivotTolerance(std::max(0.9,factorization_->pivotTolerance()));
}
if (!goToDual) {
if (infeasibilityCost_>=1.0e20||
numberDualInfeasibilities_==0) {
// we are infeasible - use as ray
delete [] ray_;
ray_ = new double [numberRows_];
CoinMemcpyN(dual_,numberRows_,ray_);
// and get feasible duals
infeasibilityCost_=0.0;
createRim(4);
nonLinearCost_->checkInfeasibilities(primalTolerance_);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
// so will exit
infeasibilityCost_=1.0e30;
// reset infeasibilities
sumPrimalInfeasibilities_=nonLinearCost_->sumInfeasibilities();;
numberPrimalInfeasibilities_=
nonLinearCost_->numberInfeasibilities();
}
if (infeasibilityCost_<1.0e20) {
infeasibilityCost_ *= 5.0;
// reset looping criterion
progress->reset();
changeMade_++; // say change made
handler_->message(CLP_PRIMAL_WEIGHT,messages_)
<checkInfeasibilities(0.0);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
problemStatus_=-1; //continue
goToDual=false;
} else {
// say infeasible
problemStatus_ = 1;
}
}
}
} else {
// may be optimal
if (perturbation_==101) {
goToDual=unPerturb(); // stop any further perturbation
if (numberRows_>20000&&!numberTimesOptimal_)
goToDual=0; // Better to carry on a bit longer
lastCleaned=-1; // carry on
}
bool unflagged = (unflag()!=0);
if ( lastCleaned!=numberIterations_||unflagged) {
handler_->message(CLP_PRIMAL_OPTIMAL,messages_)
<zeroTolerance(std::min(factorization_->zeroTolerance(),1.0e-15));
}
lastCleaned=numberIterations_;
if (primalTolerance_!=dblParam_[ClpPrimalTolerance])
handler_->message(CLP_PRIMAL_ORIGINAL,messages_)
<checkInfeasibilities(oldTolerance);
#if 0
int i;
for (i=0;inumberInfeasibilities()&&lastCleaned>=0)
problemStatus_=0;
else
problemStatus_ = -1;
} else {
problemStatus_=0; // optimal
if (lastCleanedmessage(CLP_SIMPLEX_GIVINGUP,messages_)
<numberInfeasibilities()) {
if (infeasibilityCost_>1.0e18&&perturbation_==101) {
// back off weight
infeasibilityCost_ = 1.0e13;
// reset looping criterion
progress->reset();
unPerturb(); // stop any further perturbation
}
//we need infeasibility cost changed
if (infeasibilityCost_<1.0e20) {
infeasibilityCost_ *= 5.0;
// reset looping criterion
progress->reset();
changeMade_++; // say change made
handler_->message(CLP_PRIMAL_WEIGHT,messages_)
< (numberDualInfeasibilities_))<1.0e-5||
progress->lastIterationNumber(0)==numberIterations_) {
if (!numberPrimalInfeasibilities_) {
if (numberTimesOptimal_<4) {
numberTimesOptimal_++;
changeMade_++; // say change made
} else {
problemStatus_=0;
secondaryStatus_=5;
}
}
}
}
}
}
if (problemStatus_==0) {
double objVal = nonLinearCost_->feasibleCost();
double tol = 1.0e-10*std::max(fabs(objVal),fabs(objectiveValue_))+1.0e-8;
if (fabs(objVal-objectiveValue_)>tol) {
#ifdef COIN_DEVELOP
if (handler_->logLevel()>0)
printf("nonLinearCost has feasible obj of %g, objectiveValue_ is %g\n",
objVal,objectiveValue_);
#endif
objectiveValue_ = objVal;
}
}
// save extra stuff
matrix_->generalExpanded(this,5,dummy);
if (type==0||type==1) {
if (type!=1||!saveStatus_) {
// create save arrays
delete [] saveStatus_;
delete [] savedSolution_;
saveStatus_ = new unsigned char [numberRows_+numberColumns_];
savedSolution_ = new double [numberRows_+numberColumns_];
}
// save arrays
CoinMemcpyN(status_,numberColumns_+numberRows_,saveStatus_);
CoinMemcpyN(rowActivityWork_,
numberRows_,savedSolution_+numberColumns_);
CoinMemcpyN(columnActivityWork_,numberColumns_,savedSolution_);
}
// see if in Cbc etc
bool inCbcOrOther = (specialOptions_&0x03000000)!=0;
bool disaster=false;
if (disasterArea_&&inCbcOrOther&&disasterArea_->check()) {
disasterArea_->saveInfo();
disaster=true;
}
if (disaster)
problemStatus_=3;
if (problemStatus_<0&&!changeMade_) {
problemStatus_=4; // unknown
}
lastGoodIteration_ = numberIterations_;
if (numberIterations_>lastBadIteration_+100)
moreSpecialOptions_ &= ~16; // clear check accuracy flag
if (goToDual)
problemStatus_=10; // try dual
// make sure first free monotonic
if (firstFree_>=0&&saveFirstFree>=0) {
firstFree_=saveFirstFree;
nextSuperBasic(1,NULL);
}
if (doFactorization) {
// restore weights (if saved) - also recompute infeasibility list
if (tentativeStatus>-3)
primalColumnPivot_->saveWeights(this,(type <2) ? 2 : 4);
else
primalColumnPivot_->saveWeights(this,3);
if (saveThreshold) {
// use default at present
factorization_->sparseThreshold(0);
factorization_->goSparse();
}
}
// Allow matrices to be sorted etc
int fake=-999; // signal sort
matrix_->correctSequence(this,fake,fake);
}
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// Primal ratio-test entry point.  All of the actual work is delegated to
// the Dantzig-style row-selection routine below; the Cython post-pivot
// hooks that used to run here are disabled.
void
IClpSimplexPrimal::primalRow(CoinIndexedVector * rowArray,
                             CoinIndexedVector * rhsArray,
                             CoinIndexedVector * spareArray,
                             CoinIndexedVector * spareArray2,
                             int valuesPass)
{
  // Choose the pivot row via the default (Dantzig) ratio test.
  DantzigPrimalRow(rowArray, rhsArray, spareArray, spareArray2, valuesPass);
}
/*
Row array has pivot column
This chooses pivot row.
For speed, we may need to go to a bucket approach when many
variables go through bounds
On exit rhsArray will have changes in costs of basic variables
*/
void
IClpSimplexPrimal::DantzigPrimalRow(CoinIndexedVector * rowArray,
CoinIndexedVector * rhsArray,
CoinIndexedVector * spareArray,
CoinIndexedVector * spareArray2,
int valuesPass)
{
// NOTE(review): this routine appears text-damaged — several lines have lost
// the text between a '<' and a later '>' (e.g. the loop headers below), so
// statements are fused together and the code as shown cannot compile.
// TODO: restore the body from upstream Clp's ClpSimplexPrimal::primalRow
// before relying on it.  Comments below document the visible intent only.
// Remember the incoming reduced cost so it can be restored for nonlinear
// objectives further down.
double saveDj = dualIn_;
if (valuesPass&&objective_->type()<2) {
dualIn_ = cost_[sequenceIn_];
double * work=rowArray->denseVector();
int number=rowArray->getNumElements();
int * which=rowArray->getIndices();
int iIndex;
// NOTE(review): garbled line — a loop body recomputing dualIn_ and the
// direction test were fused here by the text damage.
for (iIndex=0;iIndexdualTolerance_) {
directionIn_=-1;
} else {
// towards nearest bound
if (valueIn_-lowerIn_100)
acceptablePivot=acceptablePivot_;
// Tighten the acceptable pivot size as more pivots accumulate since the
// last refactorization (numerical caution).
if (factorization_->pivots()>10)
acceptablePivot=1.0e+3*acceptablePivot_; // if we have iterated be more strict
else if (factorization_->pivots()>5)
acceptablePivot=1.0e+2*acceptablePivot_; // if we have iterated be slightly more strict
else if (factorization_->pivots())
acceptablePivot=acceptablePivot_; // relax
double bestEverPivot=acceptablePivot;
int lastPivotRow = -1;
double lastPivot=0.0;
double lastTheta=1.0e50;
// use spareArrays to put ones looked at in
// First one is list of candidates
// We could compress if we really know we won't need any more
// Second array has current set of pivot candidates
// with a backup list saved in double * part of indexed vector
// pivot elements
double * spare;
// indices
int * index;
spareArray->clear();
spare = spareArray->denseVector();
index = spareArray->getIndices();
// we also need somewhere for effective rhs
double * rhs=rhsArray->denseVector();
// and we can use indices to point to alpha
// that way we can store fabs(alpha)
int * indexPoint = rhsArray->getIndices();
//int numberFlip=0; // Those which may change if flips
/*
First we get a list of possible pivots. We can also see if the
problem looks unbounded.
At first we increase theta and see what happens. We start
theta at a reasonable guess. If in right area then we do bit by bit.
We save possible pivot candidates
*/
// do first pass to get possibles
// We can also see if unbounded
double * work=rowArray->denseVector();
int number=rowArray->getNumElements();
int * which=rowArray->getIndices();
// we need to swap sign if coming in from ub
double way = directionIn_;
// Maximum distance the entering variable can move within its own bounds.
double maximumMovement;
if (way>0.0)
maximumMovement = std::min(1.0e30,upperIn_-valueIn_);
else
maximumMovement = std::min(1.0e30,valueIn_-lowerIn_);
double averageTheta = nonLinearCost_->averageTheta();
double tentativeTheta = std::min(10.0*averageTheta,maximumMovement);
double upperTheta = maximumMovement;
if (tentativeTheta>0.5*maximumMovement)
tentativeTheta=maximumMovement;
bool thetaAtMaximum=tentativeTheta==maximumMovement;
// In case tiny bounds increase
if (maximumMovement<1.0)
tentativeTheta *= 1.1;
double dualCheck = fabs(dualIn_);
// but make a bit more pessimistic
dualCheck=std::max(dualCheck-100.0*dualTolerance_,0.99*dualCheck);
int iIndex;
int pivotOne=-1;
//#define CLP_DEBUG
#ifdef CLP_DEBUG
if (numberIterations_==-3839||numberIterations_==-3840) {
double dj=cost_[sequenceIn_];
printf("cost in on %d is %g, dual in %g\n",sequenceIn_,dj,dualIn_);
for (iIndex=0;iIndex %g (cost %g, dj %g)\n",
iRow,iPivot,lower_[iPivot],solution_[iPivot],upper_[iPivot],
alpha, solution_[iPivot]-1.0e9*alpha,cost_[iPivot],dj);
}
}
#endif
// First pass: collect candidate rows whose basic variable could hit a
// bound within tentativeTheta; retry with the full movement if none found.
while (true) {
pivotOne=-1;
totalThru=0.0;
// We also re-compute reduced cost
numberRemaining=0;
dualIn_ = cost_[sequenceIn_];
#ifndef NDEBUG
double tolerance = primalTolerance_*1.002;
#endif
// NOTE(review): garbled loop header — the original iterated over the
// updated column, classifying each basic variable by direction.
for (iIndex=0;iIndex0.0) {
// basic variable going towards lower bound
double bound = lower_[iPivot];
// must be exactly same as when used
double change = tentativeTheta*alpha;
possible = (oldValue-change)<=bound+primalTolerance_;
oldValue -= bound;
} else {
// basic variable going towards upper bound
double bound = upper_[iPivot];
// must be exactly same as when used
double change = tentativeTheta*alpha;
possible = (oldValue-change)>=bound-primalTolerance_;
oldValue = bound-oldValue;
alpha = - alpha;
}
double value;
assert (oldValue>=-tolerance);
if (possible) {
value=oldValue-upperTheta*alpha;
if (value<-primalTolerance_&&alpha>=acceptablePivot) {
upperTheta = (oldValue+primalTolerance_)/alpha;
pivotOne=numberRemaining;
}
// add to list
spare[numberRemaining]=alpha;
rhs[numberRemaining]=oldValue;
indexPoint[numberRemaining]=iIndex;
index[numberRemaining++]=iRow;
totalThru += alpha;
setActive(iRow);
//} else if (value=1.0001*dualCheck) {
// Can pivot here
break;
} else if (!thetaAtMaximum) {
//printf("Going round with average theta of %g\n",averageTheta);
tentativeTheta=maximumMovement;
thetaAtMaximum=true; // seems to be odd compiler error
} else {
break;
}
}
totalThru=0.0;
theta_=maximumMovement;
bool goBackOne = false;
if (objective_->type()>1)
dualIn_=saveDj;
//printf("%d remain out of %d\n",numberRemaining,number);
int iTry=0;
#define MAXTRY 1000
// Second phase: repeated Harris-style passes over the candidate list,
// widening theta until enough "flow-through" covers the reduced cost.
if (numberRemaining&&upperTheta=0&&0) {
double thruCost = infeasibilityCost_*spare[pivotOne];
if (thruCost>=0.99*fabs(dualIn_))
printf("Could pivot on %d as change %g dj %g\n",
index[pivotOne],thruCost,dualIn_);
double alpha = spare[pivotOne];
double oldValue = rhs[pivotOne];
theta_ = oldValue/alpha;
pivotRow_=pivotOne;
// Stop loop
iTry=MAXTRY;
}
// first get ratio with tolerance
for ( ;iTry=acceptablePivot) {
upperTheta = (oldValue+primalTolerance_)/alpha;
iBest=iIndex; // just in case weird numbers
}
}
// now look at best in this lot
// But also see how infeasible small pivots will make
double sumInfeasibilities=0.0;
double bestPivot=acceptablePivot;
pivotRow_=-1;
for (iIndex=0;iIndexchangeInCost(pivotVariable_[iRow],trueAlpha,rhs[iIndex]);
setActive(iRow);
if (alpha>bestPivot) {
bestPivot=alpha;
theta_ = oldValue/bestPivot;
pivotRow_=iIndex;
} else if (alpha1.0e-6&& bestPivot<1.0e-3) {
// back to previous one
goBackOne = true;
break;
} else if (pivotRow_==-1&&upperTheta>largeValue_) {
if (lastPivot>acceptablePivot) {
// back to previous one
goBackOne = true;
} else {
// can only get here if all pivots so far too small
}
break;
} else if (totalThru>=dualCheck) {
if (sumInfeasibilities>primalTolerance_&&!nonLinearCost_->numberInfeasibilities()) {
// Looks a bad choice
if (lastPivot>acceptablePivot) {
goBackOne=true;
} else {
// say no good
dualIn_=0.0;
}
}
break; // no point trying another loop
} else {
lastPivotRow=pivotRow_;
lastTheta = theta_;
if (bestPivot>bestEverPivot)
bestEverPivot=bestPivot;
}
}
// can get here without pivotRow_ set but with lastPivotRow
if (goBackOne||(pivotRow_<0&&lastPivotRow>=0)) {
// back to previous one
pivotRow_=lastPivotRow;
theta_ = lastTheta;
}
} else if (pivotRow_<0&&maximumMovement>1.0e20) {
// looks unbounded
valueOut_=COIN_DBL_MAX; // say odd
if (nonLinearCost_->numberInfeasibilities()) {
// but infeasible??
// move variable but don't pivot
tentativeTheta=1.0e50;
// NOTE(review): garbled loop header below — was a scan over candidates
// to find how far the variable can move without pivoting.
for (iIndex=0;iIndex0.0) {
// basic variable going towards lower bound
double bound = lower_[iPivot];
oldValue -= bound;
} else {
// basic variable going towards upper bound
double bound = upper_[iPivot];
oldValue = bound-oldValue;
alpha = - alpha;
}
if (oldValue-tentativeTheta*alpha<0.0) {
tentativeTheta = oldValue/alpha;
}
}
// If free in then see if we can get to 0.0
if (lowerIn_<-1.0e20&&upperIn_>1.0e20) {
if (dualIn_*valueIn_>0.0) {
if (fabs(valueIn_)<1.0e-2&&(tentativeTheta1.0e20)) {
tentativeTheta = fabs(valueIn_);
}
}
}
if (tentativeTheta<1.0e10)
valueOut_=valueIn_+way*tentativeTheta;
}
}
//if (iTry>50)
//printf("** %d tries\n",iTry);
// A pivot row was found: translate the list position into row/sequence
// numbers and enforce a minimum (anti-degeneracy) step.
if (pivotRow_>=0) {
int position=pivotRow_; // position in list
pivotRow_=index[position];
alpha_=work[indexPoint[position]];
// translate to sequence
sequenceOut_ = pivotVariable_[pivotRow_];
valueOut_ = solution(sequenceOut_);
lowerOut_=lower_[sequenceOut_];
upperOut_=upper_[sequenceOut_];
#define MINIMUMTHETA 1.0e-12
// Movement should be minimum for anti-degeneracy - unless
// fixed variable out
double minimumTheta;
if (upperOut_>lowerOut_)
minimumTheta=MINIMUMTHETA;
else
minimumTheta=0.0;
// But can't go infeasible
double distance;
if (alpha_*way>0.0)
distance=valueOut_-lowerOut_;
else
distance=upperOut_-valueOut_;
if (distance-minimumTheta*fabs(alpha_)<-primalTolerance_)
minimumTheta = std::max(0.0,(distance+0.5*primalTolerance_)/fabs(alpha_));
// will we need to increase tolerance
//#define CLP_DEBUG
double largestInfeasibility = primalTolerance_;
if (theta_primalTolerance_&&(handler_->logLevel()&32)>-1)
printf("Primal tolerance increased from %g to %g\n",
primalTolerance_,largestInfeasibility);
#endif
//#undef CLP_DEBUG
primalTolerance_ = std::max(primalTolerance_,largestInfeasibility);
}
// Need to look at all in some cases
if (theta_>tentativeTheta) {
for (iIndex=0;iIndex1.0e-6||(specialOptions_&4)!=0) {
upperOut_ = nonLinearCost_->nearest(sequenceOut_,newValue);
} else {
upperOut_ = newValue;
}
} else {
directionOut_=1; // to lower bound
if (fabs(theta_)>1.0e-6||(specialOptions_&4)!=0) {
lowerOut_ = nonLinearCost_->nearest(sequenceOut_,newValue);
} else {
lowerOut_ = newValue;
}
}
dualOut_ = reducedCost(sequenceOut_);
} else if (maximumMovement<1.0e20) {
// flip
pivotRow_ = -2; // so we can tell its a flip
sequenceOut_ = sequenceIn_;
valueOut_ = valueIn_;
dualOut_ = dualIn_;
lowerOut_ = lowerIn_;
upperOut_ = upperIn_;
alpha_ = 0.0;
if (way<0.0) {
directionOut_=1; // to lower bound
theta_ = lowerOut_ - valueOut_;
} else {
directionOut_=-1; // to upper bound
theta_ = upperOut_ - valueOut_;
}
}
// Update the running average step length used to seed the next ratio test.
double theta1 = std::max(theta_,1.0e-12);
double theta2 = numberIterations_*nonLinearCost_->averageTheta();
// Set average theta
nonLinearCost_->setAverageTheta((theta1+theta2)/(static_cast (numberIterations_+1)));
//if (numberIterations_%1000==0)
//printf("average theta is %g\n",nonLinearCost_->averageTheta());
// clear arrays
CoinZeroN(spare,numberRemaining);
// put back original bounds etc
CoinMemcpyN(index,numberRemaining,rhsArray->getIndices());
rhsArray->setNumElements(numberRemaining);
rhsArray->setPacked();
nonLinearCost_->goBackAll(rhsArray);
rhsArray->clear();
}
/*
Chooses primal pivot column
updateArray has cost updates (also use pivotRow_ from last iteration)
Would be faster with separate region to scan
and will have this (with square of infeasibility) when steepest
For easy problems we can just choose one of the first columns we look at
*/
/* Choose the entering column for the primal simplex.
   The actual pricing is delegated to primalColumnPivot_; while it runs,
   the scaled copy of the matrix (with row scaling suppressed) is swapped
   in when one exists.  On success this fills in valueIn_, dualIn_,
   lowerIn_, upperIn_ and directionIn_ for the chosen sequenceIn_,
   double-checking the sign of the reduced cost against the piecewise
   linear cost when nonLinearCost_ says both directions are possible. */
void
IClpSimplexPrimal::primalColumn(CoinIndexedVector * updates,
                                CoinIndexedVector * spareRow1,
                                CoinIndexedVector * spareRow2,
                                CoinIndexedVector * spareColumn1,
                                CoinIndexedVector * spareColumn2)
{
  // Keep the current matrix/scaling so they can be restored afterwards.
  ClpMatrixBase * originalMatrix = matrix_;
  double * originalRowScale = rowScale_;
  if (scaledMatrix_) {
    rowScale_ = NULL;
    matrix_ = scaledMatrix_;
  }
  sequenceIn_ = primalColumnPivot_->pivotColumn(updates, spareRow1,
                                                spareRow2, spareColumn1,
                                                spareColumn2);
  if (scaledMatrix_) {
    matrix_ = originalMatrix;
    rowScale_ = originalRowScale;
  }
  if (sequenceIn_ < 0) {
    // No candidate column found.
    sequenceIn_ = -1;
    return;
  }
  valueIn_ = solution_[sequenceIn_];
  dualIn_ = dj_[sequenceIn_];
  if (nonLinearCost_->lookBothWays()) {
    // double check against the piecewise-linear cost information
    IClpSimplex::Status status = getStatus(sequenceIn_);
    if (status == IClpSimplex::atUpperBound && dualIn_ < 0.0) {
      // move to other side
      printf("For %d U (%g, %g, %g) dj changed from %g",
             sequenceIn_, lower_[sequenceIn_], solution_[sequenceIn_],
             upper_[sequenceIn_], dualIn_);
      dualIn_ -= nonLinearCost_->changeUpInCost(sequenceIn_);
      printf(" to %g\n", dualIn_);
      nonLinearCost_->setOne(sequenceIn_, upper_[sequenceIn_] + 2.0 * currentPrimalTolerance());
      setStatus(sequenceIn_, IClpSimplex::atLowerBound);
    } else if (status == IClpSimplex::atLowerBound && dualIn_ > 0.0) {
      // move to other side
      printf("For %d L (%g, %g, %g) dj changed from %g",
             sequenceIn_, lower_[sequenceIn_], solution_[sequenceIn_],
             upper_[sequenceIn_], dualIn_);
      dualIn_ -= nonLinearCost_->changeDownInCost(sequenceIn_);
      printf(" to %g\n", dualIn_);
      nonLinearCost_->setOne(sequenceIn_, lower_[sequenceIn_] - 2.0 * currentPrimalTolerance());
      setStatus(sequenceIn_, IClpSimplex::atUpperBound);
    }
  }
  lowerIn_ = lower_[sequenceIn_];
  upperIn_ = upper_[sequenceIn_];
  // A positive reduced cost means the variable should decrease.
  directionIn_ = (dualIn_ > 0.0) ? -1 : 1;
}
/* The primals are updated by the given array.
Returns number of infeasibilities.
After rowArray will have list of cost changes
*/
int
IClpSimplexPrimal::updatePrimalsInPrimal(CoinIndexedVector * rowArray,
double theta,
double & objectiveChange,
int valuesPass)
{
// NOTE(review): parts of this routine are text-damaged — loop headers lost
// the text between '<' and '>' (e.g. both for-loops below), fusing several
// statements.  TODO: restore from upstream Clp before relying on it.
// Updates basic primal values by theta times the column in rowArray,
// snapping variables that reach a bound via nonLinearCost_->setOne and
// collecting the resulting cost changes back into rowArray (packed).
// Cost on pivot row may change - may need to change dualIn
double oldCost=0.0;
if (pivotRow_>=0)
oldCost = cost_[sequenceOut_];
//rowArray->scanAndPack();
double * work=rowArray->denseVector();
int number=rowArray->getNumElements();
int * which=rowArray->getIndices();
int newNumber = 0;
// Position of the pivot row inside the rebuilt packed list (if any).
int pivotPosition = -1;
nonLinearCost_->setChangeInCost(0.0);
//printf("XX 4138 sol %g lower %g upper %g cost %g status %d\n",
// solution_[4138],lower_[4138],upper_[4138],cost_[4138],status_[4138]);
// allow for case where bound+tolerance == bound
//double tolerance = 0.999999*primalTolerance_;
double relaxedTolerance = 1.001*primalTolerance_;
int iIndex;
if (!valuesPass) {
// Normal iteration: only rows marked active by the ratio test (or all,
// if theta_ went negative) need their bound status re-checked.
for (iIndex=0;iIndex0.0) {
// going down
if (value<=lower_[iPivot]+primalTolerance_) {
if (iPivot==sequenceOut_&&value>lower_[iPivot]-relaxedTolerance)
value=lower_[iPivot];
double difference = nonLinearCost_->setOne(iPivot,value);
assert (!difference||fabs(change)>1.0e9);
}
} else {
// going up
if (value>=upper_[iPivot]-primalTolerance_) {
if (iPivot==sequenceOut_&&valuesetOne(iPivot,value);
assert (!difference||fabs(change)>1.0e9);
}
}
}
#endif
if (active(iRow)||theta_<0.0) {
clearActive(iRow);
// But make sure one going out is feasible
if (change>0.0) {
// going down
if (value<=lower_[iPivot]+primalTolerance_) {
if (iPivot==sequenceOut_&&value>=lower_[iPivot]-relaxedTolerance)
value=lower_[iPivot];
double difference = nonLinearCost_->setOne(iPivot,value);
if (difference) {
if (iRow==pivotRow_)
pivotPosition=newNumber;
work[newNumber] = difference;
//change reduced cost on this
dj_[iPivot] = -difference;
which[newNumber++]=iRow;
}
}
} else {
// going up
if (value>=upper_[iPivot]-primalTolerance_) {
if (iPivot==sequenceOut_&&valuesetOne(iPivot,value);
if (difference) {
if (iRow==pivotRow_)
pivotPosition=newNumber;
work[newNumber] = difference;
//change reduced cost on this
dj_[iPivot] = -difference;
which[newNumber++]=iRow;
}
}
}
}
}
} else {
// values pass so look at all
for (iIndex=0;iIndex0.0) {
// going down
if (value<=lower_[iPivot]+primalTolerance_) {
if (iPivot==sequenceOut_&&value>lower_[iPivot]-relaxedTolerance)
value=lower_[iPivot];
double difference = nonLinearCost_->setOne(iPivot,value);
if (difference) {
if (iRow==pivotRow_)
pivotPosition=newNumber;
work[newNumber] = difference;
//change reduced cost on this
dj_[iPivot] = -difference;
which[newNumber++]=iRow;
}
}
} else {
// going up
if (value>=upper_[iPivot]-primalTolerance_) {
if (iPivot==sequenceOut_&&valuesetOne(iPivot,value);
if (difference) {
if (iRow==pivotRow_)
pivotPosition=newNumber;
work[newNumber] = difference;
//change reduced cost on this
dj_[iPivot] = -difference;
which[newNumber++]=iRow;
}
}
}
}
}
// Accumulate the cost change and repack the (now cost-change) vector.
objectiveChange += nonLinearCost_->changeInCost();
rowArray->setPacked();
#if 0
rowArray->setNumElements(newNumber);
rowArray->expand();
if (pivotRow_>=0) {
dualIn_ += (oldCost-cost_[sequenceOut_]);
// update change vector to include pivot
rowArray->add(pivotRow_,-dualIn_);
// and convert to packed
rowArray->scanAndPack();
} else {
// and convert to packed
rowArray->scanAndPack();
}
#else
if (pivotRow_>=0) {
double dualIn = dualIn_+(oldCost-cost_[sequenceOut_]);
// update change vector to include pivot
if (pivotPosition>=0) {
work[pivotPosition] -= dualIn;
} else {
work[newNumber]=-dualIn;
which[newNumber++]=pivotRow_;
}
}
rowArray->setNumElements(newNumber);
#endif
return 0;
}
// Perturbs problem
void
IClpSimplexPrimal::perturb(int type)
{
// NOTE(review): this routine is text-damaged — many lines lost the text
// between '<' and '>' (loop headers at the column/row perturbation loops
// and the cleanup loop), and the tail of the following unPerturb() method
// appears to have been fused into this body (the "return true" below).
// TODO: restore both methods from upstream Clp before relying on them.
// Perturbs bounds slightly (random, scale-aware amounts) to break
// degeneracy; perturbation_ encodes the mode (>100 = already perturbed).
if (perturbation_>100)
return; //perturbed already
if (perturbation_==100)
perturbation_=50; // treat as normal
int savePerturbation = perturbation_;
int i;
if (!numberIterations_)
cleanStatus(); // make sure status okay
// Make sure feasible bounds
if (nonLinearCost_)
nonLinearCost_->feasibleBounds();
// look at element range
double smallestNegative;
double largestNegative;
double smallestPositive;
double largestPositive;
matrix_->rangeOfElements(smallestNegative, largestNegative,
smallestPositive, largestPositive);
smallestPositive = std::min(fabs(smallestNegative),smallestPositive);
largestPositive = std::max(fabs(largestNegative),largestPositive);
double elementRatio = largestPositive/smallestPositive;
if (!numberIterations_&&perturbation_==50) {
// See if we need to perturb
double * sort = new double[numberRows_];
// NOTE(review): garbled line — the loop filling/examining `sort` was
// fused with the element-ratio early-exit test.
for (i=0;inumberRows_||elementRatio>1.0e12) {
perturbation_=100;
return; // good enough
}
}
// primal perturbation
double perturbation=1.0e-20;
int numberNonZero=0;
// maximum fraction of rhs/bounds to perturb
double maximumFraction = 1.0e-5;
if (perturbation_>=50) {
perturbation = 1.0e-4;
// NOTE(review): garbled loop header — scans non-fixed columns to build
// an average bound magnitude for the perturbation size.
for (i=0;ilower_[i]+primalTolerance_) {
double lowerValue, upperValue;
if (lower_[i]>-1.0e20)
lowerValue = fabs(lower_[i]);
else
lowerValue=0.0;
if (upper_[i]<1.0e20)
upperValue = fabs(upper_[i]);
else
upperValue=0.0;
double value = std::max(fabs(lowerValue),fabs(upperValue));
value = std::min(value,upper_[i]-lower_[i]);
#if 1
if (value) {
perturbation += value;
numberNonZero++;
}
#else
perturbation = std::max(perturbation,value);
#endif
}
}
if (numberNonZero)
perturbation /= static_cast (numberNonZero);
else
perturbation = 1.0e-1;
if (perturbation_>50&&perturbation_<60) {
// reduce
while (perturbation_>50) {
perturbation_--;
perturbation *= 0.25;
}
}
} else if (perturbation_<100) {
perturbation = pow(10.0,perturbation_);
// user is in charge
maximumFraction = 1.0;
}
double largestZero=0.0;
double largest=0.0;
double largestPerCent=0.0;
bool printOut=(handler_->logLevel()==63);
printOut=false; //off
// Check if all slack
int number=0;
int iSequence;
// NOTE(review): garbled line — the slack-count loop was fused with the
// perturbation-taming test.
for (iSequence=0;iSequence100.0) {
// tone down perturbation
maximumFraction *= 0.1;
}
if (number!=numberRows_)
type=1;
// modify bounds
// Change so at least 1.0e-5 and no more than 0.1
// For now just no more than 0.1
// printf("Pert type %d perturbation %g, maxF %g\n",type,perturbation,maximumFraction);
// seems much slower???#define SAVE_PERT
#ifdef SAVE_PERT
if (2*numberColumns_>maximumPerturbationSize_) {
delete [] perturbationArray_;
maximumPerturbationSize_ = 2* numberColumns_;
perturbationArray_ = new double [maximumPerturbationSize_];
// NOTE(review): garbled — the random-fill loop and the start of the
// type-dependent column perturbation loop were fused here.
for (int iColumn=0;iColumnlowerValue+tolerance) {
double solutionValue = solution_[iSequence];
double difference = upperValue-lowerValue;
difference = std::min(difference,perturbation);
difference = std::min(difference,fabs(solutionValue)+1.0);
double value = maximumFraction*(difference+1.0);
value = std::min(value,0.1);
#ifndef SAVE_PERT
value *= randomNumberGenerator_.randomDouble();
#else
value *= perturbationArray_[2*iSequence];
#endif
if (solutionValue-lowerValue<=primalTolerance_) {
lower_[iSequence] -= value;
} else if (upperValue-solutionValue<=primalTolerance_) {
upper_[iSequence] += value;
} else {
#if 0
if (iSequence>=numberColumns_) {
// may not be at bound - but still perturb (unless free)
if (upperValue>1.0e30&&lowerValue<-1.0e30)
value=0.0;
else
value = - value; // as -1.0 in matrix
} else {
value = 0.0;
}
#else
value=0.0;
#endif
}
if (value) {
if (printOut)
printf("col %d lower from %g to %g, upper from %g to %g\n",
iSequence,lower_[iSequence],lowerValue,upper_[iSequence],upperValue);
if (solutionValue) {
largest = std::max(largest,value);
if (value>(fabs(solutionValue)+1.0)*largestPerCent)
largestPerCent=value/(fabs(solutionValue)+1.0);
} else {
largestZero = std::max(largestZero,value);
}
}
}
}
}
} else {
double tolerance = 100.0*primalTolerance_;
// NOTE(review): garbled loop header — per-column perturbation loop.
for (i=0;ilowerValue+primalTolerance_) {
double value = perturbation*maximumFraction;
value = std::min(value,0.1);
#ifndef SAVE_PERT
value *= randomNumberGenerator_.randomDouble();
#else
value *= perturbationArray_[2*i+1];
#endif
value *= randomNumberGenerator_.randomDouble();
if (savePerturbation!=50) {
if (fabs(value)<=primalTolerance_)
value=0.0;
if (lowerValue>-1.0e20&&lowerValue)
lowerValue -= value * (std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
if (upperValue<1.0e20&&upperValue)
upperValue += value * (std::max(1.0e-2,1.0e-5*fabs(upperValue)));
} else if (value) {
double valueL =value *(std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
// get in range
if (valueL<=tolerance) {
valueL *= 10.0;
while (valueL<=tolerance)
valueL *= 10.0;
} else if (valueL>1.0) {
valueL *= 0.1;
while (valueL>1.0)
valueL *= 0.1;
}
if (lowerValue>-1.0e20&&lowerValue)
lowerValue -= valueL;
double valueU =value *(std::max(1.0e-2,1.0e-5*fabs(upperValue)));
// get in range
if (valueU<=tolerance) {
valueU *= 10.0;
while (valueU<=tolerance)
valueU *= 10.0;
} else if (valueU>1.0) {
valueU *= 0.1;
while (valueU>1.0)
valueU *= 0.1;
}
if (upperValue<1.0e20&&upperValue)
upperValue += valueU;
}
if (lowerValue!=lower_[i]) {
double difference = fabs(lowerValue-lower_[i]);
largest = std::max(largest,difference);
if (difference>fabs(lower_[i])*largestPerCent)
largestPerCent=fabs(difference/lower_[i]);
}
if (upperValue!=upper_[i]) {
double difference = fabs(upperValue-upper_[i]);
largest = std::max(largest,difference);
if (difference>fabs(upper_[i])*largestPerCent)
largestPerCent=fabs(difference/upper_[i]);
}
if (printOut)
printf("col %d lower from %g to %g, upper from %g to %g\n",
i,lower_[i],lowerValue,upper_[i],upperValue);
}
lower_[i]=lowerValue;
upper_[i]=upperValue;
}
// NOTE(review): garbled loop header — per-row perturbation loop.
for (;ilowerValue+tolerance) {
if (savePerturbation!=50) {
if (fabs(value)<=primalTolerance_)
value=0.0;
if (lowerValue>-1.0e20&&lowerValue)
lowerValue -= value * (std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
if (upperValue<1.0e20&&upperValue)
upperValue += value * (std::max(1.0e-2,1.0e-5*fabs(upperValue)));
} else if (value) {
double valueL =value *(std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
// get in range
if (valueL<=tolerance) {
valueL *= 10.0;
while (valueL<=tolerance)
valueL *= 10.0;
} else if (valueL>1.0) {
valueL *= 0.1;
while (valueL>1.0)
valueL *= 0.1;
}
if (lowerValue>-1.0e20&&lowerValue)
lowerValue -= valueL;
double valueU =value *(std::max(1.0e-2,1.0e-5*fabs(upperValue)));
// get in range
if (valueU<=tolerance) {
valueU *= 10.0;
while (valueU<=tolerance)
valueU *= 10.0;
} else if (valueU>1.0) {
valueU *= 0.1;
while (valueU>1.0)
valueU *= 0.1;
}
if (upperValue<1.0e20&&upperValue)
upperValue += valueU;
}
} else if (upperValue>0.0) {
upperValue -= value * (std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
lowerValue -= value * (std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
} else if (upperValue<0.0) {
upperValue += value * (std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
lowerValue += value * (std::max(1.0e-2,1.0e-5*fabs(lowerValue)));
} else {
}
if (lowerValue!=lower_[i]) {
double difference = fabs(lowerValue-lower_[i]);
largest = std::max(largest,difference);
if (difference>fabs(lower_[i])*largestPerCent)
largestPerCent=fabs(difference/lower_[i]);
}
if (upperValue!=upper_[i]) {
double difference = fabs(upperValue-upper_[i]);
largest = std::max(largest,difference);
if (difference>fabs(upper_[i])*largestPerCent)
largestPerCent=fabs(difference/upper_[i]);
}
if (printOut)
printf("row %d lower from %g to %g, upper from %g to %g\n",
i-numberColumns_,lower_[i],lowerValue,upper_[i],upperValue);
lower_[i]=lowerValue;
upper_[i]=upperValue;
}
}
// Clean up
// NOTE(review): garbled — the cleanup loop, the CLP_SIMPLEX_PERTURB
// message, and the start of unPerturb() (its checkInfeasibilities call
// and "Try using dual" return) were fused together by the text damage.
for (i=0;imessage(CLP_SIMPLEX_PERTURB,messages_)
<<100.0*maximumFraction<checkInfeasibilities(0.0);
#if 1
// Try using dual
return true;
#else
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
return false;
#endif
}
// Unflag all variables and return number unflagged
int
IClpSimplexPrimal::unflag()
{
// NOTE(review): the scan loop below is text-damaged (its header lost the
// text between '<' and '>'); intent per the comments is to clear flagged
// status on variables whose dj exceeds a relaxed dual tolerance.
// Returns the number of variables unflagged.
int i;
int number = numberRows_+numberColumns_;
int numberFlagged=0;
// we can't really trust infeasibilities if there is dual error
// allow tolerance bigger than standard to check on duals
double relaxedToleranceD=dualTolerance_ + std::min(1.0e-2,10.0*largestDualError_);
for (i=0;irelaxedToleranceD)
numberFlagged++;
}
}
// Let the matrix (e.g. gub) unflag any of its own extra variables.
numberFlagged += matrix_->generalExpanded(this,8,i);
if (handler_->logLevel()>2&&numberFlagged&&objective_->type()>1)
printf("%d unflagged\n",numberFlagged);
return numberFlagged;
}
// Do not change infeasibility cost and always say optimal
// Set or clear the "do not change infeasibility cost, always report
// optimal" flag, stored in bit 0 of specialOptions_.
void
IClpSimplexPrimal::alwaysOptimal(bool onOff)
{
  const int mask = 1;
  specialOptions_ = onOff ? (specialOptions_ | mask)
                          : (specialOptions_ & ~mask);
}
// Query the "always optimal" flag (bit 0 of specialOptions_).
bool
IClpSimplexPrimal::alwaysOptimal() const
{
  const int mask = 1;
  return (specialOptions_ & mask) ? true : false;
}
// Flatten outgoing variables i.e. - always to exact bound
// Set or clear the "flatten outgoing variables to their exact bound"
// flag, stored in bit 2 (value 4) of specialOptions_.
void
IClpSimplexPrimal::exactOutgoing(bool onOff)
{
  const int mask = 4;
  specialOptions_ = onOff ? (specialOptions_ | mask)
                          : (specialOptions_ & ~mask);
}
// Query the "exact outgoing" flag (bit 2, value 4, of specialOptions_).
bool
IClpSimplexPrimal::exactOutgoing() const
{
  const int mask = 4;
  return (specialOptions_ & mask) ? true : false;
}
/*
Reasons to come out (normal mode/user mode):
-1 normal
-2 factorize now - good iteration/ NA
-3 slight inaccuracy - refactorize - iteration done/ same but factor done
-4 inaccuracy - refactorize - no iteration/ NA
-5 something flagged - go round again/ pivot not possible
+2 looks unbounded
+3 max iterations (iteration done)
*/
/* Do the second half of a primal iteration.
   Assumes the pivot column (sequenceIn_) has already been chosen.
   Updates the incoming column through the factorization, runs the
   ratio test (primalRow), replaces the column in the basis and
   updates the primal solution and reduced costs.
   Return codes (normal mode / user mode — see comment block above):
   -1 normal, -2 factorize now, -3 slight inaccuracy, -4 inaccuracy,
   -5 something flagged or pivot vetoed, +2 looks unbounded,
   +3 max iterations.
   NOTE(review): several statements in this copy of the file appear
   corrupted by text extraction (e.g. "<extendUpdated",
   "saveDj*dualIn_checkValue"); compare against upstream Clp
   ClpSimplexPrimal::pivotResult before editing logic here. */
int
IClpSimplexPrimal::pivotResult(int ifValuesPass)
{
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
bool roundAgain=true;
int returnCode=-1;
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
//int returnCodeBack = 1000;
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
//
// loop round if user setting and doing refactorization
while (roundAgain) {
roundAgain=false;
returnCode=-1;
pivotRow_=-1;
sequenceOut_=-1;
rowArray_[1]->clear();
// Disabled debug block (dead code; looks truncated in this copy).
#if 0
{
int seq[]={612,643};
int k;
for (k=0;kupdateColumn(rowArray_[2],rowArray_[1]);
djval = cost_[iSeq];
work=rowArray_[1]->denseVector();
number=rowArray_[1]->getNumElements();
which=rowArray_[1]->getIndices();
for (iIndex=0;iIndexcomp)
printf("Bad dj %g for %d - true is %g\n",
dj_[iSeq],iSeq,djval);
assert (fabs(djval)<1.0e-3||djval*dj_[iSeq]>0.0);
rowArray_[1]->clear();
}
}
}
#endif
// we found a pivot column
// update the incoming column
unpackPacked(rowArray_[1]);
// save reduced cost
double saveDj = dualIn_;
factorization_->updateColumnFT(rowArray_[2],rowArray_[1]);
// Get extra rows
matrix_->extendUpdated(this,rowArray_[1],0);
// do ratio test and re-compute dj
primalRow(rowArray_[1],rowArray_[3],rowArray_[2],rowArray_[0],
ifValuesPass);
//TODO: We are heading to an actual pivot. Clear the flags for the next iteration
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
//if (CyPivotIsAcceptable(static_cast(this)) == 0)
// CyLP hook: a Python-side callback may veto this pivot; if so,
// clean up the work arrays and report -5 (pivot not possible).
if (!isPivotAcceptable())
{
returnCode = -5;
clearAll();
pivotRow_=-1;
break;
}
if (ifValuesPass) {
saveDj=dualIn_;
//assert (fabs(alpha_)>=1.0e-5||(objective_->type()<2||!objective_->activated())||pivotRow_==-2);
if (pivotRow_==-1||(pivotRow_>=0&&fabs(alpha_)<1.0e-5)) {
if(fabs(dualIn_)<1.0e2*dualTolerance_&&objective_->type()<2) {
// try other way
directionIn_=-directionIn_;
primalRow(rowArray_[1],rowArray_[3],rowArray_[2],rowArray_[0],
0);
}
if (pivotRow_==-1||(pivotRow_>=0&&fabs(alpha_)<1.0e-5)) {
if (solveType_==1) {
// reject it
char x = isColumn(sequenceIn_) ? 'C' :'R';
// NOTE(review): the following lines look garbled in this copy;
// upstream flags the variable and cleans up gub bookkeeping here.
handler_->message(CLP_SIMPLEX_FLAG,messages_)
<extendUpdated(this,rowArray_[1],1);
double checkValue=1.0e-2;
if (largestDualError_>1.0e-5)
checkValue=1.0e-1;
double test2 = dualTolerance_;
double test1 = 1.0e-20;
#if 0 //def FEB_TRY
if (factorization_->pivots()<1) {
test1 = -1.0e-4;
if ((saveDj<0.0&&dualIn_<-1.0e-5*dualTolerance_)||
(saveDj>0.0&&dualIn_>1.0e-5*dualTolerance_))
test2=0.0; // allow through
}
#endif
// Accuracy check: compare the dj computed before and after the
// ratio test; a large discrepancy means numerical trouble.
if (!ifValuesPass&&solveType_==1&&(saveDj*dualIn_checkValue*(1.0+fabs(saveDj))||
fabs(dualIn_)message(CLP_PRIMAL_DJ,messages_)
<checkInfeasibilities(0.0);
}
sequenceOut_=-1;
break;
} else {
// take on more relaxed criterion
if (saveDj*dualIn_2.0e-1*(1.0+fabs(dualIn_))||
fabs(dualIn_)message(CLP_SIMPLEX_FLAG,messages_)
<saferTolerances (1.0e-15,-1.03);
#endif
progress_.clearBadTimes();
lastBadIteration_ = numberIterations_; // say be more cautious
clearAll();
pivotRow_=-1;
returnCode=-5;
sequenceOut_=-1;
break;
}
}
}
// pivotRow_ >= 0 means a genuine basis change (not a bound flip).
if (pivotRow_>=0) {
if (solveType_==2) {
// **** Coding for user interface
// do ray
primalRay(rowArray_[1]);
// update duals
// as packed need to find pivot row
//assert (rowArray_[1]->packedMode());
//int i;
//alpha_ = rowArray_[1]->denseVector()[pivotRow_];
CoinAssert (fabs(alpha_)>1.0e-8);
double multiplier = dualIn_/alpha_;
rowArray_[0]->insert(pivotRow_,multiplier);
factorization_->updateColumnTranspose(rowArray_[2],rowArray_[0]);
// put row of tableau in rowArray[0] and columnArray[0]
matrix_->transposeTimes(this,-1.0,
rowArray_[0],columnArray_[1],columnArray_[0]);
// update column djs
int i;
int * index = columnArray_[0]->getIndices();
int number = columnArray_[0]->getNumElements();
double * element = columnArray_[0]->denseVector();
for (i=0;isetNumElements(0);
// and row djs
index = rowArray_[0]->getIndices();
number = rowArray_[0]->getNumElements();
element = rowArray_[0]->denseVector();
for (i=0;isetNumElements(0);
// check incoming
CoinAssert (fabs(dj_[sequenceIn_])<1.0e-1);
}
// if stable replace in basis
// If gub or odd then alpha and pivotRow may change
int updateType=0;
int updateStatus = matrix_->generalExpanded(this,3,updateType);
if (updateType>=0)
updateStatus = factorization_->replaceColumn(this,
rowArray_[2],
rowArray_[1],
pivotRow_,
alpha_,
(moreSpecialOptions_&16)!=0);
// updateStatus: 0 ok, 1/4 slight error, 2 major error, 3 out of
// memory (refactorize), 5 refactorize for other reasons.
// if no pivots, bad update but reasonable alpha - take and invert
if (updateStatus==2&&
lastGoodIteration_==numberIterations_&&fabs(alpha_)>1.0e-5)
updateStatus=4;
if (updateStatus==1||updateStatus==4) {
// slight error
if (factorization_->pivots()>5||updateStatus==4) {
returnCode=-3;
}
} else if (updateStatus==2) {
// major error
// better to have small tolerance even if slower
factorization_->zeroTolerance(std::min(factorization_->zeroTolerance(),1.0e-15));
int maxFactor = factorization_->maximumPivots();
if (maxFactor>10) {
if (forceFactorization_<0)
forceFactorization_= maxFactor;
forceFactorization_ = std::max(1,(forceFactorization_>>1));
}
// later we may need to unwind more e.g. fake bounds
if(lastGoodIteration_ != numberIterations_) {
clearAll();
pivotRow_=-1;
if (solveType_==1) {
returnCode=-4;
break;
} else {
// user in charge - re-factorize
int lastCleaned=0;
ClpSimplexProgress dummyProgress;
if (saveStatus_)
statusOfProblemInPrimal(lastCleaned,1,&dummyProgress,true,ifValuesPass);
else
statusOfProblemInPrimal(lastCleaned,0,&dummyProgress,true,ifValuesPass);
roundAgain=true;
continue;
}
} else {
// need to reject something
if (solveType_==1) {
char x = isColumn(sequenceIn_) ? 'C' :'R';
handler_->message(CLP_SIMPLEX_FLAG,messages_)
<pivots()<
0.5*factorization_->maximumPivots()&&
factorization_->pivots()<200)
factorization_->areaFactor(
factorization_->areaFactor() * 1.1);
returnCode =-2; // factorize now
} else if (updateStatus==5) {
problemStatus_=-2; // factorize now
}
// here do part of steepest - ready for next iteration
if (!ifValuesPass)
primalColumnPivot_->updateWeights(rowArray_[1]);
} else {
if (pivotRow_==-1) {
// no outgoing row is valid
if (valueOut_!=COIN_DBL_MAX) {
double objectiveChange=0.0;
theta_=valueOut_-valueIn_;
updatePrimalsInPrimal(rowArray_[1],theta_, objectiveChange,ifValuesPass);
solution_[sequenceIn_] += theta_;
}
rowArray_[0]->clear();
if (!factorization_->pivots()&&acceptablePivot_<=1.0e-8) {
returnCode = 2; //say looks unbounded
// do ray
primalRay(rowArray_[1]);
} else if (solveType_==2) {
// refactorize
int lastCleaned=0;
ClpSimplexProgress dummyProgress;
if (saveStatus_)
statusOfProblemInPrimal(lastCleaned,1,&dummyProgress,true,ifValuesPass);
else
statusOfProblemInPrimal(lastCleaned,0,&dummyProgress,true,ifValuesPass);
roundAgain=true;
continue;
} else {
acceptablePivot_=1.0e-8;
returnCode = 4; //say looks unbounded but has iterated
}
break;
} else {
// flipping from bound to bound
}
}
double oldCost = 0.0;
if (sequenceOut_>=0)
oldCost=cost_[sequenceOut_];
// update primal solution
double objectiveChange=0.0;
// after this rowArray_[1] is not empty - used to update djs
// If pivot row >= numberRows then may be gub
int savePivot = pivotRow_;
if (pivotRow_>=numberRows_)
pivotRow_=-1;
updatePrimalsInPrimal(rowArray_[1],theta_, objectiveChange,ifValuesPass);
pivotRow_=savePivot;
double oldValue = valueIn_;
if (directionIn_==-1) {
// as if from upper bound
if (sequenceIn_!=sequenceOut_) {
// variable becoming basic
valueIn_ -= fabs(theta_);
} else {
valueIn_=lowerIn_;
}
} else {
// as if from lower bound
if (sequenceIn_!=sequenceOut_) {
// variable becoming basic
valueIn_ += fabs(theta_);
} else {
valueIn_=upperIn_;
}
}
objectiveChange += dualIn_*(valueIn_-oldValue);
// outgoing
if (sequenceIn_!=sequenceOut_) {
if (directionOut_>0) {
valueOut_ = lowerOut_;
} else {
valueOut_ = upperOut_;
}
if(valueOut_upper_[sequenceOut_]+primalTolerance_)
valueOut_=upper_[sequenceOut_]+0.9*primalTolerance_;
// may not be exactly at bound and bounds may have changed
// Make sure outgoing looks feasible
directionOut_=nonLinearCost_->setOneOutgoing(sequenceOut_,valueOut_);
// May have got inaccurate
//if (oldCost!=cost_[sequenceOut_])
//printf("costchange on %d from %g to %g\n",sequenceOut_,
// oldCost,cost_[sequenceOut_]);
if (solveType_!=2)
dj_[sequenceOut_]=cost_[sequenceOut_]-oldCost; // normally updated next iteration
solution_[sequenceOut_]=valueOut_;
}
// change cost and bounds on incoming if primal
nonLinearCost_->setOne(sequenceIn_,valueIn_);
int whatNext=housekeeping(objectiveChange);
//nonLinearCost_->validate();
#if CLP_DEBUG >1
{
double sum;
int ninf= matrix_->checkFeasible(this,sum);
if (ninf)
printf("infeas %d\n",ninf);
}
#endif
if (whatNext==1) {
returnCode =-2; // refactorize
} else if (whatNext==2) {
// maximum iterations or equivalent
returnCode=3;
} else if(numberIterations_ == lastGoodIteration_
+ 2 * factorization_->maximumPivots()) {
// done a lot of flips - be safe
returnCode =-2; // refactorize
}
// Check event
{
int status = eventHandler_->event(ClpEventHandler::endOfIteration);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfIteration;
returnCode=3;
}
}
}
// In user mode (solveType_==2) refactorize here instead of exiting.
if (solveType_==2&&(returnCode == -2||returnCode==-3)) {
// refactorize here
int lastCleaned=0;
ClpSimplexProgress dummyProgress;
if (saveStatus_)
statusOfProblemInPrimal(lastCleaned,1,&dummyProgress,true,ifValuesPass);
else
statusOfProblemInPrimal(lastCleaned,0,&dummyProgress,true,ifValuesPass);
if (problemStatus_==5) {
printf("Singular basis\n");
problemStatus_=-1;
returnCode=5;
}
}
#ifdef CLP_DEBUG
{
int i;
// not [1] as may have information
for (i=0;i<4;i++) {
if (i!=1)
rowArray_[i]->checkClear();
}
for (i=0;i<2;i++) {
columnArray_[i]->checkClear();
}
}
#endif
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// if (QP_ExistsBannedVariable && returnCode != -5 && returnCode != -4)
// {
// std::cout << "resetting banlist@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n";
// int listSize = numberRows() + numberColumns();
// for (int ii = 0 ; ii < listSize ; ii++)
// QP_BanList[ii] = 0;
// QP_ExistsBannedVariable = 0;
// }
// // @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// std::cout << "return code: " << returnCode << "\n";
//if (returnCode != -1 && returnCode!= -4 && returnCode != -5)
//{
// std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
//}
// if (returnCodeBack == -5 && returnCode != -5)
// assert(0);
// std::cout << "return code: " << returnCode << "\n";
return returnCode;
}
// Create primal ray
// Builds an unbounded-direction ray in ray_ (length numberColumns_,
// zeroed first) from the updated pivot column held in rowArray.
// The sign of the direction comes from -directionIn_; entries below
// zeroTolerance are dropped.
void
IClpSimplexPrimal::primalRay(CoinIndexedVector * rowArray)
{
// Replace any previous ray (ray_ is owned by the model).
delete [] ray_;
ray_ = new double [numberColumns_];
CoinZeroN(ray_,numberColumns_);
int number=rowArray->getNumElements();
int * index = rowArray->getIndices();
double * array = rowArray->denseVector();
double way=-directionIn_;
int i;
double zeroTolerance=1.0e-12;
// NOTE(review): the conditions/loop headers below look truncated in
// this copy (e.g. "sequenceIn_packedMode()", "for (i=0;i=...").
// Upstream Clp branches on packed vs. scatter storage of rowArray.
if (sequenceIn_packedMode()) {
for (i=0;i=zeroTolerance)
ray_[iPivot] = way* arrayValue;
}
} else {
for (i=0;i=zeroTolerance)
ray_[iPivot] = way* arrayValue;
}
}
}
/* Get next superbasic -1 if none,
Normal type is 1
If type is 3 then initializes sorted list
if 2 uses list.
Returns the sequence number of the next superbasic/free variable to
clean up, or -1 when there are none left.  Maintains firstFree_ as a
scan cursor; columnArray is used as scratch for the sorted list when
superBasicType > 1.
*/
int
IClpSimplexPrimal::nextSuperBasic(int superBasicType,CoinIndexedVector * columnArray)
{
// Nothing to do unless a free/superbasic candidate exists and a
// non-zero scan type was requested.
if (firstFree_>=0&&superBasicType) {
int returnValue=-1;
bool finished=false;
// Loop guards against picking up a variable whose status changed
// (re-scan if the chosen one is no longer superbasic/free).
while (!finished) {
returnValue=firstFree_;
int iColumn=firstFree_+1;
if (superBasicType>1) {
if (superBasicType>2) {
// Initialize list
// Wild guess that lower bound more natural than upper
int number=0;
double * work=columnArray->denseVector();
int * which=columnArray->getIndices();
// NOTE(review): loop header below looks truncated in this copy
// ("for (iColumn=0;iColumn1.0e20"); compare with upstream Clp.
for (iColumn=0;iColumn1.0e20) {
setStatus(iColumn,isFree);
break;
} else if (!flagged(iColumn)) {
// put ones near bounds at end after sorting
work[number]= - std::min(0.1*(solution_[iColumn]-lower_[iColumn]),
upper_[iColumn]-solution_[iColumn]);
which[number++] = iColumn;
}
}
}
}
CoinSort_2(work,work+number,which);
columnArray->setNumElements(number);
CoinZeroN(work,number);
}
// Pop the next candidate off the back of the sorted list.
int * which=columnArray->getIndices();
int number = columnArray->getNumElements();
if (!number) {
// finished
iColumn = numberRows_+numberColumns_;
returnValue=-1;
} else {
number--;
returnValue=which[number];
iColumn=returnValue;
columnArray->setNumElements(number);
}
} else {
// Plain linear scan (superBasicType == 1).
for (;iColumn1.0e20) {
setStatus(iColumn,isFree);
break;
} else {
break;
}
}
}
}
}
firstFree_ = iColumn;
finished=true;
if (firstFree_==numberRows_+numberColumns_)
firstFree_=-1;
if (returnValue>=0&&getStatus(returnValue)!=superBasic&&getStatus(returnValue)!=isFree)
finished=false; // somehow picked up odd one
}
return returnValue;
} else {
return -1;
}
}
// Clears all bits and clears rowArray_[1] etc.  Called when an
// iteration is abandoned (e.g. pivot vetoed or bad update) so the
// work arrays and any gub bookkeeping are clean for the next pass.
void
IClpSimplexPrimal::clearAll()
{
// Clean up any gub stuff
matrix_->extendUpdated(this,rowArray_[1],1);
int number=rowArray_[1]->getNumElements();
int * which=rowArray_[1]->getIndices();
int iIndex;
// NOTE(review): loop header below looks truncated in this copy
// ("for (iIndex=0;iIndexclear();"); upstream clears per-variable
// pivoted flags before clearing the vector.
for (iIndex=0;iIndexclear();
// make sure any gub sets are clean
matrix_->generalExpanded(this,11,sequenceIn_);
}
// Sort of lexicographic resolve
// Re-solves with perturbed/re-ranked costs to break degeneracy after
// an initial solve, looping until no degenerate nonbasic djs remain.
// Returns problemStatus_.
// NOTE(review): many statements in this copy of the file appear
// corrupted by text extraction (message streaming chains, loop
// headers); compare against upstream Clp before editing logic.
int
IClpSimplexPrimal::lexSolve()
{
algorithm_ = +1;
//specialOptions_ |= 4;
// save data
ClpDataSave data = saveData();
matrix_->refresh(this); // make sure matrix okay
// Save so can see if doing after dual
int initialStatus=problemStatus_;
int initialIterations = numberIterations_;
int initialNegDjs=-1;
// initialize - maybe values pass and algorithm_ is +1
int ifValuesPass=0;
// Disabled block: would put superbasic costed slacks in the basis.
#if 0
// if so - put in any superbasic costed slacks
if (ifValuesPass&&specialOptions_<0x01000000) {
// Get column copy
const CoinPackedMatrix * columnCopy = matrix();
const int * row = columnCopy->getIndices();
const CoinBigIndex * columnStart = columnCopy->getVectorStarts();
const int * columnLength = columnCopy->getVectorLengths();
//const double * element = columnCopy->getElements();
int n=0;
for (int iColumn = 0;iColumnprimalTolerance_&&
fabs(value-columnUpper_[iColumn])>primalTolerance_) {
int iRow = row[columnStart[iColumn]];
if (getRowStatus(iRow)==basic) {
setRowStatus(iRow,superBasic);
setColumnStatus(iColumn,basic);
n++;
}
}
}
}
printf("%d costed slacks put in basis\n",n);
}
#endif
// Saved copies of the rim (costs/bounds) while the odd-state costs
// are in effect; restored when no degenerate djs remain.
double * originalCost = NULL;
double * originalLower = NULL;
double * originalUpper = NULL;
if (!startup(0,0)) {
// Set average theta
nonLinearCost_->setAverageTheta(1.0e3);
int lastCleaned=0; // last time objective or bounds cleaned up
// Say no pivot has occurred (for steepest edge and updates)
pivotRow_=-2;
// This says whether to restore things etc
int factorType=0;
if (problemStatus_<0&&perturbation_<100) {
perturb(0);
// Can't get here if values pass
assert (!ifValuesPass);
gutsOfSolution(NULL,NULL);
if (handler_->logLevel()>2) {
handler_->message(CLP_SIMPLEX_STATUS,messages_)
<printing(sumPrimalInfeasibilities_>0.0)
<printing(sumDualInfeasibilities_>0.0)
<printing(numberDualInfeasibilitiesWithoutFree_
message()<clear();
}
for (iColumn=0;iColumn<2;iColumn++) {
columnArray_[iColumn]->clear();
}
// give matrix (and model costs and bounds a chance to be
// refreshed (normally null)
matrix_->refresh(this);
// If getting nowhere - why not give it a kick
#if 1
if (perturbation_<101&&numberIterations_>2*(numberRows_+numberColumns_)&&(specialOptions_&4)==0
&&initialStatus!=10) {
perturb(1);
matrix_->rhsOffset(this,true,false);
}
#endif
// If we have done no iterations - special
if (lastGoodIteration_==numberIterations_&&factorType)
factorType=3;
if (saveModel) {
// Doing sprint
if (sequenceIn_<0||numberIterations_>=stopSprint) {
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
if (sequenceIn_<0&&numberIterations_100)
primalColumnPivot_->switchOffSprint();
//lastSprintIteration=numberIterations_;
printf("End small model\n");
}
}
// may factorize, checks if problem finished
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
if (initialStatus==10) {
// cleanup phase
if(initialIterations != numberIterations_) {
if (numberDualInfeasibilities_>10000&&numberDualInfeasibilities_>10*initialNegDjs) {
// getting worse - try perturbing
if (perturbation_<101&&(specialOptions_&4)==0) {
perturb(1);
matrix_->rhsOffset(this,true,false);
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
}
}
} else {
// save number of negative djs
if (!numberPrimalInfeasibilities_)
initialNegDjs=numberDualInfeasibilities_;
// make sure weight won't be changed
if (infeasibilityCost_==1.0e10)
infeasibilityCost_=1.000001e10;
}
}
// See if sprint says redo because of problems
if (numberDualInfeasibilities_==-776) {
// Need new set of variables
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
//lastSprintIteration=numberIterations_;
printf("End small model after\n");
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
}
int numberSprintIterations=0;
int numberSprintColumns = primalColumnPivot_->numberSprintColumns(numberSprintIterations);
if (problemStatus_==777) {
// problems so do one pass with normal
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
// Skip factorization
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,false,saveModel);
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
} else if (problemStatus_<0&&!saveModel&&numberSprintColumns&&firstFree_<0) {
// Build a small (Sprint) sub-model from the best-weighted columns.
int numberSort=0;
int numberFixed=0;
int numberBasic=0;
reasonableSprintIteration = numberIterations_ + 100;
int * whichColumns = new int[numberColumns_];
double * weight = new double[numberColumns_];
int numberNegative=0;
double sumNegative = 0.0;
// now massage weight so all basic in plus good djs
for (iColumn=0;iColumn-1.0e50) {
numberNegative++;
sumNegative -= dj;
}
weight[iColumn]=dj;
whichColumns[iColumn] = iColumn;
}
handler_->message(CLP_SPRINT,messages_)
<lastObjectiveValue-1.0e-7&&sprintPass>5) {
// switch off
printf("Switching off sprint\n");
primalColumnPivot_->switchOffSprint();
} else {
lastObjectiveValue = objectiveValue()*optimizationDirection_;
// sort
CoinSort_2(weight,weight+numberColumns_,whichColumns);
numberSort = std::min(numberColumns_-numberFixed,numberBasic+numberSprintColumns);
// Sort to make consistent ?
std::sort(whichColumns,whichColumns+numberSort);
saveModel = new IClpSimplex(this,numberSort,whichColumns);
delete [] whichColumns;
delete [] weight;
// Skip factorization
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,false,saveModel);
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,saveModel);
stopSprint = numberIterations_+numberSprintIterations;
printf("Sprint with %d columns for %d iterations\n",
numberSprintColumns,numberSprintIterations);
}
}
// Say good factorization
factorType=1;
// Say no pivot has occurred (for steepest edge and updates)
pivotRow_=-2;
// exit if victory declared
if (problemStatus_>=0) {
if (originalCost) {
// Odd-state costs still active: re-rank costs of degenerate
// nonbasic variables and go round again.
// find number nonbasic with zero reduced costs
int numberDegen=0;
int numberTotal = numberColumns_; //+numberRows_;
for (int i=0;i=-dualTolerance_) {
cost_[i]=(numberTotal-i)+randomNumberGenerator_.randomDouble()*0.5;
numberDegen++;
} else {
// fix
cost_[i]=-1.0e10;//lower_[i]=upper_[i];
}
} else if (getStatus(i)==basic) {
cost_[i] = (numberTotal-i)+randomNumberGenerator_.randomDouble()*0.5;
}
}
problemStatus_=-1;
lastObjectiveValue=COIN_DBL_MAX;
// Start check for cycles
progress_.fillFromModel(this);
progress_.startCheck();
printf("%d degenerate after %d iterations\n",numberDegen,
numberIterations_);
if (!numberDegen) {
// No degenerate djs left - restore the true rim and exit
// the odd state.
CoinMemcpyN(originalCost,numberTotal,cost_);
delete [] originalCost;
originalCost=NULL;
CoinMemcpyN(originalLower,numberTotal,lower_);
delete [] originalLower;
CoinMemcpyN(originalUpper,numberTotal,upper_);
delete [] originalUpper;
}
delete nonLinearCost_;
nonLinearCost_ = new ClpNonLinearCost(this);
progress_.endOddState();
continue;
} else {
printf("exiting after %d iterations\n",numberIterations_);
break;
}
}
// test for maximum iterations
if (hitMaximumIterations()||(ifValuesPass==2&&firstFree_<0)) {
problemStatus_=3;
break;
}
if (firstFree_<0) {
if (ifValuesPass) {
// end of values pass
ifValuesPass=0;
int status = eventHandler_->event(ClpEventHandler::endOfValuesPass);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfValuesPass;
break;
}
}
}
// Check event
{
int status = eventHandler_->event(ClpEventHandler::endOfFactorization);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfFactorization;
break;
}
}
// Iterate
whileIterating(ifValuesPass ? 1 : 0);
}
}
assert (!originalCost);
// if infeasible get real values
//printf("XXXXY final cost %g\n",infeasibilityCost_);
progress_.initialWeight_=0.0;
if (problemStatus_==1&&secondaryStatus_!=6) {
infeasibilityCost_=0.0;
createRim(1+4);
nonLinearCost_->checkInfeasibilities(0.0);
sumPrimalInfeasibilities_=nonLinearCost_->sumInfeasibilities();
numberPrimalInfeasibilities_= nonLinearCost_->numberInfeasibilities();
// and get good feasible duals
computeDuals(NULL);
}
// clean up
unflag();
finish(0);
restoreData(data);
return problemStatus_;
}
================================================
FILE: cylp/cpp/IClpSimplexPrimal.hpp
================================================
// Copyright (C) 2002, International Business Machines
// Corporation and others. All Rights Reserved.
/*
Authors
John Forrest
*/
#ifndef IClpSimplexPrimal_H
#define IClpSimplexPrimal_H
#include "IClpSimplex.hpp"
/** This solves LPs using the primal simplex method
It inherits from IClpSimplex. It has no data of its own and
is never created - only cast from a IClpSimplex object at algorithm time.
*/
class IClpSimplexPrimal : public IClpSimplex{
public:
/**@name Description of algorithm */
//@{
/** Primal algorithm
Method
It tries to be a single phase approach with a weight of 1.0 being
given to getting optimal and a weight of infeasibilityCost_ being
given to getting primal feasible. In this version I have tried to
be clever in a stupid way. The idea of fake bounds in dual
seems to work so the primal analogue would be that of getting
bounds on reduced costs (by a presolve approach) and using
these for being above or below feasible region. I decided to waste
memory and keep these explicitly. This allows for non-linear
costs! I have not tested non-linear costs but will be glad
to do something if a reasonable example is provided.
The code is designed to take advantage of sparsity so arrays are
seldom zeroed out from scratch or gone over in their entirety.
The only exception is a full scan to find incoming variable for
Dantzig row choice. For steepest edge we keep an updated list
of dual infeasibilities (actually squares).
On easy problems we don't need full scan - just
pick first reasonable. This method has not been coded.
One problem is how to tackle degeneracy and accuracy. At present
I am using the modification of costs which I put in OSL and which was
extended by Gill et al. I am still not sure whether we will also
need explicit perturbation.
The flow of primal is three while loops as follows:
while (not finished) {
while (not clean solution) {
Factorize and/or clean up solution by changing bounds so
primal feasible. If looks finished check fake primal bounds.
Repeat until status is iterating (-1) or finished (0,1,2)
}
while (status==-1) {
Iterate until no pivot in or out or time to re-factorize.
Flow is:
choose pivot column (incoming variable). if none then
we are primal feasible so looks as if done but we need to
break and check bounds etc.
Get pivot column in tableau
Choose outgoing row. If we don't find one then we look
primal unbounded so break and check bounds etc. (Also the
pivot tolerance is larger after any iterations so that may be
reason)
If we do find outgoing row, we may have to adjust costs to
keep going forwards (anti-degeneracy). Check pivot will be stable
and if unstable throw away iteration and break to re-factorize.
If minor error re-factorize after iteration.
Update everything (this may involve changing bounds on
variables to stay primal feasible.
}
}
TODO's (or maybe not)
At present we never check we are going forwards. I overdid that in
OSL so will try and make a last resort.
Needs partial scan pivot in option.
May need other anti-degeneracy measures, especially if we try and use
loose tolerances as a way to solve in fewer iterations.
I like idea of dynamic scaling. This gives opportunity to decouple
different implications of scaling for accuracy, iteration count and
feasibility tolerance.
for use of exotic parameter startFinishOptions see IClpSimplex.hpp
*/
int primal(int ifValuesPass=0, int startFinishOptions=0);
//@}
/**@name For advanced users */
//@{
/// Do not change infeasibility cost and always say optimal
void alwaysOptimal(bool onOff);
bool alwaysOptimal() const;
/** Normally outgoing variables can go out to slightly negative
values (but within tolerance) - this is to help stability
and degeneracy. This can be switched off
*/
void exactOutgoing(bool onOff);
bool exactOutgoing() const;
//@}
/**@name Functions used in primal */
//@{
/** This has the flow between re-factorizations
Returns a code to say where decision to exit was made
Problem status set to:
-2 re-factorize
-4 Looks optimal/infeasible
-5 Looks unbounded
+3 max iterations
valuesOption has original value of valuesPass
*/
int whileIterating(int valuesOption);
/** Do last half of an iteration. This is split out so people can
force incoming variable. If solveType_ is 2 then this may
re-factorize while normally it would exit to re-factorize.
Return codes
Reasons to come out (normal mode/user mode):
-1 normal
-2 factorize now - good iteration/ NA
-3 slight inaccuracy - refactorize - iteration done/ same but factor done
-4 inaccuracy - refactorize - no iteration/ NA
-5 something flagged - go round again/ pivot not possible
+2 looks unbounded
+3 max iterations (iteration done)
With solveType_ ==2 this should
Pivot in a variable and choose an outgoing one. Assumes primal
feasible - will not go through a bound. Returns step length in theta
Returns ray in ray_
*/
int pivotResult(int ifValuesPass=0);
/** The primals are updated by the given array.
Returns number of infeasibilities.
After rowArray will have cost changes for use next iteration
*/
int updatePrimalsInPrimal(CoinIndexedVector * rowArray,
double theta,
double & objectiveChange,
int valuesPass);
/**
Row array has pivot column
This chooses pivot row.
Rhs array is used for distance to next bound (for speed)
For speed, we may need to go to a bucket approach when many
variables go through bounds
If valuesPass non-zero then compute dj for direction
*/
void primalRow(CoinIndexedVector * rowArray,
CoinIndexedVector * rhsArray,
CoinIndexedVector * spareArray,
CoinIndexedVector * spareArray2,
int valuesPass);
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
/// CyLP addition: variant of primalRow (same argument contract).
void DantzigPrimalRow(CoinIndexedVector * rowArray,
CoinIndexedVector * rhsArray,
CoinIndexedVector * spareArray,
CoinIndexedVector * spareArray2,
int valuesPass);
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
/**
Chooses primal pivot column
updateArray has cost updates (also use pivotRow_ from last iteration)
Would be faster with separate region to scan
and will have this (with square of infeasibility) when steepest
For easy problems we can just choose one of the first columns we look at
*/
void primalColumn(CoinIndexedVector * updateArray,
CoinIndexedVector * spareRow1,
CoinIndexedVector * spareRow2,
CoinIndexedVector * spareColumn1,
CoinIndexedVector * spareColumn2);
/** Checks if tentative optimal actually means unbounded in primal
Returns -3 if not, 2 if is unbounded */
int checkUnbounded(CoinIndexedVector * ray,CoinIndexedVector * spare,
double changeCost);
/** Refactorizes if necessary
Checks if finished. Updates status.
lastCleaned refers to iteration at which some objective/feasibility
cleaning took place.
type - 0 initial so set up save arrays etc
- 1 normal -if good update save
- 2 restoring from saved
saveModel is normally NULL but may not be if doing Sprint
*/
void statusOfProblemInPrimal(int & lastCleaned, int type,
ClpSimplexProgress * progress,
bool doFactorization,
int ifValuesPass,
IClpSimplex * saveModel=NULL);
/// Perturbs problem (method depends on perturbation())
void perturb(int type);
/// Take off effect of perturbation and say whether to try dual
bool unPerturb();
/// Unflag all variables and return number unflagged
int unflag();
/** Get next superbasic -1 if none,
Normal type is 1
If type is 3 then initializes sorted list
if 2 uses list.
*/
int nextSuperBasic(int superBasicType,CoinIndexedVector * columnArray);
/// Create primal ray
void primalRay(CoinIndexedVector * rowArray);
/// Clears all bits and clears rowArray[1] etc
void clearAll();
/// Sort of lexicographic resolve
int lexSolve();
//@}
};
#endif
================================================
FILE: cylp/cpp/IClpSimplexPrimal_Wolfe.cpp
================================================
// Copyright (C) 2002, International Business Machines
// Corporation and others. All Rights Reserved.
/* Notes on implementation of primal simplex algorithm.
When primal feasible(A):
If dual feasible, we are optimal. Otherwise choose an infeasible
basic variable to enter basis from a bound (B). We now need to find an
outgoing variable which will leave problem primal feasible so we get
the column of the tableau corresponding to the incoming variable
(with the correct sign depending if variable will go up or down).
We now perform a ratio test to determine which outgoing variable will
preserve primal feasibility (C). If no variable found then problem
is unbounded (in primal sense). If there is a variable, we then
perform pivot and repeat. Trivial?
-------------------------------------------
A) How do we get primal feasible? All variables have fake costs
outside their feasible region so it is trivial to declare problem
feasible. OSL did not have a phase 1/phase 2 approach but
instead effectively put an extra cost on infeasible basic variables
I am taking the same approach here, although it is generalized
to allow for non-linear costs and dual information.
In OSL, this weight was changed heuristically, here at present
it is only increased if problem looks finished. If problem is
feasible I check for unboundedness. If not unbounded we
could play with going into dual. As long as weights increase
any algorithm would be finite.
B) Which incoming variable to choose is a virtual base class.
For difficult problems steepest edge is preferred while for
very easy (large) problems we will need partial scan.
C) Sounds easy, but this is hardest part of algorithm.
1) Instead of stopping at first choice, we may be able
to allow that variable to go through bound and if objective
still improving choose again. These mini iterations can
increase speed by orders of magnitude but we may need to
go to more of a bucket choice of variable rather than looking
at them one by one (for speed).
2) Accuracy. Basic infeasibilities may be less than
tolerance. Pivoting on these makes objective go backwards.
OSL modified cost so a zero move was made, Gill et al
modified so a strictly positive move was made.
The two problems are that re-factorizations can
change infeasibilities above and below tolerances and that when
finished we need to reset costs and try again.
3) Degeneracy. Gill et al helps but may not be enough. We
may need more. Also it can improve speed a lot if we perturb
the rhs and bounds significantly.
References:
Forrest and Goldfarb, Steepest-edge simplex algorithms for
linear programming - Mathematical Programming 1992
Forrest and Tomlin, Implementing the simplex method for
the Optimization Subroutine Library - IBM Systems Journal 1992
Gill, Murray, Saunders, Wright A Practical Anti-Cycling
Procedure for Linear and Nonlinear Programming SOL report 1988
TODO:
a) Better recovery procedures. At present I never check on forward
progress. There is checkpoint/restart with reducing
re-factorization frequency, but this is only on singular
factorizations.
b) Fast methods for large easy problems (and also the option for
the code to automatically choose which method).
c) We need to be able to stop in various ways for OSI - this
is fairly easy.
*/
#include "CoinPragma.hpp"
#include
#include "CoinHelperFunctions.hpp"
#include "IClpSimplexPrimal_Wolfe.hpp"
#include "ClpFactorization.hpp"
#include "ClpNonLinearCost.hpp"
#include "CoinPackedMatrix.hpp"
#include "CoinIndexedVector.hpp"
#include "ClpPrimalColumnPivot.hpp"
#include "ClpMessage.hpp"
#include "ClpEventHandler.hpp"
#include
#include
#include
#include
#include
// primal
int IClpSimplexPrimal_Wolfe::primal (int ifValuesPass , int startFinishOptions)
{
/*
Method
It tries to be a single phase approach with a weight of 1.0 being
given to getting optimal and a weight of infeasibilityCost_ being
given to getting primal feasible. In this version I have tried to
be clever in a stupid way. The idea of fake bounds in dual
seems to work so the primal analogue would be that of getting
bounds on reduced costs (by a presolve approach) and using
these for being above or below feasible region. I decided to waste
memory and keep these explicitly. This allows for non-linear
costs!
The code is designed to take advantage of sparsity so arrays are
seldom zeroed out from scratch or gone over in their entirety.
The only exception is a full scan to find incoming variable for
Dantzig row choice. For steepest edge we keep an updated list
of dual infeasibilities (actually squares).
On easy problems we don't need full scan - just
pick first reasonable.
One problem is how to tackle degeneracy and accuracy. At present
I am using the modification of costs which I put in OSL and which was
extended by Gill et al. I am still not sure of the exact details.
The flow of primal is three while loops as follows:
while (not finished) {
while (not clean solution) {
Factorize and/or clean up solution by changing bounds so
primal feasible. If looks finished check fake primal bounds.
Repeat until status is iterating (-1) or finished (0,1,2)
}
while (status==-1) {
Iterate until no pivot in or out or time to re-factorize.
Flow is:
choose pivot column (incoming variable). if none then
we are primal feasible so looks as if done but we need to
break and check bounds etc.
Get pivot column in tableau
Choose outgoing row. If we don't find one then we look
primal unbounded so break and check bounds etc. (Also the
pivot tolerance is larger after any iterations so that may be
reason)
If we do find outgoing row, we may have to adjust costs to
keep going forwards (anti-degeneracy). Check pivot will be stable
and if unstable throw away iteration and break to re-factorize.
If minor error re-factorize after iteration.
Update everything (this may involve changing bounds on
variables to stay primal feasible.
}
}
At present we never check we are going forwards. I overdid that in
OSL so will try and make a last resort.
Needs partial scan pivot in option.
May need other anti-degeneracy measures, especially if we try and use
loose tolerances as a way to solve in fewer iterations.
I like idea of dynamic scaling. This gives opportunity to decouple
different implications of scaling for accuracy, iteration count and
feasibility tolerance.
*/
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// int listSize = numberRows() + numberColumns();
//
//
// QP_BanList = new int[listSize];
// for (int i = 0; i < listSize ; i++)
// QP_BanList[i] = 0;
//
// QP_ExistsBannedVariable = 0;
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
algorithm_ = +1;
moreSpecialOptions_ &= ~16; // clear check replaceColumn accuracy
// save data
ClpDataSave data = saveData();
matrix_->refresh(this); // make sure matrix okay
// Save so can see if doing after dual
int initialStatus=problemStatus_;
int initialIterations = numberIterations_;
int initialNegDjs=-1;
// initialize - maybe values pass and algorithm_ is +1
#if 0
// if so - put in any superbasic costed slacks
if (ifValuesPass&&specialOptions_<0x01000000) {
// Get column copy
const CoinPackedMatrix * columnCopy = matrix();
const int * row = columnCopy->getIndices();
const CoinBigIndex * columnStart = columnCopy->getVectorStarts();
const int * columnLength = columnCopy->getVectorLengths();
//const double * element = columnCopy->getElements();
int n=0;
for (int iColumn = 0;iColumnprimalTolerance_&&
fabs(value-columnUpper_[iColumn])>primalTolerance_) {
int iRow = row[columnStart[iColumn]];
if (getRowStatus(iRow)==basic) {
setRowStatus(iRow,superBasic);
setColumnStatus(iColumn,basic);
n++;
}
}
}
}
}
printf("%d costed slacks put in basis\n",n);
}
#endif
if (!startup(ifValuesPass,startFinishOptions)) {
// Set average theta
nonLinearCost_->setAverageTheta(1.0e3);
int lastCleaned=0; // last time objective or bounds cleaned up
// Say no pivot has occurred (for steepest edge and updates)
pivotRow_=-2;
// This says whether to restore things etc
int factorType=0;
if (problemStatus_<0&&perturbation_<100&&!ifValuesPass) {
perturb(0);
// Can't get here if values pass
assert (!ifValuesPass);
gutsOfSolution(NULL,NULL);
if (handler_->logLevel()>2) {
handler_->message(CLP_SIMPLEX_STATUS,messages_)
<printing(sumPrimalInfeasibilities_>0.0)
<printing(sumDualInfeasibilities_>0.0)
<printing(numberDualInfeasibilitiesWithoutFree_
message()<clear();
}
for (iColumn=0;iColumn<2;iColumn++) {
columnArray_[iColumn]->clear();
}
// give matrix (and model costs and bounds a chance to be
// refreshed (normally null)
matrix_->refresh(this);
// If getting nowhere - why not give it a kick
#if 1
if (perturbation_<101&&numberIterations_>2*(numberRows_+numberColumns_)&&(specialOptions_&4)==0
&&initialStatus!=10) {
perturb(1);
matrix_->rhsOffset(this,true,false);
}
#endif
// If we have done no iterations - special
if (lastGoodIteration_==numberIterations_&&factorType)
factorType=3;
if (saveModel) {
// Doing sprint
if (sequenceIn_<0||numberIterations_>=stopSprint) {
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
if (sequenceIn_<0&&numberIterations_100)
primalColumnPivot_->switchOffSprint();
//lastSprintIteration=numberIterations_;
printf("End small model\n");
}
}
// may factorize, checks if problem finished
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
if (initialStatus==10) {
// cleanup phase
if(initialIterations != numberIterations_) {
if (numberDualInfeasibilities_>10000&&numberDualInfeasibilities_>10*initialNegDjs) {
// getting worse - try perturbing
if (perturbation_<101&&(specialOptions_&4)==0) {
perturb(1);
matrix_->rhsOffset(this,true,false);
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
}
}
} else {
// save number of negative djs
if (!numberPrimalInfeasibilities_)
initialNegDjs=numberDualInfeasibilities_;
// make sure weight won't be changed
if (infeasibilityCost_==1.0e10)
infeasibilityCost_=1.000001e10;
}
}
// See if sprint says redo because of problems
if (numberDualInfeasibilities_==-776) {
// Need new set of variables
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
//lastSprintIteration=numberIterations_;
printf("End small model after\n");
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
}
int numberSprintIterations=0;
int numberSprintColumns = primalColumnPivot_->numberSprintColumns(numberSprintIterations);
if (problemStatus_==777) {
// problems so do one pass with normal
problemStatus_=-1;
originalModel(saveModel);
saveModel=NULL;
// Skip factorization
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,false,saveModel);
statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,ifValuesPass,saveModel);
} else if (problemStatus_<0&&!saveModel&&numberSprintColumns&&firstFree_<0) {
int numberSort=0;
int numberFixed=0;
int numberBasic=0;
reasonableSprintIteration = numberIterations_ + 100;
int * whichColumns = new int[numberColumns_];
double * weight = new double[numberColumns_];
int numberNegative=0;
double sumNegative = 0.0;
// now massage weight so all basic in plus good djs
for (iColumn=0;iColumn-1.0e50) {
numberNegative++;
sumNegative -= dj;
}
weight[iColumn]=dj;
whichColumns[iColumn] = iColumn;
}
handler_->message(CLP_SPRINT,messages_)
<lastObjectiveValue-1.0e-7&&sprintPass>5) {
// switch off
printf("Switching off sprint\n");
primalColumnPivot_->switchOffSprint();
} else {
lastObjectiveValue = objectiveValue()*optimizationDirection_;
// sort
CoinSort_2(weight,weight+numberColumns_,whichColumns);
numberSort = std::min(numberColumns_-numberFixed,numberBasic+numberSprintColumns);
// Sort to make consistent ?
std::sort(whichColumns,whichColumns+numberSort);
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
saveModel = new IClpSimplex(this,numberSort,whichColumns);
// @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
delete [] whichColumns;
delete [] weight;
// Skip factorization
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,false,saveModel);
//statusOfProblemInPrimal(lastCleaned,factorType,&progress_,true,saveModel);
stopSprint = numberIterations_+numberSprintIterations;
printf("Sprint with %d columns for %d iterations\n",
numberSprintColumns,numberSprintIterations);
}
}
// Say good factorization
factorType=1;
// Say no pivot has occurred (for steepest edge and updates)
pivotRow_=-2;
// exit if victory declared
if (problemStatus_>=0)
break;
// test for maximum iterations
if (hitMaximumIterations()||(ifValuesPass==2&&firstFree_<0)) {
problemStatus_=3;
break;
}
if (firstFree_<0) {
if (ifValuesPass) {
// end of values pass
ifValuesPass=0;
int status = eventHandler_->event(ClpEventHandler::endOfValuesPass);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfValuesPass;
break;
}
//#define FEB_TRY
#ifdef FEB_TRY
if (perturbation_<100)
perturb(0);
#endif
}
}
// Check event
{
int status = eventHandler_->event(ClpEventHandler::endOfFactorization);
if (status>=0) {
problemStatus_=5;
secondaryStatus_=ClpEventHandler::endOfFactorization;
break;
}
}
// Iterate
whileIterating(ifValuesPass ? 1 : 0);
}
}
// if infeasible get real values
//printf("XXXXY final cost %g\n",infeasibilityCost_);
progress_.initialWeight_=0.0;
if (problemStatus_==1&&secondaryStatus_!=6) {
infeasibilityCost_=0.0;
createRim(1+4);
delete nonLinearCost_;
nonLinearCost_ = new ClpNonLinearCost(this);
nonLinearCost_->checkInfeasibilities(0.0);
sumPrimalInfeasibilities_=nonLinearCost_->sumInfeasibilities();
numberPrimalInfeasibilities_= nonLinearCost_->numberInfeasibilities();
// and get good feasible duals
computeDuals(NULL);
}
// clean up
unflag();
finish(startFinishOptions);
restoreData(data);
return problemStatus_;
}
/*
Reasons to come out:
-1 iterations etc
-2 inaccuracy
-3 slight inaccuracy (and done iterations)
-4 end of values pass and done iterations
+0 looks optimal (might be infeasible - but we will investigate)
+2 looks unbounded
+3 max iterations
*/
// Inner primal-simplex iteration loop: repeatedly picks an incoming column
// (primalColumn) or, in a values pass, the next superbasic variable
// (nextSuperBasic), then performs the pivot via pivotResult().  Loops while
// problemStatus_==-1 and maps pivotResult() codes onto problemStatus_
// (-2 invert, -5 looks unbounded, 3 max iterations, ...).  Returns one of
// the codes listed in the comment above.
//
// NOTE(review): this file appears to have suffered text loss around '<'
// characters during extraction (spans between '<' and a later '>' were
// stripped as if they were markup).  Lines such as
// "for (iPivot=0;iPivotupdateColumn(...)" and "<saveWeights(this,5);"
// are truncated fragments, not valid C++.  Compare against the upstream
// COIN-OR Clp ClpSimplexPrimal::whileIterating before editing logic here.
int
IClpSimplexPrimal_Wolfe::whileIterating(int valuesOption)
{
// Say if values pass
int ifValuesPass=(firstFree_>=0) ? 1 : 0;
int returnCode=-1;
int superBasicType=1;
if (valuesOption>1)
superBasicType=3;
// status stays at -1 while iterating, >=0 finished, -2 to invert
// status -3 to go to top without an invert
while (problemStatus_==-1) {
//#define CLP_DEBUG 1
#ifdef CLP_DEBUG
{
int i;
// not [1] as has information
for (i=0;i<4;i++) {
if (i!=1)
rowArray_[i]->checkClear();
}
for (i=0;i<2;i++) {
columnArray_[i]->checkClear();
}
}
#endif
#if 0
// NOTE(review): dead debug block; truncated line below ("iPivotupdateColumn")
// lost its loop bound and an intervening statement — see upstream source.
{
int iPivot;
double * array = rowArray_[3]->denseVector();
int * index = rowArray_[3]->getIndices();
int i;
for (iPivot=0;iPivotupdateColumn(rowArray_[2],rowArray_[3]);
int number = rowArray_[3]->getNumElements();
for (i=0;iclear();
}
}
#endif
#if 0
nonLinearCost_->checkInfeasibilities(primalTolerance_);
printf("suminf %g number %d\n",nonLinearCost_->sumInfeasibilities(),
nonLinearCost_->numberInfeasibilities());
#endif
#if CLP_DEBUG>2
// very expensive
if (numberIterations_>0&&numberIterations_<100&&!ifValuesPass) {
handler_->setLogLevel(63);
double saveValue = objectiveValue_;
double * saveRow1 = new double[numberRows_];
double * saveRow2 = new double[numberRows_];
CoinMemcpyN(rowReducedCost_,numberRows_,saveRow1);
CoinMemcpyN(rowActivityWork_,numberRows_,saveRow2);
double * saveColumn1 = new double[numberColumns_];
double * saveColumn2 = new double[numberColumns_];
CoinMemcpyN(reducedCostWork_,numberColumns_,saveColumn1);
CoinMemcpyN(columnActivityWork_,numberColumns_,saveColumn2);
gutsOfSolution(NULL,NULL,false);
printf("xxx %d old obj %g, recomputed %g, sum primal inf %g\n",
numberIterations_,
saveValue,objectiveValue_,sumPrimalInfeasibilities_);
CoinMemcpyN(saveRow1,numberRows_,rowReducedCost_);
CoinMemcpyN(saveRow2,numberRows_,rowActivityWork_);
CoinMemcpyN(saveColumn1,numberColumns_,reducedCostWork_);
CoinMemcpyN(saveColumn2,numberColumns_,columnActivityWork_);
delete [] saveRow1;
delete [] saveRow2;
delete [] saveColumn1;
delete [] saveColumn2;
objectiveValue_=saveValue;
}
#endif
if (!ifValuesPass) {
// choose column to come in
// can use pivotRow_ to update weights
// pass in list of cost changes so can do row updates (rowArray_[1])
// NOTE rowArray_[0] is used by computeDuals which is a
// slow way of getting duals but might be used
primalColumn(rowArray_[1],rowArray_[2],rowArray_[3],
columnArray_[0],columnArray_[1]);
} else {
// in values pass
int sequenceIn=nextSuperBasic(superBasicType,columnArray_[0]);
if (valuesOption>1)
superBasicType=2;
if (sequenceIn<0) {
// end of values pass - initialize weights etc
// NOTE(review): the "<saveWeights(this,5);" fragment below lost the
// stream-insertion chain of the message and the
// "primalColumnPivot_->" receiver — see upstream source.
handler_->message(CLP_END_VALUES_PASS,messages_)
<saveWeights(this,5);
problemStatus_=-2; // factorize now
pivotRow_=-1; // say no weights update
returnCode=-4;
// Clean up
// NOTE(review): the for-loop header below is truncated
// ("for (i=0;iclear();") — loop bound and body receiver lost.
int i;
for (i=0;iclear();
if (sequenceIn_>=0) {
// we found a pivot column
assert (!flagged(sequenceIn_));
#ifdef CLP_DEBUG
if ((handler_->logLevel()&32)) {
char x = isColumn(sequenceIn_) ? 'C' :'R';
std::cout<<"pivot column "<<
x<=0&&checkSequencecheckClear();
rowArray_[3]->checkClear();
double * array = rowArray_[3]->denseVector();
int * index = rowArray_[3]->getIndices();
unpackPacked(rowArray_[3],checkSequence);
factorization_->updateColumnForDebug(rowArray_[2],rowArray_[3]);
int number = rowArray_[3]->getNumElements();
double dualIn = cost_[checkSequence];
int i;
for (i=0;iclear();
if (numberIterations_>2000)
exit(1);
}
}
#endif
// do second half of iteration
returnCode = pivotResult(ifValuesPass);
if (returnCode<-1&&returnCode>-5) {
problemStatus_=-2; //
} else if (returnCode==-5) {
// accuracy problem while factorization has pivots: set the
// "check replaceColumn accuracy" flag and force a re-factorize
if ((moreSpecialOptions_&16)==0&&factorization_->pivots()) {
moreSpecialOptions_ |= 16;
problemStatus_=-2;
}
// otherwise something flagged - continue;
} else if (returnCode==2) {
problemStatus_=-5; // looks unbounded
} else if (returnCode==4) {
problemStatus_=-2; // looks unbounded but has iterated
} else if (returnCode!=-1) {
assert(returnCode==3);
if (problemStatus_!=5)
problemStatus_=3;
}
} else {
// no pivot column
#ifdef CLP_DEBUG
if (handler_->logLevel()&32)
printf("** no column pivot\n");
#endif
if (nonLinearCost_->numberInfeasibilities())
problemStatus_=-4; // might be infeasible
// Force to re-factorize early next time
int numberPivots = factorization_->pivots();
forceFactorization_=std::min(forceFactorization_,(numberPivots+1)>>1);
returnCode=0;
break;
}
}
if (valuesOption>1)
columnArray_[0]->setNumElements(0);
return returnCode;
}
/* Checks if finished. Updates status */
// Called between iteration bursts: optionally re-factorizes (recovering from
// singular factorizations by restoring the saved basis/solution), recomputes
// the solution (gutsOfSolution), detects looping, manages the infeasibility
// cost weight, and finally decides problemStatus_ (-1 continue, 0 optimal,
// 1 infeasible, 3/4/5 odd exits, 10 switch to dual).  Also refreshes the
// saved status/solution arrays used for rollback.
//
// Parameters (as visible here):
//   lastCleaned     - in/out: iteration number when bounds/objective last cleaned
//   type            - factorization type; 2 means "trouble - restore solution"
//   progress        - looping/progress tracker (looping(), oddState(), reset())
//   doFactorization - whether factorization/weight saving should happen
//   ifValuesPass    - nonzero while in a values pass
//   originalModel   - non-NULL while solving a small "sprint" sub-model
//
// NOTE(review): this function, like the rest of the chunk, has lost text
// around '<' characters during extraction (e.g. the truncated loop header
// "for (int i=0;i-3||problemStatus_==-4)", the bare "static_cast (...)"
// casts, and the message-streaming lines around CLP_SIMPLEX_STATUS).
// Compare against upstream COIN-OR Clp ClpSimplexPrimal::statusOfProblemInPrimal
// before changing any logic here.
void
IClpSimplexPrimal_Wolfe::statusOfProblemInPrimal(int & lastCleaned,int type,
ClpSimplexProgress * progress,
bool doFactorization,
int ifValuesPass,
IClpSimplex * originalModel)
{
int dummy; // for use in generalExpanded
int saveFirstFree=firstFree_;
// number of pivots done
int numberPivots = factorization_->pivots();
if (type==2) {
// trouble - restore solution
CoinMemcpyN(saveStatus_,numberColumns_+numberRows_,status_);
CoinMemcpyN(savedSolution_+numberColumns_ ,
numberRows_,rowActivityWork_);
CoinMemcpyN(savedSolution_ ,
numberColumns_,columnActivityWork_);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
forceFactorization_=1; // a bit drastic but ..
pivotRow_=-1; // say no weights update
changeMade_++; // say change made
}
int saveThreshold = factorization_->sparseThreshold();
int tentativeStatus = problemStatus_;
int numberThrownOut=1; // to loop round on bad factorization in values pass
double lastSumInfeasibility=COIN_DBL_MAX;
if (numberIterations_)
lastSumInfeasibility=nonLinearCost_->sumInfeasibilities();
int nPass=0;
// Loop until gutsOfSolution stops throwing variables out of the basis.
// NOTE(review): the line below is truncated — the slack-counting loop body
// and the enclosing "if (problemStatus_>-3||...)" condition were fused.
while (numberThrownOut) {
int nSlackBasic=0;
if (nPass) {
for (int i=0;i-3||problemStatus_==-4) {
// factorize
// later on we will need to recover from singularities
// also we could skip if first time
// do weights
// This may save pivotRow_ for use
if (doFactorization)
primalColumnPivot_->saveWeights(this,1);
if ((type&&doFactorization)||nSlackBasic==numberRows_) {
// is factorization okay?
int factorStatus = internalFactorize(1);
if (factorStatus) {
if (solveType_==2+8) {
// say odd
problemStatus_=5;
return;
}
if (type!=1||largestPrimalError_>1.0e3
||largestDualError_>1.0e3) {
// switch off dense
int saveDense = factorization_->denseThreshold();
factorization_->setDenseThreshold(0);
// Go to safe
factorization_->pivotTolerance(0.99);
// make sure will do safe factorization
pivotVariable_[0]=-1;
internalFactorize(2);
factorization_->setDenseThreshold(saveDense);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
} else {
// no - restore previous basis
// Keep any flagged variables
// NOTE(review): truncated — a status-copying loop and an
// "if (numberPivots<=1)" style guard were lost here.
int i;
for (i=0;i=0&&getStatus(sequenceIn_)!=basic) {
setFlagged(sequenceIn_);
} else if (sequenceOut_>=0&&getStatus(sequenceOut_)!=basic) {
setFlagged(sequenceOut_);
}
double newTolerance = std::max(0.5 + 0.499*randomNumberGenerator_.randomDouble(),factorization_->pivotTolerance());
factorization_->pivotTolerance(newTolerance);
} else {
// Go to safe
factorization_->pivotTolerance(0.99);
}
CoinMemcpyN(savedSolution_+numberColumns_ ,
numberRows_,rowActivityWork_);
CoinMemcpyN(savedSolution_ ,
numberColumns_,columnActivityWork_);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
matrix_->generalExpanded(this,5,dummy);
forceFactorization_=1; // a bit drastic but ..
type = 2;
if (internalFactorize(2)!=0) {
largestPrimalError_=1.0e4; // force other type
}
}
changeMade_++; // say change made
}
}
if (problemStatus_!=-4)
problemStatus_=-3;
}
// at this stage status is -3 or -5 if looks unbounded
// get primal and dual solutions
// put back original costs and then check
// createRim(4); // costs do not change
// May need to do more if column generation
dummy=4;
matrix_->generalExpanded(this,9,dummy);
numberThrownOut=gutsOfSolution(NULL,NULL,(firstFree_>=0));
double sumInfeasibility = nonLinearCost_->sumInfeasibilities();
// If the recomputed solution is much worse (or factorization threw
// variables out), roll back to the saved basis and tighten tolerances.
if (numberThrownOut||
(sumInfeasibility>1.0e7&&sumInfeasibility>100.0*lastSumInfeasibility
&&factorization_->pivotTolerance()<0.11)||(largestPrimalError_>1.0e10&&largestDualError_>1.0e10)) {
problemStatus_=tentativeStatus;
doFactorization=true;
if (numberPivots) {
// go back
numberThrownOut=-1;
// trouble - restore solution
CoinMemcpyN(saveStatus_,numberColumns_+numberRows_,status_);
CoinMemcpyN(savedSolution_+numberColumns_ ,
numberRows_,rowActivityWork_);
CoinMemcpyN(savedSolution_ ,
numberColumns_,columnActivityWork_);
// restore extra stuff
matrix_->generalExpanded(this,6,dummy);
forceFactorization_=1; // a bit drastic but ..
// Go to safe
factorization_->pivotTolerance(0.99);
pivotRow_=-1; // say no weights update
changeMade_++; // say change made
if (numberPivots==1) {
// throw out something
if (sequenceIn_>=0&&getStatus(sequenceIn_)!=basic) {
setFlagged(sequenceIn_);
} else if (sequenceOut_>=0&&getStatus(sequenceOut_)!=basic) {
setFlagged(sequenceOut_);
}
}
numberPivots=0;
}
}
}
// Double check reduced costs if no action
if (progress->lastIterationNumber(0)==numberIterations_) {
if (primalColumnPivot_->looksOptimal()) {
numberDualInfeasibilities_ = 0;
sumDualInfeasibilities_ = 0.0;
}
}
// If in primal and small dj give up
// NOTE(review): "static_cast (…)" below lost its <double> template
// argument to the extraction — see upstream source.
if ((specialOptions_&1024)!=0&&!numberPrimalInfeasibilities_&&numberDualInfeasibilities_) {
double average = sumDualInfeasibilities_/(static_cast (numberDualInfeasibilities_));
if (numberIterations_>300&&average<1.0e-4) {
numberDualInfeasibilities_ = 0;
sumDualInfeasibilities_ = 0.0;
}
}
// Check if looping
int loop;
if (type!=2&&!ifValuesPass)
loop = progress->looping();
else
loop=-1;
if (loop>=0) {
if (!problemStatus_) {
// declaring victory
numberPrimalInfeasibilities_ = 0;
sumPrimalInfeasibilities_ = 0.0;
} else {
problemStatus_ = loop; //exit if in loop
problemStatus_ = 10; // instead - try other algorithm
numberPrimalInfeasibilities_ = nonLinearCost_->numberInfeasibilities();
}
problemStatus_ = 10; // instead - try other algorithm
return ;
} else if (loop<-1) {
// Is it time for drastic measures
if (nonLinearCost_->numberInfeasibilities()&&progress->badTimes()>5&&
progress->oddState()<10&&progress->oddState()>=0) {
progress->newOddState();
nonLinearCost_->zapCosts();
}
// something may have changed
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
// If progress then reset costs
if (loop==-1&&!nonLinearCost_->numberInfeasibilities()&&progress->oddState()<0) {
createRim(4,false); // costs back
delete nonLinearCost_;
nonLinearCost_ = new ClpNonLinearCost(this);
progress->endOddState();
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
// Flag to say whether to go to dual to clean up
bool goToDual=false;
// really for free variables in
//if((progressFlag_&2)!=0)
//problemStatus_=-1;;
progressFlag_ = 0; //reset progress flag
// NOTE(review): the CLP_SIMPLEX_STATUS message lines below are heavily
// truncated (the '<<' streaming chains were stripped); the fused
// "}"-less fragments down to "message()<checkInfeasibilities(...)"
// originally included a finish of the message and a tolerance block.
handler_->message(CLP_SIMPLEX_STATUS,messages_)
<feasibleReportCost();
handler_->printing(nonLinearCost_->numberInfeasibilities()>0)
<sumInfeasibilities()<numberInfeasibilities();
handler_->printing(sumDualInfeasibilities_>0.0)
<printing(numberDualInfeasibilitiesWithoutFree_
message()<checkInfeasibilities(primalTolerance_);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
nonLinearCost_->checkInfeasibilities(primalTolerance_);
}
// First time the problem goes infeasible with the default weight:
// estimate a good infeasibility cost from the ratio of feasible to
// infeasible reduced costs, using temporary dj_/cost_ arrays.
if (nonLinearCost_->numberInfeasibilities()>0&&!progress->initialWeight_&&!ifValuesPass&&infeasibilityCost_==1.0e10) {
// first time infeasible - start up weight computation
double * oldDj = dj_;
double * oldCost = cost_;
int numberRows2 = numberRows_+numberExtraRows_;
int numberTotal = numberRows2+numberColumns_;
dj_ = new double[numberTotal];
cost_ = new double[numberTotal];
reducedCostWork_ = dj_;
rowReducedCost_ = dj_+numberColumns_;
objectiveWork_ = cost_;
rowObjectiveWork_ = cost_+numberColumns_;
double direction = optimizationDirection_*objectiveScale_;
const double * obj = objective();
memset(rowObjectiveWork_,0,numberRows_*sizeof(double));
int iSequence;
// NOTE(review): the scaling loop header below is truncated — the
// cost-filling loops and the dual computation that originally sat
// between it and the classification code were lost.
if (columnScale_)
for (iSequence=0;iSequenceprimalTolerance_) {
// Check if "free"
if (distanceDown>primalTolerance_) {
// free
if (value>dualTolerance_) {
numberFreeSame++;
} else if(value<-dualTolerance_) {
numberFreeDifferent++;
dj_[n++] = feasibleDj/infeasibleDj;
} else {
numberFreeZero++;
}
} else {
// should not be negative
if (value>dualTolerance_) {
numberSame++;
} else if(value<-dualTolerance_) {
numberDifferent++;
dj_[n++] = feasibleDj/infeasibleDj;
} else {
numberZero++;
}
}
} else if (distanceDown>primalTolerance_) {
// should not be positive
if (value>dualTolerance_) {
numberSame++;
} else if(value<-dualTolerance_) {
numberDifferent++;
dj_[n++] = feasibleDj/infeasibleDj;
} else {
numberZero++;
}
}
}
progress->initialWeight_=-1.0;
}
//printf("XXXX %d same, %d different, %d zero, -- free %d %d %d\n",
// numberSame,numberDifferent,numberZero,
// numberFreeSame,numberFreeDifferent,numberFreeZero);
// we want most to be same
if (n) {
double most = 0.95;
std::sort(dj_,dj_+n);
int which = static_cast ((1.0-most)*static_cast (n));
double take = -dj_[which]*infeasibilityCost_;
//printf("XXXXZ inf cost %g take %g (range %g %g)\n",infeasibilityCost_,take,-dj_[0]*infeasibilityCost_,-dj_[n-1]*infeasibilityCost_);
take = -dj_[0]*infeasibilityCost_;
infeasibilityCost_ = std::min(std::max(1000.0*take,1.0e8),1.0000001e10);;
//printf("XXXX increasing weight to %g\n",infeasibilityCost_);
}
// restore the real dj_/cost_ arrays and working pointers
delete [] dj_;
delete [] cost_;
dj_= oldDj;
cost_ = oldCost;
reducedCostWork_ = dj_;
rowReducedCost_ = dj_+numberColumns_;
objectiveWork_ = cost_;
rowObjectiveWork_ = cost_+numberColumns_;
if (n)
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
double trueInfeasibility =nonLinearCost_->sumInfeasibilities();
if (!nonLinearCost_->numberInfeasibilities()&&infeasibilityCost_==1.0e10&&!ifValuesPass&&true) {
// relax if default
infeasibilityCost_ = std::min(std::max(100.0*sumDualInfeasibilities_,1.0e8),1.00000001e10);
// reset looping criterion
progress->reset();
trueInfeasibility = 1.123456e10;
}
// If the (weighted) objective has stopped improving, re-factorize more
// often; if infeasibility is growing, bump the infeasibility weight.
// NOTE(review): the comparisons "lastObjlogLevel()", "lastObj3logLevel()"
// and "lastInfreset()" below are truncated fusions of an if-condition and
// the first statement of its body — see upstream source.
if (trueInfeasibility>1.0) {
// If infeasibility going up may change weights
double testValue = trueInfeasibility-1.0e-4*(10.0+trueInfeasibility);
double lastInf = progress->lastInfeasibility(1);
double lastInf3 = progress->lastInfeasibility(3);
double thisObj = progress->lastObjective(0);
double thisInf = progress->lastInfeasibility(0);
thisObj += infeasibilityCost_*2.0*thisInf;
double lastObj = progress->lastObjective(1);
lastObj += infeasibilityCost_*2.0*lastInf;
double lastObj3 = progress->lastObjective(3);
lastObj3 += infeasibilityCost_*2.0*lastInf3;
if (lastObjlogLevel()==63)
printf("lastobj %g this %g force %d ",lastObj,thisObj,forceFactorization_);
int maxFactor = factorization_->maximumPivots();
if (maxFactor>10) {
if (forceFactorization_<0)
forceFactorization_= maxFactor;
forceFactorization_ = std::max(1,(forceFactorization_>>2));
if (handler_->logLevel()==63)
printf("Reducing factorization frequency to %d\n",forceFactorization_);
}
} else if (lastObj3logLevel()==63)
printf("lastobj3 %g this3 %g `force %d ",lastObj3,thisObj,forceFactorization_);
int maxFactor = factorization_->maximumPivots();
if (maxFactor>10) {
if (forceFactorization_<0)
forceFactorization_= maxFactor;
forceFactorization_ = std::max(1,(forceFactorization_*2)/3);
if (handler_->logLevel()==63)
printf("Reducing factorization frequency to %d\n",forceFactorization_);
}
} else if(lastInfreset();
if (handler_->logLevel()==63)
printf("increasing weight to %g\n",infeasibilityCost_);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
}
}
}
// we may wish to say it is optimal even if infeasible
bool alwaysOptimal = (specialOptions_&1)!=0;
// give code benefit of doubt
if (sumOfRelaxedDualInfeasibilities_ == 0.0&&
sumOfRelaxedPrimalInfeasibilities_ == 0.0) {
// say optimal (with these bounds etc)
numberDualInfeasibilities_ = 0;
sumDualInfeasibilities_ = 0.0;
numberPrimalInfeasibilities_ = 0;
sumPrimalInfeasibilities_ = 0.0;
// But check if in sprint
if (originalModel) {
// Carry on and re-do
numberDualInfeasibilities_ = -776;
}
// But if real primal infeasibilities nonzero carry on
if (nonLinearCost_->numberInfeasibilities()) {
// most likely to happen if infeasible
double relaxedToleranceP=primalTolerance_;
// we can't really trust infeasibilities if there is primal error
double error = std::min(1.0e-2,largestPrimalError_);
// allow tolerance at least slightly bigger than standard
relaxedToleranceP = relaxedToleranceP + error;
int ninfeas = nonLinearCost_->numberInfeasibilities();
double sum = nonLinearCost_->sumInfeasibilities();
double average = sum/ static_cast (ninfeas);
#ifdef COIN_DEVELOP
if (handler_->logLevel()>0)
printf("nonLinearCost says infeasible %d summing to %g\n",
ninfeas,sum);
#endif
if (average>relaxedToleranceP) {
sumOfRelaxedPrimalInfeasibilities_ = sum;
numberPrimalInfeasibilities_ = ninfeas;
sumPrimalInfeasibilities_ = sum;
#ifdef COIN_DEVELOP
bool unflagged =
#endif
unflag();
#ifdef COIN_DEVELOP
if (unflagged&&handler_->logLevel()>0)
printf(" - but flagged variables\n");
#endif
}
}
}
// had ||(type==3&&problemStatus_!=-5) -- ??? why ????
if ((dualFeasible()||problemStatus_==-4)&&!ifValuesPass) {
// see if extra helps
if (nonLinearCost_->numberInfeasibilities()&&
(nonLinearCost_->sumInfeasibilities()>1.0e-3||sumOfRelaxedPrimalInfeasibilities_)
&&!alwaysOptimal) {
//may need infeasiblity cost changed
// we can see if we can construct a ray
// make up a new objective
double saveWeight = infeasibilityCost_;
// save nonlinear cost as we are going to switch off costs
ClpNonLinearCost * nonLinear = nonLinearCost_;
// do twice to make sure Primal solution has settled
// put non-basics to bounds in case tolerance moved
// put back original costs
createRim(4);
nonLinearCost_->checkInfeasibilities(0.0);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
infeasibilityCost_=1.0e100;
// put back original costs
createRim(4);
nonLinearCost_->checkInfeasibilities(primalTolerance_);
// may have fixed infeasibilities - double check
if (nonLinearCost_->numberInfeasibilities()==0) {
// carry on
problemStatus_ = -1;
infeasibilityCost_=saveWeight;
nonLinearCost_->checkInfeasibilities(primalTolerance_);
} else {
// NOTE(review): truncated region — a scaling loop, the restore of
// nonLinear into nonLinearCost_, and the
// "if ((infeasibilityCost_>=1.0e18||" condition were fused here.
nonLinearCost_=NULL;
// scale
int i;
for (i=0;i=1.0e18||
numberDualInfeasibilities_==0)&&perturbation_==101) {
goToDual=unPerturb(); // stop any further perturbation
if (nonLinearCost_->sumInfeasibilities()>1.0e-1)
goToDual=false;
nonLinearCost_->checkInfeasibilities(primalTolerance_);
numberDualInfeasibilities_=1; // carry on
problemStatus_=-1;
} else if (numberDualInfeasibilities_==0&&largestDualError_>1.0e-2) {
goToDual=true;
factorization_->pivotTolerance(std::max(0.9,factorization_->pivotTolerance()));
}
if (!goToDual) {
if (infeasibilityCost_>=1.0e20||
numberDualInfeasibilities_==0) {
// we are infeasible - use as ray
delete [] ray_;
ray_ = new double [numberRows_];
CoinMemcpyN(dual_,numberRows_,ray_);
// and get feasible duals
infeasibilityCost_=0.0;
createRim(4);
nonLinearCost_->checkInfeasibilities(primalTolerance_);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
// so will exit
infeasibilityCost_=1.0e30;
// reset infeasibilities
sumPrimalInfeasibilities_=nonLinearCost_->sumInfeasibilities();;
numberPrimalInfeasibilities_=
nonLinearCost_->numberInfeasibilities();
}
if (infeasibilityCost_<1.0e20) {
infeasibilityCost_ *= 5.0;
// reset looping criterion
progress->reset();
changeMade_++; // say change made
// NOTE(review): the CLP_PRIMAL_WEIGHT message chain below is
// truncated; it originally streamed infeasibilityCost_ and ended
// the message before checkInfeasibilities(0.0).
handler_->message(CLP_PRIMAL_WEIGHT,messages_)
<checkInfeasibilities(0.0);
gutsOfSolution(NULL,NULL,ifValuesPass!=0);
problemStatus_=-1; //continue
goToDual=false;
} else {
// say infeasible
problemStatus_ = 1;
}
}
}
} else {
// may be optimal
if (perturbation_==101) {
goToDual=unPerturb(); // stop any further perturbation
if (numberRows_>20000&&!numberTimesOptimal_)
goToDual=0; // Better to carry on a bit longer
lastCleaned=-1; // carry on
}
bool unflagged = (unflag()!=0);
// NOTE(review): large truncated region below — the CLP_PRIMAL_OPTIMAL
// message, the tolerance-tightening block, and several nested
// conditionals were fused by the extraction; see upstream source.
if ( lastCleaned!=numberIterations_||unflagged) {
handler_->message(CLP_PRIMAL_OPTIMAL,messages_)
<zeroTolerance(std::min(factorization_->zeroTolerance(),1.0e-15));
}
lastCleaned=numberIterations_;
if (primalTolerance_!=dblParam_[ClpPrimalTolerance])
handler_->message(CLP_PRIMAL_ORIGINAL,messages_)
<checkInfeasibilities(oldTolerance);
#if 0
int i;
for (i=0;inumberInfeasibilities()&&lastCleaned>=0)
problemStatus_=0;
else
problemStatus_ = -1;
} else {
problemStatus_=0; // optimal
if (lastCleanedmessage(CLP_SIMPLEX_GIVINGUP,messages_)
<numberInfeasibilities()) {
if (infeasibilityCost_>1.0e18&&perturbation_==101) {
// back off weight
infeasibilityCost_ = 1.0e13;
// reset looping criterion
progress->reset();
unPerturb(); // stop any further perturbation
}
//we need infeasiblity cost changed
if (infeasibilityCost_<1.0e20) {
infeasibilityCost_ *= 5.0;
// reset looping criterion
progress->reset();
changeMade_++; // say change made
handler_->message(CLP_PRIMAL_WEIGHT,messages_)
< (numberDualInfeasibilities_))<1.0e-5||
progress->lastIterationNumber(0)==numberIterations_) {
if (!numberPrimalInfeasibilities_) {
if (numberTimesOptimal_<4) {
numberTimesOptimal_++;
changeMade_++; // say change made
} else {
problemStatus_=0;
secondaryStatus_=5;
}
}
}
}
}
}
// If declared optimal, trust nonLinearCost_'s feasible objective when it
// disagrees materially with the running objectiveValue_.
if (problemStatus_==0) {
double objVal = nonLinearCost_->feasibleCost();
double tol = 1.0e-10*std::max(fabs(objVal),fabs(objectiveValue_))+1.0e-8;
if (fabs(objVal-objectiveValue_)>tol) {
#ifdef COIN_DEVELOP
if (handler_->logLevel()>0)
printf("nonLinearCost has feasible obj of %g, objectiveValue_ is %g\n",
objVal,objectiveValue_);
#endif
objectiveValue_ = objVal;
}
}
// save extra stuff
matrix_->generalExpanded(this,5,dummy);
if (type==0||type==1) {
if (type!=1||!saveStatus_) {
// create save arrays
delete [] saveStatus_;
delete [] savedSolution_;
saveStatus_ = new unsigned char [numberRows_+numberColumns_];
savedSolution_ = new double [numberRows_+numberColumns_];
}
// save arrays
CoinMemcpyN(status_,numberColumns_+numberRows_,saveStatus_);
CoinMemcpyN(rowActivityWork_,
numberRows_,savedSolution_+numberColumns_);
CoinMemcpyN(columnActivityWork_,numberColumns_,savedSolution_);
}
// see if in Cbc etc
bool inCbcOrOther = (specialOptions_&0x03000000)!=0;
bool disaster=false;
if (disasterArea_&&inCbcOrOther&&disasterArea_->check()) {
disasterArea_->saveInfo();
disaster=true;
}
if (disaster)
problemStatus_=3;
if (problemStatus_<0&&!changeMade_) {
problemStatus_=4; // unknown
}
lastGoodIteration_ = numberIterations_;
if (numberIterations_>lastBadIteration_+100)
moreSpecialOptions_ &= ~16; // clear check accuracy flag
if (goToDual)
problemStatus_=10; // try dual
// make sure first free monotonic
if (firstFree_>=0&&saveFirstFree>=0) {
firstFree_=saveFirstFree;
nextSuperBasic(1,NULL);
}
if (doFactorization) {
// restore weights (if saved) - also recompute infeasibility list
if (tentativeStatus>-3)
primalColumnPivot_->saveWeights(this,(type <2) ? 2 : 4);
else
primalColumnPivot_->saveWeights(this,3);
if (saveThreshold) {
// use default at present
factorization_->sparseThreshold(0);
factorization_->goSparse();
}
}
// Allow matrices to be sorted etc
int fake=-999; // signal sort
matrix_->correctSequence(this,fake,fake);
}
/*
Row array has pivot column
This chooses pivot row.
For speed, we may need to go to a bucket approach when many
variables go through bounds
On exit rhsArray will have changes in costs of basic variables
*/
/*
  Primal ratio test.  rowArray holds the updated pivot column; this routine
  chooses the pivot row (and hence the leaving variable), setting pivotRow_,
  theta_, alpha_, sequenceOut_, valueOut_, lowerOut_, upperOut_, dualOut_ and
  directionOut_.  pivotRow_ = -2 signals a bound flip of the entering
  variable; pivotRow_ < 0 with maximumMovement huge signals unboundedness.
  On exit rhsArray holds changes in costs of basic variables.

  NOTE(review): this copy appears textually corrupted relative to upstream
  ClpSimplexPrimal::primalRow — several for-loop headers are fused with later
  statements, some comparisons have lost their '<' and right-hand side, and
  some declarations (totalThru, numberRemaining, iPivot, alpha, oldValue,
  iRow, iBest, trueAlpha, newValue ...) are missing.  Garbled spots are
  flagged below; compare against upstream before relying on this text.
*/
void
IClpSimplexPrimal_Wolfe::primalRow(CoinIndexedVector * rowArray,
CoinIndexedVector * rhsArray,
CoinIndexedVector * spareArray,
CoinIndexedVector * spareArray2,
int valuesPass)
{
// Keep entering dj so it can be restored for nonlinear objectives below.
double saveDj = dualIn_;
// On a values pass (linear objective only) recompute the entering reduced
// cost directly from the cost row and the updated column.
if (valuesPass&&objective_->type()<2) {
dualIn_ = cost_[sequenceIn_];
double * work=rowArray->denseVector();
int number=rowArray->getNumElements();
int * which=rowArray->getIndices();
int iIndex;
// NOTE(review): loop header truncated — the body accumulating into dualIn_
// and the subsequent dual-tolerance test have been fused onto one line.
for (iIndex=0;iIndexdualTolerance_) {
directionIn_=-1;
} else {
// towards nearest bound
// NOTE(review): condition garbled ("valueIn_-lowerIn_100") — upstream
// compares distances to the two bounds and sets acceptablePivot.
if (valueIn_-lowerIn_100)
acceptablePivot=acceptablePivot_;
// Tighten the acceptable-pivot threshold the longer we have gone without
// refactorizing, to protect numerical accuracy.
if (factorization_->pivots()>10)
acceptablePivot=1.0e+3*acceptablePivot_; // if we have iterated be more strict
else if (factorization_->pivots()>5)
acceptablePivot=1.0e+2*acceptablePivot_; // if we have iterated be slightly more strict
else if (factorization_->pivots())
acceptablePivot=acceptablePivot_; // relax
double bestEverPivot=acceptablePivot;
// Remember the best candidate from the previous sweep so we can fall back.
int lastPivotRow = -1;
double lastPivot=0.0;
double lastTheta=1.0e50;
// use spareArrays to put ones looked at in
// First one is list of candidates
// We could compress if we really know we won't need any more
// Second array has current set of pivot candidates
// with a backup list saved in double * part of indexed vector
// pivot elements
double * spare;
// indices
int * index;
spareArray->clear();
spare = spareArray->denseVector();
index = spareArray->getIndices();
// we also need somewhere for effective rhs
double * rhs=rhsArray->denseVector();
// and we can use indices to point to alpha
// that way we can store fabs(alpha)
int * indexPoint = rhsArray->getIndices();
//int numberFlip=0; // Those which may change if flips
/*
First we get a list of possible pivots. We can also see if the
problem looks unbounded.
At first we increase theta and see what happens. We start
theta at a reasonable guess. If in right area then we do bit by bit.
We save possible pivot candidates
*/
// do first pass to get possibles
// We can also see if unbounded
double * work=rowArray->denseVector();
int number=rowArray->getNumElements();
int * which=rowArray->getIndices();
// we need to swap sign if coming in from ub
double way = directionIn_;
// Maximum step before the entering variable hits its own opposite bound.
double maximumMovement;
if (way>0.0)
maximumMovement = std::min(1.0e30,upperIn_-valueIn_);
else
maximumMovement = std::min(1.0e30,valueIn_-lowerIn_);
// Start from a guess based on the running average step length.
double averageTheta = nonLinearCost_->averageTheta();
double tentativeTheta = std::min(10.0*averageTheta,maximumMovement);
double upperTheta = maximumMovement;
if (tentativeTheta>0.5*maximumMovement)
tentativeTheta=maximumMovement;
bool thetaAtMaximum=tentativeTheta==maximumMovement;
// In case tiny bounds increase
if (maximumMovement<1.0)
tentativeTheta *= 1.1;
// Slightly pessimistic |dj| target used to decide when enough "weight"
// of pivots has been passed through.
double dualCheck = fabs(dualIn_);
// but make a bit more pessimistic
dualCheck=std::max(dualCheck-100.0*dualTolerance_,0.99*dualCheck);
int iIndex;
int pivotOne=-1;
//#define CLP_DEBUG
#ifdef CLP_DEBUG
if (numberIterations_==-3839||numberIterations_==-3840) {
double dj=cost_[sequenceIn_];
printf("cost in on %d is %g, dual in %g\n",sequenceIn_,dj,dualIn_);
// NOTE(review): debug loop header truncated — the per-row body that
// computed iRow/iPivot/alpha before this printf is missing.
for (iIndex=0;iIndex %g (cost %g, dj %g)\n",
iRow,iPivot,lower_[iPivot],solution_[iPivot],upper_[iPivot],
alpha, solution_[iPivot]-1.0e9*alpha,cost_[iPivot],dj);
}
}
#endif
// First-pass loop: grow tentativeTheta until we either find a blocking
// candidate set or establish that theta can go to maximumMovement.
while (true) {
pivotOne=-1;
totalThru=0.0;
// We also re-compute reduced cost
numberRemaining=0;
dualIn_ = cost_[sequenceIn_];
#ifndef NDEBUG
double tolerance = primalTolerance_*1.002;
#endif
// NOTE(review): loop header truncated — the body computing iRow, iPivot,
// alpha and oldValue before this direction test is missing.
for (iIndex=0;iIndex0.0) {
// basic variable going towards lower bound
double bound = lower_[iPivot];
// must be exactly same as when used
double change = tentativeTheta*alpha;
// Can this basic reach its bound within the tentative step?
possible = (oldValue-change)<=bound+primalTolerance_;
oldValue -= bound;
} else {
// basic variable going towards upper bound
double bound = upper_[iPivot];
// must be exactly same as when used
double change = tentativeTheta*alpha;
possible = (oldValue-change)>=bound-primalTolerance_;
// Normalize so oldValue is distance-to-bound and alpha is positive-sense.
oldValue = bound-oldValue;
alpha = - alpha;
}
double value;
assert (oldValue>=-tolerance);
if (possible) {
// Track the tightest ratio seen so far with an acceptable pivot.
value=oldValue-upperTheta*alpha;
if (value<-primalTolerance_&&alpha>=acceptablePivot) {
upperTheta = (oldValue+primalTolerance_)/alpha;
pivotOne=numberRemaining;
}
// add to list
spare[numberRemaining]=alpha;
rhs[numberRemaining]=oldValue;
indexPoint[numberRemaining]=iIndex;
index[numberRemaining++]=iRow;
totalThru += alpha;
setActive(iRow);
//} else if (value=1.0001*dualCheck) {
// Can pivot here
// NOTE(review): the if-condition guarding this break (and the loop/brace
// structure around it) appears truncated here.
break;
} else if (!thetaAtMaximum) {
//printf("Going round with average theta of %g\n",averageTheta);
tentativeTheta=maximumMovement;
thetaAtMaximum=true; // seems to be odd compiler error
} else {
break;
}
}
totalThru=0.0;
theta_=maximumMovement;
bool goBackOne = false;
// For nonlinear objectives restore the dj saved on entry.
if (objective_->type()>1)
dualIn_=saveDj;
//printf("%d remain out of %d\n",numberRemaining,number);
int iTry=0;
#define MAXTRY 1000
// NOTE(review): condition garbled ("upperTheta=0&&0") — upstream guards on
// numberRemaining && upperTheta < maximumMovement; the "&&0" disables the
// early "pivot straight away" branch below.
if (numberRemaining&&upperTheta=0&&0) {
double thruCost = infeasibilityCost_*spare[pivotOne];
if (thruCost>=0.99*fabs(dualIn_))
printf("Could pivot on %d as change %g dj %g\n",
index[pivotOne],thruCost,dualIn_);
double alpha = spare[pivotOne];
double oldValue = rhs[pivotOne];
theta_ = oldValue/alpha;
pivotRow_=pivotOne;
// Stop loop
iTry=MAXTRY;
}
// first get ratio with tolerance
// NOTE(review): loop header truncated — the Harris-style pass that scans
// candidates and recomputes upperTheta has been fused onto this line.
for ( ;iTry=acceptablePivot) {
upperTheta = (oldValue+primalTolerance_)/alpha;
iBest=iIndex; // just in case weird numbers
}
}
// now look at best in this lot
// But also see how infeasible small pivots will make
double sumInfeasibilities=0.0;
double bestPivot=acceptablePivot;
pivotRow_=-1;
// NOTE(review): loop header truncated — the body computing iRow, alpha,
// oldValue and trueAlpha before this changeInCost call is missing.
for (iIndex=0;iIndexchangeInCost(pivotVariable_[iRow],trueAlpha,rhs[iIndex]);
setActive(iRow);
// Among candidates within the relaxed ratio, take the largest |alpha|.
if (alpha>bestPivot) {
bestPivot=alpha;
theta_ = oldValue/bestPivot;
pivotRow_=iIndex;
// NOTE(review): condition garbled ("alpha1.0e-6") — an operator and
// operand were lost between alpha and the literal.
} else if (alpha1.0e-6&& bestPivot<1.0e-3) {
// back to previous one
goBackOne = true;
break;
} else if (pivotRow_==-1&&upperTheta>largeValue_) {
if (lastPivot>acceptablePivot) {
// back to previous one
goBackOne = true;
} else {
// can only get here if all pivots so far too small
}
break;
} else if (totalThru>=dualCheck) {
// Enough pivot weight accumulated to flip the entering dj sign.
if (sumInfeasibilities>primalTolerance_&&!nonLinearCost_->numberInfeasibilities()) {
// Looks a bad choice
if (lastPivot>acceptablePivot) {
goBackOne=true;
} else {
// say no good
dualIn_=0.0;
}
}
break; // no point trying another loop
} else {
// Remember this sweep's choice, then widen the ratio and try again.
lastPivotRow=pivotRow_;
lastTheta = theta_;
if (bestPivot>bestEverPivot)
bestEverPivot=bestPivot;
}
}
// can get here without pivotRow_ set but with lastPivotRow
if (goBackOne||(pivotRow_<0&&lastPivotRow>=0)) {
// back to previous one
pivotRow_=lastPivotRow;
theta_ = lastTheta;
}
} else if (pivotRow_<0&&maximumMovement>1.0e20) {
// looks unbounded
valueOut_=COIN_DBL_MAX; // say odd
if (nonLinearCost_->numberInfeasibilities()) {
// but infeasible??
// move variable but don't pivot
tentativeTheta=1.0e50;
// NOTE(review): loop header truncated — body computing iRow, iPivot,
// alpha and oldValue before this direction test is missing.
for (iIndex=0;iIndex0.0) {
// basic variable going towards lower bound
double bound = lower_[iPivot];
oldValue -= bound;
} else {
// basic variable going towards upper bound
double bound = upper_[iPivot];
oldValue = bound-oldValue;
alpha = - alpha;
}
if (oldValue-tentativeTheta*alpha<0.0) {
tentativeTheta = oldValue/alpha;
}
}
// If free in then see if we can get to 0.0
if (lowerIn_<-1.0e20&&upperIn_>1.0e20) {
if (dualIn_*valueIn_>0.0) {
// NOTE(review): condition garbled ("tentativeTheta1.0e20") — an
// operator was lost between tentativeTheta and the literal.
if (fabs(valueIn_)<1.0e-2&&(tentativeTheta1.0e20)) {
tentativeTheta = fabs(valueIn_);
}
}
}
if (tentativeTheta<1.0e10)
valueOut_=valueIn_+way*tentativeTheta;
}
}
//if (iTry>50)
//printf("** %d tries\n",iTry);
if (pivotRow_>=0) {
// pivotRow_ so far is a position in the candidate list; translate it to
// a real row, then to a sequence number.
int position=pivotRow_; // position in list
pivotRow_=index[position];
alpha_=work[indexPoint[position]];
// translate to sequence
sequenceOut_ = pivotVariable_[pivotRow_];
valueOut_ = solution(sequenceOut_);
lowerOut_=lower_[sequenceOut_];
upperOut_=upper_[sequenceOut_];
#define MINIMUMTHETA 1.0e-12
// Movement should be minimum for anti-degeneracy - unless
// fixed variable out
double minimumTheta;
if (upperOut_>lowerOut_)
minimumTheta=MINIMUMTHETA;
else
minimumTheta=0.0;
// But can't go infeasible
double distance;
if (alpha_*way>0.0)
distance=valueOut_-lowerOut_;
else
distance=upperOut_-valueOut_;
if (distance-minimumTheta*fabs(alpha_)<-primalTolerance_)
minimumTheta = std::max(0.0,(distance+0.5*primalTolerance_)/fabs(alpha_));
// will we need to increase tolerance
//#define CLP_DEBUG
double largestInfeasibility = primalTolerance_;
// NOTE(review): garbled region — "theta_primalTolerance_" lost an
// operator/operand, and the #endif below has no visible matching #if
// (the CLP_DEBUG conditional opening was lost in this copy).
if (theta_primalTolerance_&&(handler_->logLevel()&32)>-1)
printf("Primal tolerance increased from %g to %g\n",
primalTolerance_,largestInfeasibility);
#endif
//#undef CLP_DEBUG
primalTolerance_ = std::max(primalTolerance_,largestInfeasibility);
}
// Need to look at all in some cases
// NOTE(review): garbled region — this loop header, the code choosing
// directionOut_/newValue, and the matching upper-bound branch opening were
// fused together; upstream sets directionOut_ from the leaving direction.
if (theta_>tentativeTheta) {
for (iIndex=0;iIndex1.0e-6||(specialOptions_&4)!=0) {
upperOut_ = nonLinearCost_->nearest(sequenceOut_,newValue);
} else {
upperOut_ = newValue;
}
} else {
directionOut_=1; // to lower bound
if (fabs(theta_)>1.0e-6||(specialOptions_&4)!=0) {
lowerOut_ = nonLinearCost_->nearest(sequenceOut_,newValue);
} else {
lowerOut_ = newValue;
}
}
dualOut_ = reducedCost(sequenceOut_);
} else if (maximumMovement<1.0e20) {
// flip
// No blocking basic row: the entering variable moves to its far bound.
pivotRow_ = -2; // so we can tell its a flip
sequenceOut_ = sequenceIn_;
valueOut_ = valueIn_;
dualOut_ = dualIn_;
lowerOut_ = lowerIn_;
upperOut_ = upperIn_;
alpha_ = 0.0;
if (way<0.0) {
directionOut_=1; // to lower bound
theta_ = lowerOut_ - valueOut_;
} else {
directionOut_=-1; // to upper bound
theta_ = upperOut_ - valueOut_;
}
}
// Update the running average step length used to seed future ratio tests.
double theta1 = std::max(theta_,1.0e-12);
double theta2 = numberIterations_*nonLinearCost_->averageTheta();
// Set average theta
// NOTE(review): "static_cast (numberIterations_+1)" lost its template
// argument (upstream: static_cast<double>).
nonLinearCost_->setAverageTheta((theta1+theta2)/(static_cast (numberIterations_+1)));
//if (numberIterations_%1000==0)
//printf("average theta is %g\n",nonLinearCost_->averageTheta());
// clear arrays
CoinZeroN(spare,numberRemaining);
// put back original bounds etc
CoinMemcpyN(index,numberRemaining,rhsArray->getIndices());
rhsArray->setNumElements(numberRemaining);
rhsArray->setPacked();
nonLinearCost_->goBackAll(rhsArray);
rhsArray->clear();
}
/*
Chooses primal pivot column
updateArray has cost updates (also use pivotRow_ from last iteration)
Would be faster with separate region to scan
and will have this (with square of infeasibility) when steepest
For easy problems we can just choose one of the first columns we look at
*/
/*
  Chooses the primal pivot column (entering variable) by delegating to the
  current column-pivot strategy, then fills in valueIn_, dualIn_, lowerIn_,
  upperIn_ and directionIn_ for the chosen sequence.  If a scaled copy of
  the matrix exists it is presented to the pricing routine (with row scale
  suppressed so scaling is not applied twice) and restored afterwards.
  sequenceIn_ is -1 when no candidate column is found.
*/
void
IClpSimplexPrimal_Wolfe::primalColumn(CoinIndexedVector * updates,
			       CoinIndexedVector * spareRow1,
			       CoinIndexedVector * spareRow2,
			       CoinIndexedVector * spareColumn1,
			       CoinIndexedVector * spareColumn2)
{
  // Swap in the pre-scaled matrix for pricing, remembering the originals.
  ClpMatrixBase * unscaledMatrix = matrix_;
  double * unscaledRowScale = rowScale_;
  if (scaledMatrix_) {
    matrix_ = scaledMatrix_;
    rowScale_ = NULL;
  }
  sequenceIn_ = primalColumnPivot_->pivotColumn(updates, spareRow1,
                                                spareRow2, spareColumn1,
                                                spareColumn2);
  if (scaledMatrix_) {
    // Put the unscaled view back regardless of the pricing outcome.
    matrix_ = unscaledMatrix;
    rowScale_ = unscaledRowScale;
  }
  if (sequenceIn_ < 0) {
    // Nothing attractive to bring in.
    sequenceIn_ = -1;
    return;
  }
  valueIn_ = solution_[sequenceIn_];
  dualIn_ = dj_[sequenceIn_];
  if (nonLinearCost_->lookBothWays()) {
    // double check
    // With piecewise-linear costs the dj can have the "wrong" sign for the
    // bound we are at; if so, adjust the dj by the cost change of crossing
    // the breakpoint, nudge the variable past it, and flip its status.
    IClpSimplex::Status status = getStatus(sequenceIn_);
    if (status == IClpSimplex::atUpperBound && dualIn_ < 0.0) {
      // move to other side
      printf("For %d U (%g, %g, %g) dj changed from %g",
	     sequenceIn_,lower_[sequenceIn_],solution_[sequenceIn_],
	     upper_[sequenceIn_],dualIn_);
      dualIn_ -= nonLinearCost_->changeUpInCost(sequenceIn_);
      printf(" to %g\n",dualIn_);
      nonLinearCost_->setOne(sequenceIn_,upper_[sequenceIn_]+2.0*currentPrimalTolerance());
      setStatus(sequenceIn_,IClpSimplex::atLowerBound);
    } else if (status == IClpSimplex::atLowerBound && dualIn_ > 0.0) {
      // move to other side
      printf("For %d L (%g, %g, %g) dj changed from %g",
	     sequenceIn_,lower_[sequenceIn_],solution_[sequenceIn_],
	     upper_[sequenceIn_],dualIn_);
      dualIn_ -= nonLinearCost_->changeDownInCost(sequenceIn_);
      printf(" to %g\n",dualIn_);
      nonLinearCost_->setOne(sequenceIn_,lower_[sequenceIn_]-2.0*currentPrimalTolerance());
      setStatus(sequenceIn_,IClpSimplex::atUpperBound);
    }
  }
  lowerIn_ = lower_[sequenceIn_];
  upperIn_ = upper_[sequenceIn_];
  // Negative dj means increase the variable; positive means decrease.
  directionIn_ = (dualIn_ > 0.0) ? -1 : 1;
}
/* The primals are updated by the given array.
Returns number of infeasibilities.
After rowArray will have list of cost changes
*/
int
IClpSimplexPrimal_Wolfe::updatePrimalsInPrimal(CoinIndexedVector * rowArray,
double theta,
double & objectiveChange,
int valuesPass)
{
// Cost on pivot row may change - may need to change dualIn
double oldCost=0.0;
if (pivotRow_>=0)
oldCost = cost_[sequenceOut_];
//rowArray->scanAndPack();
double * work=rowArray->denseVector();
int number=rowArray->getNumElements();
int * which=rowArray->getIndices();
int newNumber = 0;
int pivotPosition = -1;
nonLinearCost_->setChangeInCost(0.0);
//printf("XX 4138 sol %g lower %g upper %g cost %g status %d\n",
// solution_[4138],lower_[4138],upper_[4138],cost_[4138],status_[4138]);
// allow for case where bound+tolerance == bound
//double tolerance = 0.999999*primalTolerance_;
double relaxedTolerance = 1.001*primalTolerance_;
int iIndex;
if (!valuesPass) {
for (iIndex=0;iIndex0.0) {
// going down
if (value<=lower_[iPivot]+primalTolerance_) {
if (iPivot==sequenceOut_&&value>lower_[iPivot]-relaxedTolerance)
value=lower_[iPivot];
double difference = nonLinearCost_->setOne(iPivot,value);
assert (!difference||fabs(change)>1.0e9);
}
} else {
// going up
if (value>=upper_[iPivot]-primalTolerance_) {
if (iPivot==sequenceOut_&&valuesetOne(iPivot,value);
assert (!difference||fabs(change)>1.0e9);
}
}
}
#endif
if (active(iRow)||theta_<0.0) {
clearActive(iRow);
// But make sure one going out is feasible
if (change>0.0) {
// going down
if (value<=lower_[iPivot]+primalTolerance_) {
if (iPivot==sequenceOut_&&value>=lower_[iPivot]-relaxedTolerance)
value=lower_[iPivot];
double difference = nonLinearCost_->setOne(iPivot,value);
if (difference) {
if (iRow==pivotRow_)
pivotPosition=newNumber;
work[newNumber] = difference;
//change reduced cost on this
dj_[iPivot] = -difference;
which[newNumber++]=iRow;
}
}
} else {
// going up
if (value>=upper_[iPivot]-primalTolerance_) {
if (iPivot==sequenceOut_&&value