diff --git a/.gitignore b/.gitignore
index b49e4dd..64b07fd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -107,6 +107,3 @@ ENV/
# In-tree generated files
*/_version.py
-
-# Do not track ecole for now
-ecole/
diff --git a/ecole/AUTHORS b/ecole/AUTHORS
new file mode 100644
index 0000000..0388718
--- /dev/null
+++ b/ecole/AUTHORS
@@ -0,0 +1,4 @@
+Antoine Prouvost
+Maxime Gasse
+Didier Chételat
+Justin Dumouchelle
diff --git a/ecole/CMakeLists.txt b/ecole/CMakeLists.txt
new file mode 100644
index 0000000..8319d48
--- /dev/null
+++ b/ecole/CMakeLists.txt
@@ -0,0 +1,55 @@
+cmake_minimum_required(VERSION 3.16)
+
+# Adapt compiler flags if using Conda compiler packages. Before project so they are not modified.
+include(cmake/Conda.cmake)
+
+# Read the version from file
+include(cmake/Version.cmake)
+read_version("VERSION" Ecole_VERSION)
+
+# Set default parameters. Assumes an Ecole user (not a developer).
+include(cmake/DefaultSettings.cmake)
+
+project(
+ Ecole
+ VERSION "${Ecole_VERSION}"
+ LANGUAGES CXX
+ DESCRIPTION "Extensible Combinatorial Optimization Learning Environments"
+)
+
+# Add option to enable interprocedural optimization
+include(cmake/InterproceduralOptimization.cmake)
+
+# Define a target Ecole::warnings with all compiler warnings.
+include(cmake/CompilerWarnings.cmake)
+
+# Define a target Ecole::sanitizers with enabled sanitizers.
+include(cmake/Sanitizers.cmake)
+
+# Define a target Ecole::coverage with coverage options.
+include(cmake/Coverage.cmake)
+
+# Utilities to automatically download missing dependencies
+include(cmake/DependenciesResolver.cmake)
+
+# Adapt which Python is found
+include(cmake/Python.cmake)
+
+# Enable CTest for registering tests
+include(CTest)
+
+# Ecole library
+if(ECOLE_BUILD_LIB)
+ # Build the Ecole library
+ add_subdirectory(libecole)
+else()
+ # Find the Ecole library of same version already installed
+ option(ECOLE_DOWNLOAD_DEPENDENCIES "Download the static and header libraries used in Ecole public interface" ON)
+ find_package(Ecole ${Ecole_VERSION} EXACT REQUIRED)
+endif()
+
+# Ecole Python extension
+if(ECOLE_BUILD_PY_EXT)
+ add_subdirectory(python/extension-helper)
+ add_subdirectory(python/ecole)
+endif()
diff --git a/ecole/LICENSE b/ecole/LICENSE
new file mode 100644
index 0000000..23fe966
--- /dev/null
+++ b/ecole/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, Antoine Prouvost
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ecole/README.rst b/ecole/README.rst
new file mode 100644
index 0000000..9699bc7
--- /dev/null
+++ b/ecole/README.rst
@@ -0,0 +1,124 @@
+⚠️ **Warning** ⚠️
+
+*Ecole is looking for a new home.*
+*It is not being actively developed, only critical issues will be investigated.*
+
+
+.. image:: https://raw.githubusercontent.com/ds4dm/ecole/master/docs/_static/images/ecole-logo.svg
+ :target: https://www.ecole.ai
+ :alt: Ecole logo
+ :width: 30 %
+ :align: right
+
+Ecole
+=====
+
+.. image:: https://github.com/ds4dm/ecole/actions/workflows/continuous-testing.yml/badge.svg
+ :target: https://github.com/ds4dm/ecole/actions/workflows/continuous-testing.yml
+ :alt: Test and deploy on Github Actions
+
+Ecole (pronounced [ekɔl]) stands for *Extensible Combinatorial Optimization Learning
+Environments* and aims to expose a number of control problems arising in combinatorial
+optimization solvers as Markov
+Decision Processes (*i.e.*, Reinforcement Learning environments).
+Rather than trying to predict solutions to combinatorial optimization problems directly, the
+philosophy behind Ecole is to work
+in cooperation with a state-of-the-art Mixed Integer Linear Programming solver
+that acts as a controllable algorithm.
+
+The underlying solver used is `SCIP <https://www.scipopt.org/>`_, and the user facing API is
+meant to mimic the `OpenAI Gym <https://gym.openai.com/>`_ API (as much as possible).
+
+.. code-block:: python
+
+ import ecole
+
+ env = ecole.environment.Branching(
+ reward_function=-1.5 * ecole.reward.LpIterations() ** 2,
+ observation_function=ecole.observation.NodeBipartite(),
+ )
+ instances = ecole.instance.SetCoverGenerator()
+
+ for _ in range(10):
+ obs, action_set, reward_offset, done, info = env.reset(next(instances))
+ while not done:
+ obs, action_set, reward, done, info = env.step(action_set[0])
+
+
+Documentation
+-------------
+Consult the `user Documentation <https://doc.ecole.ai>`_ for tutorials, examples, and library reference.
+
+Discussions and help
+--------------------
+Head to `Github Discussions <https://github.com/ds4dm/ecole/discussions>`_ for interaction with the community: give
+and receive help, discuss interesting environments, reward functions, and instance generators.
+
+Installation
+------------
+Conda
+^^^^^
+
+.. image:: https://img.shields.io/conda/vn/conda-forge/ecole?label=version&logo=conda-forge
+ :target: https://anaconda.org/conda-forge/ecole
+ :alt: Conda-Forge version
+.. image:: https://img.shields.io/conda/pn/conda-forge/ecole?logo=conda-forge
+ :target: https://anaconda.org/conda-forge/ecole
+ :alt: Conda-Forge platforms
+
+.. code-block:: bash
+
+ conda install -c conda-forge ecole
+
+All dependencies are resolved by conda, no compiler is required.
+
+Pip wheel (binary)
+^^^^^^^^^^^^^^^^^^
+Currently unavailable.
+
+Pip source
+^^^^^^^^^^^
+.. image:: https://img.shields.io/pypi/v/ecole?logo=python
+ :target: https://pypi.org/project/ecole/
+ :alt: PyPI version
+
+Building from source requires:
+ - A `C++17 compiler <https://en.cppreference.com/w/cpp/compiler_support>`_,
+ - A `SCIP <https://www.scipopt.org/>`__ installation.
+
+.. code-block:: bash
+
+ pip install ecole
+
+Other Options
+^^^^^^^^^^^^^
+Check out the `installation instructions <https://doc.ecole.ai>`_ in the
+documentation for more installation options.
+
+Related Projects
+----------------
+
+* `OR-Gym <https://github.com/hubbs5/or-gym>`_ is a library providing gym-like environments to produce feasible solutions
+ directly, without the need for an MILP solver;
+* `MIPLearn <https://anl-ceeesa.github.io/MIPLearn/>`_ for learning to configure solvers.
+
+Use It, Cite It
+---------------
+
+.. image:: https://img.shields.io/badge/arxiv-2011.06069-red
+ :target: https://arxiv.org/abs/2011.06069
+ :alt: Ecole publication on Arxiv
+
+
+If you use Ecole in a scientific publication, please cite the Ecole publication
+
+.. code-block:: text
+
+ @inproceedings{
+ prouvost2020ecole,
+ title={Ecole: A Gym-like Library for Machine Learning in Combinatorial Optimization Solvers},
+ author={Antoine Prouvost and Justin Dumouchelle and Lara Scavuzzo and Maxime Gasse and Didier Ch{\'e}telat and Andrea Lodi},
+ booktitle={Learning Meets Combinatorial Algorithms at NeurIPS2020},
+ year={2020},
+ url={https://openreview.net/forum?id=IVc9hqgibyB}
+ }
diff --git a/ecole/VERSION b/ecole/VERSION
new file mode 100644
index 0000000..b0841b7
--- /dev/null
+++ b/ecole/VERSION
@@ -0,0 +1,10 @@
+# See PEP 440 for valid version specification
+# The following should include only numbers
+VERSION_MAJOR 0
+VERSION_MINOR 8
+VERSION_PATCH 1
+# The following should include their whole string and can be combined.
+# They must be numbered, starting from 0.
+VERSION_PRE # Pre release without leading dot, e.g. `a0` (alpha), `b0` (beta), or `rc0` (release candidate)
+VERSION_POST # Post release with leading dot, e.g. `.post0`
+VERSION_DEV # Dev release with leading dot, e.g. `.dev0`
diff --git a/ecole/cmake/CompilerWarnings.cmake b/ecole/cmake/CompilerWarnings.cmake
new file mode 100644
index 0000000..c025401
--- /dev/null
+++ b/ecole/cmake/CompilerWarnings.cmake
@@ -0,0 +1,128 @@
+# Module to set default compiler warnings.
+#
+# File adapted from Jason Turner's cpp_starter_project
+# https://github.com/lefticus/cpp_starter_project/blob/master/cmake/CompilerWarnings.cmake
+# Using INTERFACE targets is not so desirable as they need to be installed when building
+# static libraries.
+
+function(ecole_target_add_compile_warnings target)
+ option(WARNINGS_AS_ERRORS "Treat compiler warnings as errors" OFF)
+
+ set(msvc_warnings
+ # Baseline reasonable warnings
+ /W4
+ # "identifier": conversion from "type1" to "type2", possible loss of data
+ /w14242
+ # "operator": conversion from "type1:field_bits" to "type2:field_bits", possible
+ # loss of data
+ /w14254
+ # "function": member function does not override any base class virtual member
+ # function
+ /w14263
+ # "classname": class has virtual functions, but destructor is not virtual instances
+ # of this class may not be destructed correctly
+ /w14265
+ # "operator": unsigned/negative constant mismatch
+ /w14287
+ # Nonstandard extension used: "variable": loop control variable declared in the
+ # for-loop is used outside the for-loop scope
+ /we4289
+ # "operator": expression is always "boolean_value"
+ /w14296
+ # "variable": pointer truncation from "type1" to "type2"
+ /w14311
+ # Expression before comma evaluates to a function which is missing an argument list
+ /w14545
+ # Function call before comma missing argument list
+ /w14546
+ # "operator": operator before comma has no effect; expected operator with side-effect
+ /w14547
+ # "operator": operator before comma has no effect; did you intend "operator"?
+ /w14549
+ # Expression has no effect; expected expression with side-effect
+ /w14555
+ # Pragma warning: there is no warning number "number"
+ /w14619
+ # Enable warning on thread un-safe static member initialization
+ /w14640
+ # Conversion from "type1" to "type_2" is sign-extended. This may cause unexpected
+ # runtime behavior.
+ /w14826
+ # Wide string literal cast to "LPSTR"
+ /w14905
+ # String literal cast to "LPWSTR"
+ /w14906
+ # Illegal copy-initialization; more than one user-defined conversion has been
+ # implicitly applied
+ /w14928
+ )
+
+ set(clang_warnings
+ # Some default set of warnings
+ -Wall
+ # Reasonable and standard
+ -Wextra
+ # Warn the user if a variable declaration shadows one from a parent context
+ -Wshadow
+ # Warn the user if a class with virtual functions has a non-virtual destructor.
+ # This helps catch hard to track down memory errors
+ -Wnon-virtual-dtor
+ # Warn for c-style casts
+ -Wold-style-cast
+ # Warn for potential performance problem casts
+ -Wcast-align
+ # Warn on anything being unused
+ -Wunused
+ # Warn if you overload (not override) a virtual function
+ -Woverloaded-virtual
+ # Warn if non-standard C++ is used
+ -Wpedantic
+ # Warn on type conversions that may lose data
+ -Wconversion
+ # Warn on sign conversions
+ -Wsign-conversion
+ # Warn if a null dereference is detected
+ -Wnull-dereference
+ # Warn if float is implicit promoted to double
+ -Wdouble-promotion
+ # Warn on security issues around functions that format output (ie printf)
+ -Wformat=2
+ # Warn on code that cannot be executed
+ -Wunreachable-code
+ # Warn if a variable is used before being initialized
+ -Wuninitialized
+ )
+
+ if (WARNINGS_AS_ERRORS)
+ set(clang_warnings ${clang_warnings} -Werror)
+ set(msvc_warnings ${msvc_warnings} /WX)
+ endif()
+
+ set(gcc_warnings
+ ${clang_warnings}
+ # FIXME currently not adding more warning for GCC because they fail on clang-tidy
+ # warn if indentation implies blocks where blocks do not exist
+ # -Wmisleading-indentation
+ # warn if if / else chain has duplicated conditions
+ # -Wduplicated-cond
+ # warn if if / else branches have duplicated code
+ # -Wduplicated-branches
+ # warn about logical operations being used where bitwise were probably wanted
+ # -Wlogical-op
+ # warn if you perform a cast to the same type
+ # -Wuseless-cast
+ )
+
+ if(MSVC)
+ set(warnings ${msvc_warnings})
+ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
+ set(warnings ${clang_warnings})
+ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ set(warnings ${clang_warnings})
+ else()
+ set(warnings ${gcc_warnings})
+ endif()
+
+ target_compile_options("${target}" PRIVATE ${warnings})
+
+endfunction()
diff --git a/ecole/cmake/Conda.cmake b/ecole/cmake/Conda.cmake
new file mode 100644
index 0000000..aba98f7
--- /dev/null
+++ b/ecole/cmake/Conda.cmake
@@ -0,0 +1,45 @@
+# This file processes the flags set by conda in the compiler packages.
+#
+# Conda has a separate set of debug flags defined, which are not picked up by CMake.
+# Similarly, the regular flags set by Conda are not adapted to debug builds.
+#
+# This file adds to build type:
+# - CondaDebug for the DEBUG_XXXFLAGS set by Conda,
+# - CondaRelease for the XXXFLAGS set by Conda.
+#
+# Note: the `LDFLAGS` environment variable is not processed into CMAKE_EXE_LINKER_FLAGS_,
+# CMAKE_SHARED_LINKER_FLAGS_ and CMAKE_MODULE_LINKER_FLAGS_ as they contain
+# information to find the library dependencies.
+
+# If we are building a recipe or not using the compiler packages then do nothing.
+if(DEFINED ENV{CONDA_BUILD} OR NOT DEFINED ENV{CONDA_BUILD_SYSROOT})
+ return()
+endif()
+
+# Utility to set the language and linker flags.
+function(set_flags LANG FLAGS)
+set(BUILD "${ARGV2}") # Optional build type
+ set(
+ CMAKE_${LANG}_FLAGS_${BUILD} "${FLAGS}"
+ CACHE STRING "Flags used by ${LANG} during ${BUILD} builds."
+ )
+ MARK_AS_ADVANCED(CMAKE_${LANG}_FLAGS_${BUILD})
+endfunction()
+
+# Define the CondaDebug build type
+set_flags(Fortran "$ENV{DEBUG_FFLAGS}" CONDADEBUG)
+set_flags(C "$ENV{DEBUG_CFLAGS} $ENV{DEBUG_CPPFLAGS}" CONDADEBUG)
+set_flags(CXX "$ENV{DEBUG_CXXFLAGS} $ENV{DEBUG_CPPFLAGS}" CONDADEBUG)
+# Unset the environment flags in order to prevent CMake from reading them
+set(ENV{DEBUG_FFLAGS} "")
+set(ENV{DEBUG_CFLAGS} "")
+set(ENV{DEBUG_CXXFLAGS} "")
+
+# Define the CondaRelease build type
+set_flags(Fortran "$ENV{FFLAGS}" CONDARELEASE)
+set_flags(C "$ENV{CFLAGS} $ENV{CPPFLAGS}" CONDARELEASE)
+set_flags(CXX "$ENV{CXXFLAGS} $ENV{CPPFLAGS}" CONDARELEASE)
+# Unset the environment flags in order to prevent CMake from reading them
+set(ENV{FFLAGS} "")
+set(ENV{CFLAGS} "")
+set(ENV{CXXFLAGS} "")
diff --git a/ecole/cmake/Coverage.cmake b/ecole/cmake/Coverage.cmake
new file mode 100644
index 0000000..786cac3
--- /dev/null
+++ b/ecole/cmake/Coverage.cmake
@@ -0,0 +1,14 @@
+# Module to enable code coverage
+
+function(ecole_target_add_coverage target)
+
+ set(supported_compilers "GNU" "Clang" "AppleClang")
+ if(CMAKE_CXX_COMPILER_ID IN_LIST supported_compilers)
+ option(COVERAGE "Enable coverage reporting for gcc/clang" FALSE)
+ if(COVERAGE)
+ target_compile_options("${target}" PRIVATE --coverage -O0 -g)
+ target_link_libraries("${target}" PRIVATE --coverage)
+ endif()
+ endif()
+
+endfunction()
diff --git a/ecole/cmake/CreateVersionFile.cmake b/ecole/cmake/CreateVersionFile.cmake
new file mode 100644
index 0000000..fffaf38
--- /dev/null
+++ b/ecole/cmake/CreateVersionFile.cmake
@@ -0,0 +1,43 @@
+# Script to configure version files at build time.
+#
+# This is not meant to be included directly in CMakeLists.txt but to be called with `cmake -P`
+# script mode.
+# This way, one can build a target that regenerate the version file at every compilation.
+# It avoids getting an outdated Git revision.
+# All other variables defined before running the script can also be used for templating the
+# version file.
+
+# Default working directory
+if(NOT WORKING_DIR)
+ get_filename_component(WORKING_DIR "${SOURCE_FILE}" DIRECTORY)
+endif()
+
+if(NOT Ecole_VERSION_MAJOR)
+ set(Ecole_VERSION_MAJOR 0)
+endif()
+if(NOT Ecole_VERSION_MINOR)
+ set(Ecole_VERSION_MINOR 0)
+endif()
+if(NOT Ecole_VERSION_PATCH)
+ set(Ecole_VERSION_PATCH 0)
+endif()
+
+if(NOT Ecole_VERSION_REVISION)
+ message(STATUS "Resolving Git Version")
+ set(Ecole_VERSION_REVISION "unknown")
+ find_package(Git)
+ if(GIT_FOUND)
+ execute_process(
+ COMMAND ${GIT_EXECUTABLE} rev-parse --verify HEAD
+ WORKING_DIRECTORY "${WORKING_DIR}"
+ OUTPUT_VARIABLE Ecole_VERSION_REVISION
+ ERROR_QUIET
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+ message(STATUS "Git revision: ${Ecole_VERSION_REVISION}")
+ else()
+ message(STATUS "Git not found")
+ endif()
+endif()
+
+configure_file("${SOURCE_FILE}" "${TARGET_FILE}" @ONLY)
diff --git a/ecole/cmake/DefaultSettings.cmake b/ecole/cmake/DefaultSettings.cmake
new file mode 100644
index 0000000..de84550
--- /dev/null
+++ b/ecole/cmake/DefaultSettings.cmake
@@ -0,0 +1,69 @@
+# Set default parameters depending if user or developer.
+
+
+# Set the default build type to the given value if no build type was specified
+function(set_default_build_type DEFAULT_BUILD_TYPE)
+ if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
+ message(STATUS "Setting build type to ${DEFAULT_BUILD_TYPE} as none was specified")
+ set(
+ CMAKE_BUILD_TYPE ${DEFAULT_BUILD_TYPE}
+ CACHE STRING "Choose the type of build" FORCE
+ )
+ # Set the possible values of build type for cmake-gui, ccmake
+ set_property(
+ CACHE CMAKE_BUILD_TYPE
+ PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo"
+ )
+ endif()
+endfunction()
+
+# Set default common for all cases
+macro(set_common_defaults)
+ option(CMAKE_EXPORT_COMPILE_COMMANDS "Generate compile_commands.json for tools" ON)
+ option(CMAKE_POSITION_INDEPENDENT_CODE "Position Independent Code for building shared libraries." ON)
+ option(CMAKE_VISIBILITY_INLINES_HIDDEN "Hidden symbol visibility for inline functions in shared libraries" ON)
+ set(CMAKE_CXX_VISIBILITY_PRESET hidden CACHE STRING "Hidden visibility of symbols in shared libraries.")
+ option(ECOLE_BUILD_LIB "Build Ecole library, find already installed otherwise" ON)
+ option(ECOLE_BUILD_PY_EXT "Build Ecole Python Extension" ON)
+endmacro()
+
+
+# Set of defaults for Ecole users
+macro(set_user_defaults)
+ set_common_defaults()
+ set_default_build_type(RelWithDebInfo)
+ option(ENABLE_IPO "Enable Interprocedural Optimization, aka Link Time Optimization (LTO)" ON)
+ option(ECOLE_BUILD_BENCHMARKS "Build Ecole benchmarks" OFF)
+ option(ECOLE_BUILD_TESTS "Build Ecole tests" OFF)
+endmacro()
+
+
+# Set of defaults for Ecole developers (anyone contributing)
+macro(set_developer_defaults)
+ set_common_defaults()
+ set_default_build_type(Debug)
+
+ option(ECOLE_BUILD_BENCHMARKS "Build Ecole benchmarks" ON)
+ option(ECOLE_BUILD_TESTS "Build Ecole tests" ON)
+
+ # Enable compiler cache if found
+ find_program(CCACHE ccache)
+ if(CCACHE)
+ message(STATUS "Using ccache")
+ set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE} CACHE FILEPATH "Compiler launching tool")
+ else()
+ message(STATUS "Cannot find requirement ccache")
+ endif()
+endmacro()
+
+
+macro(set_defaults)
+ if(ECOLE_DEVELOPER)
+ set_developer_defaults()
+ else()
+ set_user_defaults()
+ endif()
+endmacro()
+
+
+set_defaults()
diff --git a/ecole/cmake/DependenciesResolver.cmake b/ecole/cmake/DependenciesResolver.cmake
new file mode 100644
index 0000000..326c710
--- /dev/null
+++ b/ecole/cmake/DependenciesResolver.cmake
@@ -0,0 +1,95 @@
+# Find or download dependencies.
+#
+# Utility to try to find a package, or download it, configure it, and install it inside
+# the build tree.
+# Based on FetchContent, it avoids using `add_subdirectory` which exposes other project
+# targets and errors as part of this project.
+
+# Avoid warning about DOWNLOAD_EXTRACT_TIMESTAMP in CMake 3.24:
+if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0")
+ cmake_policy(SET CMP0135 NEW)
+endif()
+
+include(FetchContent)
+
+
+# Where downloaded dependencies will be installed (in the build tree by default).
+set(FETCHCONTENT_INSTALL_DIR "${FETCHCONTENT_BASE_DIR}/local")
+option(ECOLE_FORCE_DOWNLOAD "Don't look for dependencies locally, rather always download them." OFF)
+
+
+# Execute a command at configure time and handle errors and output.
+function(execute_process_handle_output)
+ execute_process(
+ ${ARGV}
+ RESULT_VARIABLE ERROR
+ OUTPUT_VARIABLE STD_OUT
+ ERROR_VARIABLE STD_ERR
+ )
+ if(ERROR)
+ message(FATAL_ERROR "${STD_OUT} ${STD_ERR}")
+ else()
+ message(DEBUG "${STD_OUT}")
+ endif()
+endfunction()
+
+
+# Configure, build, and install a CMake project
+#
+# The source of the project must have been made available prior to calling this function.
+function(build_package)
+ set(options)
+ set(oneValueArgs SOURCE_DIR BUILD_DIR INSTALL_DIR)
+ set(multiValueArgs CONFIGURE_ARGS)
+ cmake_parse_arguments(ARG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+ message(DEBUG "${CMAKE_COMMAND}" -S "${ARG_SOURCE_DIR}" -B "${ARG_BUILD_DIR}" ${ARG_CONFIGURE_ARGS})
+ execute_process_handle_output(
+ COMMAND "${CMAKE_COMMAND}" -S "${ARG_SOURCE_DIR}" -B "${ARG_BUILD_DIR}" -G "${CMAKE_GENERATOR}" ${ARG_CONFIGURE_ARGS}
+ )
+ execute_process_handle_output(COMMAND "${CMAKE_COMMAND}" --build "${ARG_BUILD_DIR}" --parallel)
+ execute_process_handle_output(COMMAND "${CMAKE_COMMAND}" --install "${ARG_BUILD_DIR}" --prefix "${ARG_INSTALL_DIR}")
+endfunction()
+
+
+# Try to find a package or downloads it at configure time.
+#
+# Use FetchContent to download a package if it was not found and build it inside the build tree.
+# This is a macro so that find_package can export variables in the parent scope.
+macro(find_or_download_package)
+ set(options)
+ set(oneValueArgs NAME URL URL_HASH)
+ set(multiValueArgs CONFIGURE_ARGS)
+ cmake_parse_arguments(ARG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+ if(NOT ${ARG_NAME}_FOUND)
+ if(NOT ECOLE_FORCE_DOWNLOAD)
+ find_package(${ARG_NAME} QUIET)
+ endif()
+
+ if(${ARG_NAME}_FOUND)
+ message(STATUS "Found ${ARG_NAME}")
+ else()
+ FetchContent_Declare(
+ ${ARG_NAME}
+ URL ${ARG_URL}
+ URL_HASH ${ARG_URL_HASH}
+ )
+ FetchContent_GetProperties(${ARG_NAME})
+ if(NOT ${ARG_NAME}_POPULATED)
+ message(STATUS "Downloading ${ARG_NAME}")
+ FetchContent_Populate(${ARG_NAME})
+ message(STATUS "Building ${ARG_NAME}")
+ # FetchContent_Populate uses lower case name of FetchContent_Declare for directories
+ string(TOLOWER "${ARG_NAME}" ARG_NAME_LOWER)
+ build_package(
+ CONFIGURE_ARGS ${ARG_CONFIGURE_ARGS} -D "CMAKE_PREFIX_PATH=${FETCHCONTENT_INSTALL_DIR}"
+ SOURCE_DIR "${${ARG_NAME_LOWER}_SOURCE_DIR}"
+ BUILD_DIR "${${ARG_NAME_LOWER}_BINARY_DIR}"
+ INSTALL_DIR "${FETCHCONTENT_INSTALL_DIR}"
+ )
+ find_package(${ARG_NAME} PATHS "${FETCHCONTENT_INSTALL_DIR}" NO_DEFAULT_PATH QUIET)
+ endif()
+ endif()
+ endif()
+endmacro()
diff --git a/ecole/cmake/InterproceduralOptimization.cmake b/ecole/cmake/InterproceduralOptimization.cmake
new file mode 100644
index 0000000..f65ec3b
--- /dev/null
+++ b/ecole/cmake/InterproceduralOptimization.cmake
@@ -0,0 +1,13 @@
+option(ENABLE_IPO "Enable Interprocedural Optimization, aka Link Time Optimization (LTO)" OFF)
+
+if(ENABLE_IPO)
+ include(CheckIPOSupported)
+ check_ipo_supported(RESULT result OUTPUT output)
+ if(result)
+ set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
+ message(STATUS "IPO enabled")
+ else()
+ message(STATUS "IPO is not supported")
+ message(DEBUG "${output}")
+ endif()
+endif()
diff --git a/ecole/cmake/Python.cmake b/ecole/cmake/Python.cmake
new file mode 100644
index 0000000..545d18f
--- /dev/null
+++ b/ecole/cmake/Python.cmake
@@ -0,0 +1,17 @@
+# Set some variables to find the proper Python version
+
+if(SKBUILD)
+ # If scikit-build is compiling, let it define the interpreter
+ set(Python_EXECUTABLE "${PYTHON_EXECUTABLE}")
+ set(Python_INCLUDE_DIR "${PYTHON_INCLUDE_DIR}")
+ set(Python_LIBRARY "${PYTHON_LIBRARY}")
+ set(DUMMY "${PYTHON_VERSION_STRING}") # Not needed, silences a warning
+
+elseif(NOT DEFINED Python_EXECUTABLE)
+ # Find Python interpreter from the path and don't resolve symlinks
+ execute_process(
+ COMMAND "python3" "-c" "import sys; print(sys.executable)"
+ OUTPUT_VARIABLE Python_EXECUTABLE
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+ )
+endif()
diff --git a/ecole/cmake/Sanitizers.cmake b/ecole/cmake/Sanitizers.cmake
new file mode 100644
index 0000000..76198f2
--- /dev/null
+++ b/ecole/cmake/Sanitizers.cmake
@@ -0,0 +1,42 @@
+# Module to enable compiler runtime checks.
+#
+# File adapted from Jason Turner's cpp_starter_project
+# https://github.com/lefticus/cpp_starter_project/blob/master/cmake/Sanitizers.cmake
+# Using INTERFACE targets is not so desirable as they need to be installed when building
+# static libraries.
+
+function(ecole_target_add_sanitizers target)
+
+ set(supported_compilers "GNU" "Clang" "AppleClang")
+ if(CMAKE_CXX_COMPILER_ID IN_LIST supported_compilers)
+ set(sanitizers "")
+
+ option(SANITIZE_ADDRESS "Enable address sanitizer" FALSE)
+ if(SANITIZE_ADDRESS)
+ list(APPEND sanitizers "address")
+ endif()
+
+ option(SANITIZE_MEMORY "Enable memory sanitizer" FALSE)
+ if(SANITIZE_MEMORY)
+ list(APPEND sanitizers "memory")
+ endif()
+
+ option(SANITIZE_UNDEFINED_BEHAVIOR "Enable undefined behavior sanitizer" FALSE)
+ if(SANITIZE_UNDEFINED_BEHAVIOR)
+ list(APPEND sanitizers "undefined")
+ endif()
+
+ option(SANITIZE_THREAD "Enable thread sanitizer" FALSE)
+ if(SANITIZE_THREAD)
+ list(APPEND sanitizers "thread")
+ endif()
+
+ list(JOIN sanitizers "," list_of_sanitizers)
+ if(NOT "${list_of_sanitizers}" STREQUAL "")
+ target_compile_options("${target}" PRIVATE -fsanitize=${list_of_sanitizers})
+ target_link_libraries("${target}" PRIVATE -fsanitize=${list_of_sanitizers})
+ endif()
+
+ endif()
+
+endfunction()
diff --git a/ecole/cmake/Version.cmake b/ecole/cmake/Version.cmake
new file mode 100644
index 0000000..bd30de5
--- /dev/null
+++ b/ecole/cmake/Version.cmake
@@ -0,0 +1,15 @@
+function(read_version file version_var)
+ file(READ "${file}" version_text)
+
+ string(REGEX MATCH "VERSION_MAJOR ([0-9]+)" _ "${version_text}")
+ set(version_major "${CMAKE_MATCH_1}")
+
+ string(REGEX MATCH "VERSION_MINOR ([0-9]+)" _ "${version_text}")
+ set(version_minor "${CMAKE_MATCH_1}")
+
+ string(REGEX MATCH "VERSION_PATCH ([0-9]+)" _ "${version_text}")
+ set(version_patch "${CMAKE_MATCH_1}")
+
+ set("${version_var}" "${version_major}.${version_minor}.${version_patch}" PARENT_SCOPE)
+ message(STATUS "Ecole version ${version_major}.${version_minor}.${version_patch}")
+endfunction()
diff --git a/ecole/dev/Dockerfile.src b/ecole/dev/Dockerfile.src
new file mode 100644
index 0000000..d280386
--- /dev/null
+++ b/ecole/dev/Dockerfile.src
@@ -0,0 +1,51 @@
+# Matrix built with different compilers (e.g. gcc9, clang10) and python versions.
+ARG compiler=clang10
+
+FROM conanio/${compiler}
+USER root
+
+ARG CXXFLAGS=""
+ARG LDFLAGS=""
+ENV CXXFLAGS="$CXXFLAGS"
+ENV LDFLAGS="$LDFLAGS"
+
+# Install minimal dependencies for a CircleCI image.
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends wget git openssh-client tar gzip ca-certificates && \
+ apt-get clean
+
+# Install SCIP from source.
+# We do not need a very "complete" scip, just the bare minimum.
+ARG scip_version=8.0.0
+RUN wget --no-verbose https://scip.zib.de/download/release/scipoptsuite-${scip_version}.tgz && \
+ tar -xzf scipoptsuite-${scip_version}.tgz && \
+ cmake -B build/ -S scipoptsuite-${scip_version} \
+ -D CMAKE_BUILD_TYPE=Release \
+ -D PARASCIP=ON \
+ -D PAPILO=OFF \
+ -D GCG=OFF \
+ -D ZIMPL=OFF \
+ -D GMP=OFF \
+ -D IPOPT=OFF \
+ -D BOOST=OFF && \
+ cmake --build build && \
+ cmake --install build && \
+ rm -rf build/ scipoptsuite-${scip_version} scipoptsuite-${scip_version}.tgz
+
+
+# Install Python and NumPy
+# Pyenv needs a full version (e.g. 3.7.10) so we search for the latest bug fix release
+ARG python_version=3.7
+# This system script uses /usr/bin/python3 which get hijacked by pyenv so we hard code it to system python3.7.
+# https://askubuntu.com/q/965043
+# Does not happen on all images.
+RUN sed --in-place '1s:^#!/usr/bin/python3:#!/usr/bin/python3.7:' "$(which lsb_release)" || true
+RUN version_regex='^[[:blank:]]*'"$(echo ${python_version} | sed 's/\./\\./')"'\.[[:digit:]]+[[:blank:]]*$' && \
+ python_version_fix=$(pyenv install --list | grep -E "${version_regex}" | cut -d'.' -f 3 | sort -n | tail -1) && \
+ python_full_version=${python_version}.${python_version_fix} && \
+ pyenv install ${python_full_version} && \
+ pyenv global ${python_full_version} && \
+ python -m pip install --no-cache-dir --upgrade pip && \
+ python -m pip install --no-cache-dir cmake numpy pytest pytest-helpers-namespace pyscipopt
+
+WORKDIR /app
diff --git a/ecole/dev/conda.yaml b/ecole/dev/conda.yaml
new file mode 100644
index 0000000..ea5d694
--- /dev/null
+++ b/ecole/dev/conda.yaml
@@ -0,0 +1,45 @@
+channels:
+ - conda-forge
+ - defaults
+
+dependencies:
+ # C++ build tools
+ - make
+ - cmake>=3.15
+ # C++ build time dependencies
+ - xtensor
+ - cxx-compiler
+ - xsimd
+ - fmt
+ # C++ runtime dependencies
+ - scip=8
+ # Build time Python dependencies
+ - python>=3.6
+ - pybind11>=2.7
+ - numpy>=1.4
+ - xtensor-python
+ - scikit-build
+ - build
+
+ # Documentation
+ - doxygen
+ - sphinx = 4.4
+ - breathe>=4.15
+ - sphinx_rtd_theme
+
+ # General dev tools
+ - pre-commit
+ # C++ dev tools
+ - clang-tools=11
+ - ccache
+ - catch2 < 3.0
+ - cli11
+ # Python dev tools
+ - pip
+ - pytest
+ - pytest-helpers-namespace
+ - black
+ - ipython
+ - pyscipopt >= 3.0.1 # optional
+ - twine
+ - papermill
diff --git a/ecole/dev/hooks/build b/ecole/dev/hooks/build
new file mode 100644
index 0000000..a87381b
--- /dev/null
+++ b/ecole/dev/hooks/build
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+set -o errexit # Fail script on errors
+set -o nounset # Fail on empty variables
+set -o pipefail # Error if error in pipe
+
+# Directory of this file
+__DIR__="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# Set on DockerHub, but this and the CWD seem to be unreliable
+DOCKERFILE_DIR="${__DIR__}/.."
+# Set on DockerHub but set default value to use script locally
+DOCKER_REPO="${DOCKER_REPO:-ecoleai/ci}"
+
+for python_version in "3.7" "3.8" "3.9" "3.10"; do
+
+ # Source images with given compiler
+ for compiler in "gcc9" "clang10" ; do
+ extra_args=()
+ # If using clang, compile with LLVM libc++ because the given libstdc++ does not fully support C++17.
+ # FIXME libstdc++ should just be updated (because libc++ does not fully support C++17 either), but
+ # somehow the add-apt-repository hangs.
+ if [[ "${compiler}" = clang* ]]; then
+ extra_args+=(--build-arg CXXFLAGS="-stdlib=libc++" --build-arg LDFLAGS="-lc++abi")
+ fi;
+ docker build \
+ --file "${DOCKERFILE_DIR}/Dockerfile.src" \
+ --build-arg python_version="${python_version}" \
+ --build-arg compiler="${compiler}" \
+ "${extra_args[@]+"${extra_args[@]}"}" \
+ --tag "${DOCKER_REPO}-linux-src-${compiler}-py${python_version}:${DOCKER_TAG:-latest}" "${DOCKERFILE_DIR}"
+ done
+
+done
diff --git a/ecole/dev/hooks/push b/ecole/dev/hooks/push
new file mode 100644
index 0000000..fcdbf96
--- /dev/null
+++ b/ecole/dev/hooks/push
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -o errexit # Fail script on errors
+set -o nounset # Fail on empty variables
+set -o pipefail # Error if error in pipe
+
+# Set on DockerHub but set default value to use script locally
+DOCKER_REPO="${DOCKER_REPO:-index.docker.io/ecoleai/ci}"
+
+for image_name in $(docker images "${DOCKER_REPO#index.docker.io/}-*" --format "{{.Repository}}"); do
+ docker push "${image_name}"
+done
diff --git a/ecole/dev/run.sh b/ecole/dev/run.sh
new file mode 100755
index 0000000..a0a8cac
--- /dev/null
+++ b/ecole/dev/run.sh
@@ -0,0 +1,602 @@
+#!/usr/bin/env bash
+
+
+# Directory of this file
+readonly __DIR__="$(cd "$(dirname "${BASH_SOURCE[0]:?}")" && pwd)"
+
+# Top of the repository in which this file is
+readonly __ECOLE_DIR__="$(cd "${__DIR__:?}/.." && pwd)"
+
+# If CI is defined then "true", otherwise "false" (strings, not booleans).
+readonly __CI__="$([ -z "${CI+x}" ] && printf "false" || printf "true")"
+
+# Number of spaces to shift the commands outputs by.
+readonly __SHIFT__=4
+
+
+# Print all arguments in bold yellow, followed by a newline.
+# NOTE(review): "$*" is expanded inside the printf FORMAT string, so a '%'
+# in a message would be treated as a format specifier;
+# printf "%b%s%b\n" "${yellow}" "$*" "${nc}" would be safer — confirm.
+function echo_yellow {
+ local -r yellow="\033[1;33m"
+ local -r nc="\033[0m"
+ printf "${yellow}$*${nc}\n"
+}
+
+
+# Logging entry point. All script messages go through here so the
+# formatting (currently yellow text) can be changed in one place.
+function log {
+ echo_yellow "$@"
+}
+
+# Read each character of stdin, indenting each line by __SHIFT__ spaces.
+# Not using sed as explained in this SO post https://stackoverflow.com/a/46495830/5862073
+function interactive_indent {
+ local -r spaces="$(printf "%${__SHIFT__}s")"
+ echo -n "$spaces"
+ while IFS= read -r -d '' -n1 chr; do
+ # Re-insert the indent after every newline/carriage return so
+ # interactive (progress-bar style) output stays aligned.
+ [[ $chr == $'\n' ]] && chr="\\n\\r$spaces"
+ [[ $chr == $'\r' ]] && chr="\\r$spaces"
+ echo -ne "$chr"
+ done
+ echo -ne '\r'
+}
+
+# Execute a command, indenting its output while preserving colors.
+# `script` provides a pseudo-terminal so the command keeps emitting colors.
+function execute_shift_output {
+ # Number of columns to use is reduced by the indentation
+ local -r columns=$((${COLUMNS:-$(tput -T "${TERM:-xterm}" cols)} - ${__SHIFT__}))
+ # `script` cannot run builtin command like `export`
+ if [ "$(type -t "$1")" = "builtin" ]; then
+ COLUMNS=${columns} "$@"
+ else
+ # Usage of `script` for MacOS (BSD `script` has a different CLI than util-linux)
+ if [[ "$(uname -s)" = Darwin* ]]; then
+ COLUMNS=${columns} script -q /dev/null "$@" | interactive_indent
+ # Usage of `script` for Linux
+ else
+ # Quote-expand command to avoid space splitting words https://stackoverflow.com/a/12985540/5862073
+ local -r command="$(printf "'%s' " "$@")"
+ script -feqc "COLUMNS=${columns} ${command}" /dev/null | interactive_indent
+ fi
+ fi
+}
+
+# Wrap calls to manage verbosity, dry-run, ...
+# Always logs the command; only runs it when dry_run is "false".
+function execute {
+ log "$@"
+ if [ "${dry_run}" = "false" ]; then
+ ## Run the command. Indent both stdout and stderr but preserve them.
+ execute_shift_output "$@"
+ fi
+}
+
+
+# Wrap call and set PYTHONPATH so the freshly built extension in the CMake
+# build tree is importable (only when fix_pythonpath is "true").
+function execute_pythonpath {
+ if [ "${fix_pythonpath}" = "true" ]; then
+ execute export PYTHONPATH="${cmake_build_dir}/python/ecole${PYTHONPATH+:}${PYTHONPATH:-}"
+ execute "$@"
+ execute unset PYTHONPATH
+ else
+ execute "$@"
+ fi
+}
+
+
+# Configure the CMake build directory with tests and benchmarks enabled,
+# forwarding any extra arguments to cmake.
+function configure {
+ local extra_args=("$@")
+ if [ "${cmake_warnings}" = "true" ]; then
+ extra_args+=("-Wdev")
+ fi
+ if [ "${warnings_as_errors}" = "true" ]; then
+ extra_args+=("-Werror=dev" "-D" "WARNINGS_AS_ERRORS=ON")
+ fi
+ execute cmake -S "${source_dir}" -B "${cmake_build_dir}" -D ECOLE_BUILD_TESTS=ON -D ECOLE_BUILD_BENCHMARKS=ON ${extra_args[@]+"${extra_args[@]}"}
+ # Symlink compile_commands.json at the top level for editor tooling.
+ execute ln -nfs "${cmake_build_dir}/compile_commands.json"
+}
+
+
+# Run every build_* function defined in this file.
+function build_all {
+ # List all functions in that file.
+ local all_funcs
+ mapfile -t all_funcs < <(declare -F)
+ all_funcs=("${all_funcs[@]#declare -f }")
+ # Run functions that start with build_
+ local func
+ for func in "${all_funcs[@]}"; do
+ if [[ "${func}" = build_* && "${func}" != "build_all" ]]; then
+ "${func}"
+ fi
+ done
+}
+
+
+# Build a CMake target (default "all") in parallel; extra arguments are
+# forwarded to `cmake --build`.
+function cmake_build {
+ execute cmake --build "${cmake_build_dir}" --parallel --target "${1-all}" "${@:2}"
+}
+
+
+# Build the Ecole C++ library target.
+function build_lib {
+ cmake_build ecole-lib "$@"
+}
+
+
+# Build the C++ test executable target.
+function build_lib_test {
+ cmake_build ecole-lib-test "$@"
+}
+
+
+# Build the Python extension module target.
+function build_py {
+ cmake_build ecole-py-ext "$@"
+}
+
+
+# Execute a command only when rebuild is "true" (used to skip dependency
+# rebuilds when iterating on tests or documentation).
+function if_rebuild_then {
+ if [ "${rebuild}" = "true" ]; then
+ "${@}"
+ fi
+}
+
+
+# Build the Sphinx HTML documentation.
+function build_doc {
+ if_rebuild_then build_py
+ if [ "${warnings_as_errors}" = "true" ]; then
+ # `local` inside the if is still function-scoped; the guarded expansion
+ # below handles the unset case under `set -o nounset`.
+ local sphinx_args+=("-W")
+ fi
+ execute_pythonpath python -m sphinx ${sphinx_args[@]+"${sphinx_args[@]}"} -b html "${source_doc_dir}" "${build_doc_dir}" "$@"
+}
+
+
+# Run every test_* function defined in this file.
+function test_all {
+ # List all functions in that file.
+ local all_funcs
+ mapfile -t all_funcs < <(declare -F)
+ all_funcs=("${all_funcs[@]#declare -f }")
+ # Run functions that start with test_
+ local func
+ for func in "${all_funcs[@]}"; do
+ if [[ "${func}" = test_* && "${func}" != "test_all" ]]; then
+ "${func}"
+ fi
+ done
+}
+
+
+# Return false (1) when `diff` is set and given files pattern have modifications since `rev`.
+# When diff is "false", falls through and returns 0 so everything runs.
+function files_have_changed {
+ if [ "${diff}" = "true" ]; then
+ cd "${__ECOLE_DIR__}" && git diff --name-only --exit-code "${rev}" -- "${@}" > /dev/null && return 1 || return 0
+ fi
+}
+
+
+# Run the C++ test binary directly (skipped when libecole is unchanged since rev).
+function test_lib {
+ if files_have_changed 'CMakeLists.txt' 'libecole'; then
+ if_rebuild_then build_lib_test
+ local extra_args=("$@")
+ if [ "${fail_fast}" = "true" ]; then
+ extra_args+=("--abort")
+ fi
+ execute "${cmake_build_dir}/libecole/tests/ecole-lib-test" ${extra_args[@]+"${extra_args[@]}"}
+ else
+ log "Skipping ${FUNCNAME[0]} as unchanged since ${rev}."
+ fi
+}
+
+
+# CTest runner runs test individually, reducing mem consumption
+function ctest_lib {
+ if files_have_changed 'CMakeLists.txt' 'libecole'; then
+ if_rebuild_then build_lib_test
+ local extra_args=("$@")
+ if [ "${fail_fast}" = "true" ]; then
+ # NOTE(review): the trailing space inside "--stop-on-failure " is passed
+ # to CTest verbatim through ARGS — confirm it is intentional.
+ extra_args+=("--stop-on-failure ")
+ fi
+ # Possible option --parallel
+ cmake_build test -- ARGS="${extra_args[@]+"${extra_args[@]}"}"
+ else
+ log "Skipping ${FUNCNAME[0]} as unchanged since ${rev}."
+ fi
+}
+
+
+# Run the Python test suite (skipped when no relevant files changed since rev).
+function test_py {
+ local -r relevant_files=('CMakeLists.txt' 'libecole/CMakeLists.txt' 'libecole/src' 'libecole/include' 'python')
+ if files_have_changed "${relevant_files[@]}"; then
+ if_rebuild_then build_py
+ local extra_args=("$@")
+ if [ "${fail_fast}" = "true" ]; then
+ extra_args+=("--exitfirst")
+ fi
+ execute_pythonpath python -m pytest ${extra_args[@]+"${extra_args[@]}"}
+ else
+ log "Skipping ${FUNCNAME[0]} as unchanged since ${rev}."
+ fi
+}
+
+
+# Check documentation links and run doctests (skipped when unchanged since rev).
+function test_doc {
+ if files_have_changed 'doc' 'python'; then
+ if_rebuild_then build_doc
+ local extra_args=("$@")
+ if [ "${warnings_as_errors}" = "true" ]; then
+ extra_args+=("-W")
+ fi
+ execute python -m sphinx ${extra_args[@]+"${extra_args[@]}"} -b linkcheck "${source_doc_dir}" "${build_doc_dir}"
+ execute_pythonpath python -m sphinx ${extra_args[@]+"${extra_args[@]}"} -b doctest "${source_doc_dir}" "${build_doc_dir}"
+ else
+ log "Skipping ${FUNCNAME[0]} as unchanged since ${rev}."
+ fi
+}
+
+
+# Extract the full version string (major.minor.patch plus pre/post/dev tags)
+# from the VERSION file in the source directory.
+function file_version {
+ local -r version_text="$(cat "${source_dir}/VERSION")"
+
+ # Print the value following the given VERSION_* key, if present.
+ function find_version {
+ local -r version="${1}"
+ local -r regex="${version}[[:space:]]+(\.?[[:alnum:]]+)"
+ if [[ "${version_text}" =~ $regex ]]; then
+ echo "${BASH_REMATCH[1]}"
+ fi
+ }
+
+ local -r file_major="$(find_version 'VERSION_MAJOR')"
+ local -r file_minor="$(find_version 'VERSION_MINOR')"
+ local -r file_patch="$(find_version 'VERSION_PATCH')"
+ local -r file_pre="$(find_version 'VERSION_PRE')"
+ local -r file_post="$(find_version 'VERSION_POST')"
+ local -r file_dev="$(find_version 'VERSION_DEV')"
+ # major/minor/patch are mandatory (:?); pre/post/dev may be empty.
+ local version="${file_major:?}.${file_minor:?}.${file_patch:?}"
+ version+="${file_pre}${file_post}${file_dev}"
+ echo "${version}"
+}
+
+
+# Check that a string is version and print it without the leading 'v'.
+# Without an argument, uses the tag pointing exactly at HEAD (if any).
+# NOTE(review): ${candidate} is used as the printf FORMAT string; an input
+# containing '%' would be misinterpreted — printf '%s' "${candidate}" is safer.
+function is_version {
+ local -r candidate="${1-"$(git describe --tags --exact-match 2> /dev/null)"}"
+ ( printf "${candidate}" | grep -E '^v?[0-9]+\.[0-9]+\.[0-9]+((a|b|rc)[0-9]+)?(\.post[0-9]+)?(\.dev[0-9]+)?$' | sed 's/^v//' ) || return 1
+}
+
+
+# Sort newline-separated version strings from stdin in ascending order,
+# printing them space-separated on one line.
+# NOTE(review): pkg_resources is deprecated in recent setuptools — consider
+# packaging.version.parse instead.
+function sort_versions {
+ local -r sort_versions=(
+ 'import sys, pkg_resources;'
+ 'lines = [pkg_resources.parse_version(l) for l in sys.stdin.readlines()];'
+ 'versions = sorted(lines);'
+ 'print(" ".join(str(v) for v in versions));'
+ )
+ python -c "${sort_versions[*]}"
+}
+
+
+# Print the latest version tag that is an ancestor of the given rev (default HEAD).
+function git_version {
+ local -r rev="${1-HEAD}"
+ # All possible git tags.
+ mapfile -t all_tags < <(git tag)
+ # Find tags that are ancestor of rev and match a version.
+ local prev_versions
+ local tag
+ for tag in "${all_tags[@]}"; do
+ if git merge-base --is-ancestor "${tag}" "${rev}"; then
+ if version=$(is_version "${tag}"); then
+ prev_versions+=("${version}")
+ fi
+ fi
+ done
+ # Sort using proper version comparison.
+ mapfile -t sorted_versions < <(echo "${prev_versions[@]}" | xargs -n1 | sort_versions | xargs -n1)
+ # Take the latest version.
+ local -r latest_version="${sorted_versions[${#sorted_versions[@]}-1]}"
+ echo "${latest_version}"
+}
+
+
+# Test that the git version matches the version in the source code.
+function test_version {
+ # Without args, use the latest ancestor version tag from git
+ if [ -z "${1+x}" ]; then
+ local -r version="$(git_version)"
+ # Otherwise validate and normalize the given argument
+ else
+ local -r version=$(is_version "${1}")
+ fi
+ [ "$(file_version)" = "${version}" ]
+}
+
+
+# These differential checks are the ones used in CI; for per-commit checks,
+# install the pre-commit hooks with `pre-commit install`.
+function check_code {
+ if_rebuild_then cmake_build ecole-lib-version
+ local extra_args=("$@")
+ if [ "${diff}" = "true" ]; then
+ extra_args+=("--from-ref" "${rev}" "--to-ref" "HEAD")
+ else
+ extra_args+=("--all-files")
+ fi
+ execute pre-commit run "${extra_args[@]}"
+}
+
+
+# Install libecole in the given folder (default: ${build_dir}/local).
+function install_lib {
+ if_rebuild_then cmake_build ecole-lib
+ execute cmake --install "${cmake_build_dir}" --prefix "${1-${build_dir}/local}" "${@:2}"
+}
+
+
+# Test the installation of libecole with the cmake example.
+function test_example_libecole {
+ local -r install_dir="${1-${build_dir}/local}"
+ if_rebuild_then install_lib "${install_dir}"
+ # Locate the installed CMake package configuration directory for Ecole.
+ local -r ecole_dir="$(find "${install_dir}" -name "EcoleConfig.cmake" | head -1 | xargs dirname | xargs realpath)"
+ local -r example_build_dir="${build_dir}/examples"
+ execute cmake -B "${example_build_dir}" -S "${source_dir}/examples/libecole" -D Ecole_DIR="${ecole_dir}"
+ execute cmake --build "${example_build_dir}"
+ execute "${example_build_dir}/branching"
+}
+
+
+# Test the configuring example with easy parameters
+# (papermill executes the notebook with small sizes so the test stays fast).
+function test_example_configuring {
+ if_rebuild_then build_py
+ local -r in_nb="${source_dir}/examples/configuring-bandits/example.ipynb"
+ local -r out_nb="${build_dir}/examples/configuring-bandits/example.ipynb"
+ execute mkdir -p "$(dirname "${out_nb}")"
+ execute_pythonpath python -m papermill.cli --no-progress-bar "${in_nb}" "${out_nb}" \
+ -p train_n_items 100 -p train_n_bids 100 \
+ -p optim_n_iters 2 -p optim_n_burnins 1 \
+ -p test_n_evals 2 -p test_n_items 100 -p test_n_bids 100 \
+ "$@"
+}
+
+# Test the branching example with easy parameters
+# (papermill executes the notebook with small sizes so the test stays fast).
+function test_example_branching {
+ if_rebuild_then build_py
+ local -r in_nb="${source_dir}/examples/branching-imitation/example.ipynb"
+ local -r out_nb="${build_dir}/examples/branching-imitation/example.ipynb"
+ execute mkdir -p "$(dirname "${out_nb}")"
+ execute_pythonpath python -m papermill.cli --no-progress-bar "${in_nb}" "${out_nb}" \
+ -p DATA_MAX_SAMPLES 3 -p NB_EPOCHS 2 -p NB_EVAL_INSTANCES 2 "$@"
+}
+
+
+# Install documentation to a local folder depending on the branch/tag
+# FIXME the Github Action could be moved here to a deploy_doc function
+# FIXME this is not used in Github Action for now
+function deploy_doc_locally {
+ # Try getting from exact tag.
+ local -r tag=$(cd "${source_dir}" && git describe --tags --exact-match HEAD 2> /dev/null)
+ local -r branch="$(cd "${source_dir}" && git rev-parse --abbrev-ref HEAD)"
+
+ local -r install_dir="${1}"
+ if_rebuild_then build_doc
+
+ # Install master to latest
+ # NOTE(review): ${branch} is unquoted and used as the printf FORMAT string;
+ # a branch name containing '%' or spaces would misbehave.
+ if printf ${branch} | grep -E '(master|main)' &> /dev/null; then
+ local -r dir="${install_dir}/latest"
+ # Only create the parent so that source dir is not created in target
+ execute mkdir -p "$(dirname "${dir}")"
+ execute rm -rf "${dir}"
+ execute cp -R "${build_doc_dir}/" "${dir}"
+ fi
+
+ # Install versions to v.x.x
+ if version=$(is_version "${tag}"); then
+ local -r version_major_minor="$(printf "${tag}" | grep -E -o '[0-9]+\.[0-9]+')"
+ local -r dir="${install_dir}/v${version_major_minor}"
+ # Only create the parent so that source dir is not created in target
+ execute mkdir -p "$(dirname "${dir}")"
+ execute rm -rf "${dir}"
+ execute cp -R "${build_doc_dir}/" "${dir}"
+ fi
+
+ # Install stable when the deployed version is the latest released version.
+ if [[ ! -z "${dir-}" && "$(git_version origin/master)" = "${version-false}" ]]; then
+ execute ln -s -f "${dir}" "${install_dir}/stable"
+ fi
+}
+
+
+# Build Python source distribution and wheel (from the sdist).
+# Extra arguments after the output directory are forwarded to `python -m build`.
+# FIXME wheel is missing MacOS version.
+function build_dist {
+ local -r dist_dir="${1:-"${build_dir}/dist"}"
+ execute python -m build --outdir="${dist_dir}" "${@:2}"
+}
+
+
+# Install wheel into a virtual environment.
+# Installs the built sdist into a fresh venv and runs `ecole.doctor` as a smoke test.
+function test_dist {
+ local -r dist_dir="${build_dir}/dist"
+ if_rebuild_then build_dist "${dist_dir}"
+ local -r venv="${build_dir}/venv"
+ execute python -m venv --upgrade-deps "${venv}"
+ # FIXME should install wheel but it is missing MacOS version
+ local -r sdist=("${dist_dir}"/ecole-*.tar.gz)
+ execute "${venv}/bin/python" -m pip install --ignore-installed "${sdist[0]}"
+ execute "${venv}/bin/python" -m ecole.doctor
+}
+
+
+# Deploy sdist to PyPI. Set TWINE_USERNAME and TWINE_PASSWORD environment variables or pass them as arguments.
+function deploy_sdist {
+ local -r dist_dir="${build_dir}/dist"
+ if_rebuild_then build_dist "${dist_dir}" --sdist
+ # NOTE(review): when warnings_as_errors is "false", strict is empty and the
+ # quoted "${strict}" below passes an empty argument to twine — confirm twine
+ # accepts it.
+ local -r strict="$([ "${warnings_as_errors}" = "true" ] && echo -n '--strict')"
+ local -r sdists=("${dist_dir}"/ecole-*.tar.gz)
+ execute python -m twine check "${strict}" "${sdists[@]}"
+ execute python -m twine upload --non-interactive "$@" "${sdists[@]}"
+}
+
+
+# The usage of this script. Defaults shown reflect the current settings,
+# so this must be called after the option variables are defined (see run_main).
+function help {
+ echo "${BASH_SOURCE[0]} [--options...] [...] [-- [...]]..."
+ echo ""
+ echo "Options:"
+ echo " --dry-run|--no-dry-run (${dry_run})"
+ echo " --source-dir= (${source_dir})"
+ echo " --build-dir= (${build_dir})"
+ echo " --cmake-build-dir= (${cmake_build_dir})"
+ echo " --source-doc-dir= (${source_doc_dir})"
+ echo " --build-doc-dir= (${build_doc_dir})"
+ echo " --warnings-as-errors|--no-warnings-as-errors (${warnings_as_errors})"
+ echo " --cmake-warnings|--no-cmake-warnings (${cmake_warnings})"
+ echo " --fail-fast|--no-fail-fast (${fail_fast})"
+ echo " --fix-pythonpath|--no-fix-pythonpath (${fix_pythonpath})"
+ echo " --rebuild|--no-rebuild (${rebuild})"
+ echo " --diff|--no-diff (${diff})"
+ echo " --rev= (${rev})"
+ echo ""
+ echo "Commands:"
+ echo " help, configure,"
+ echo " build-lib, build-lib-test, build-py, build-doc, build-all"
+ echo " test-lib, test-py, test-doc, test-version,"
+ echo " test-example-libecole, test-example-configuring, test-all"
+ echo " check-code"
+ echo " build-dist, test-dist, deploy-sdist"
+ echo ""
+ echo "Example:"
+ echo " ${BASH_SOURCE[0]} --warnings-as-errors configure -D ECOLE_DEVELOPER=ON -- test-lib -- test-py --no-slow"
+}
+
+
+# Update variable if it exists or throw an error.
+function set_option {
+ local -r key="${1}"
+ local -r val="${2}"
+ # If variable referenced in key is not set throw error
+ if [ -z "${!key+x}" ]; then
+ echo "Invalid option ${key}." 1>&2
+ return 1
+ # Otherwise update its value
+ else
+ printf -v "${key}" "%s" "${val}"
+ fi
+}
+
+
+# Parse command line parameters into variables.
+#
+# Parsing is done as follows. The output variables must be previously defined to avoid errors.
+# --some-key=val -> some_key="val"
+# --some-key -> some_key="true"
+# --no-some-key -> some_key="false"
+# As soon as one of this case does not match, all the remaining parameters are put unchanged in
+# a `positional` array (created in the caller's scope).
+function parse_argv {
+ while [[ $# -gt 0 ]]; do
+ local arg="${1}"
+ case "${arg}" in
+ --*=*)
+ local key="${arg%=*}"
+ local key="${key#--}"
+ local key="${key//-/_}"
+ set_option "${key}" "${arg#*=}"
+ shift
+ ;;
+ --no-*)
+ local key="${arg#--no-}"
+ local key="${key//-/_}"
+ set_option "${key}" "false"
+ shift
+ ;;
+ --*)
+ local key="${arg#--}"
+ local key="${key//-/_}"
+ set_option "${key}" "true"
+ shift
+ ;;
+ *)
+ positional=("$@")
+ return 0
+ ;;
+ esac
+ done
+}
+
+
+# Parse the positional arguments and run the commands
+# configure -D ECOLE_DEVELOPER=ON -- test-lib -- test-py --pdb
+# Will execute
+# configure -D ECOLE_DEVELOPER=ON
+# test_lib
+# test_py --pdb
+function parse_and_run_commands {
+ if [ $# = 0 ]; then
+ return 0
+ fi
+ local -r args=("$@") # Copy into a named array so the slice syntax below can be used.
+ # First item in -- separated list is the name of the function where we replace - by _.
+ local last_cmd_idx=0
+ local func="${args[$last_cmd_idx]//-/_}"
+
+ for idx in ${!args[@]}; do
+ # -- is the delimiter that ends the parameters for the current function
+ if [ "${args[$idx]}" = "--" ]; then
+ # Run current function with its args
+ ${func} "${args[@]:$last_cmd_idx+1:$idx-$last_cmd_idx-1}"
+ # Next function starts at the position after --
+ last_cmd_idx=$(($idx + 1))
+ func="${args[$last_cmd_idx]//-/_}"
+ fi
+ done
+ # Run the last function that does not terminate with a -- separator
+ ${func} "${args[@]:$last_cmd_idx+1}"
+}
+
+
+# Entry point: define default settings, parse options, then run the commands.
+function run_main {
+ # Only print the commands that would be executed.
+ local dry_run="false"
+ # Where the top-level CMakeLists.txt is.
+ local source_dir="${__ECOLE_DIR__:?}"
+ # A top level folder for all build artifacts
+ local build_dir="build"
+ # Where is the CMake build folder with the test.
+ local cmake_build_dir="${build_dir}/cmake"
+ # Where to find sphinx conf.py.
+ local source_doc_dir="${__ECOLE_DIR__}/docs"
+ # Where to output the doc.
+ local build_doc_dir="${build_dir}/docs/html"
+ # Fail if there are warnings.
+ local warnings_as_errors="${__CI__}"
+ # Warning for CMake itself (not compiler).
+ local cmake_warnings="${__CI__}"
+ # Stop on first failure
+ local fail_fast="${__CI__}"
+ # Add build tree to PYTHONPATH.
+ local fix_pythonpath="$([ "${__CI__}" = "true" ] && printf "false" || printf "true")"
+ # Automatically rebuild libraries for tests and doc.
+ local rebuild="true"
+ # Test only if relevant differences have been made since the revision branch
+ local rev="origin/master"
+ local diff="$([ "${__CI__}" = "true" ] && printf "false" || printf "true")"
+
+ # Parse all command line arguments.
+ parse_argv "$@"
+
+ # Functions to execute are positional arguments with - replaced by _.
+ # NOTE(review): if no positional argument is given, `positional` is never set
+ # and this expansion errors under `set -o nounset` on bash < 4.4 — confirm.
+ parse_and_run_commands "${positional[@]}"
+}
+
+
+# Run the main when script is not being sourced
+# (sourcing the file makes the individual functions available for reuse).
+if [[ "${BASH_SOURCE[0]}" = "${0}" ]] ; then
+
+ # Fail fast
+ set -o errexit
+ set -o pipefail
+ set -o nounset
+
+ run_main "$@"
+
+fi
diff --git a/ecole/dev/singularity.def b/ecole/dev/singularity.def
new file mode 100644
index 0000000..bd2e8bb
--- /dev/null
+++ b/ecole/dev/singularity.def
@@ -0,0 +1,26 @@
+Bootstrap: docker
+From: continuumio/miniconda3
+
+
+%help
+ This image provides a complete development environment for Ecole.
+ Use as `singularity run ...` as `singularity shell ...` will not initialize conda properly.
+
+%files
+ conda.yaml
+
+%post
+ /opt/conda/bin/conda update --name base --channel defaults conda
+ /opt/conda/bin/conda create --name ecole --channel conda-forge cxx-compiler
+ /opt/conda/bin/conda env update --name ecole --file conda.yaml
+ /opt/conda/bin/conda clean --all
+ rm conda.yaml
+
+ # Singularity does all the environment sourcing as shell (only latter calls bash),
+ # which conda does not support.
+ # We put the content in a file, manually call bash, and source it.
+ echo "source /opt/conda/etc/profile.d/conda.sh" >> /conda_init.sh
+ echo "conda activate ecole" >> /conda_init.sh
+
+%runscript
+ exec /bin/bash --rcfile /conda_init.sh "$@"
diff --git a/ecole/docs/_static/css/custom.css b/ecole/docs/_static/css/custom.css
new file mode 100644
index 0000000..69d5a83
--- /dev/null
+++ b/ecole/docs/_static/css/custom.css
@@ -0,0 +1,170 @@
+@import url("theme.css");
+
+/* Style the top search bar and logo, or top bar on mobile */
+.wy-side-nav-search, .wy-nav-top {
+ background-color: #F58A1F;
+}
+
+.wy-side-nav-search input[type="text"] {
+ border-color: #D97A1B;
+}
+
+.wy-side-nav-search > a img.logo, .wy-side-nav-search .wy-dropdown > a img.logo {
+ height: 10em;
+}
+
+/* Style the table of contents captions in the sidebar */
+.wy-menu-vertical header, .wy-menu-vertical p.caption {
+ color: #77D1F6;
+}
+
+/* Style warning and notes */
+.wy-alert.wy-alert-warning,
+.rst-content .wy-alert-warning.note,
+.rst-content .attention,
+.rst-content .caution,
+.rst-content .wy-alert-warning.danger,
+.rst-content .wy-alert-warning.error,
+.rst-content .wy-alert-warning.hint,
+.rst-content .wy-alert-warning.important,
+.rst-content .wy-alert-warning.tip,
+.rst-content .warning,
+.rst-content .wy-alert-warning.seealso,
+.rst-content .admonition-todo,
+.rst-content .wy-alert-warning.admonition {
+ background: #fef2e7;
+}
+
+.wy-alert.wy-alert-warning .wy-alert-title,
+.rst-content .wy-alert-warning.note .wy-alert-title,
+.rst-content .attention .wy-alert-title,
+.rst-content .caution .wy-alert-title,
+.rst-content .wy-alert-warning.danger .wy-alert-title,
+.rst-content .wy-alert-warning.error .wy-alert-title,
+.rst-content .wy-alert-warning.hint .wy-alert-title,
+.rst-content .wy-alert-warning.important .wy-alert-title,
+.rst-content .wy-alert-warning.tip .wy-alert-title,
+.rst-content .warning .wy-alert-title,
+.rst-content .wy-alert-warning.seealso .wy-alert-title,
+.rst-content .admonition-todo .wy-alert-title,
+.rst-content .wy-alert-warning.admonition .wy-alert-title,
+.wy-alert.wy-alert-warning .rst-content .admonition-title,
+.rst-content .wy-alert.wy-alert-warning .admonition-title,
+.rst-content .wy-alert-warning.note .admonition-title,
+.rst-content .attention .admonition-title,
+.rst-content .caution .admonition-title,
+.rst-content .wy-alert-warning.danger .admonition-title,
+.rst-content .wy-alert-warning.error .admonition-title,
+.rst-content .wy-alert-warning.hint .admonition-title,
+.rst-content .wy-alert-warning.important .admonition-title,
+.rst-content .wy-alert-warning.tip .admonition-title,
+.rst-content .warning .admonition-title,
+.rst-content .wy-alert-warning.seealso .admonition-title,
+.rst-content .admonition-todo .admonition-title,
+.rst-content .wy-alert-warning.admonition .admonition-title {
+ background: #f8a254;
+}
+
+.wy-alert.wy-alert-info,
+.rst-content .note,
+.rst-content .wy-alert-info.attention,
+.rst-content .wy-alert-info.caution,
+.rst-content .wy-alert-info.danger,
+.rst-content .wy-alert-info.error,
+.rst-content .wy-alert-info.hint,
+.rst-content .wy-alert-info.important,
+.rst-content .wy-alert-info.tip,
+.rst-content .wy-alert-info.warning,
+.rst-content .seealso,
+.rst-content .wy-alert-info.admonition-todo,
+.rst-content .wy-alert-info.admonition {
+ background: #e7f7fd;
+}
+
+.wy-alert.wy-alert-info .wy-alert-title,
+.rst-content .note .wy-alert-title,
+.rst-content .wy-alert-info.attention .wy-alert-title,
+.rst-content .wy-alert-info.caution .wy-alert-title,
+.rst-content .wy-alert-info.danger .wy-alert-title,
+.rst-content .wy-alert-info.error .wy-alert-title,
+.rst-content .wy-alert-info.hint .wy-alert-title,
+.rst-content .wy-alert-info.important .wy-alert-title,
+.rst-content .wy-alert-info.tip .wy-alert-title,
+.rst-content .wy-alert-info.warning .wy-alert-title,
+.rst-content .seealso .wy-alert-title,
+.rst-content .wy-alert-info.admonition-todo .wy-alert-title,
+.rst-content .wy-alert-info.admonition .wy-alert-title,
+.wy-alert.wy-alert-info .rst-content .admonition-title,
+.rst-content .wy-alert.wy-alert-info .admonition-title,
+.rst-content .note .admonition-title,
+.rst-content .wy-alert-info.attention .admonition-title,
+.rst-content .wy-alert-info.caution .admonition-title,
+.rst-content .wy-alert-info.danger .admonition-title,
+.rst-content .wy-alert-info.error .admonition-title,
+.rst-content .wy-alert-info.hint .admonition-title,
+.rst-content .wy-alert-info.important .admonition-title,
+.rst-content .wy-alert-info.tip .admonition-title,
+.rst-content .wy-alert-info.warning .admonition-title,
+.rst-content .seealso .admonition-title,
+.rst-content .wy-alert-info.admonition-todo .admonition-title,
+.rst-content .wy-alert-info.admonition .admonition-title {
+ background: #58c6f4;
+}
+
+/* Override Pygment style */
+.highlight { /* Match side bar */
+ background: rgb(52, 49, 49) !important;
+ color: rgb(217, 217, 217) !important;
+}
+
+.highlight .k {
+ color: #77D1F6 !important;
+}
+
+/* CSS to fix Mathjax equation numbers displaying above.
+ *
+ * Credit to @hagenw https://github.com/readthedocs/sphinx_rtd_theme/pull/383
+ */
+div.math {
+ position: relative;
+ padding-right: 2.5em;
+}
+.eqno {
+ height: 100%;
+ position: absolute;
+ right: 0;
+ padding-left: 5px;
+ padding-bottom: 5px;
+ /* Fix for mouse over in Firefox */
+ padding-right: 1px;
+}
+.eqno:before {
+ /* Force vertical alignment of number */
+ display: inline-block;
+ height: 100%;
+ vertical-align: middle;
+ content: "";
+}
+.eqno .headerlink {
+ display: none;
+ visibility: hidden;
+ font-size: 14px;
+}
+.eqno:hover .headerlink {
+ display: inline-block;
+ visibility: hidden;
+}
+.eqno .headerlink:after {
+ display: inline-block;
+ visibility: visible;
+ content: "\f0c1";
+ font-family: FontAwesome;
+ margin-left: -.6em;
+}
+
+/* Make responsive */
+.MathJax_Display {
+ max-width: 100%;
+ overflow-x: auto;
+ overflow-y: hidden;
+}
diff --git a/ecole/docs/_static/favicon.ico b/ecole/docs/_static/favicon.ico
new file mode 100644
index 0000000..0442d9f
Binary files /dev/null and b/ecole/docs/_static/favicon.ico differ
diff --git a/ecole/docs/_static/images/ecole-logo-bare.png b/ecole/docs/_static/images/ecole-logo-bare.png
new file mode 100644
index 0000000..157a01e
Binary files /dev/null and b/ecole/docs/_static/images/ecole-logo-bare.png differ
diff --git a/ecole/docs/_static/images/ecole-logo.svg b/ecole/docs/_static/images/ecole-logo.svg
new file mode 100644
index 0000000..cb4ec43
--- /dev/null
+++ b/ecole/docs/_static/images/ecole-logo.svg
@@ -0,0 +1,909 @@
+
+
diff --git a/ecole/docs/_templates/layout.html b/ecole/docs/_templates/layout.html
new file mode 100644
index 0000000..74b8643
--- /dev/null
+++ b/ecole/docs/_templates/layout.html
@@ -0,0 +1,8 @@
+{% extends '!layout.html' %}
+
+{% block extrahead %}
+
+
+{% endblock %}
diff --git a/ecole/docs/conf.py b/ecole/docs/conf.py
new file mode 100644
index 0000000..3f842be
--- /dev/null
+++ b/ecole/docs/conf.py
@@ -0,0 +1,111 @@
+from typing import List, Tuple
+import pathlib
+import re
+
+
+CURRENT_FILE = pathlib.Path(__file__).resolve()
+CURRENT_DIR = CURRENT_FILE.parent
+PROJECT_DIR = CURRENT_DIR.parent
+
+
+# Return one author name per line of the AUTHORS file, stripped of whitespace.
+def read_authors(file: pathlib.Path) -> List[str]:
+ with open(file) as f:
+ return [l.strip() for l in f.readlines()]
+
+
+# Extract (major, minor, patch) from the VERSION file.
+# NOTE(review): the annotation promises ints but re.search(...).group(1)
+# returns strings; also the patterns should be raw strings
+# (r"VERSION_MAJOR (\d+)") to avoid invalid-escape-sequence warnings.
+def read_version(file: pathlib.Path) -> Tuple[int, int, int]:
+ with open(file) as f:
+ text = f.read()
+ major = re.search("VERSION_MAJOR (\d+)", text).group(1)
+ minor = re.search("VERSION_MINOR (\d+)", text).group(1)
+ patch = re.search("VERSION_PATCH (\d+)", text).group(1)
+ return major, minor, patch
+
+
+project = "Ecole"
+author = ", ".join(read_authors(PROJECT_DIR / "AUTHORS"))
+copyright = author
+version_major, version_minor, version_patch = read_version(PROJECT_DIR / "VERSION")
+version = f"{version_major}.{version_minor}"
+release = f"{version_major}.{version_minor}.{version_patch}"
+
+extensions = []
+
+# Show [source] link to source code
+extensions += ["sphinx.ext.viewcode"]
+
+# Test code sample in documentation
+extensions += ["sphinx.ext.doctest"]
+# Patching ecole.scip.Model.from_file and write_problem globally to be able to put fake paths
+# Also try import pyscipopt for disable test if it is not available
+doctest_global_setup = """
+import unittest.mock
+import ecole
+
+_generator = ecole.instance.SetCoverGenerator(n_rows=100, n_cols=200)
+_read_patcher = unittest.mock.patch("ecole.core.scip.Model.from_file", side_effect=_generator)
+_read_patcher.start()
+_write_patcher = unittest.mock.patch("ecole.core.scip.Model.write_problem")
+_write_patcher.start()
+
+try:
+ import pyscipopt
+except ImportError:
+ pyscipopt = None
+"""
+
+# Math setting
+extensions += ["sphinx.ext.mathjax"]
+
+# Code style
+pygments_style = "monokai"
+
+# Theme
+extensions += ["sphinx_rtd_theme"]
+html_theme = "sphinx_rtd_theme"
+html_context = {
+ "display_github": True,
+ "github_user": "ds4dm",
+ "github_repo": "ecole",
+ "github_version": "master", # For the edit on Github link
+ "conf_py_path": "/docs/", # For the edit on Github link
+}
+html_theme_options = {
+ "logo_only": True,
+}
+html_logo = "_static/images/ecole-logo-bare.png"
+html_favicon = "_static/favicon.ico"
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+html_style = "css/custom.css"
+
+# Custom footer
+templates_path = ["_templates"]
+
+# Autodoc to read Python docstrings
+extensions += ["sphinx.ext.autodoc"]
+autodoc_default_options = {
+ "members": True, # Document all members
+ "special-members": "__init__", # Document these dunder methods
+ "undoc-members": True,
+}
+
+# Napoleon write docstrings in Numpy style
+extensions += ["sphinx.ext.napoleon"]
+napoleon_google_docstring = False
+napoleon_numpy_docstring = True
+
+
+# Preprocess docstring to remove "core" from type name.
+# Handler for the autodoc "autodoc-process-signature" event: strips the
+# internal ".core" submodule from rendered signatures and return annotations.
+def preprocess_signature(app, what, name, obj, options, signature, return_annotation):
+ if signature is not None:
+ signature = signature.replace(".core", "")
+ if return_annotation is not None:
+ return_annotation = return_annotation.replace(".core", "")
+ return signature, return_annotation
+
+
+# Sphinx extension entry point: register the signature preprocessor.
+def setup(app):
+ app.connect("autodoc-process-signature", preprocess_signature)
diff --git a/ecole/docs/contributing.rst b/ecole/docs/contributing.rst
new file mode 100644
index 0000000..ede6224
--- /dev/null
+++ b/ecole/docs/contributing.rst
@@ -0,0 +1,255 @@
+.. _contributing-reference:
+
+Contribution Guidelines
+=======================
+
+Thank you for your interest in contributing to Ecole! 🌟
+Contributions take many forms beyond adding new features:
+improving the documentation, reporting and reproducing bugs, discussing the direction of Ecole in
+the discussions, and helping others use Ecole.
+
+
+Contribution acceptance
+-----------------------
+Not all code contributions are relevant for Ecole.
+It does not mean that the idea is not good.
+We try to balance added value with maintenance and complexity of a growing codebase.
+For that reason, it is a good idea to communicate with the developers first to be sure that we agree on
+what should be added/modified.
+
+.. important::
+
+ Be sure to open an issue before sending a pull request.
+
+
+Tour of the codebase
+--------------------
+- ``libecole`` is the Ecole C++ library.
+ Ecole is mostly written in C++ so this is where you will find most features, rewards, observations...
+- ``python`` contains some Python and C++ code to create bindings to Python.
+ Ecole uses `PyBind `_ to create the binding, these are all the C++ files
+ in this directory.
+ Sometimes, you may find Python code as well.
+ This is either because a feature is more naturally implemented in Python, or because we have accepted an early contribution
+ that is not yet ported to C++.
+- ``docs`` is the `Sphinx `_ documentation written in reStructuredText.
+- ``examples`` are practical examples showcasing how to use Ecole for certain tasks.
+
+
+Dependencies with Conda
+-----------------------
+All dependencies required for building Ecole (including SCIP) can be resolved using a
+`conda `_ environment.
+Install everything in a development environment (named ``ecole``) using
+
+.. code-block:: bash
+
+ conda env create -n ecole -f dev/conda.yaml
+
+.. code-block:: bash
+
+ conda activate ecole
+ conda config --append channels conda-forge
+ conda config --set channel_priority flexible
+
+.. note::
+
+ This environment contains tools to build ecole and scip, format code, test,
+ generate documentation etc. These are more than the dependencies to only use Ecole.
+
+
+Development script
+------------------
+To ease the burden of remembering the relation between commands, their default values *etc*., we
+provide a script, ``./dev/run.sh`` to run all commands.
+Contributors are still free to use the script commands manually.
+
+Full usage and options can be found
+
+.. code-block:: bash
+
+ ./dev/run.sh help
+
+
+.. important::
+
+ This script is meant for development and does not optimize Ecole for speed.
+ To install Ecole (including from source) see the :ref:`installation instructions`.
+
+Configure with CMake
+^^^^^^^^^^^^^^^^^^^^
+`CMake `_ is a meta-build tool, used for configuring other build tools
+(*e.g.* Make) or IDE's.
+The whole build of Ecole can be done with CMake.
+A one-time configuration is necessary for CMake to find dependencies, detect system
+information, *etc*.
+CMake is made available in the ``ecole`` environment created earlier.
+For the following, this environment always needs to be activated.
+
+In the Ecole source repository, configure using
+
+.. code-block:: bash
+
+ ./dev/run.sh configure -D ECOLE_DEVELOPER=ON
+
+.. note::
+
+ This is the time to pass optional build options, such as the build type and compiler
+ choice. For instance ``-D CMAKE_BUILD_TYPE=Debug`` can be added to compile with debug
+ information.
+
+The definition ``-D ECOLE_DEVELOPER=ON`` changes the default settings (such as the build
+type, *etc.*) for added convenience.
+Only the default settings are changed, this mode does not override any explicit setting.
+
+Building (Optional)
+^^^^^^^^^^^^^^^^^^^
+
+Ecole can be built with the following commands, although tests will (re)build Ecole automatically.
+
+.. code-block:: bash
+
+ ./dev/run.sh build-lib -- build-py
+
+.. important::
+
+ Be sure to eliminate all warnings. They will be considered as errors in the PR.
+
+Running the tests
+^^^^^^^^^^^^^^^^^
+
+The C++ tests are built with `Catch2 `_.
+
+.. code-block:: bash
+
+ ./dev/run.sh test-lib
+
+Python tests are built with `PyTest `_.
+By default, this will find Ecole inside the development build tree.
+
+.. code-block:: bash
+
+ ./dev/run.sh test-py
+
+
+Documentation
+^^^^^^^^^^^^^
+The documentation is built with `Sphinx `_.
+It reads the docstrings from the Ecole package.
+
+.. code-block:: bash
+
+ ./dev/run.sh build-doc
+
+Additional test on the documentation can be run with
+
+.. code-block:: bash
+
+ ./dev/run.sh test-doc
+
+The generated HTML files are located under ``build/doc/html``.
+In particular, ``build/doc/html/index.html`` can be opened in your browser to visualize the
+documentation.
+
+
+Using the Ecole Python package
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+To use the Ecole Python package generated by the development script with
+
+.. code-block:: bash
+
+ ./dev/run.sh build-py
+
+one can set the `PYTHONPATH `_ environment variable,
+as it is done for running the tests. For instance:
+
+.. code-block:: bash
+
+ PYTHONPATH="${PWD}/build/cmake/python/ecole" python -m IPython
+
+This is useful to debug in `IPython `_ or `Jupyter `_, but anything more
+should rely on an :ref:`installation `.
+
+
+Coding standards
+----------------
+The quality and conventions of the code are enforced automatically with various tools, for instance
+to format the layout of the code and fix some C++ error-prone patterns.
+
+Compilation database
+^^^^^^^^^^^^^^^^^^^^
+Some C++ tools need access to a *compilation database*.
+This is a file called ``compile_commands.json`` that is created automatically by CMake and
+symlinked when configuring with ``./dev/run.sh configure``.
+Otherwise, you would need to manually symlink it to the root of the project.
+
+.. code-block:: bash
+
+ ln -s build/compile_commands.json
+
+.. tip::
+
+ This file is also read by `clangd `_, a C++ language server (already
+ installed in the conda environment).
+ To get code completion, compile errors, go-to-definition and more, you can install a language
+ server protocol plugin for your editor.
+
+Pre-commit
+^^^^^^^^^^
+The tools are configured to run with `pre-commit `_, that is they can be
+added to run automatically when making a commit, pushing, or on demand.
+To have the tools run automatically, install the pre-commit hooks using
+
+.. code-block:: bash
+
+ pre-commit install
+
+The tools are configured to run light tests only on the files that were changed during the commit,
+so they should not run for long.
+Installing the pre-commit hooks to run the tools is recommended.
+Similar tests will be run online and pull requests *will* fail if the tools have not been run.
+
+With ``pre-commit`` hooks, commits will be rejected by ``git`` if the tests run by the tools fail.
+If the tools can fix the issue for you, you will find some modifications that you can add to
+your commit.
+
+Sometimes when working locally, it can be useful not to run the tools.
+You can tell ``git`` to ignore the ``pre-commit`` hooks by passing the ``--no-verify`` to any
+``git`` command making commit, including ``commit``, ``merge``, ``rebase``, ``push``...
+
+.. code-block:: bash
+
+ git commit --no-verify
+
+Pre-commit can also be run manually using
+
+.. code-block:: bash
+
+ ./dev/run.sh check-code
+
+
+Compiler issues
+---------------
+If you encounter problems with your compiler (because it is too old for instance),
+you can use the ones from Anaconda.
+
+.. code-block:: bash
+
+ conda install -c conda-forge cxx-compiler
+
+And start again the configuration of Ecole.
+
+.. code-block:: bash
+
+ rm -rf build/ && ./dev/run.sh configure -D ECOLE_DEVELOPER=ON
+
+
+When things fail
+----------------
+If you cannot eliminate some warnings, code checks, errors, do not hesitate to ask questions in the
+`Github Discussions `_.
+
+.. important::
+
+ When you cannot figure things out, it's OK to send a failing pull request.
+ We wish to grow as a community, and help others improve, not exclude and belittle. 🌈
diff --git a/ecole/docs/developers/example-observation.rst b/ecole/docs/developers/example-observation.rst
new file mode 100644
index 0000000..5b695d7
--- /dev/null
+++ b/ecole/docs/developers/example-observation.rst
@@ -0,0 +1,65 @@
+Example: How to Contribute an Observation Function
+==================================================
+
+To contribute an observation (or reward) function, there are a few files to modify.
+For the purpose of example, let us call our observation `Cookie`.
+We recommend looking, at every step, at other observation functions as examples.
+
+.. note::
+ Be sure to read the :ref:`contribution guidelines ` to figure out how to get started and
+ running the tests.
+
+Create the Observation
+----------------------
+The C++ code is typically separated into `headers `_
+and source files.
+
+Headers are not compiled and should only contain the public
+`declaration `_
+of classes/functions signatures (except for templated code).
+They should ``#include`` the minimal headers to be self contained.
+
+ - Create the header file ``libecole/include/ecole/observation/cookie.hpp``, and add the observation function declaration.
+
+Source files contain the definition of the functions, *i.e.* their implementation.
+
+ - Create the source file ``libecole/src/observation/cookie.cpp``,
+ - Add the inclusion of your header ``#include "ecole/observation/cookie.hpp"``,
+ - Add the definition of your observation function (you can also add helper functions/classes here),
+ - Explicitly add the source file in CMake, in ``libecole/CMakeLists.txt``.
+
+Test Your Code
+--------------
+Tests are not part of a library, so they only need a source file.
+
+ - Create the test file ``libecole/tests/src/observation/test-cookie.cpp``,
+ - Add unit tests to ensure the observation function abides to the required interface,
+ - Add functional tests to ensure the observation function is correct,
+ - Explicitly add the test file in CMake, in ``libecole/tests/CMakeLists.txt``.
+
+
+Bind Code to Python
+-------------------
+To expose the code in Python, we are using `PyBind `_ directly from C++.
+
+ - Edit ``python/src/ecole/core/observation.cpp``, and bind the class using ``py::class_``,
+ - Add the docstring.
+
+.. warning::
+ Due to some discrepancies between C++ and Python, not all bindings are straightforward.
+ More complex types need to be handled on a case-by-case basis.
+
+Test the Bindings
+-----------------
+We need to make sure nothing is forgotten or raises runtime errors when used from Python.
+
+ - Edit ``python/tests/test_observation.py``, test the interface, and the return types.
+
+Reference the Observation in the Documentation
+----------------------------------------------
+Documentation from docstring is automatically read by Sphinx, so we only need to tell it where to display it.
+
+ - Add the observation function in the list in ``docs/reference/observation.rst``.
+
+.. note::
+ Remember to run the tests and code checks before pushing.
diff --git a/ecole/docs/discussion/gym-differences.rst b/ecole/docs/discussion/gym-differences.rst
new file mode 100644
index 0000000..fe20d32
--- /dev/null
+++ b/ecole/docs/discussion/gym-differences.rst
@@ -0,0 +1,88 @@
+Differences with OpenAI Gym
+===========================
+
+Changing reward and observations
+--------------------------------
+Contrarily to `OpenAI Gym `_ where learning tasks are predefined,
+Ecole gives the user the tools to easily extend and customize environments.
+This is because the objective with Ecole is not only to provide a collection of challenges
+for machine learning, but really to solve combinatorial optimization problems more
+efficiently.
+If different data or tweaking the control task delivers better performance, it is an improvement!
+This is why Ecole lets users change the environment reward and observation using
+:py:class:`~ecole.typing.RewardFunction` and :py:class:`~ecole.typing.ObservationFunction`.
+
+Parameter to reset
+------------------
+In OpenAI Gym, ``reset`` does not take parameters, whereas Ecole
+:py:meth:`~ecole.environment.Environment.reset` takes a problem instance as a mandatory
+input.
+This is because when doing machine learning for optimization, there is no practical interest in
+solving the same problem over and over again.
+What is important is that the machine learning model is able to generalize to unseen problems.
+This is typically done by training on multiple problem instances.
+
+This setting is similar to multi-task reinforcement learning, where each problem instance is a task
+and one aims to generalize to unseen tasks.
+An alternative way to implement this is found in `MetaWorld `_,
+where instead of passing the task as a parameter to ``reset``, a supplementary ``set_task`` method
+is defined in the environment.
+
+Done on reset
+-------------
+In Ecole, :py:meth:`~ecole.environment.Environment.reset` returns the same ``done`` flag as
+in :py:meth:`~ecole.environment.Environment.step`.
+This is because nothing prevents an initial state from also being a terminal one.
+It is not only a theoretical consideration: for instance, in :py:class:`~ecole.environment.Branching`,
+the initial state would typically be on the root node, prior to making the first branching decision.
+However, modern solvers have powerful presolvers, and it is not uncommon that the solution to the
+problem is found without needing to branch on any variable.
+
+Action set
+----------
+Ecole defines an action set at every transition of the environment, while OpenAI Gym defines an
+``action_space`` as a static variable of the environment.
+Ecole environments are more complex: for instance in :py:class:`~ecole.environment.Branching`
+the set of valid actions changes, not only with every episode, but also with every transition!
+The ``action_set`` is required to make the next call to
+:py:meth:`~ecole.environment.Environment.step`.
+We chose to add it as a return type to :py:meth:`~ecole.environment.Environment.step` and
+:py:meth:`~ecole.environment.Environment.reset` to emphasize this difference.
+
+Reward offset
+-------------
+In :py:meth:`~ecole.environment.Environment.reset` a ``reward_offset`` is returned.
+This is not only a difference with OpenAI Gym, but also with the MDP formulation.
+Its purpose is not to provide additional input to the learning algorithms, but rather to help
+researchers better benchmark the resulting performance.
+Indeed, :py:class:`~ecole.typing.RewardFunction` are often designed so that their cumulative sum match a
+metric on the terminal state, such as solving time or number of LP iterations: this is because final metrics
+are often all that matter.
+However, for learning, a single reward on the terminal state is hard to learn from.
+It is then divided over all intermediate transitions in the episode.
+
+Rather than providing a different means of evaluating such metrics, we chose to reuse the
+environments to compute the cumulative sum, and therefore need the ``reward_offset`` to exactly
+match the metric.
+
+No observation on terminal states
+---------------------------------
+On terminal states, in OpenAI Gym as in Ecole, no further action can be taken and the environment
+needs to be :py:meth:`~ecole.environment.Environment.reset`. In Ecole, when an episode is over (that is, when
+the ``done`` flag is ``True``), environments always return ``None`` as the observation. This is in contrast with OpenAI Gym,
+where some environments do return observations on terminal states.
+
+
+This can be explained as follows: most of the time, a terminal state in Ecole is a solved problem.
+This means that some complex observations cannot be extracted because they require information that
+simply does not exist.
+For instance, the :py:class:`~ecole.observation.NodeBipartite` observation function extracts some
+information about the LP solution of the current branch-and-bound node.
+When the problem is solved, for example on a terminal state of the
+:py:class:`~ecole.environment.Branching` environment, there might not be a current node, or a linear
+relaxation problem, from which this information can be extracted. For these reasons, one would find a
+``None`` instead of an observation on terminal states.
+
+In any case, one might note that in reinforcement learning, the observation of a terminal state is usually not very useful.
+It is not given to a policy to take the next action (because there are not any), and hence never
+used for learning either, so not returning a final observation has no impact in practice.
diff --git a/ecole/docs/discussion/seeding.rst b/ecole/docs/discussion/seeding.rst
new file mode 100644
index 0000000..9985aae
--- /dev/null
+++ b/ecole/docs/discussion/seeding.rst
@@ -0,0 +1,85 @@
+.. _seeding-discussion:
+
+Seeding
+=======
+Ecole empowers researchers to learn reliable machine learning models, and that means not overfitting
+on insignificant behaviours of the solver.
+One such aspect is the solver randomness, which is controlled by its random seed.
+
+This means that, by default, Ecole environments will generate different episodes (and in
+particular different initial states) after each new call to
+:py:meth:`~ecole.environment.Environment.reset`.
+To do so, the environment keeps a :py:class:`~ecole.RandomGenerator` (random state)
+between episodes, and start a new episode by calling
+:py:meth:`~ecole.typing.Dynamics.set_dynamics_random_state` on the underlying
+:py:class:`~ecole.typing.Dynamics`.
+The latter sets random elements of the state including, but not necessarily limited to, the
+:py:class:`~ecole.scip.Model` random seed, by consuming random numbers from the
+:py:class:`~ecole.RandomGenerator`.
+That way, the :py:class:`~ecole.environment.Environment` can avoid generating identical
+episodes while letting :py:class:`~ecole.typing.Dynamics` decide what random parameters need to
+be set.
+
+The :py:meth:`~ecole.environment.Environment.seed` method is really one of the environment,
+because it seeds the :py:class:`~ecole.RandomGenerator`, not directly the episode for
+the :py:class:`~ecole.typing.Dynamics`.
+
+When not explicitly seeded, :py:class:`~ecole.typing.Environment` use a :py:class:`~ecole.RandomGenerator` derived
+from Ecole's global source of randomness by invoking :py:func:`ecole.spawn_random_generator`.
+By default this source is truly random, but it can be controlled with :py:func:`ecole.seed`.
+
+Similarly, an :py:class:`~ecole.typing.InstanceGenerator` default random generator is derived from Ecole's global source
+of randomness.
+
+As examples, we provide the following snippets.
+
+Reproducible program
+--------------------
+Running this program again will give the same outcome.
+
+.. testcode::
+
+ import ecole
+
+ ecole.seed(754)
+
+ env = ecole.environment.Branching()
+
+ for _ in range(10):
+ observation, action_set, reward_offset, done, info = env.reset("path/to/problem")
+ while not done:
+ obs, action_set, reward, done, info = env.step(action_set[0])
+
+
+Reproducible environments
+-------------------------
+Creating this environment with the same seed anywhere else will give the same outcome.
+
+.. testcode::
+
+ import ecole
+
+ env = ecole.environment.Branching()
+ env.seed(8462)
+
+ for _ in range(10):
+ observation, action_set, reward_offset, done, info = env.reset("path/to/problem")
+ while not done:
+ obs, action_set, reward, done, info = env.step(action_set[0])
+
+
+Reproducible episode
+--------------------
+All episodes run in this snippet are identical.
+
+.. testcode::
+
+ import ecole
+
+ env = ecole.environment.Branching()
+
+ for _ in range(10):
+ env.seed(81)
+ observation, action_set, reward_offset, done, info = env.reset("path/to/problem")
+ while not done:
+ obs, action_set, reward, done, info = env.step(action_set[0])
diff --git a/ecole/docs/discussion/theory.rst b/ecole/docs/discussion/theory.rst
new file mode 100644
index 0000000..cc0eb20
--- /dev/null
+++ b/ecole/docs/discussion/theory.rst
@@ -0,0 +1,208 @@
+.. _theory:
+
+Ecole Theoretical Model
+=======================
+
+The Ecole elements directly correspond to the different elements of
+an episodic `partially-observable Markov decision process `_
+(PO-MDP).
+
+Markov Decision Process
+-----------------------
+Consider a regular Markov decision process
+:math:`(\mathcal{S}, \mathcal{A}, p_\textit{init}, p_\textit{trans}, R)`,
+whose components are
+
+* a state space :math:`\mathcal{S}`
+* an action space :math:`\mathcal{A}`
+* an initial state distribution :math:`p_\textit{init}: \mathcal{S} \to \mathbb{R}_{\geq 0}`
+* a state transition distribution
+ :math:`p_\textit{trans}: \mathcal{S} \times \mathcal{A} \times \mathcal{S} \to \mathbb{R}_{\geq 0}`
+* a reward function :math:`R: \mathcal{S} \to \mathbb{R}`.
+
+.. note::
+
+ Having deterministic rewards :math:`r_t = R(s_t)` is an arbitrary choice
+ here, in order to best fit the Ecole library. It is not restrictive though,
+ as any MDP with stochastic rewards
+ :math:`r_t \sim p_\textit{reward}(r_t|s_{t-1},a_{t-1},s_{t})`
+ can be converted into an equivalent MDP with deterministic ones,
+ by considering the reward as part of the state.
+
+Together with an action policy
+
+.. math::
+
+ \pi: \mathcal{A} \times \mathcal{S} \to \mathbb{R}_{\geq 0}
+
+such that :math:`a_t \sim \pi(a_t|s_t)`, an MDP can be unrolled to produce
+state-action trajectories
+
+.. math::
+
+ \tau=(s_0,a_0,s_1,\dots)
+
+that obey the following joint distribution
+
+.. math::
+
+ \tau \sim \underbrace{p_\textit{init}(s_0)}_{\text{initial state}}
+ \prod_{t=0}^\infty \underbrace{\pi(a_t | s_t)}_{\text{next action}}
+ \underbrace{p_\textit{trans}(s_{t+1} | a_t, s_t)}_{\text{next state}}
+ \text{.}
+
+The MDP Control Problem
+^^^^^^^^^^^^^^^^^^^^^^^
+We define the MDP control problem as that of finding a policy
+:math:`\pi^\star` which is optimal with respect to the expected total
+reward,
+
+.. math::
+ :label: mdp_control
+
+ \pi^\star = \underset{\pi}{\operatorname{arg\,max}}
+ \lim_{T \to \infty} \mathbb{E}_\tau\left[\sum_{t=0}^{T} r_t\right]
+ \text{,}
+
+where :math:`r_t := R(s_t)`.
+
+.. note::
+
+ In the general case this quantity may not be bounded, for example for MDPs
+ corresponding to *continuing* tasks where episode length may be infinite.
+ In Ecole, we guarantee that all environments correspond to *episodic*
+ tasks, that is, each episode is guaranteed to end in a terminal state.
+ This can be modeled by introducing a null state :math:`s_\textit{null}`,
+ such that
+
+ * :math:`s_\textit{null}` is absorbing, i.e., :math:`p_\textit{trans}(s_{t+1}|a_t,s_t=s_\textit{null}) := \delta_{s_\textit{null}}(s_{t+1})`
+ * :math:`s_\textit{null}` yields no reward, i.e., :math:`R(s_\textit{null}) := 0`
+ * a state :math:`s` is terminal :math:`\iff` it transitions
+ into the null state with probability one, i.e., :math:`p_\textit{trans}(s_{t+1}|a_t,s_t=s) := \delta_{s_\textit{null}}(s_{t+1})`
+
+ As such, all actions and states encountered after a terminal state
+ can be safely ignored in the MDP control problem.
+
+Partially-Observable Markov Decision Process
+--------------------------------------------
+In the PO-MDP setting, complete information about the current MDP state
+is not necessarily available to the decision-maker. Instead,
+at each step only a partial observation :math:`o \in \Omega`
+is made available, which can be seen as the result of applying an observation
+function :math:`O: \mathcal{S} \to \Omega` to the current state. As such, a
+PO-MDP consists of a tuple
+:math:`(\mathcal{S}, \mathcal{A}, p_\textit{init}, p_\textit{trans}, R, O)`.
+
+.. note::
+
+ Similarly to having deterministic rewards, having deterministic
+ observations is an arbitrary choice here, but is not restrictive.
+
+As a result, PO-MDP trajectories take the form
+
+.. math::
+
+ \tau=(o_0,r_0,a_0,o_1\dots)
+ \text{,}
+
+where :math:`o_t:= O(s_t)` and :math:`r_t:=R(s_t)` are respectively the
+observation and the reward collected at time step :math:`t`.
+
+Let us now introduce a convenience variable
+:math:`h_t:=(o_0,r_0,a_0,\dots,o_t,r_t)\in\mathcal{H}` that represents the
+PO-MDP history at time step :math:`t`. Due to the non-Markovian nature of
+the trajectories, that is,
+
+.. math::
+
+ o_{t+1},r_{t+1} \mathop{\rlap{\perp}\mkern2mu{\not\perp}} h_{t-1} \mid o_t,r_t,a_t
+ \text{,}
+
+the decision-maker must take into account the whole history of observations,
+rewards and actions in order to decide on an optimal action at current time
+step :math:`t`. PO-MDP policies then take the form
+
+.. math::
+
+ \pi:\mathcal{A} \times \mathcal{H} \to \mathbb{R}_{\geq 0}
+
+such that :math:`a_t \sim \pi(a_t|h_t)`.
+
+The PO-MDP Control Problem
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+The PO-MDP control problem can then be written identically to the MDP one,
+
+.. math::
+ :label: pomdp_control
+
+ \pi^\star = \underset{\pi}{\operatorname{arg\,max}} \lim_{T \to \infty}
+ \mathbb{E}_\tau\left[\sum_{t=0}^{T} r_t\right]
+ \text{.}
+
+Ecole as PO-MDP Elements
+------------------------
+
+The following Ecole elements directly translate into PO-MDP elements from
+the aforementioned formulation:
+
+* :py:class:`~ecole.typing.RewardFunction` <=> :math:`R`
+* :py:class:`~ecole.typing.ObservationFunction` <=> :math:`O`
+* :py:meth:`~ecole.typing.Dynamics.reset_dynamics` <=>
+ :math:`p_\textit{init}(s_0)`
+* :py:meth:`~ecole.typing.Dynamics.step_dynamics` <=>
+ :math:`p_\textit{trans}(s_{t+1}|s_t,a_t)`
+
+The state space :math:`\mathcal{S}` can be considered to be the whole computer
+memory occupied by the environment, which includes the state of the underlying
+SCIP solver instance. The action space :math:`\mathcal{A}` is specific to each
+environment.
+
+.. note::
+
+ In practice, both :py:class:`~ecole.typing.RewardFunction` and
+ :py:class:`~ecole.typing.ObservationFunction` are implemented as stateful
+ classes, and therefore should be considered part of the MDP state
+ :math:`s`. This *extended* state is not meant to take part in the MDP
+ dynamics per se, but nonetheless has to be considered as the actual
+ PO-MDP state, in order to allow for a strict interpretation of Ecole
+ environments as PO-MDPs.
+
+The :py:class:`~ecole.environment.Environment` class wraps all of
+those components together to form the actual PO-MDP. Its API can be
+interpreted as follows:
+
+* :py:meth:`~ecole.environment.Environment.reset` <=>
+ :math:`s_0 \sim p_\textit{init}(s_0), r_0=R(s_0), o_0=O(s_0)`
+* :py:meth:`~ecole.environment.Environment.step` <=>
+ :math:`s_{t+1} \sim p_\textit{trans}(s_{t+1}|a_t,s_t), r_t=R(s_t), o_t=O(s_t)`
+* ``done == True`` <=> the current state :math:`s_{t}` is terminal. As such,
+ the episode ends now.
+
+.. note::
+
+ In Ecole we allow environments to optionally specify a set of valid
+ actions at each time step :math:`t`. To this end, both the
+ :py:meth:`~ecole.environment.Environment.reset` and
+ :py:meth:`~ecole.environment.Environment.step` methods return
+ the valid ``action_set`` for the next transition, in addition to the
+ current observation and reward. This action set is optional, and
+ environments in which the action set is implicit may simply return
+ ``action_set == None``.
+
+Implementation of both the PO-MDP policy :math:`\pi(a_t|h_t)` and a method
+to solve the resulting control problem :eq:`pomdp_control` is left to the
+user.
+
+.. note::
+
+ As can be seen from :eq:`mdp_control` and :eq:`pomdp_control`, the initial
+ reward :math:`r_0` returned by
+ :py:meth:`~ecole.environment.Environment.reset`
+ does not affect the control problem. In Ecole we
+ nevertheless chose to preserve this initial reward, in order to obtain
+ meaningful cumulated episode rewards, such as the total running time
+ (which must include the time spent in
+ :py:meth:`~ecole.environment.Environment.reset`), or the total
+ number of branch-and-bound nodes in a
+ :py:class:`~ecole.environment.Branching` environment (which must include
+ the root node).
diff --git a/ecole/docs/howto/create-environments.rst b/ecole/docs/howto/create-environments.rst
new file mode 100644
index 0000000..5377ae1
--- /dev/null
+++ b/ecole/docs/howto/create-environments.rst
@@ -0,0 +1,230 @@
+.. _create-new-environment:
+
+Create New Environments
+=======================
+
+Environment Structure
+---------------------
+In Ecole, it is possible to customize the :ref:`reward` or
+:ref:`observation` returned by the environment. These components are structured in
+:py:class:`~ecole.typing.RewardFunction` and :py:class:`~ecole.typing.ObservationFunction` classes that are
+independent from the rest of the environment. We call what is left, that is, the environment without rewards
+or observations, the environment's :py:class:`~ecole.typing.Dynamics`.
+In other words, the dynamics define the bare bone transitions of the Markov Decision Process.
+
+Dynamics have an interface similar to environments, but with different input parameters and return
+types.
+In fact environments are wrappers around dynamics classes that drive the following orchestration:
+
+* Environments store the state as a :py:class:`~ecole.scip.Model`;
+* Then, they forward the :py:class:`~ecole.scip.Model` to the :py:class:`~ecole.typing.Dynamics` to start a new
+ episode or transition to receive an action set;
+* Next, they forward the :py:class:`~ecole.scip.Model` to the :py:class:`~ecole.typing.RewardFunction` and
+ :py:class:`~ecole.typing.ObservationFunction` to receive an observation and reward;
+* Finally, they return everything to the user.
+
+One substantial difference between the environment and the dynamics is the seeding behavior.
+Given that this is not an easy topic, it is discussed in :ref:`seeding-discussion`.
+
+Creating Dynamics
+-----------------
+
+Reset and Step
+^^^^^^^^^^^^^^
+Creating dynamics is very similar to :ref:`creating reward and observation functions`.
+It can be done from scratch or by inheriting an existing one.
+The following examples show how we can inherit a :py:class:`~ecole.dynamics.BranchingDynamics` class to
+deactivate cutting planes and presolving in SCIP.
+
+.. note::
+
+ One can also more directly deactivate SCIP parameters through the
+ :ref:`environment constructor`.
+
+Given that there is a large number of parameters to change, we want to use one of SCIP default's modes
+by calling ``SCIPsetPresolving`` and ``SCIPsetSeparating`` through PyScipOpt
+(`SCIP doc `_).
+
+We will do so by overriding :py:meth:`~ecole.dynamics.BranchingDynamics.reset_dynamics`, which
+gets called by :py:meth:`~ecole.environment.Environment.reset`.
+The similar method :py:meth:`~ecole.dynamics.BranchingDynamics.step_dynamics`, which is called
+by :py:meth:`~ecole.environment.Environment.step`, does not need to be changed in this
+example, so we do not override it.
+
+.. testcode::
+ :skipif: pyscipopt is None
+
+ import ecole
+ from pyscipopt.scip import PY_SCIP_PARAMSETTING
+
+
+ class SimpleBranchingDynamics(ecole.dynamics.BranchingDynamics):
+ def reset_dynamics(self, model):
+ # Share memory with Ecole model
+ pyscipopt_model = model.as_pyscipopt()
+
+ pyscipopt_model.setPresolve(PY_SCIP_PARAMSETTING.OFF)
+ pyscipopt_model.setSeparating(PY_SCIP_PARAMSETTING.OFF)
+
+ # Let the parent class get the model at the root node and return
+ # the done flag / action_set
+ return super().reset_dynamics(model)
+
+
+With our ``SimpleBranchingDynamics`` class we have defined what we want the solver to do.
+Now, to use it as a full environment that can manage observations and rewards, we wrap it in an
+:py:class:`~ecole.environment.Environment`.
+
+
+.. testcode::
+ :skipif: pyscipopt is None
+
+ class SimpleBranching(ecole.environment.Environment):
+ __Dynamics__ = SimpleBranchingDynamics
+
+
+The resulting ``SimpleBranching`` class is then an environment as valid as any other in Ecole.
+
+Passing parameters
+^^^^^^^^^^^^^^^^^^
+We can make the previous example more flexible by deciding what we want to disable.
+To do so, we will take parameters in the constructor.
+
+.. testcode::
+ :skipif: pyscipopt is None
+
+ class SimpleBranchingDynamics(ecole.dynamics.BranchingDynamics):
+ def __init__(self, disable_presolve=True, disable_cuts=True, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.disable_presolve = disable_presolve
+ self.disable_cuts = disable_cuts
+
+ def reset_dynamics(self, model):
+ # Share memory with Ecole model
+ pyscipopt_model = model.as_pyscipopt()
+
+ if self.disable_presolve:
+ pyscipopt_model.setPresolve(PY_SCIP_PARAMSETTING.OFF)
+ if self.disable_cuts:
+ pyscipopt_model.setSeparating(PY_SCIP_PARAMSETTING.OFF)
+
+ # Let the parent class get the model at the root node and return
+ # the done flag / action_set
+ return super().reset_dynamics(model)
+
+
+ class SimpleBranching(ecole.environment.Environment):
+ __Dynamics__ = SimpleBranchingDynamics
+
+
+The constructor arguments are forwarded from the :py:meth:`~ecole.environment.Environment.__init__` constructor:
+
+.. testcode::
+ :skipif: pyscipopt is None
+
+ env = SimpleBranching(observation_function=None, disable_cuts=False)
+
+Similarly, extra arguments given to the environment :py:meth:`~ecole.environment.Environment.reset` and
+:py:meth:`~ecole.environment.Environment.step` are forwarded to the associated
+:py:class:`~ecole.typing.Dynamics` methods.
+
+Using Control Inversion
+-----------------------
+When using a traditional SCIP callback, the user has to add the callback to SCIP, call ``SCIPsolve``, and wait for the
+solving process to terminate.
+We say that *SCIP has the control*.
+This has some downsides, such as having to forward all the data the agent will use to the callback, making it harder to
+stop the solving process, and reducing interactivity.
+For instance when using a callback in a notebook, if the user forgot to fetch some data, then they have to re-execute
+the whole solving process.
+
+On the contrary, when using an Ecole environment such as :py:class:`~ecole.environment.Branching`, the environment
+pauses on every branch-and-bound node (*i.e.* every branchrule callback call) to let the user make a decision,
+or inspect the :py:class:`~ecole.scip.Model`.
+We say that the *user (or the agent) has the control*.
+To do so, we did not reconstruct the solving algorithm ``SCIPsolve`` to fit our needs.
+Rather, we have implemented a general *inversion of control* mechanism to let SCIP pause and be resumed on every
+callback call (using a form of *stackful coroutine*).
+We call this approach *iterative solving* and it runs exactly the same ``SCIPsolve`` algorithm, without noticeable
+overhead, while perfectly forwarding all information available in the callback.
+
+To use this tool, the user starts by calling :py:meth:`ecole.scip.Model.solve_iter`, with a set of callback
+constructor arguments.
+Iterative solving will then add these callbacks, start solving, and return the first time that one of these callbacks
+is executed.
+The return value describes where the solving has stopped, and the parameters of the callback where it has stopped.
+This is the time for the user to perform whichever action they would have done in the callback.
+Solving can be resumed by calling :py:meth:`ecole.scip.Model.solve_iter_continue` with the
+:py:class:`ecole.scip.callback.Result` that would have been set in the callback.
+Solving is finished when one of the iterative solving functions returns ``None``.
+The :py:class:`ecole.scip.Model` can safely be deleted at any time (SCIP termination is handled automatically).
+
+For instance, iteratively solving an environment while pausing on branchrule and heuristic callbacks looks like the
+following.
+
+.. testcode::
+
+ model = ecole.scip.Model.from_file("path/to/file")
+
+ # Start solving until the first pause, if any.
+ fcall = model.solve_iter(
+ # Stop on branchrule callback.
+ ecole.scip.callback.BranchruleConstructor(),
+ # Stop on heuristic callback after node.
+ ecole.scip.callback.HeuristicConstructor(timing_mask=ecole.scip.HeurTiming.AfterNode),
+ )
+ # While solving is not finished, `fcall` contains information about the current stop.
+ while fcall is not None:
+ # Solving stopped on a branchrule callback.
+ if isinstance(fcall, ecole.scip.callback.BranchruleCall):
+ # Perform some branching (through PyScipOpt).
+ ...
+ # Resume solving until next pause.
+ fcall = model.solve_iter_continue(ecole.scip.callback.Result.Branched)
+        # Solving stopped on a heuristic callback.
+ elif isinstance(fcall, ecole.scip.callback.HeuristicCall):
+ # Return as no heuristic was performed (only data collection)
+ fcall = model.solve_iter_continue(ecole.scip.callback.Result.DidNotRun)
+
+See :py:class:`~ecole.scip.callback.BranchruleConstructor`, :py:class:`~ecole.scip.callback.HeuristicConstructor` for
+callback constructor parameters, as well as :py:class:`~ecole.scip.callback.BranchruleCall` and
+:py:class:`~ecole.scip.callback.HeuristicCall` for callback function parameters passed by SCIP to the callback
+methods.
+
+.. note::
+
+   By default, callback parameters such as ``priority``, ``frequency``, and ``max_depth`` that control how and when
+   the callbacks are evaluated by SCIP are set to run as often as possible.
+   However, it is entirely possible to run them with lower priority or frequency to create specific environments or
+   for whatever other purpose.
+
+To create dynamics using iterative solving, one should call :py:meth:`ecole.scip.Model.solve_iter` in
+:py:meth:`~ecole.typing.Dynamics.reset_dynamics` and :py:meth:`ecole.scip.Model.solve_iter_continue` in
+:py:meth:`~ecole.typing.Dynamics.step_dynamics`.
+For instance, a branching environment could be created with the following dynamics.
+
+.. testcode::
+ :skipif: pyscipopt is None
+
+ class MyBranchingDynamics:
+ def __init__(self, pseudo_candidates=False, max_depth=ecole.scip.callback.max_depth_none):
+ self.pseudo_candidates = pseudo_candidates
+ self.max_depth = max_depth
+
+ def action_set(self, model):
+ if self.pseudo_candidates:
+ return model.as_pyscipopt().getPseudoBranchCands()
+ else:
+ return model.as_pyscipopt().getLPBranchCands()
+
+ def reset_dynamics(self, model):
+ fcall = model.solve_iter(
+ ecole.scip.callback.BranchruleConstructor(max_depth=self.max_depth)
+ )
+ return (fcall is None), self.action_set(model)
+
+ def step_dynamics(self, model, action):
+ model.as_pyscipopt().branchVar(action)
+ fcall = model.solve_iter_continue(ecole.scip.callback.Result.Branched)
+ return (fcall is None), self.action_set(model)
diff --git a/ecole/docs/howto/create-functions.rst b/ecole/docs/howto/create-functions.rst
new file mode 100644
index 0000000..f161ef0
--- /dev/null
+++ b/ecole/docs/howto/create-functions.rst
@@ -0,0 +1,160 @@
+.. _create-new-functions:
+
+Create New Functions
+====================
+
+:py:class:`~ecole.typing.ObservationFunction` and :py:class:`~ecole.typing.RewardFunction` functions
+can be adapted and created from Python.
+
+At the core of the environment, a SCIP :py:class:`~ecole.scip.Model` (equivalent abstraction to a
+``pyscipopt.Model`` or a ``SCIP*`` in ``C``), describes the state of the environment.
+The idea of observation and reward functions is to have a function that takes as input a
+:py:class:`~ecole.scip.Model`, and returns the desired value (an observation, or a reward).
+The environment itself does nothing more than calling the functions and forward their output to the
+user.
+
+Practically speaking, it is more convenient to implement such functions as a class than a function,
+as it makes it easier to keep information between states.
+
+Extending a Function
+--------------------
+To reuse a function, Python inheritance can be used. For example, the method in an observation function called
+to extract the features from the model is called :py:meth:`~ecole.typing.ObservationFunction.extract`.
+In the following example, we will extend the :py:class:`~ecole.observation.NodeBipartite` observation function by
+overloading its :py:meth:`~ecole.typing.ObservationFunction.extract` function to scale the features by their
+maximum absolute value.
+
+.. testcode::
+
+ import numpy as np
+ from ecole.observation import NodeBipartite
+
+
+ class ScaledNodeBipartite(NodeBipartite):
+ def extract(self, model, done):
+ # Call parent method to get the original observation
+ obs = super().extract(model, done)
+ # Apply scaling
+ column_max_abs = np.abs(obs.column_features).max(0)
+ obs.column_features[:] /= column_max_abs
+ row_max_abs = np.abs(obs.row_features).max(0)
+ obs.row_features[:] /= row_max_abs
+ # Return the updated observation
+ return obs
+
+By using inheritance, we used :py:class:`~ecole.observation.NodeBipartite`'s own :py:meth:`~ecole.typing.ObservationFunction.extract`
+to do the heavy lifting, only appending the additional scaling code.
+The resulting ``ScaledNodeBipartite`` class is a perfectly valid observation function that can be given to an
+environment.
+
+As an additional example, instead of scaling by the maximum absolute value one might want to use a scaling factor smoothed by
+`exponential moving averaging `_, with some coefficient α.
+This will illustrate how the class paradigm is useful for saving information between states.
+
+.. testcode::
+
+ class MovingScaledNodeBipartite(NodeBipartite):
+ def __init__(self, alpha, *args, **kwargs):
+ # Construct parent class with other parameters
+ super().__init__(*args, **kwargs)
+ self.alpha = alpha
+
+ def before_reset(self, model):
+ super().before_reset(model)
+ # Reset the exponential moving average (ema) on new episodes
+ self.column_ema = None
+ self.row_ema = None
+
+ def extract(self, model, done):
+ obs = super().extract(model, done)
+
+ # Compute the max absolute vector for the current observation
+ column_max_abs = np.abs(obs.column_features).max(0)
+ row_max_abs = np.abs(obs.row_features).max(0)
+
+ if self.column_ema is None:
+ # New exponential moving average on a new episode
+ self.column_ema = column_max_abs
+ self.row_ema = row_max_abs
+ else:
+ # Update the exponential moving average
+                self.column_ema = self.alpha * column_max_abs + (1 - self.alpha) * self.column_ema
+                self.row_ema = self.alpha * row_max_abs + (1 - self.alpha) * self.row_ema
+
+ # Scale features and return the new observation
+ obs.column_features[:] /= self.column_ema
+ obs.row_features[:] /= self.row_ema
+ return obs
+
+Here, you can notice how we used the constructor to customize the coefficient of the
+exponential moving average.
+Note also that we inherited the :py:meth:`~ecole.typing.ObservationFunction.before_reset` method which does not
+return anything: this method is called at the beginning of the episode by
+:py:meth:`~ecole.environment.Environment.reset` and is used to reinitialize the class
+internal attribute on new episodes.
+Finally, the :py:meth:`~ecole.typing.ObservationFunction.extract` is also called during
+:py:meth:`~ecole.environment.Environment.reset`, hence the ``if``/``else`` condition.
+Both these methods call the parent method to let it do its own initialization/resetting.
+
+.. warning::
+
+   The scaling shown in this example is a naive implementation meant to showcase the use of
+   observation functions.
+ For proper scaling functions consider `Scikit-Learn Scalers
+ `_
+
+
+Writing a Function from Scratch
+-------------------------------
+The :py:class:`~ecole.typing.ObservationFunction` and :py:class:`~ecole.typing.RewardFunction` classes don't do
+anything more than what is explained in the previous section.
+This means that to create a new function in Python, one can simply create a class with the previous
+methods.
+
+For instance, we can create a ``StochasticReward`` function that will wrap any given
+:py:class:`~ecole.typing.RewardFunction`, and with some probability return either the given reward or
+0.
+
+.. testcode::
+
+ import random
+
+
+ class StochasticReward:
+ def __init__(self, reward_function, probability=0.05):
+ self.reward_function = reward_function
+ self.probability = probability
+
+ def before_reset(self, model):
+ self.reward_function.before_reset(model)
+
+ def extract(self, model, done):
+            # Unconditionally getting reward as reward_function.extract may have side effects
+ reward = self.reward_function.extract(model, done)
+            if random.random() < self.probability:
+ return 0.0
+ else:
+ return reward
+
+The resulting class is a perfectly valid reward function which can be used in any environment, for example as follows.
+
+.. doctest::
+
+   >> stochastic_lpiterations = StochasticReward(-ecole.reward.LpIterations(), probability=0.1)
+   >> env = ecole.environment.Branching(reward_function=stochastic_lpiterations)
+
+
+Using PySCIPOpt
+---------------
+The extraction functions described on this page, by definition, aim to extract information from the solver about the state
+of the process. An excellent reason to create or extend a reward function is to access information not provided by the
+default functions in Ecole. To do so in Python, one might want to use `PyScipOpt `_,
+the official Python interface to SCIP.
+
+In ``PySCIPOpt``, the state of the SCIP solver is stored in a ``pyscipopt.Model`` object. This is closely related to,
+but not quite the same as, Ecole's :py:class:`~ecole.scip.Model` class. For a number of reasons (such as C++ compatibility),
+the two classes don't coincide. However, for ease of use, it is possible to convert back and forth without any copy.
+
+Using :py:meth:`ecole.scip.Model.as_pyscipopt`, one can get a ``pyscipopt.Model`` that shares its
+internal data with :py:class:`ecole.scip.Model`. Conversely, given a ``pyscipopt.Model``, it is possible to create a :py:class:`ecole.scip.Model`
+using the static method :py:meth:`ecole.scip.Model.from_pyscipopt`.
diff --git a/ecole/docs/howto/instances.rst b/ecole/docs/howto/instances.rst
new file mode 100644
index 0000000..75f3d33
--- /dev/null
+++ b/ecole/docs/howto/instances.rst
@@ -0,0 +1,171 @@
+.. _generate-instances:
+
+Generate Problem Instances
+==========================
+
+Ecole contains a number of combinatorial optimization instance generators in the ``ecole.instance`` module. The various
+:py:class:`~ecole.typing.InstanceGenerator` classes generate instances as :py:class:`ecole.scip.Model` objects.
+
+To use those classes to generate instances, you first instantiate a generator object from the desired class. The various
+generator classes take problem-specific hyperparameters as constructor arguments, which can be used to control the type
+of instances being generated. The resulting :py:class:`~ecole.typing.InstanceGenerator` objects are infinite `Python
+iterators `_, which can then be iterated over to yield as many instances as desired.
+
+For instance, to generate `set covering problems `_, one would use
+:py:class:`~ecole.instance.SetCoverGenerator` in the following fashion.
+
+.. testcode::
+
+ from ecole.instance import SetCoverGenerator
+
+
+ generator = SetCoverGenerator(n_rows=100, n_cols=200, density=0.1)
+
+ for i in range(50):
+ instance = next(generator)
+
+ # Do anything with the ecole.scip.Model
+        instance.write_problem(f"some-folder/set-cover-{i:04}.lp")
+
+
+Note how we are iterating over a ``range(50)`` and calling ``next`` on the generator, as iterating directly over
+the iterator would produce an infinite loop. Another simple syntax would be to use `islice `_
+from the standard Python library.
+
+
+Generator Random States
+-----------------------
+Internally, an :py:class:`~ecole.typing.InstanceGenerator` holds a random state, which gets updated after generating an instance.
+This state can be reset using the :py:meth:`~ecole.typing.InstanceGenerator.seed` method of the generator.
+
+.. testcode::
+
+ generator_a = SetCoverGenerator(n_rows=100, n_cols=200, density=0.1)
+ generator_b = SetCoverGenerator(n_rows=100, n_cols=200, density=0.1)
+
+ # These are not the same instance
+ instance_a = next(generator_a)
+ instance_b = next(generator_b)
+
+ generator_a.seed(809)
+ generator_b.seed(809)
+
+ # These are exactly the same instances
+ instance_a = next(generator_a)
+ instance_b = next(generator_b)
+
+
+With an Environment
+-------------------
+The instance objects generated by :py:class:`~ecole.typing.InstanceGenerator`s,
+of type :py:class:`ecole.scip.Model`, can be passed directly to an environment's
+:py:meth:`~ecole.environment.Environment.reset` method.
+
+A typical example training over 1000 instances/episodes would look like:
+
+.. testcode::
+
+ import ecole
+
+
+ env = ecole.environment.Branching()
+ gen = ecole.instance.SetCoverGenerator(n_rows=100, n_cols=200)
+
+ for _ in range(1000):
+ observation, action_set, reward_offset, done, info = env.reset(next(gen))
+ while not done:
+ observation, action_set, reward, done, info = env.step(action_set[0])
+
+.. note::
+ The generated instance objects can be, in principle, modified between their generation and their usage in an environment
+ :py:meth:`~ecole.environment.Environment.reset` method. To keep code clean, however, we recommend that such modifications
+ be wrapped in a custom environment class. Details about custom environments :ref:`can be found here`.
+
+
+Extending Instance Generators
+-----------------------------
+In various use cases, the provided :py:class:`~ecole.typing.InstanceGenerator` are too limited. Thankfully, it is easy to extend
+the provided generators in various ways. This section presents a few common patterns.
+
+Combining Multiple Generators
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+To learn over multiple problem types, one can build a generator that, for every instance to generate, chooses
+a problem type at random, and returns it.
+
+.. testcode::
+
+ import random
+
+
+ def CombineGenerators(*generators):
+ # A random state for choice
+ rng = random.Random()
+ while True:
+ # Randomly pick a generator
+ gen = rng.choice(generators)
+ # And yield the instance it generates
+ yield next(gen)
+
+
+Note that this is not quite a fully-fledged instance generator, as it is missing a way to set the seed. A more complete instance generator
+could be written as follows.
+
+.. testcode::
+
+ class CombinedGenerator:
+ def __init__(self, *generators):
+ self.generators = generators
+ self.rng = random.Random()
+
+ def __next__(self):
+ return next(self.rng.choice(self.generators))
+
+ def __iter__(self):
+ return self
+
+ def seed(self, val):
+ self.rng.seed(val)
+ for gen in self.generators:
+ gen.seed(val)
+
+Generator with Random Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+The provided instance generators have fixed hyperparameters, but to increase variability it might be desirable to randomly vary them as well.
+
+This can be done without creating various :py:class:`~ecole.typing.InstanceGenerator` objects by using a generator's
+:py:meth:`~ecole.typing.InstanceGenerator.generate_instance` static method, and manually passing a :py:class:`~ecole.RandomGenerator`.
+For instance, to randomly choose the ``n_cols`` and ``n_rows`` parameters from
+:py:class:`~ecole.instance.SetCoverGenerator`, one could use
+
+.. testcode::
+
+ import random
+ import ecole
+
+
+ class VariableSizeSetCoverGenerator:
+ def __init__(self, n_cols_range, n_rows_range):
+ self.n_cols_range = n_cols_range
+ self.n_rows_range = n_rows_range
+ # A Python random state for randint
+ self.py_rng = random.Random()
+ # An Ecole random state to pass to generating functions
+ # This function returns a random state whose seed depends on Ecole global random state
+ self.ecole_rng = ecole.spawn_random_generator()
+
+ def __next__(self):
+            return ecole.instance.SetCoverGenerator.generate_instance(
+ n_cols=self.py_rng.randint(*self.n_cols_range),
+ n_rows=self.py_rng.randint(*self.n_rows_range),
+ rng=self.ecole_rng,
+ )
+
+ def __iter__(self):
+ return self
+
+ def seed(self, val):
+ self.py_rng.seed(val)
+ self.ecole_rng.seed(val)
+
+
+See :ref:`the discussion on seeding` for an explanation of :py:func:`ecole.spawn_random_generator`.
diff --git a/ecole/docs/howto/observation-functions.rst b/ecole/docs/howto/observation-functions.rst
new file mode 100644
index 0000000..c9c6fa1
--- /dev/null
+++ b/ecole/docs/howto/observation-functions.rst
@@ -0,0 +1,92 @@
+.. _use-observation-functions:
+
+Use Observation Functions
+=========================
+
+Using any environment, the observation [#observation]_ received by the user to take the
+next action can be customized by changing the :py:class:`~ecole.typing.ObservationFunction` used by the solver.
+The environment is not extracting data directly but delegates that responsibility to an
+:py:class:`~ecole.typing.ObservationFunction` object.
+The object has complete access to the solver and extracts the data it needs.
+
+Specifying an observation function is as easy as specifying a parameter when
+creating an environment.
+For instance with the :py:class:`~ecole.environment.Branching` environment:
+
+.. doctest::
+
+ >>> env = ecole.environment.Branching(observation_function=ecole.observation.Nothing())
+ >>> env.observation_function # doctest: +SKIP
+ ecole.observation.Nothing()
+ >>> obs, _, _, _, _ = env.reset("path/to/problem")
+ >>> obs is None
+ True
+
+Environments have an observation function set as default parameter for convenience.
+
+.. doctest::
+
+ >>> env = ecole.environment.Branching()
+ >>> env.observation_function # doctest: +SKIP
+ ecole.observation.NodeBipartite()
+ >>> obs, _, _, _, _ = env.reset("path/to/problem")
+ >>> obs # doctest: +SKIP
+ ecole.observation.NodeBipartiteObs(...)
+
+.. TODO Use an observation function that is more intuitive than Nothing
+.. TODO Adapt the output to the actual __repr__ and remove #doctest: +SKIP
+
+
+See :ref:`the reference` for the list of available observation functions,
+as well as :ref:`the documentation` for explanation on how to create one.
+
+
+No Observation Function
+-----------------------
+To not use any observation function, for instance for learning with a bandit algorithm,
+you can explicitly pass ``None`` to the environment constructor.
+
+.. doctest::
+
+ >>> env = ecole.environment.Branching(observation_function=None)
+ >>> env.observation_function # doctest: +SKIP
+ ecole.observation.nothing()
+ >>> obs, _, _, _, _ = env.reset("path/to/problem")
+ >>> obs is None
+ True
+
+.. TODO Adapt the output to the actual __repr__ and remove #doctest: +SKIP
+
+Multiple Observation Functions
+------------------------------
+To use multiple observation functions, wrap them in a ``list`` or ``dict``.
+
+.. doctest::
+
+ >>> obs_func = {
+ ... "some_name": ecole.observation.NodeBipartite(),
+ ... "other_name": ecole.observation.Nothing(),
+ ... }
+ >>> env = ecole.environment.Branching(observation_function=obs_func)
+ >>> obs, _, _, _, _ = env.reset("path/to/problem")
+ >>> obs # doctest: +SKIP
+ {'some_name': ecole.observation.NodeBipartiteObs(), 'other_name': None}
+
+.. TODO Adapt the output to the actual __repr__ and remove #doctest: +SKIP
+
+Similarly, with a tuple:
+
+.. doctest::
+
+ >>> obs_func = (ecole.observation.NodeBipartite(), ecole.observation.Nothing())
+ >>> env = ecole.environment.Branching(observation_function=obs_func)
+ >>> obs, _, _, _, _ = env.reset("path/to/problem")
+ >>> obs # doctest: +SKIP
+ [ecole.observation.NodeBipartiteObs(), None]
+
+.. TODO Use an observation function that is more intuitive than Nothing
+.. TODO Adapt the output to the actual __repr__ and remove #doctest: +SKIP
+
+.. [#observation] We use the term *observation* rather than state since the state
+   is really the whole state of the solver, which is inaccessible. Thus, mathematically,
+ we really have a Partially Observable Markov Decision Process.
diff --git a/ecole/docs/howto/reward-functions.rst b/ecole/docs/howto/reward-functions.rst
new file mode 100644
index 0000000..a6feb41
--- /dev/null
+++ b/ecole/docs/howto/reward-functions.rst
@@ -0,0 +1,120 @@
+.. _use-reward-functions:
+
+Use Reward Functions
+====================
+
+Similarly to :ref:`observation functions `, the reward received by
+the user for learning can be customized by changing the :py:class:`~ecole.typing.RewardFunction` used by the
+solver.
+In fact, the mechanism of reward functions is very similar to that of observation
+functions: environments do not compute the reward directly but delegate that
+responsibility to a :py:class:`~ecole.typing.RewardFunction` object.
+The object has complete access to the solver and extracts the data it needs.
+
+Specifying a reward function is performed by passing the :py:class:`~ecole.typing.RewardFunction` object to
+the ``reward_function`` environment parameter.
+For instance, specifying a reward function with the :py:class:`~ecole.environment.Configuring` environment
+looks as follows:
+
+.. doctest::
+
+ >>> env = ecole.environment.Configuring(reward_function=ecole.reward.LpIterations())
+ >>> env.reward_function # doctest: +SKIP
+ ecole.reward.LpIterations()
+ >>> env.reset("path/to/problem") # doctest: +ELLIPSIS
+ (..., ..., 0.0, ..., ...)
+ >>> env.step({}) # doctest: +SKIP
+ (..., ..., 45.0, ..., ...)
+
+Environments also have a default reward function, which will be used if the user does not specify any.
+
+.. doctest::
+
+ >>> env = ecole.environment.Configuring()
+ >>> env.reward_function # doctest: +SKIP
+ ecole.reward.IsDone()
+
+.. TODO Adapt the output to the actual __repr__ and remove #doctest: +SKIP
+
+See :ref:`the reference` for the list of available reward functions,
+as well as :ref:`the documentation` for explanations on how to create one.
+
+
+Arithmetic on Reward Functions
+------------------------------
+Reinforcement learning in combinatorial optimization solving is an active area of research, and
+there is at this point little consensus on reward functions to use. In recognition of that fact,
+reward functions have been explicitely designed in Ecole to be easily combined with Python arithmetic.
+
+For instance, one might want to minimize the number of LP iterations used throughout the solving process.
+To achieve this using a standard reinforcement learning algorithm, one might use the negative
+number of LP iterations between two steps as a reward: this can be achieved by negating the
+:py:class:`~ecole.reward.LpIterations` function.
+
+.. doctest::
+
+ >>> env = ecole.environment.Configuring(reward_function=-ecole.reward.LpIterations())
+ >>> env.reset("path/to/problem") # doctest: +ELLIPSIS
+ (..., ..., -0.0, ..., ...)
+ >>> env.step({}) # doctest: +SKIP
+ (..., ..., -45.0, ..., ...)
+
+More generally, any operation, such as
+
+.. testcode::
+
+ from ecole.reward import LpIterations
+
+ -3.5 * LpIterations() ** 2.1 + 4.4
+
+is valid.
+
+Note that this is a full reward *function* object that can be given to an environment:
+it is equivalent to doing the following.
+
+.. doctest::
+
+ >>> env = ecole.environment.Configuring(reward_function=ecole.reward.LpIterations())
+ >>> env.reset("path/to/problem") # doctest: +ELLIPSIS
+ (..., ..., ..., ..., ...)
+ >>> _, _, lp_iter_reward, _, _ = env.step({})
+ >>> reward = -3.5 * lp_iter_reward ** 2.1 + 4.4
+
+Arithmetic operations are even allowed between different reward functions,
+
+.. testcode::
+
+ from ecole.reward import LpIterations, IsDone
+
+ 4.0 * LpIterations() ** 2 - 3 * IsDone()
+
+which is especially powerful because normally it would *not* be possible to pass both
+:py:class:`~ecole.reward.LpIterations` and :py:class:`~ecole.reward.IsDone` to the
+environment.
+
+All operations that are valid between scalars are valid between reward functions.
+
+.. testcode::
+
+ -IsDone() ** abs(LpIterations() // 4)
+
+In addition, not all commonly used mathematical operations have a dedicated Python operator: to
+accommodate this, Ecole implements a number of other operations as methods of reward functions.
+For instance, to get the exponential of :py:class:`~ecole.reward.LpIterations`, one can use
+
+.. testcode::
+
+ LpIterations().exp()
+
+This also works with reward functions created from arithmetic expressions.
+
+.. testcode::
+
+ (3 - 2 * LpIterations()).exp()
+
+Finally, reward functions have an ``apply`` method to compose rewards with any
+function.
+
+.. testcode::
+
+ LpIterations().apply(lambda reward: math.factorial(round(reward)))
diff --git a/ecole/docs/images/mdp.png b/ecole/docs/images/mdp.png
new file mode 100644
index 0000000..770f409
Binary files /dev/null and b/ecole/docs/images/mdp.png differ
diff --git a/ecole/docs/index.rst b/ecole/docs/index.rst
new file mode 100644
index 0000000..563fe35
--- /dev/null
+++ b/ecole/docs/index.rst
@@ -0,0 +1,106 @@
+Introduction
+============
+
+Ecole is a library of *Extensible Combinatorial Optimization Learning Environments*
+designed to ease the development of machine learning approaches for
+combinatorial optimization. More precisely, the goal of Ecole is to allow for a fast
+and safe prototyping of any ML for CO approach that can be formulated as a control
+problem (*i.e.*, a Markov Decision Process), as well as providing reproducible benchmarking protocols
+for comparison to existing approaches.
+
+.. testcode::
+
+ import ecole
+
+ env = ecole.environment.Branching(
+ reward_function=-1.5 * ecole.reward.LpIterations() ** 2,
+ observation_function=ecole.observation.NodeBipartite(),
+ )
+ instances = ecole.instance.SetCoverGenerator(n_rows=100, n_cols=200)
+
+ for _ in range(10):
+ observation, action_set, reward_offset, done, info = env.reset(next(instances))
+ while not done:
+ observation, action_set, reward, done, info = env.step(action_set[0])
+
+
+Combinatorial optimization solvers typically rely on a plethora of handcrafted expert heuristics,
+which can fail to exploit subtle statistical similarities between problem instances.
+`Machine Learning `_ algorithms offer
+a promising approach for replacing those heuristics, by learning data-driven policies that automatically
+account for such statistical relationships, and thereby creating a new kind of highly adaptive solvers.
+
+For instance, many combinatorial optimization problems can be modeled using `Mixed Integer
+Linear Programming `_ and solved using
+the `branch-and-bound `_ algorithm.
+Despite its simplicity, the algorithm requires many non-trivial decisions, such as iteratively
+picking the next variable to branch on. Ecole aims at exposing these algorithmic control problems with a
+standard reinforcement learning API (agent / environment loop), in order to ease the exploration
+of new machine learning models and algorithms for learning data-driven policies.
+
+Ecole's interface is inspired from `OpenAI Gym `_ and will look
+familiar to reinforcement learning practitioners.
+The state-of-the-art Mixed Integer Linear Programming solver that acts as a controllable
+algorithm inside Ecole is `SCIP `_.
+
+The reader is referred to [Bengio2020]_ for motivation on why machine learning is a promising
+candidate to use for combinatorial optimization, as well as the methodology to do so.
+
+.. [Bengio2020]
+ Bengio, Yoshua, Andrea Lodi, and Antoine Prouvost.
+ "`Machine learning for combinatorial optimization: a methodological tour d'horizon.
+ `_"
+ *European Journal of Operational Research*. 2020.
+
+
+.. toctree::
+ :caption: Getting started
+ :hidden:
+
+ self
+ installation
+ using-environments
+
+.. toctree::
+ :caption: How to
+ :hidden:
+
+ howto/observation-functions.rst
+ howto/reward-functions.rst
+ howto/create-functions.rst
+ howto/create-environments.rst
+ howto/instances.rst
+
+.. toctree::
+ :caption: Practical Tutorials
+ :hidden:
+
+ Configuring the Solver with Bandits
+ Branching with Imitation Learning
+
+.. toctree::
+ :caption: Reference
+ :hidden:
+
+ reference/environments.rst
+ reference/observations.rst
+ reference/rewards.rst
+ reference/information.rst
+ reference/scip-interface.rst
+ reference/instances.rst
+ reference/utilities.rst
+
+.. toctree::
+ :caption: Discussion
+ :hidden:
+
+ discussion/gym-differences.rst
+ discussion/seeding.rst
+ discussion/theory.rst
+
+.. toctree::
+ :caption: Developer Zone
+ :hidden:
+
+ contributing.rst
+ developers/example-observation.rst
diff --git a/ecole/docs/installation.rst b/ecole/docs/installation.rst
new file mode 100644
index 0000000..4bdbcc5
--- /dev/null
+++ b/ecole/docs/installation.rst
@@ -0,0 +1,71 @@
+.. _installation:
+
+Installation
+============
+
+Conda
+-----
+.. image:: https://img.shields.io/conda/vn/conda-forge/ecole?label=version&logo=conda-forge
+ :alt: Conda-Forge version
+.. image:: https://img.shields.io/conda/pn/conda-forge/ecole?logo=conda-forge
+ :alt: Conda-Forge platforms
+
+.. code-block:: bash
+
+ conda install -c conda-forge ecole
+
+All dependencies are resolved by conda, no compiler is required.
+
+`PyScipOpt `_ is not required but is the main SCIP
+interface to develop new Ecole components from Python.
+
+.. code-block:: bash
+
+ conda install -c conda-forge ecole pyscipopt
+
+Currently, conda packages are only available for Linux and MacOS.
+
+Pip wheel (binary)
+------------------
+Currently unavailable.
+
+Pip source
+-----------
+.. image:: https://img.shields.io/pypi/v/ecole?logo=python
+ :target: https://pypi.org/project/ecole/
+ :alt: PyPI version
+
+Building from source requires:
+ - A `C++17 compiler `_,
+ - A `SCIP `_ installation.
+
+For the stable `PyPI version `_:
+
+.. code-block:: bash
+
+ python -m pip install ecole
+
+To specify where to find SCIP (or any CMake parameters):
+
+.. code-block:: bash
+
+ CMAKE_ARGS="-DSCIP_DIR=path/to/lib/cmake/scip -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON" python -m pip install ecole
+
+For the latest Github version:
+
+.. code-block:: bash
+
+ python -m pip install git+https://github.com/ds4dm/ecole
+
+Or if the latest version is already available locally:
+
+.. code-block:: bash
+
+ python -m pip install .
+
+If all dependencies (build time such as CMake and compiler, and run time such as NumPy) are already installed,
+as is the case when developing Ecole, one can install Ecole with:
+
+.. code-block:: bash
+
+ python -m pip install --no-deps --no-build-isolation [ecole | git+https://github.com/ds4dm/ecole | .]
diff --git a/ecole/docs/reference/environments.rst b/ecole/docs/reference/environments.rst
new file mode 100644
index 0000000..3fa6416
--- /dev/null
+++ b/ecole/docs/reference/environments.rst
@@ -0,0 +1,27 @@
+Environments
+============
+
+Interface
+---------
+.. autoclass:: ecole.environment.Environment
+
+Protocol
+--------
+.. autoclass:: ecole.typing.Dynamics
+
+Listing
+-------
+Branching
+^^^^^^^^^
+.. autoclass:: ecole.environment.Branching
+.. autoclass:: ecole.dynamics.BranchingDynamics
+
+Configuring
+^^^^^^^^^^^
+.. autoclass:: ecole.environment.Configuring
+.. autoclass:: ecole.dynamics.ConfiguringDynamics
+
+PrimalSearch
+^^^^^^^^^^^^
+.. autoclass:: ecole.environment.PrimalSearch
+.. autoclass:: ecole.dynamics.PrimalSearchDynamics
diff --git a/ecole/docs/reference/information.rst b/ecole/docs/reference/information.rst
new file mode 100644
index 0000000..936f6de
--- /dev/null
+++ b/ecole/docs/reference/information.rst
@@ -0,0 +1,17 @@
+.. _information-reference:
+
+Informations
+============
+
+Interface
+---------
+.. autoclass:: ecole.typing.InformationFunction
+
+
+Listing
+-------
+The list of information functions relevant to users is given below.
+
+Nothing
+^^^^^^^
+.. autoclass:: ecole.information.Nothing
diff --git a/ecole/docs/reference/instances.rst b/ecole/docs/reference/instances.rst
new file mode 100644
index 0000000..ab7f3f3
--- /dev/null
+++ b/ecole/docs/reference/instances.rst
@@ -0,0 +1,34 @@
+Instance Generators
+===================
+
+Protocol
+--------
+The protocol of instance generators in Ecole.
+There are no constraints for user-defined generators.
+The protocol is given for users to know what they can expect from Ecole generators.
+
+.. autoclass:: ecole.typing.InstanceGenerator
+
+Listing
+-------
+The list of instance generators is given below.
+
+Local Files
+^^^^^^^^^^^
+.. autoclass:: ecole.instance.FileGenerator
+
+Set Cover
+^^^^^^^^^
+.. autoclass:: ecole.instance.SetCoverGenerator
+
+Combinatorial Auction
+^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: ecole.instance.CombinatorialAuctionGenerator
+
+Capacitated Facility Location
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: ecole.instance.CapacitatedFacilityLocationGenerator
+
+Independent Set
+^^^^^^^^^^^^^^^
+.. autoclass:: ecole.instance.IndependentSetGenerator
diff --git a/ecole/docs/reference/observations.rst b/ecole/docs/reference/observations.rst
new file mode 100644
index 0000000..00df8a5
--- /dev/null
+++ b/ecole/docs/reference/observations.rst
@@ -0,0 +1,45 @@
+.. _observation-reference:
+
+Observations
+============
+
+Interface
+---------
+.. autoclass:: ecole.typing.ObservationFunction
+
+
+Listing
+-------
+The list of observation functions relevant to users is given below.
+
+Nothing
+^^^^^^^
+.. autoclass:: ecole.observation.Nothing
+
+Node Bipartite
+^^^^^^^^^^^^^^
+.. autoclass:: ecole.observation.NodeBipartite
+.. autoclass:: ecole.observation.NodeBipartiteObs
+
+Milp Bipartite
+^^^^^^^^^^^^^^
+.. autoclass:: ecole.observation.MilpBipartite
+.. autoclass:: ecole.observation.MilpBipartiteObs
+
+Strong Branching Scores
+^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: ecole.observation.StrongBranchingScores
+
+Pseudocosts
+^^^^^^^^^^^
+.. autoclass:: ecole.observation.Pseudocosts
+
+Khalil et al. 2016
+^^^^^^^^^^^^^^^^^^
+.. autoclass:: ecole.observation.Khalil2016
+.. autoclass:: ecole.observation.Khalil2016Obs
+
+Hutter et al. 2011
+^^^^^^^^^^^^^^^^^^
+.. autoclass:: ecole.observation.Hutter2011
+.. autoclass:: ecole.observation.Hutter2011Obs
diff --git a/ecole/docs/reference/rewards.rst b/ecole/docs/reference/rewards.rst
new file mode 100644
index 0000000..933badf
--- /dev/null
+++ b/ecole/docs/reference/rewards.rst
@@ -0,0 +1,65 @@
+.. _reward-reference:
+
+Rewards
+=======
+
+Interface
+---------
+.. autoclass:: ecole.typing.RewardFunction
+
+Listing
+-------
+The list of reward functions relevant to users is given below.
+
+Is Done
+^^^^^^^
+.. autoclass:: ecole.reward.IsDone
+ :no-members:
+ :members: before_reset, extract
+
+LP Iterations
+^^^^^^^^^^^^^
+.. autoclass:: ecole.reward.LpIterations
+ :no-members:
+ :members: before_reset, extract
+
+NNodes
+^^^^^^
+.. autoclass:: ecole.reward.NNodes
+ :no-members:
+ :members: before_reset, extract
+
+Solving Time
+^^^^^^^^^^^^
+.. autoclass:: ecole.reward.SolvingTime
+ :no-members:
+ :members: before_reset, extract
+
+Primal and dual Integrals
+^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: ecole.reward.PrimalIntegral
+ :no-members:
+ :members: before_reset, extract
+.. autoclass:: ecole.reward.DualIntegral
+ :no-members:
+ :members: before_reset, extract
+.. autoclass:: ecole.reward.PrimalDualIntegral
+ :no-members:
+ :members: before_reset, extract
+
+
+Utilities
+---------
+The following reward functions are used internally by Ecole.
+
+Constant
+^^^^^^^^
+.. autoclass:: ecole.reward.Constant
+ :no-members:
+ :members: before_reset, extract
+
+Arithmetic
+^^^^^^^^^^
+.. autoclass:: ecole.reward.Arithmetic
+ :no-members:
+ :members: before_reset, extract
diff --git a/ecole/docs/reference/scip-interface.rst b/ecole/docs/reference/scip-interface.rst
new file mode 100644
index 0000000..ea924a9
--- /dev/null
+++ b/ecole/docs/reference/scip-interface.rst
@@ -0,0 +1,34 @@
+SCIP Interface
+==============
+
+Model
+-----
+.. autoclass:: ecole.scip.Model
+
+Callbacks
+---------
+Branchrule
+^^^^^^^^^^
+.. autoclass:: ecole.scip.callback.BranchruleConstructor
+.. autoclass:: ecole.scip.callback.BranchruleCall
+
+Heuristic
+^^^^^^^^^
+.. autoclass:: ecole.scip.callback.HeuristicConstructor
+.. autoclass:: ecole.scip.callback.HeuristicCall
+
+Utilities
+^^^^^^^^^
+.. autoattribute:: ecole.scip.callback.priority_max
+.. autoattribute:: ecole.scip.callback.max_depth_none
+.. autoattribute:: ecole.scip.callback.max_bound_distance_none
+.. autoattribute:: ecole.scip.callback.frequency_always
+.. autoattribute:: ecole.scip.callback.frequency_offset_none
+
+.. autoclass:: ecole.scip.callback.Result
+.. autoclass:: ecole.scip.callback.Type
+
+SCIP Data Types
+---------------
+.. autoclass:: ecole.scip.Stage
+.. autoclass:: ecole.scip.HeurTiming
diff --git a/ecole/docs/reference/utilities.rst b/ecole/docs/reference/utilities.rst
new file mode 100644
index 0000000..39bf3e7
--- /dev/null
+++ b/ecole/docs/reference/utilities.rst
@@ -0,0 +1,8 @@
+Utilities
+=========
+
+Random
+------
+.. autoclass:: ecole.RandomGenerator
+.. autofunction:: ecole.seed
+.. autofunction:: ecole.spawn_random_generator
diff --git a/ecole/docs/using-environments.rst b/ecole/docs/using-environments.rst
new file mode 100644
index 0000000..f9928c3
--- /dev/null
+++ b/ecole/docs/using-environments.rst
@@ -0,0 +1,175 @@
+Using Environments
+==================
+
+The goal of Ecole is to provide Markov decision process abstractions of common sequential decision making tasks that
+appear when solving combinatorial optimization problems using a solver.
+These control tasks are represented by stateful classes called environments.
+
+In this formulation, each solving of an instance is an episode.
+The environment class must first be instantiated, and then a specific instance must be loaded by a call to
+:py:meth:`~ecole.environment.Environment.reset`, which will bring the process to its initial state.
+Afterwards, successive calls to :py:meth:`~ecole.environment.Environment.step` will take an action from the
+user and transition to the next state.
+Finally, when the episode is finished, that is when the instance has been fully solved, a new solving episode can be
+started with another call to :py:meth:`~ecole.environment.Environment.reset`.
+
+For instance, using the :py:class:`~ecole.environment.Branching` environment for branch-and-bound variable selection,
+solving a specific instance once by always selecting the first fractional variable would look as follows.
+
+.. testcode::
+
+ import ecole
+
+ env = ecole.environment.Branching()
+ env.seed(42)
+
+ for _ in range(10):
+ observation, action_set, reward_offset, done, info = env.reset("path/to/instance")
+ while not done:
+ observation, action_set, reward, done, info = env.step(action_set[0])
+
+
+Let us analyze this example in more detail.
+
+
+General structure
+-----------------
+The example is driven by two loops.
+The inner ``while`` loop, the so-called *control loop*, transitions from an initial state until a
+terminal state is reached, which is signaled with the boolean flag ``done == True``.
+In Ecole, the termination of the environment coincides with the termination of the underlying combinatorial
+optimization algorithm.
+A full execution of this loop is known as an *episode*.
+The control loop matches a Markov decision process formulation, as used in control theory, dynamic programming and
+reinforcement learning.
+
+.. figure:: images/mdp.png
+ :alt: Markov Decision Process interaction loop.
+ :align: center
+ :width: 60%
+
+ The control loop of a Markov decision process.
+
+.. note::
+
+ More exactly, the control loop in Ecole is that of a `partially-observable Markov decision process
+ `_ (PO-MDP), since
+ only a subset of the MDP state is extracted from the environment in the form of an *observation*. We omit
+ this detail here for simplicity.
+
+The outer ``for`` loop in the example simply repeats the control loop several times, and is in
+charge of generating the initial state of each episode.
+In order to obtain a sufficient statistical signal for learning the control policy, numerous episodes are usually
+required for learning.
+Also, although not showcased here, there is usually little practical interest in using the same combinatorial problem
+instance for generating each episode.
+Indeed, it is usually desirable to learn policies that will generalize to new, unseen instances, which is very unlikely
+if the learning policy is tailored to solve a single specific instance.
+Ideally, one would like to sample training episodes from a family of similar instances, in order to solve new, similar
+instances in the future.
+For more details, see the :ref:`Ecole theoretical model` in the discussion.
+
+
+.. _environment-parameters:
+
+Environment parameters
+----------------------
+Each environment can be given a set of parameters at construction, in order to further customize the task being
+solved.
+For instance, the :py:class:`~ecole.environment.Branching` environment takes a ``pseudo_candidates``
+boolean parameter, to decide whether branching candidates should include all non fixed integral variables, or only the
+fractional ones.
+Environments can be instantiated with no constructor arguments, as in the previous example, in which case a set of
+default parameters will be used.
+
+Every environment can optionally take a dictionary of
+`SCIP parameters `_ that will be used to
+initialize the solver at every episode.
+For instance, to customize the clique inequalities generated, one could set:
+
+.. testcode::
+
+ env = ecole.environment.Branching(
+ scip_params={"separating/clique/freq": 0.5, "separating/clique/maxsepacuts": 5}
+ )
+
+
+.. warning::
+
+   Depending on the nature of the environment, some user-given parameters can be overridden
+ or ignored (*e.g.*, branching parameters in the :py:class:`~ecole.environment.Branching`
+ environment).
+ It is the responsibility of the user to understand the environment they are using.
+
+.. note::
+
+   For out-of-the-box strategies on presolving, heuristics, and cutting planes, consider
+ using the dedicated
+ `SCIP methods `_
+ (``SCIPsetHeuristics`` *etc.*).
+
+:ref:`Observation functions ` and
+:ref:`reward functions ` are more advanced environment
+parameters, which we will discuss later on.
+
+
+.. _resetting-environments:
+
+Resetting environments
+----------------------
+Each episode in the inner ``while`` starts with a call to
+:py:meth:`~ecole.environment.Environment.reset` in order to bring the environment into a new
+initial state.
+The method is parameterized with a problem instance: the combinatorial optimization problem that will be loaded and
+solved by the `SCIP `_ solver during the episode.
+In the most simple case this is the path to a problem file.
+For problem instances that are generated programmatically
+(for instance using `PyScipOpt `_ or using
+:ref:`instance generators`) a :py:class:`ecole.scip.Model` is also accepted.
+
+This method returns a tuple ``observation, action_set, reward_offset, done, info``, where:
+
+* The ``observation`` consists of information about the state of the solver that should be used to select the next
+ action to perform (for example, using a machine learning algorithm.) Note that this entry is always ``None`` when
+ the state is terminal (that is, when the ``done`` flag described below is ``True``.)
+* The ``action_set``, when not ``None``, describes the set of candidate actions which are valid for the next transition.
+ This is necessary for environments where the action set varies from state to state.
+ For instance, in the :py:class:`~ecole.environment.Branching` environment the set of candidate variables
+ for branching depends on the value of the current LP solution, which changes at every iteration of the algorithm.
+* The ``reward_offset`` is an offset to the reward function that accounts for any computation happening in
+ :py:meth:`~ecole.environment.Environment.reset` when generating the initial state.
+ For example, if clock time is selected as a reward function in a :py:class:`~ecole.environment.Branching` environment,
+ this would account for time spent in the preprocessing phase before any branching is performed.
+ This offset is thus important for benchmarking, but has no effect
+ on the control problem, and can be ignored when training a machine learning agent.
+* The boolean flag ``done`` indicates whether the initial state is also a terminal state.
+ This can happen in some environments, such as :py:class:`~ecole.environment.Branching`, where the problem instance
+  could be solved through presolving only (never actually getting to branching).
+
+See the reference section for the exact documentation of
+:py:meth:`~ecole.environment.Environment.reset`.
+
+
+Transitioning
+-------------
+The inner ``while`` loop transitions the environment from one state to the next by giving
+an action to :py:meth:`~ecole.environment.Environment.step`.
+The nature of ``observation``, ``action_set``, and ``done`` is the same as in the previous
+section :ref:`resetting-environments`.
+The ``reward`` and ``info`` variables provide additional information about
+the current transition.
+
+See the reference section for the exact documentation of
+:py:meth:`~ecole.environment.Environment.step`.
+
+
+Seeding environments
+--------------------
+Environments can be seeded by using the
+:py:meth:`~ecole.environment.Environment.seed` method.
+The seed is used by the environment (and in particular the solver) for all the
+subsequent episode trajectories.
+The solver is given a new seed at the beginning of every new trajectory (call to
+:py:meth:`~ecole.environment.Environment.reset`), in a way that preserves
+determinism, without re-using the same seed repeatedly.
+
+See the reference section for the exact documentation of
+:py:meth:`~ecole.environment.Environment.seed`.
diff --git a/ecole/examples/branching-imitation/conda-requirements.yaml b/ecole/examples/branching-imitation/conda-requirements.yaml
new file mode 100644
index 0000000..e5cad7d
--- /dev/null
+++ b/ecole/examples/branching-imitation/conda-requirements.yaml
@@ -0,0 +1,13 @@
+channels:
+ - pyg
+ - pytorch
+ - conda-forge
+
+dependencies:
+ - python
+ - jupyter
+ - ipykernel
+ - ecole
+ - numpy
+ - pytorch
+ - pyg
diff --git a/ecole/examples/branching-imitation/example.ipynb b/ecole/examples/branching-imitation/example.ipynb
new file mode 100644
index 0000000..26b970c
--- /dev/null
+++ b/ecole/examples/branching-imitation/example.ipynb
@@ -0,0 +1,1498 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Branching with Imitation Learning and a GNN\n",
+ "\n",
+ "In this tutorial we will reproduce a simplified version of the paper of Gasse et al. (2019) on learning to branch with Ecole with `pytorch` and `pytorch geometric`. We collect strong branching examples on randomly generated maximum set covering instances, then train a graph neural network with bipartite state encodings to imitate the expert by classification. Finally, we will evaluate the quality of the policy.\n",
+ "\n",
+ "The biggest difference with Gasse et al. (2019) is that only n=1,000 training examples of expert decisions are collected for training, to keep the time needed to run the tutorial reasonable. As a consequence, the resulting policy is undertrained and is not competitive with SCIP's default branching rule.\n",
+ "\n",
+ "Users that are interested in reproducing competitive performance should use a larger sample size, such as the n=100,000 samples used for training in the paper. In this case, we strongly recommend to parallelize data collection, as in the original Gasse et al. (2019) code.\n",
+ "\n",
+ "### Requirements\n",
+ "The requirements can be found in `conda-requirements.yaml`, lock files with pinned versions are also available\n",
+ "for various configurations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import gzip\n",
+ "import pickle\n",
+ "from pathlib import Path\n",
+ "\n",
+ "import ecole\n",
+ "import numpy as np\n",
+ "import torch\n",
+ "import torch.nn.functional as F\n",
+ "import torch_geometric"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "tags": [
+ "parameters"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "DATA_MAX_SAMPLES = 1000\n",
+ "LEARNING_RATE = 0.001\n",
+ "NB_EPOCHS = 50\n",
+ "NB_EVAL_INSTANCES = 20"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. Data collection\n",
+ "\n",
+ "Our first step will be to run explore-then-strong-branch on randomly generated maximum set covering instances, and save the branching decisions to build a dataset. We will also record the state of the branch-and-bound process as a bipartite graph, which is already implemented in Ecole with the same features as Gasse et al. (2019)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will use the Ecole-provided set cover instance generator."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "instances = ecole.instance.SetCoverGenerator(n_rows=500, n_cols=1000, density=0.05)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The explore-then-strong-branch scheme described in the paper is not implemented by default in Ecole. In this scheme, to diversify the states in which we collect examples of strong branching behavior, we mostly follow a weak but cheap expert (pseudocost branching) and only occasionally call the strong expert (strong branching). This also ensures that samples are closer to being independent and identically distributed.\n",
+ "\n",
+ "This can be realized in Ecole by creating a custom observation function, which will randomly compute and return the pseudocost scores (cheap) or the strong branching scores (expensive). It also showcases extensibility in Ecole by showing how easily a custom observation function can be created and used, directly in Python."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class ExploreThenStrongBranch:\n",
+ " \"\"\"\n",
+ " This custom observation function class will randomly return either strong branching scores (expensive expert)\n",
+ " or pseudocost scores (weak expert for exploration) when called at every node.\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self, expert_probability):\n",
+ " self.expert_probability = expert_probability\n",
+ " self.pseudocosts_function = ecole.observation.Pseudocosts()\n",
+ " self.strong_branching_function = ecole.observation.StrongBranchingScores()\n",
+ "\n",
+ " def before_reset(self, model):\n",
+ " \"\"\"\n",
+ " This function will be called at initialization of the environment (before dynamics are reset).\n",
+ " \"\"\"\n",
+ " self.pseudocosts_function.before_reset(model)\n",
+ " self.strong_branching_function.before_reset(model)\n",
+ "\n",
+ " def extract(self, model, done):\n",
+ " \"\"\"\n",
+ " Should we return strong branching or pseudocost scores at time node?\n",
+ " \"\"\"\n",
+ " probabilities = [1 - self.expert_probability, self.expert_probability]\n",
+ " expert_chosen = bool(np.random.choice(np.arange(2), p=probabilities))\n",
+ " if expert_chosen:\n",
+ " return (self.strong_branching_function.extract(model, done), True)\n",
+ " else:\n",
+ " return (self.pseudocosts_function.extract(model, done), False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can now create the environment with the correct parameters (no restarts, 1h time limit, 5% expert sampling probability).\n",
+ "\n",
+ "Besides the (pseudocost or strong branching) scores, our environment will return the node bipartite graph representation of \n",
+ "branch-and-bound states used in Gasse et al. (2019), using the `ecole.observation.NodeBipartite` observation function.\n",
+ "On one side of that bipartite graph, nodes represent the variables of the problem, with a vector encoding features of \n",
+ "that variable. On the other side of the bipartite graph, nodes represent the constraints of the problem, similarly with \n",
+ "a vector encoding features of that constraint. An edge links a variable and a constraint node if the variable participates \n",
+ "in that constraint, that is, its coefficient is nonzero in that constraint. The constraint coefficient is attached as an\n",
+ "attribute of the edge."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# We can pass custom SCIP parameters easily\n",
+ "scip_parameters = {\n",
+ " \"separating/maxrounds\": 0,\n",
+ " \"presolving/maxrestarts\": 0,\n",
+ " \"limits/time\": 3600,\n",
+ "}\n",
+ "\n",
+ "# Note how we can tuple observation functions to return complex state information\n",
+ "env = ecole.environment.Branching(\n",
+ " observation_function=(\n",
+ " ExploreThenStrongBranch(expert_probability=0.05),\n",
+ " ecole.observation.NodeBipartite(),\n",
+ " ),\n",
+ " scip_params=scip_parameters,\n",
+ ")\n",
+ "\n",
+ "# This will seed the environment for reproducibility\n",
+ "env.seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now we loop over the instances, following the strong branching expert 5% of the time and saving its decision, until enough samples are collected."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Episode 1, 2 samples collected so far\n",
+ "Episode 2, 3 samples collected so far\n",
+ "Episode 3, 4 samples collected so far\n",
+ "Episode 4, 4 samples collected so far\n",
+ "Episode 5, 8 samples collected so far\n",
+ "Episode 6, 8 samples collected so far\n",
+ "Episode 7, 10 samples collected so far\n",
+ "Episode 8, 10 samples collected so far\n",
+ "Episode 9, 10 samples collected so far\n",
+ "Episode 10, 11 samples collected so far\n",
+ "Episode 11, 11 samples collected so far\n",
+ "Episode 12, 88 samples collected so far\n",
+ "Episode 13, 88 samples collected so far\n",
+ "Episode 14, 91 samples collected so far\n",
+ "Episode 15, 91 samples collected so far\n",
+ "Episode 16, 91 samples collected so far\n",
+ "Episode 17, 91 samples collected so far\n",
+ "Episode 18, 92 samples collected so far\n",
+ "Episode 19, 93 samples collected so far\n",
+ "Episode 20, 99 samples collected so far\n",
+ "Episode 21, 103 samples collected so far\n",
+ "Episode 22, 106 samples collected so far\n",
+ "Episode 23, 106 samples collected so far\n",
+ "Episode 24, 107 samples collected so far\n",
+ "Episode 25, 117 samples collected so far\n",
+ "Episode 26, 171 samples collected so far\n",
+ "Episode 27, 171 samples collected so far\n",
+ "Episode 28, 172 samples collected so far\n",
+ "Episode 29, 174 samples collected so far\n",
+ "Episode 30, 175 samples collected so far\n",
+ "Episode 31, 175 samples collected so far\n",
+ "Episode 32, 175 samples collected so far\n",
+ "Episode 33, 175 samples collected so far\n",
+ "Episode 34, 175 samples collected so far\n",
+ "Episode 35, 182 samples collected so far\n",
+ "Episode 36, 183 samples collected so far\n",
+ "Episode 37, 194 samples collected so far\n",
+ "Episode 38, 194 samples collected so far\n",
+ "Episode 39, 195 samples collected so far\n",
+ "Episode 40, 196 samples collected so far\n",
+ "Episode 41, 196 samples collected so far\n",
+ "Episode 42, 198 samples collected so far\n",
+ "Episode 43, 203 samples collected so far\n",
+ "Episode 44, 205 samples collected so far\n",
+ "Episode 45, 205 samples collected so far\n",
+ "Episode 46, 205 samples collected so far\n",
+ "Episode 47, 205 samples collected so far\n",
+ "Episode 48, 207 samples collected so far\n",
+ "Episode 49, 208 samples collected so far\n",
+ "Episode 50, 208 samples collected so far\n",
+ "Episode 51, 208 samples collected so far\n",
+ "Episode 52, 208 samples collected so far\n",
+ "Episode 53, 208 samples collected so far\n",
+ "Episode 54, 208 samples collected so far\n",
+ "Episode 55, 208 samples collected so far\n",
+ "Episode 56, 222 samples collected so far\n",
+ "Episode 57, 228 samples collected so far\n",
+ "Episode 58, 239 samples collected so far\n",
+ "Episode 59, 239 samples collected so far\n",
+ "Episode 60, 240 samples collected so far\n",
+ "Episode 61, 240 samples collected so far\n",
+ "Episode 62, 242 samples collected so far\n",
+ "Episode 63, 242 samples collected so far\n",
+ "Episode 64, 242 samples collected so far\n",
+ "Episode 65, 248 samples collected so far\n",
+ "Episode 66, 248 samples collected so far\n",
+ "Episode 67, 249 samples collected so far\n",
+ "Episode 68, 249 samples collected so far\n",
+ "Episode 69, 249 samples collected so far\n",
+ "Episode 70, 250 samples collected so far\n",
+ "Episode 71, 251 samples collected so far\n",
+ "Episode 72, 252 samples collected so far\n",
+ "Episode 73, 254 samples collected so far\n",
+ "Episode 74, 255 samples collected so far\n",
+ "Episode 75, 256 samples collected so far\n",
+ "Episode 76, 256 samples collected so far\n",
+ "Episode 77, 283 samples collected so far\n",
+ "Episode 78, 287 samples collected so far\n",
+ "Episode 79, 288 samples collected so far\n",
+ "Episode 80, 288 samples collected so far\n",
+ "Episode 81, 348 samples collected so far\n",
+ "Episode 82, 354 samples collected so far\n",
+ "Episode 83, 354 samples collected so far\n",
+ "Episode 84, 354 samples collected so far\n",
+ "Episode 85, 355 samples collected so far\n",
+ "Episode 86, 363 samples collected so far\n",
+ "Episode 87, 363 samples collected so far\n",
+ "Episode 88, 363 samples collected so far\n",
+ "Episode 89, 363 samples collected so far\n",
+ "Episode 90, 363 samples collected so far\n",
+ "Episode 91, 365 samples collected so far\n",
+ "Episode 92, 366 samples collected so far\n",
+ "Episode 93, 367 samples collected so far\n",
+ "Episode 94, 367 samples collected so far\n",
+ "Episode 95, 368 samples collected so far\n",
+ "Episode 96, 374 samples collected so far\n",
+ "Episode 97, 374 samples collected so far\n",
+ "Episode 98, 374 samples collected so far\n",
+ "Episode 99, 374 samples collected so far\n",
+ "Episode 100, 375 samples collected so far\n",
+ "Episode 101, 375 samples collected so far\n",
+ "Episode 102, 378 samples collected so far\n",
+ "Episode 103, 380 samples collected so far\n",
+ "Episode 104, 383 samples collected so far\n",
+ "Episode 105, 384 samples collected so far\n",
+ "Episode 106, 393 samples collected so far\n",
+ "Episode 107, 394 samples collected so far\n",
+ "Episode 108, 395 samples collected so far\n",
+ "Episode 109, 396 samples collected so far\n",
+ "Episode 110, 406 samples collected so far\n",
+ "Episode 111, 415 samples collected so far\n",
+ "Episode 112, 419 samples collected so far\n",
+ "Episode 113, 419 samples collected so far\n",
+ "Episode 114, 420 samples collected so far\n",
+ "Episode 115, 421 samples collected so far\n",
+ "Episode 116, 421 samples collected so far\n",
+ "Episode 117, 421 samples collected so far\n",
+ "Episode 118, 423 samples collected so far\n",
+ "Episode 119, 423 samples collected so far\n",
+ "Episode 120, 425 samples collected so far\n",
+ "Episode 121, 426 samples collected so far\n",
+ "Episode 122, 426 samples collected so far\n",
+ "Episode 123, 426 samples collected so far\n",
+ "Episode 124, 429 samples collected so far\n",
+ "Episode 125, 436 samples collected so far\n",
+ "Episode 126, 444 samples collected so far\n",
+ "Episode 127, 445 samples collected so far\n",
+ "Episode 128, 448 samples collected so far\n",
+ "Episode 129, 450 samples collected so far\n",
+ "Episode 130, 451 samples collected so far\n",
+ "Episode 131, 469 samples collected so far\n",
+ "Episode 132, 469 samples collected so far\n",
+ "Episode 133, 476 samples collected so far\n",
+ "Episode 134, 481 samples collected so far\n",
+ "Episode 135, 483 samples collected so far\n",
+ "Episode 136, 486 samples collected so far\n",
+ "Episode 137, 487 samples collected so far\n",
+ "Episode 138, 488 samples collected so far\n",
+ "Episode 139, 489 samples collected so far\n",
+ "Episode 140, 489 samples collected so far\n",
+ "Episode 141, 502 samples collected so far\n",
+ "Episode 142, 518 samples collected so far\n",
+ "Episode 143, 518 samples collected so far\n",
+ "Episode 144, 520 samples collected so far\n",
+ "Episode 145, 522 samples collected so far\n",
+ "Episode 146, 522 samples collected so far\n",
+ "Episode 147, 522 samples collected so far\n",
+ "Episode 148, 522 samples collected so far\n",
+ "Episode 149, 523 samples collected so far\n",
+ "Episode 150, 523 samples collected so far\n",
+ "Episode 151, 523 samples collected so far\n",
+ "Episode 152, 523 samples collected so far\n",
+ "Episode 153, 524 samples collected so far\n",
+ "Episode 154, 527 samples collected so far\n",
+ "Episode 155, 530 samples collected so far\n",
+ "Episode 156, 530 samples collected so far\n",
+ "Episode 157, 530 samples collected so far\n",
+ "Episode 158, 531 samples collected so far\n",
+ "Episode 159, 539 samples collected so far\n",
+ "Episode 160, 539 samples collected so far\n",
+ "Episode 161, 542 samples collected so far\n",
+ "Episode 162, 543 samples collected so far\n",
+ "Episode 163, 543 samples collected so far\n",
+ "Episode 164, 554 samples collected so far\n",
+ "Episode 165, 556 samples collected so far\n",
+ "Episode 166, 558 samples collected so far\n",
+ "Episode 167, 564 samples collected so far\n",
+ "Episode 168, 565 samples collected so far\n",
+ "Episode 169, 565 samples collected so far\n",
+ "Episode 170, 565 samples collected so far\n",
+ "Episode 171, 577 samples collected so far\n",
+ "Episode 172, 577 samples collected so far\n",
+ "Episode 173, 578 samples collected so far\n",
+ "Episode 174, 578 samples collected so far\n",
+ "Episode 175, 581 samples collected so far\n",
+ "Episode 176, 583 samples collected so far\n",
+ "Episode 177, 583 samples collected so far\n",
+ "Episode 178, 584 samples collected so far\n",
+ "Episode 179, 584 samples collected so far\n",
+ "Episode 180, 585 samples collected so far\n",
+ "Episode 181, 585 samples collected so far\n",
+ "Episode 182, 585 samples collected so far\n",
+ "Episode 183, 586 samples collected so far\n",
+ "Episode 184, 606 samples collected so far\n",
+ "Episode 185, 608 samples collected so far\n",
+ "Episode 186, 609 samples collected so far\n",
+ "Episode 187, 610 samples collected so far\n",
+ "Episode 188, 610 samples collected so far\n",
+ "Episode 189, 610 samples collected so far\n",
+ "Episode 190, 611 samples collected so far\n",
+ "Episode 191, 611 samples collected so far\n",
+ "Episode 192, 613 samples collected so far\n",
+ "Episode 193, 614 samples collected so far\n",
+ "Episode 194, 616 samples collected so far\n",
+ "Episode 195, 617 samples collected so far\n",
+ "Episode 196, 621 samples collected so far\n",
+ "Episode 197, 621 samples collected so far\n",
+ "Episode 198, 624 samples collected so far\n",
+ "Episode 199, 629 samples collected so far\n",
+ "Episode 200, 629 samples collected so far\n",
+ "Episode 201, 629 samples collected so far\n",
+ "Episode 202, 629 samples collected so far\n",
+ "Episode 203, 631 samples collected so far\n",
+ "Episode 204, 631 samples collected so far\n",
+ "Episode 205, 632 samples collected so far\n",
+ "Episode 206, 639 samples collected so far\n",
+ "Episode 207, 640 samples collected so far\n",
+ "Episode 208, 642 samples collected so far\n",
+ "Episode 209, 642 samples collected so far\n",
+ "Episode 210, 650 samples collected so far\n",
+ "Episode 211, 650 samples collected so far\n",
+ "Episode 212, 653 samples collected so far\n",
+ "Episode 213, 655 samples collected so far\n",
+ "Episode 214, 655 samples collected so far\n",
+ "Episode 215, 655 samples collected so far\n",
+ "Episode 216, 655 samples collected so far\n",
+ "Episode 217, 656 samples collected so far\n",
+ "Episode 218, 657 samples collected so far\n",
+ "Episode 219, 657 samples collected so far\n",
+ "Episode 220, 660 samples collected so far\n",
+ "Episode 221, 662 samples collected so far\n",
+ "Episode 222, 663 samples collected so far\n",
+ "Episode 223, 663 samples collected so far\n",
+ "Episode 224, 667 samples collected so far\n",
+ "Episode 225, 667 samples collected so far\n",
+ "Episode 226, 668 samples collected so far\n",
+ "Episode 227, 670 samples collected so far\n",
+ "Episode 228, 670 samples collected so far\n",
+ "Episode 229, 671 samples collected so far\n",
+ "Episode 230, 679 samples collected so far\n",
+ "Episode 231, 679 samples collected so far\n",
+ "Episode 232, 679 samples collected so far\n",
+ "Episode 233, 680 samples collected so far\n",
+ "Episode 234, 687 samples collected so far\n",
+ "Episode 235, 689 samples collected so far\n",
+ "Episode 236, 691 samples collected so far\n",
+ "Episode 237, 699 samples collected so far\n",
+ "Episode 238, 700 samples collected so far\n",
+ "Episode 239, 700 samples collected so far\n",
+ "Episode 240, 700 samples collected so far\n",
+ "Episode 241, 700 samples collected so far\n",
+ "Episode 242, 707 samples collected so far\n",
+ "Episode 243, 708 samples collected so far\n",
+ "Episode 244, 711 samples collected so far\n",
+ "Episode 245, 712 samples collected so far\n",
+ "Episode 246, 712 samples collected so far\n",
+ "Episode 247, 712 samples collected so far\n",
+ "Episode 248, 722 samples collected so far\n",
+ "Episode 249, 722 samples collected so far\n",
+ "Episode 250, 729 samples collected so far\n",
+ "Episode 251, 729 samples collected so far\n",
+ "Episode 252, 729 samples collected so far\n",
+ "Episode 253, 734 samples collected so far\n",
+ "Episode 254, 738 samples collected so far\n",
+ "Episode 255, 739 samples collected so far\n",
+ "Episode 256, 741 samples collected so far\n",
+ "Episode 257, 741 samples collected so far\n",
+ "Episode 258, 741 samples collected so far\n",
+ "Episode 259, 741 samples collected so far\n",
+ "Episode 260, 741 samples collected so far\n",
+ "Episode 261, 741 samples collected so far\n",
+ "Episode 262, 741 samples collected so far\n",
+ "Episode 263, 743 samples collected so far\n",
+ "Episode 264, 743 samples collected so far\n",
+ "Episode 265, 744 samples collected so far\n",
+ "Episode 266, 749 samples collected so far\n",
+ "Episode 267, 751 samples collected so far\n",
+ "Episode 268, 753 samples collected so far\n",
+ "Episode 269, 753 samples collected so far\n",
+ "Episode 270, 753 samples collected so far\n",
+ "Episode 271, 754 samples collected so far\n",
+ "Episode 272, 754 samples collected so far\n",
+ "Episode 273, 756 samples collected so far\n",
+ "Episode 274, 756 samples collected so far\n",
+ "Episode 275, 756 samples collected so far\n",
+ "Episode 276, 756 samples collected so far\n",
+ "Episode 277, 757 samples collected so far\n",
+ "Episode 278, 757 samples collected so far\n",
+ "Episode 279, 759 samples collected so far\n",
+ "Episode 280, 759 samples collected so far\n",
+ "Episode 281, 760 samples collected so far\n",
+ "Episode 282, 763 samples collected so far\n",
+ "Episode 283, 771 samples collected so far\n",
+ "Episode 284, 772 samples collected so far\n",
+ "Episode 285, 772 samples collected so far\n",
+ "Episode 286, 772 samples collected so far\n",
+ "Episode 287, 773 samples collected so far\n",
+ "Episode 288, 774 samples collected so far\n",
+ "Episode 289, 781 samples collected so far\n",
+ "Episode 290, 784 samples collected so far\n",
+ "Episode 291, 805 samples collected so far\n",
+ "Episode 292, 805 samples collected so far\n",
+ "Episode 293, 806 samples collected so far\n",
+ "Episode 294, 813 samples collected so far\n",
+ "Episode 295, 815 samples collected so far\n",
+ "Episode 296, 816 samples collected so far\n",
+ "Episode 297, 818 samples collected so far\n",
+ "Episode 298, 818 samples collected so far\n",
+ "Episode 299, 818 samples collected so far\n",
+ "Episode 300, 818 samples collected so far\n",
+ "Episode 301, 818 samples collected so far\n",
+ "Episode 302, 819 samples collected so far\n",
+ "Episode 303, 821 samples collected so far\n",
+ "Episode 304, 822 samples collected so far\n",
+ "Episode 305, 822 samples collected so far\n",
+ "Episode 306, 822 samples collected so far\n",
+ "Episode 307, 825 samples collected so far\n",
+ "Episode 308, 825 samples collected so far\n",
+ "Episode 309, 825 samples collected so far\n",
+ "Episode 310, 826 samples collected so far\n",
+ "Episode 311, 829 samples collected so far\n",
+ "Episode 312, 831 samples collected so far\n",
+ "Episode 313, 831 samples collected so far\n",
+ "Episode 314, 833 samples collected so far\n",
+ "Episode 315, 836 samples collected so far\n",
+ "Episode 316, 836 samples collected so far\n",
+ "Episode 317, 837 samples collected so far\n",
+ "Episode 318, 837 samples collected so far\n",
+ "Episode 319, 837 samples collected so far\n",
+ "Episode 320, 837 samples collected so far\n",
+ "Episode 321, 837 samples collected so far\n",
+ "Episode 322, 841 samples collected so far\n",
+ "Episode 323, 865 samples collected so far\n",
+ "Episode 324, 877 samples collected so far\n",
+ "Episode 325, 881 samples collected so far\n",
+ "Episode 326, 881 samples collected so far\n",
+ "Episode 327, 881 samples collected so far\n",
+ "Episode 328, 882 samples collected so far\n",
+ "Episode 329, 882 samples collected so far\n",
+ "Episode 330, 882 samples collected so far\n",
+ "Episode 331, 886 samples collected so far\n",
+ "Episode 332, 893 samples collected so far\n",
+ "Episode 333, 897 samples collected so far\n",
+ "Episode 334, 897 samples collected so far\n",
+ "Episode 335, 897 samples collected so far\n",
+ "Episode 336, 901 samples collected so far\n",
+ "Episode 337, 906 samples collected so far\n",
+ "Episode 338, 907 samples collected so far\n",
+ "Episode 339, 907 samples collected so far\n",
+ "Episode 340, 907 samples collected so far\n",
+ "Episode 341, 907 samples collected so far\n",
+ "Episode 342, 910 samples collected so far\n",
+ "Episode 343, 910 samples collected so far\n",
+ "Episode 344, 910 samples collected so far\n",
+ "Episode 345, 911 samples collected so far\n",
+ "Episode 346, 911 samples collected so far\n",
+ "Episode 347, 913 samples collected so far\n",
+ "Episode 348, 917 samples collected so far\n",
+ "Episode 349, 917 samples collected so far\n",
+ "Episode 350, 917 samples collected so far\n",
+ "Episode 351, 919 samples collected so far\n",
+ "Episode 352, 919 samples collected so far\n",
+ "Episode 353, 923 samples collected so far\n",
+ "Episode 354, 927 samples collected so far\n",
+ "Episode 355, 928 samples collected so far\n",
+ "Episode 356, 928 samples collected so far\n",
+ "Episode 357, 933 samples collected so far\n",
+ "Episode 358, 938 samples collected so far\n",
+ "Episode 359, 944 samples collected so far\n",
+ "Episode 360, 946 samples collected so far\n",
+ "Episode 361, 949 samples collected so far\n",
+ "Episode 362, 953 samples collected so far\n",
+ "Episode 363, 954 samples collected so far\n",
+ "Episode 364, 957 samples collected so far\n",
+ "Episode 365, 960 samples collected so far\n",
+ "Episode 366, 962 samples collected so far\n",
+ "Episode 367, 968 samples collected so far\n",
+ "Episode 368, 968 samples collected so far\n",
+ "Episode 369, 968 samples collected so far\n",
+ "Episode 370, 975 samples collected so far\n",
+ "Episode 371, 975 samples collected so far\n",
+ "Episode 372, 975 samples collected so far\n",
+ "Episode 373, 976 samples collected so far\n",
+ "Episode 374, 977 samples collected so far\n",
+ "Episode 375, 979 samples collected so far\n",
+ "Episode 376, 979 samples collected so far\n",
+ "Episode 377, 979 samples collected so far\n",
+ "Episode 378, 980 samples collected so far\n",
+ "Episode 379, 980 samples collected so far\n",
+ "Episode 380, 980 samples collected so far\n",
+ "Episode 381, 980 samples collected so far\n",
+ "Episode 382, 980 samples collected so far\n",
+ "Episode 383, 981 samples collected so far\n",
+ "Episode 384, 983 samples collected so far\n",
+ "Episode 385, 991 samples collected so far\n",
+ "Episode 386, 998 samples collected so far\n",
+ "Episode 387, 998 samples collected so far\n",
+ "Episode 388, 1000 samples collected so far\n"
+ ]
+ }
+ ],
+ "source": [
+ "episode_counter, sample_counter = 0, 0\n",
+ "Path(\"samples/\").mkdir(exist_ok=True)\n",
+ "\n",
+ "# We will solve problems (run episodes) until we have saved enough samples\n",
+ "while sample_counter < DATA_MAX_SAMPLES:\n",
+ " episode_counter += 1\n",
+ "\n",
+ " observation, action_set, _, done, _ = env.reset(next(instances))\n",
+ " while not done:\n",
+ " (scores, scores_are_expert), node_observation = observation\n",
+ " action = action_set[scores[action_set].argmax()]\n",
+ "\n",
+ " # Only save samples if they are coming from the expert (strong branching)\n",
+ " if scores_are_expert and (sample_counter < DATA_MAX_SAMPLES):\n",
+ " sample_counter += 1\n",
+ " data = [node_observation, action, action_set, scores]\n",
+ " filename = f\"samples/sample_{sample_counter}.pkl\"\n",
+ "\n",
+ " with gzip.open(filename, \"wb\") as f:\n",
+ " pickle.dump(data, f)\n",
+ "\n",
+ " observation, action_set, _, done, _ = env.step(action)\n",
+ "\n",
+ " print(f\"Episode {episode_counter}, {sample_counter} samples collected so far\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 2. Train a GNN\n",
+ "\n",
+ "Our next step is to train a GNN classifier on these collected samples to predict similar choices to strong branching."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We will first define pytorch geometric data classes to handle the bipartite graph data."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class BipartiteNodeData(torch_geometric.data.Data):\n",
+ " \"\"\"\n",
+ " This class encode a node bipartite graph observation as returned by the `ecole.observation.NodeBipartite`\n",
+ " observation function in a format understood by the pytorch geometric data handlers.\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(\n",
+ " self,\n",
+ " constraint_features,\n",
+ " edge_indices,\n",
+ " edge_features,\n",
+ " variable_features,\n",
+ " candidates,\n",
+ " nb_candidates,\n",
+ " candidate_choice,\n",
+ " candidate_scores,\n",
+ " ):\n",
+ " super().__init__()\n",
+ " self.constraint_features = constraint_features\n",
+ " self.edge_index = edge_indices\n",
+ " self.edge_attr = edge_features\n",
+ " self.variable_features = variable_features\n",
+ " self.candidates = candidates\n",
+ " self.nb_candidates = nb_candidates\n",
+ " self.candidate_choices = candidate_choice\n",
+ " self.candidate_scores = candidate_scores\n",
+ "\n",
+ " def __inc__(self, key, value, store, *args, **kwargs):\n",
+ " \"\"\"\n",
+ " We overload the pytorch geometric method that tells how to increment indices when concatenating graphs\n",
+ " for those entries (edge index, candidates) for which this is not obvious.\n",
+ " \"\"\"\n",
+ " if key == \"edge_index\":\n",
+ " return torch.tensor(\n",
+ " [[self.constraint_features.size(0)], [self.variable_features.size(0)]]\n",
+ " )\n",
+ " elif key == \"candidates\":\n",
+ " return self.variable_features.size(0)\n",
+ " else:\n",
+ " return super().__inc__(key, value, *args, **kwargs)\n",
+ "\n",
+ "\n",
+ "class GraphDataset(torch_geometric.data.Dataset):\n",
+ " \"\"\"\n",
+ " This class encodes a collection of graphs, as well as a method to load such graphs from the disk.\n",
+ " It can be used in turn by the data loaders provided by pytorch geometric.\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self, sample_files):\n",
+ " super().__init__(root=None, transform=None, pre_transform=None)\n",
+ " self.sample_files = sample_files\n",
+ "\n",
+ " def len(self):\n",
+ " return len(self.sample_files)\n",
+ "\n",
+ " def get(self, index):\n",
+ " \"\"\"\n",
+ " This method loads a node bipartite graph observation as saved on the disk during data collection.\n",
+ " \"\"\"\n",
+ " with gzip.open(self.sample_files[index], \"rb\") as f:\n",
+ " sample = pickle.load(f)\n",
+ "\n",
+ " sample_observation, sample_action, sample_action_set, sample_scores = sample\n",
+ " \n",
+ " constraint_features = sample_observation.row_features\n",
+ " edge_indices = sample_observation.edge_features.indices.astype(np.int32)\n",
+ " edge_features = np.expand_dims(sample_observation.edge_features.values, axis=-1)\n",
+ " variable_features = sample_observation.variable_features\n",
+ "\n",
+ " # We note on which variables we were allowed to branch, the scores as well as the choice\n",
+ " # taken by strong branching (relative to the candidates)\n",
+ " candidates = np.array(sample_action_set, dtype=np.int32)\n",
+ " candidate_scores = np.array([sample_scores[j] for j in candidates])\n",
+ " candidate_choice = np.where(candidates == sample_action)[0][0]\n",
+ "\n",
+ " graph = BipartiteNodeData(\n",
+ " torch.FloatTensor(constraint_features),\n",
+ " torch.LongTensor(edge_indices),\n",
+ " torch.FloatTensor(edge_features),\n",
+ " torch.FloatTensor(variable_features),\n",
+ " torch.LongTensor(candidates),\n",
+ " len(candidates),\n",
+ " torch.LongTensor([candidate_choice]),\n",
+ " torch.FloatTensor(candidate_scores)\n",
+ " )\n",
+ "\n",
+ " # We must tell pytorch geometric how many nodes there are, for indexing purposes\n",
+ " graph.num_nodes = constraint_features.shape[0] + variable_features.shape[0]\n",
+ "\n",
+ " return graph"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can then prepare the data loaders."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "sample_files = [str(path) for path in Path(\"samples/\").glob(\"sample_*.pkl\")]\n",
+ "train_files = sample_files[: int(0.8 * len(sample_files))]\n",
+ "valid_files = sample_files[int(0.8 * len(sample_files)) :]\n",
+ "\n",
+ "train_data = GraphDataset(train_files)\n",
+ "train_loader = torch_geometric.loader.DataLoader(train_data, batch_size=32, shuffle=True)\n",
+ "valid_data = GraphDataset(valid_files)\n",
+ "valid_loader = torch_geometric.loader.DataLoader(valid_data, batch_size=128, shuffle=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Next, we will define our graph neural network architecture."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class GNNPolicy(torch.nn.Module):\n",
+ " def __init__(self):\n",
+ " super().__init__()\n",
+ " emb_size = 64\n",
+ " cons_nfeats = 5\n",
+ " edge_nfeats = 1\n",
+ " var_nfeats = 19\n",
+ "\n",
+ " # CONSTRAINT EMBEDDING\n",
+ " self.cons_embedding = torch.nn.Sequential(\n",
+ " torch.nn.LayerNorm(cons_nfeats),\n",
+ " torch.nn.Linear(cons_nfeats, emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " torch.nn.Linear(emb_size, emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " )\n",
+ "\n",
+ " # EDGE EMBEDDING\n",
+ " self.edge_embedding = torch.nn.Sequential(\n",
+ " torch.nn.LayerNorm(edge_nfeats),\n",
+ " )\n",
+ "\n",
+ " # VARIABLE EMBEDDING\n",
+ " self.var_embedding = torch.nn.Sequential(\n",
+ " torch.nn.LayerNorm(var_nfeats),\n",
+ " torch.nn.Linear(var_nfeats, emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " torch.nn.Linear(emb_size, emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " )\n",
+ "\n",
+ " self.conv_v_to_c = BipartiteGraphConvolution()\n",
+ " self.conv_c_to_v = BipartiteGraphConvolution()\n",
+ "\n",
+ " self.output_module = torch.nn.Sequential(\n",
+ " torch.nn.Linear(emb_size, emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " torch.nn.Linear(emb_size, 1, bias=False),\n",
+ " )\n",
+ "\n",
+ " def forward(\n",
+ " self, constraint_features, edge_indices, edge_features, variable_features\n",
+ " ):\n",
+ " reversed_edge_indices = torch.stack([edge_indices[1], edge_indices[0]], dim=0)\n",
+ "\n",
+ " # First step: linear embedding layers to a common dimension (64)\n",
+ " constraint_features = self.cons_embedding(constraint_features)\n",
+ " edge_features = self.edge_embedding(edge_features)\n",
+ " variable_features = self.var_embedding(variable_features)\n",
+ "\n",
+ " # Two half convolutions\n",
+ " constraint_features = self.conv_v_to_c(\n",
+ " variable_features, reversed_edge_indices, edge_features, constraint_features\n",
+ " )\n",
+ " variable_features = self.conv_c_to_v(\n",
+ " constraint_features, edge_indices, edge_features, variable_features\n",
+ " )\n",
+ "\n",
+ " # A final MLP on the variable features\n",
+ " output = self.output_module(variable_features).squeeze(-1)\n",
+ " return output\n",
+ "\n",
+ "\n",
+ "class BipartiteGraphConvolution(torch_geometric.nn.MessagePassing):\n",
+ " \"\"\"\n",
+ " The bipartite graph convolution is already provided by pytorch geometric and we merely need\n",
+ " to provide the exact form of the messages being passed.\n",
+ " \"\"\"\n",
+ "\n",
+ " def __init__(self):\n",
+ " super().__init__(\"add\")\n",
+ " emb_size = 64\n",
+ "\n",
+ " self.feature_module_left = torch.nn.Sequential(\n",
+ " torch.nn.Linear(emb_size, emb_size)\n",
+ " )\n",
+ " self.feature_module_edge = torch.nn.Sequential(\n",
+ " torch.nn.Linear(1, emb_size, bias=False)\n",
+ " )\n",
+ " self.feature_module_right = torch.nn.Sequential(\n",
+ " torch.nn.Linear(emb_size, emb_size, bias=False)\n",
+ " )\n",
+ " self.feature_module_final = torch.nn.Sequential(\n",
+ " torch.nn.LayerNorm(emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " torch.nn.Linear(emb_size, emb_size),\n",
+ " )\n",
+ "\n",
+ " self.post_conv_module = torch.nn.Sequential(torch.nn.LayerNorm(emb_size))\n",
+ "\n",
+ " # output_layers\n",
+ " self.output_module = torch.nn.Sequential(\n",
+ " torch.nn.Linear(2 * emb_size, emb_size),\n",
+ " torch.nn.ReLU(),\n",
+ " torch.nn.Linear(emb_size, emb_size),\n",
+ " )\n",
+ "\n",
+ " def forward(self, left_features, edge_indices, edge_features, right_features):\n",
+ " \"\"\"\n",
+ " This method sends the messages, computed in the message method.\n",
+ " \"\"\"\n",
+ " output = self.propagate(\n",
+ " edge_indices,\n",
+ " size=(left_features.shape[0], right_features.shape[0]),\n",
+ " node_features=(left_features, right_features),\n",
+ " edge_features=edge_features,\n",
+ " )\n",
+ " return self.output_module(\n",
+ " torch.cat([self.post_conv_module(output), right_features], dim=-1)\n",
+ " )\n",
+ "\n",
+ " def message(self, node_features_i, node_features_j, edge_features):\n",
+ " output = self.feature_module_final(\n",
+ " self.feature_module_left(node_features_i)\n",
+ " + self.feature_module_edge(edge_features)\n",
+ " + self.feature_module_right(node_features_j)\n",
+ " )\n",
+ " return output\n",
+ "\n",
+ "\n",
+ "policy = GNNPolicy().to(DEVICE)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With this model we can predict a probability distribution over actions as follows."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([0.0155, 0.0153, 0.0153, 0.0154, 0.0153, 0.0153, 0.0154, 0.0154, 0.0153,\n",
+ " 0.0154, 0.0155, 0.0153, 0.0154, 0.0153, 0.0154, 0.0154, 0.0153, 0.0153,\n",
+ " 0.0154, 0.0154, 0.0154, 0.0153, 0.0154, 0.0154, 0.0154, 0.0153, 0.0153,\n",
+ " 0.0154, 0.0154, 0.0153, 0.0153, 0.0154, 0.0154, 0.0155, 0.0153, 0.0154,\n",
+ " 0.0153, 0.0153, 0.0154, 0.0154, 0.0153, 0.0154, 0.0154, 0.0154, 0.0154,\n",
+ " 0.0154, 0.0153, 0.0153, 0.0154, 0.0154, 0.0154, 0.0155, 0.0154, 0.0154,\n",
+ " 0.0154, 0.0154, 0.0154, 0.0154, 0.0154, 0.0154, 0.0154, 0.0154, 0.0154,\n",
+ " 0.0154, 0.0154], device='cuda:0', grad_fn=)\n"
+ ]
+ }
+ ],
+ "source": [
+ "observation = train_data[0].to(DEVICE)\n",
+ "\n",
+ "logits = policy(\n",
+ " observation.constraint_features,\n",
+ " observation.edge_index,\n",
+ " observation.edge_attr,\n",
+ " observation.variable_features,\n",
+ ")\n",
+ "action_distribution = F.softmax(logits[observation.candidates], dim=-1)\n",
+ "\n",
+ "print(action_distribution)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As can be seen, with randomly initialized weights, the initial distributions tend to be close to uniform.\n",
+ "Next, we will define two helper functions: one to train or evaluate the model on a whole epoch and compute metrics for monitoring, and one for padding tensors when doing predictions on a batch of graphs of potentially different number of variables."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def process(policy, data_loader, optimizer=None):\n",
+ " \"\"\"\n",
+ " This function will process a whole epoch of training or validation, depending on whether an optimizer is provided.\n",
+ " \"\"\"\n",
+ " mean_loss = 0\n",
+ " mean_acc = 0\n",
+ "\n",
+ " n_samples_processed = 0\n",
+ " with torch.set_grad_enabled(optimizer is not None):\n",
+ " for batch in data_loader:\n",
+ " batch = batch.to(DEVICE)\n",
+ " # Compute the logits (i.e. pre-softmax activations) according to the policy on the concatenated graphs\n",
+ " logits = policy(\n",
+ " batch.constraint_features,\n",
+ " batch.edge_index,\n",
+ " batch.edge_attr,\n",
+ " batch.variable_features,\n",
+ " )\n",
+ " # Index the results by the candidates, and split and pad them\n",
+ " logits = pad_tensor(logits[batch.candidates], batch.nb_candidates)\n",
+ " # Compute the usual cross-entropy classification loss\n",
+ " loss = F.cross_entropy(logits, batch.candidate_choices)\n",
+ "\n",
+ " if optimizer is not None:\n",
+ " optimizer.zero_grad()\n",
+ " loss.backward()\n",
+ " optimizer.step()\n",
+ "\n",
+ " true_scores = pad_tensor(batch.candidate_scores, batch.nb_candidates)\n",
+ " true_bestscore = true_scores.max(dim=-1, keepdims=True).values\n",
+ "\n",
+ " predicted_bestindex = logits.max(dim=-1, keepdims=True).indices\n",
+ " accuracy = (\n",
+ " (true_scores.gather(-1, predicted_bestindex) == true_bestscore)\n",
+ " .float()\n",
+ " .mean()\n",
+ " .item()\n",
+ " )\n",
+ "\n",
+ " mean_loss += loss.item() * batch.num_graphs\n",
+ " mean_acc += accuracy * batch.num_graphs\n",
+ " n_samples_processed += batch.num_graphs\n",
+ "\n",
+ " mean_loss /= n_samples_processed\n",
+ " mean_acc /= n_samples_processed\n",
+ " return mean_loss, mean_acc\n",
+ "\n",
+ "\n",
+ "def pad_tensor(input_, pad_sizes, pad_value=-1e8):\n",
+ " \"\"\"\n",
+ " This utility function splits a tensor and pads each split to make them all the same size, then stacks them.\n",
+ " \"\"\"\n",
+ " max_pad_size = pad_sizes.max()\n",
+ " output = input_.split(pad_sizes.cpu().numpy().tolist())\n",
+ " output = torch.stack(\n",
+ " [\n",
+ " F.pad(slice_, (0, max_pad_size - slice_.size(0)), \"constant\", pad_value)\n",
+ " for slice_ in output\n",
+ " ],\n",
+ " dim=0,\n",
+ " )\n",
+ " return output"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "After this, we can actually create the model and train it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1\n",
+ "Train loss: 4.116, accuracy 0.294\n",
+ "Valid loss: 3.767, accuracy 0.305\n",
+ "Epoch 2\n",
+ "Train loss: 3.720, accuracy 0.399\n",
+ "Valid loss: 3.390, accuracy 0.440\n",
+ "Epoch 3\n",
+ "Train loss: 3.485, accuracy 0.465\n",
+ "Valid loss: 3.275, accuracy 0.430\n",
+ "Epoch 4\n",
+ "Train loss: 3.490, accuracy 0.469\n",
+ "Valid loss: 3.289, accuracy 0.385\n",
+ "Epoch 5\n",
+ "Train loss: 3.489, accuracy 0.484\n",
+ "Valid loss: 3.275, accuracy 0.435\n",
+ "Epoch 6\n",
+ "Train loss: 3.447, accuracy 0.505\n",
+ "Valid loss: 3.241, accuracy 0.445\n",
+ "Epoch 7\n",
+ "Train loss: 3.427, accuracy 0.501\n",
+ "Valid loss: 3.239, accuracy 0.430\n",
+ "Epoch 8\n",
+ "Train loss: 3.441, accuracy 0.500\n",
+ "Valid loss: 3.222, accuracy 0.405\n",
+ "Epoch 9\n",
+ "Train loss: 3.424, accuracy 0.502\n",
+ "Valid loss: 3.227, accuracy 0.465\n",
+ "Epoch 10\n",
+ "Train loss: 3.420, accuracy 0.491\n",
+ "Valid loss: 3.283, accuracy 0.410\n",
+ "Epoch 11\n",
+ "Train loss: 3.418, accuracy 0.486\n",
+ "Valid loss: 3.206, accuracy 0.440\n",
+ "Epoch 12\n",
+ "Train loss: 3.399, accuracy 0.497\n",
+ "Valid loss: 3.174, accuracy 0.455\n",
+ "Epoch 13\n",
+ "Train loss: 3.383, accuracy 0.506\n",
+ "Valid loss: 3.245, accuracy 0.440\n",
+ "Epoch 14\n",
+ "Train loss: 3.391, accuracy 0.512\n",
+ "Valid loss: 3.182, accuracy 0.435\n",
+ "Epoch 15\n",
+ "Train loss: 3.370, accuracy 0.517\n",
+ "Valid loss: 3.183, accuracy 0.460\n",
+ "Epoch 16\n",
+ "Train loss: 3.381, accuracy 0.507\n",
+ "Valid loss: 3.206, accuracy 0.425\n",
+ "Epoch 17\n",
+ "Train loss: 3.336, accuracy 0.520\n",
+ "Valid loss: 3.149, accuracy 0.455\n",
+ "Epoch 18\n",
+ "Train loss: 3.342, accuracy 0.517\n",
+ "Valid loss: 3.183, accuracy 0.450\n",
+ "Epoch 19\n",
+ "Train loss: 3.363, accuracy 0.525\n",
+ "Valid loss: 3.144, accuracy 0.450\n",
+ "Epoch 20\n",
+ "Train loss: 3.322, accuracy 0.514\n",
+ "Valid loss: 3.182, accuracy 0.455\n",
+ "Epoch 21\n",
+ "Train loss: 3.316, accuracy 0.526\n",
+ "Valid loss: 3.131, accuracy 0.475\n",
+ "Epoch 22\n",
+ "Train loss: 3.283, accuracy 0.522\n",
+ "Valid loss: 3.130, accuracy 0.465\n",
+ "Epoch 23\n",
+ "Train loss: 3.232, accuracy 0.552\n",
+ "Valid loss: 3.119, accuracy 0.430\n",
+ "Epoch 24\n",
+ "Train loss: 3.221, accuracy 0.529\n",
+ "Valid loss: 3.147, accuracy 0.465\n",
+ "Epoch 25\n",
+ "Train loss: 3.302, accuracy 0.520\n",
+ "Valid loss: 3.244, accuracy 0.435\n",
+ "Epoch 26\n",
+ "Train loss: 3.248, accuracy 0.511\n",
+ "Valid loss: 3.058, accuracy 0.475\n",
+ "Epoch 27\n",
+ "Train loss: 3.145, accuracy 0.530\n",
+ "Valid loss: 3.075, accuracy 0.455\n",
+ "Epoch 28\n",
+ "Train loss: 3.136, accuracy 0.524\n",
+ "Valid loss: 2.994, accuracy 0.450\n",
+ "Epoch 29\n",
+ "Train loss: 3.123, accuracy 0.530\n",
+ "Valid loss: 2.982, accuracy 0.495\n",
+ "Epoch 30\n",
+ "Train loss: 3.098, accuracy 0.550\n",
+ "Valid loss: 3.010, accuracy 0.470\n",
+ "Epoch 31\n",
+ "Train loss: 3.066, accuracy 0.546\n",
+ "Valid loss: 3.024, accuracy 0.485\n",
+ "Epoch 32\n",
+ "Train loss: 3.088, accuracy 0.527\n",
+ "Valid loss: 3.022, accuracy 0.460\n",
+ "Epoch 33\n",
+ "Train loss: 3.119, accuracy 0.540\n",
+ "Valid loss: 3.110, accuracy 0.425\n",
+ "Epoch 34\n",
+ "Train loss: 3.070, accuracy 0.527\n",
+ "Valid loss: 3.070, accuracy 0.440\n",
+ "Epoch 35\n",
+ "Train loss: 3.054, accuracy 0.531\n",
+ "Valid loss: 3.034, accuracy 0.445\n",
+ "Epoch 36\n",
+ "Train loss: 3.045, accuracy 0.544\n",
+ "Valid loss: 3.073, accuracy 0.435\n",
+ "Epoch 37\n",
+ "Train loss: 3.036, accuracy 0.535\n",
+ "Valid loss: 3.001, accuracy 0.470\n",
+ "Epoch 38\n",
+ "Train loss: 3.052, accuracy 0.539\n",
+ "Valid loss: 3.097, accuracy 0.435\n",
+ "Epoch 39\n",
+ "Train loss: 3.110, accuracy 0.529\n",
+ "Valid loss: 2.977, accuracy 0.435\n",
+ "Epoch 40\n",
+ "Train loss: 3.071, accuracy 0.537\n",
+ "Valid loss: 3.034, accuracy 0.450\n",
+ "Epoch 41\n",
+ "Train loss: 3.064, accuracy 0.545\n",
+ "Valid loss: 3.006, accuracy 0.485\n",
+ "Epoch 42\n",
+ "Train loss: 3.002, accuracy 0.557\n",
+ "Valid loss: 3.027, accuracy 0.410\n",
+ "Epoch 43\n",
+ "Train loss: 3.000, accuracy 0.540\n",
+ "Valid loss: 3.044, accuracy 0.430\n",
+ "Epoch 44\n",
+ "Train loss: 3.005, accuracy 0.531\n",
+ "Valid loss: 3.032, accuracy 0.465\n",
+ "Epoch 45\n",
+ "Train loss: 3.019, accuracy 0.552\n",
+ "Valid loss: 3.055, accuracy 0.415\n",
+ "Epoch 46\n",
+ "Train loss: 2.972, accuracy 0.531\n",
+ "Valid loss: 3.039, accuracy 0.440\n",
+ "Epoch 47\n",
+ "Train loss: 3.006, accuracy 0.536\n",
+ "Valid loss: 3.016, accuracy 0.445\n",
+ "Epoch 48\n",
+ "Train loss: 3.008, accuracy 0.531\n",
+ "Valid loss: 3.041, accuracy 0.440\n",
+ "Epoch 49\n",
+ "Train loss: 3.006, accuracy 0.545\n",
+ "Valid loss: 3.002, accuracy 0.450\n",
+ "Epoch 50\n",
+ "Train loss: 2.946, accuracy 0.556\n",
+ "Valid loss: 3.030, accuracy 0.450\n"
+ ]
+ }
+ ],
+ "source": [
+ "optimizer = torch.optim.Adam(policy.parameters(), lr=LEARNING_RATE)\n",
+ "for epoch in range(NB_EPOCHS):\n",
+ " print(f\"Epoch {epoch+1}\")\n",
+ "\n",
+ " train_loss, train_acc = process(policy, train_loader, optimizer)\n",
+ " print(f\"Train loss: {train_loss:0.3f}, accuracy {train_acc:0.3f}\")\n",
+ "\n",
+ " valid_loss, valid_acc = process(policy, valid_loader, None)\n",
+ " print(f\"Valid loss: {valid_loss:0.3f}, accuracy {valid_acc:0.3f}\")\n",
+ "\n",
+ "torch.save(policy.state_dict(), \"trained_params.pkl\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 3 Evaluation\n",
+ "\n",
+ "Finally, we can evaluate the performance of the model. We first define appropriate environments. For benchmarking purposes, we include a trivial environment that merely runs SCIP."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scip_parameters = {\n",
+ " \"separating/maxrounds\": 0,\n",
+ " \"presolving/maxrestarts\": 0,\n",
+ " \"limits/time\": 3600,\n",
+ "}\n",
+ "env = ecole.environment.Branching(\n",
+ " observation_function=ecole.observation.NodeBipartite(),\n",
+ " information_function={\n",
+ " \"nb_nodes\": ecole.reward.NNodes(),\n",
+ " \"time\": ecole.reward.SolvingTime(),\n",
+ " },\n",
+ " scip_params=scip_parameters,\n",
+ ")\n",
+ "default_env = ecole.environment.Configuring(\n",
+ " observation_function=None,\n",
+ " information_function={\n",
+ " \"nb_nodes\": ecole.reward.NNodes(),\n",
+ " \"time\": ecole.reward.SolvingTime(),\n",
+ " },\n",
+ " scip_params=scip_parameters,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Then we can simply follow the environments, taking steps appropriately according to the GNN policy."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Instance 0 | SCIP nb nodes 97 | SCIP time 4.98 \n",
+ " | GNN nb nodes 466 | GNN time 4.19 \n",
+ " | Gain -380.41% | Gain 15.80%\n",
+ "Instance 1 | SCIP nb nodes 13 | SCIP time 2.54 \n",
+ " | GNN nb nodes 85 | GNN time 1.76 \n",
+ " | Gain -553.85% | Gain 30.84%\n",
+ "Instance 2 | SCIP nb nodes 11 | SCIP time 2.79 \n",
+ " | GNN nb nodes 154 | GNN time 2.41 \n",
+ " | Gain -1300.00% | Gain 13.60%\n",
+ "Instance 3 | SCIP nb nodes 1 | SCIP time 1.67 \n",
+ " | GNN nb nodes 12 | GNN time 1.19 \n",
+ " | Gain -1100.00% | Gain 28.74%\n",
+ "Instance 4 | SCIP nb nodes 23 | SCIP time 3.73 \n",
+ " | GNN nb nodes 195 | GNN time 2.46 \n",
+ " | Gain -747.83% | Gain 34.12%\n",
+ "Instance 5 | SCIP nb nodes 3 | SCIP time 1.92 \n",
+ " | GNN nb nodes 31 | GNN time 1.39 \n",
+ " | Gain -933.33% | Gain 27.76%\n",
+ "Instance 6 | SCIP nb nodes 1 | SCIP time 1.30 \n",
+ " | GNN nb nodes 1 | GNN time 1.47 \n",
+ " | Gain 0.00% | Gain -13.20%\n",
+ "Instance 7 | SCIP nb nodes 1 | SCIP time 1.32 \n",
+ " | GNN nb nodes 1 | GNN time 1.38 \n",
+ " | Gain 0.00% | Gain -3.89%\n",
+ "Instance 8 | SCIP nb nodes 3 | SCIP time 2.31 \n",
+ " | GNN nb nodes 29 | GNN time 1.56 \n",
+ " | Gain -866.67% | Gain 32.55%\n",
+ "Instance 9 | SCIP nb nodes 3 | SCIP time 2.04 \n",
+ " | GNN nb nodes 25 | GNN time 1.50 \n",
+ " | Gain -733.33% | Gain 26.25%\n",
+ "Instance 10 | SCIP nb nodes 7 | SCIP time 1.59 \n",
+ " | GNN nb nodes 75 | GNN time 1.52 \n",
+ " | Gain -971.43% | Gain 4.56%\n",
+ "Instance 11 | SCIP nb nodes 11 | SCIP time 2.76 \n",
+ " | GNN nb nodes 93 | GNN time 2.37 \n",
+ " | Gain -745.45% | Gain 14.02%\n",
+ "Instance 12 | SCIP nb nodes 73 | SCIP time 3.58 \n",
+ " | GNN nb nodes 185 | GNN time 2.59 \n",
+ " | Gain -153.42% | Gain 27.68%\n",
+ "Instance 13 | SCIP nb nodes 1 | SCIP time 0.03 \n",
+ " | GNN nb nodes 1 | GNN time 0.03 \n",
+ " | Gain 0.00% | Gain 1.69%\n",
+ "Instance 14 | SCIP nb nodes 3 | SCIP time 1.55 \n",
+ " | GNN nb nodes 21 | GNN time 1.38 \n",
+ " | Gain -600.00% | Gain 11.01%\n",
+ "Instance 15 | SCIP nb nodes 7 | SCIP time 2.31 \n",
+ " | GNN nb nodes 51 | GNN time 1.65 \n",
+ " | Gain -628.57% | Gain 28.30%\n",
+ "Instance 16 | SCIP nb nodes 3 | SCIP time 1.59 \n",
+ " | GNN nb nodes 13 | GNN time 1.47 \n",
+ " | Gain -333.33% | Gain 7.35%\n",
+ "Instance 17 | SCIP nb nodes 1 | SCIP time 1.10 \n",
+ " | GNN nb nodes 5 | GNN time 1.16 \n",
+ " | Gain -400.00% | Gain -5.15%\n",
+ "Instance 18 | SCIP nb nodes 15 | SCIP time 2.18 \n",
+ " | GNN nb nodes 153 | GNN time 1.74 \n",
+ " | Gain -920.00% | Gain 20.00%\n",
+ "Instance 19 | SCIP nb nodes 13 | SCIP time 3.26 \n",
+ " | GNN nb nodes 61 | GNN time 2.54 \n",
+ " | Gain -369.23% | Gain 22.27%\n"
+ ]
+ }
+ ],
+ "source": [
+ "instances = ecole.instance.SetCoverGenerator(n_rows=500, n_cols=1000, density=0.05)\n",
+ "for instance_count, instance in zip(range(NB_EVAL_INSTANCES), instances):\n",
+ " # Run the GNN brancher\n",
+ " nb_nodes, time = 0, 0\n",
+ " observation, action_set, _, done, info = env.reset(instance)\n",
+ " nb_nodes += info[\"nb_nodes\"]\n",
+ " time += info[\"time\"]\n",
+ " while not done:\n",
+ " with torch.no_grad():\n",
+ " observation = (\n",
+ " torch.from_numpy(observation.row_features.astype(np.float32)).to(DEVICE),\n",
+ " torch.from_numpy(observation.edge_features.indices.astype(np.int64)).to(DEVICE),\n",
+ " torch.from_numpy(observation.edge_features.values.astype(np.float32)).view(-1, 1).to(DEVICE),\n",
+ " torch.from_numpy(observation.variable_features.astype(np.float32)).to(DEVICE),\n",
+ " )\n",
+ " logits = policy(*observation)\n",
+ " action = action_set[logits[action_set.astype(np.int64)].argmax()]\n",
+ " observation, action_set, _, done, info = env.step(action)\n",
+ " nb_nodes += info[\"nb_nodes\"]\n",
+ " time += info[\"time\"]\n",
+ "\n",
+ " # Run SCIP's default brancher\n",
+ " default_env.reset(instance)\n",
+ " _, _, _, _, default_info = default_env.step({})\n",
+ "\n",
+ " print(f\"Instance {instance_count: >3} | SCIP nb nodes {int(default_info['nb_nodes']): >4d} | SCIP time {default_info['time']: >6.2f} \")\n",
+ " print(f\" | GNN nb nodes {int(nb_nodes): >4d} | GNN time {time: >6.2f} \")\n",
+ " print(f\" | Gain {100*(1-nb_nodes/default_info['nb_nodes']): >8.2f}% | Gain {100*(1-time/default_info['time']): >8.2f}%\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can also evaluate on instances larger and harder than those trained on, say with 600 rather than 500 constraints.\n",
+ "In addition, we showcase that the cumulative number of nodes and time required to solve an instance can also be computed directly using the `.cumsum()` method."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Instance 0 | SCIP nb nodes 29 | SCIP time 3.21 \n",
+ " | GNN nb nodes 113 | GNN time 2.12 \n",
+ " | Gain -289.66% | Gain 34.19%\n",
+ "Instance 1 | SCIP nb nodes 9 | SCIP time 3.32 \n",
+ " | GNN nb nodes 93 | GNN time 2.57 \n",
+ " | Gain -933.33% | Gain 22.41%\n",
+ "Instance 2 | SCIP nb nodes 17 | SCIP time 4.60 \n",
+ " | GNN nb nodes 103 | GNN time 3.13 \n",
+ " | Gain -505.88% | Gain 31.89%\n",
+ "Instance 3 | SCIP nb nodes 3 | SCIP time 2.63 \n",
+ " | GNN nb nodes 19 | GNN time 1.89 \n",
+ " | Gain -533.33% | Gain 28.39%\n",
+ "Instance 4 | SCIP nb nodes 7 | SCIP time 2.88 \n",
+ " | GNN nb nodes 52 | GNN time 2.33 \n",
+ " | Gain -642.86% | Gain 19.36%\n",
+ "Instance 5 | SCIP nb nodes 201 | SCIP time 5.39 \n",
+ " | GNN nb nodes 569 | GNN time 4.66 \n",
+ " | Gain -183.08% | Gain 13.69%\n",
+ "Instance 6 | SCIP nb nodes 912 | SCIP time 9.93 \n",
+ " | GNN nb nodes 1548 | GNN time 10.13 \n",
+ " | Gain -69.74% | Gain -2.02%\n",
+ "Instance 7 | SCIP nb nodes 7 | SCIP time 3.50 \n",
+ " | GNN nb nodes 75 | GNN time 2.54 \n",
+ " | Gain -971.43% | Gain 27.37%\n",
+ "Instance 8 | SCIP nb nodes 37 | SCIP time 3.78 \n",
+ " | GNN nb nodes 391 | GNN time 3.51 \n",
+ " | Gain -956.76% | Gain 6.94%\n",
+ "Instance 9 | SCIP nb nodes 1 | SCIP time 1.29 \n",
+ " | GNN nb nodes 1 | GNN time 1.13 \n",
+ " | Gain 0.00% | Gain 12.59%\n",
+ "Instance 10 | SCIP nb nodes 59 | SCIP time 3.98 \n",
+ " | GNN nb nodes 177 | GNN time 2.75 \n",
+ " | Gain -200.00% | Gain 30.96%\n",
+ "Instance 11 | SCIP nb nodes 1 | SCIP time 1.50 \n",
+ " | GNN nb nodes 15 | GNN time 1.49 \n",
+ " | Gain -1400.00% | Gain 0.51%\n",
+ "Instance 12 | SCIP nb nodes 3 | SCIP time 2.17 \n",
+ " | GNN nb nodes 25 | GNN time 1.72 \n",
+ " | Gain -733.33% | Gain 20.78%\n",
+ "Instance 13 | SCIP nb nodes 3 | SCIP time 2.50 \n",
+ " | GNN nb nodes 23 | GNN time 1.81 \n",
+ " | Gain -666.67% | Gain 27.66%\n",
+ "Instance 14 | SCIP nb nodes 147 | SCIP time 5.41 \n",
+ " | GNN nb nodes 349 | GNN time 4.39 \n",
+ " | Gain -137.41% | Gain 18.86%\n",
+ "Instance 15 | SCIP nb nodes 1 | SCIP time 1.33 \n",
+ " | GNN nb nodes 5 | GNN time 1.28 \n",
+ " | Gain -400.00% | Gain 3.52%\n",
+ "Instance 16 | SCIP nb nodes 122 | SCIP time 4.74 \n",
+ " | GNN nb nodes 223 | GNN time 3.16 \n",
+ " | Gain -82.79% | Gain 33.40%\n",
+ "Instance 17 | SCIP nb nodes 1 | SCIP time 2.05 \n",
+ " | GNN nb nodes 53 | GNN time 1.96 \n",
+ " | Gain -5200.00% | Gain 4.36%\n",
+ "Instance 18 | SCIP nb nodes 1 | SCIP time 1.41 \n",
+ " | GNN nb nodes 5 | GNN time 1.24 \n",
+ " | Gain -400.00% | Gain 12.35%\n",
+ "Instance 19 | SCIP nb nodes 14 | SCIP time 2.88 \n",
+ " | GNN nb nodes 93 | GNN time 1.98 \n",
+ " | Gain -564.29% | Gain 31.17%\n"
+ ]
+ }
+ ],
+ "source": [
+ "instances = ecole.instance.SetCoverGenerator(n_rows=600, n_cols=1000, density=0.05)\n",
+ "scip_parameters = {\n",
+ " \"separating/maxrounds\": 0,\n",
+ " \"presolving/maxrestarts\": 0,\n",
+ " \"limits/time\": 3600,\n",
+ "}\n",
+ "env = ecole.environment.Branching(\n",
+ " observation_function=ecole.observation.NodeBipartite(),\n",
+ " information_function={\n",
+ " \"nb_nodes\": ecole.reward.NNodes().cumsum(),\n",
+ " \"time\": ecole.reward.SolvingTime().cumsum(),\n",
+ " },\n",
+ " scip_params=scip_parameters,\n",
+ ")\n",
+ "default_env = ecole.environment.Configuring(\n",
+ " observation_function=None,\n",
+ " information_function={\n",
+ " \"nb_nodes\": ecole.reward.NNodes().cumsum(),\n",
+ " \"time\": ecole.reward.SolvingTime().cumsum(),\n",
+ " },\n",
+ " scip_params=scip_parameters,\n",
+ ")\n",
+ "\n",
+ "for instance_count, instance in zip(range(NB_EVAL_INSTANCES), instances):\n",
+ " # Run the GNN brancher\n",
+ " observation, action_set, _, done, info = env.reset(instance)\n",
+ " while not done:\n",
+ " with torch.no_grad():\n",
+ " observation = (\n",
+ " torch.from_numpy(observation.row_features.astype(np.float32)).to(DEVICE),\n",
+ " torch.from_numpy(observation.edge_features.indices.astype(np.int64)).to(DEVICE),\n",
+ " torch.from_numpy(observation.edge_features.values.astype(np.float32)).view(-1, 1).to(DEVICE),\n",
+ " torch.from_numpy(observation.variable_features.astype(np.float32)).to(DEVICE),\n",
+ " )\n",
+ " logits = policy(*observation)\n",
+ " action = action_set[logits[action_set.astype(np.int64)].argmax()]\n",
+ " observation, action_set, _, done, info = env.step(action)\n",
+ " nb_nodes = info[\"nb_nodes\"]\n",
+ " time = info[\"time\"]\n",
+ "\n",
+ " # Run SCIP's default brancher\n",
+ " default_env.reset(instance)\n",
+ " _, _, _, _, default_info = default_env.step({})\n",
+ "\n",
+ " print(\n",
+ " f\"Instance {instance_count: >3} | SCIP nb nodes {int(default_info['nb_nodes']): >4d} | SCIP time {default_info['time']: >6.2f} \"\n",
+ " )\n",
+ " print(\n",
+ " f\" | GNN nb nodes {int(nb_nodes): >4d} | GNN time {time: >6.2f} \"\n",
+ " )\n",
+ " print(\n",
+ " f\" | Gain {100*(1-nb_nodes/default_info['nb_nodes']): >8.2f}% | Gain {100*(1-time/default_info['time']): >8.2f}%\"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### References\n",
+ "\n",
+ "Gasse, M., Chételat, D., Ferroni, N., Charlin, L. and Lodi, A. (2019). Exact combinatorial optimization with graph convolutional neural networks. In Advances in Neural Information Processing Systems (pp. 15580-15592)."
+ ]
+ }
+ ],
+ "metadata": {
+ "celltoolbar": "Tags",
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/ecole/examples/configuring-bandits/conda-requirements.yaml b/ecole/examples/configuring-bandits/conda-requirements.yaml
new file mode 100644
index 0000000..3c13c14
--- /dev/null
+++ b/ecole/examples/configuring-bandits/conda-requirements.yaml
@@ -0,0 +1,13 @@
+channels:
+ - conda-forge
+
+dependencies:
+ - python
+ - ipykernel
+ - jupyter
+ - ecole
+ - numpy
+ - scikit-optimize=0.8.1
+ # Pin scikit-learn for scikit-optimize https://github.com/scikit-optimize/scikit-optimize/issues/569
+ - scikit-learn<1.0
+ - matplotlib
diff --git a/ecole/examples/configuring-bandits/example.ipynb b/ecole/examples/configuring-bandits/example.ipynb
new file mode 100644
index 0000000..144dc01
--- /dev/null
+++ b/ecole/examples/configuring-bandits/example.ipynb
@@ -0,0 +1,370 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Solver configuration as a bandit problem\n",
+ "\n",
+ "In this tutorial we are interested in minimizing the expected branch-and-bound tree size of the SCIP solver by tuning the parameter [`branching/scorefac`](https://www.scipopt.org/doc/html/PARAMETERS.php), which takes values in the range $[0,1]$. This parameter, used in combination with the sum score function (`branching/scorefunc=s`), controls the weighting of downward and upward gain predictions in the computation of branching scores. It has a default value of 0.167.\n",
+ "\n",
+ "Dependencies are given for conda in the file `conda-requirements.yaml`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Train, optimization, and test parameters of the notebook."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "tags": [
+ "parameters"
+ ]
+ },
+ "outputs": [],
+ "source": [
+ "# Parameters set Papermill during testing, do not rename.\n",
+ "train_n_items = 100\n",
+ "train_n_bids = 100\n",
+ "train_add_item_prob = 0.7\n",
+ "optim_n_iters = 100\n",
+ "optim_n_burnins = 10\n",
+ "optim_seed = 42\n",
+ "test_n_items = 150\n",
+ "test_n_bids = 750\n",
+ "test_add_item_prob = 0.7\n",
+ "test_seed = 1337\n",
+ "test_n_evals = 5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import ecole as ec\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import skopt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "## 1. Setting up the Ecole environment\n",
+ "\n",
+ "We formulate this parameter tuning task as a continuum-armed bandit problem, which we instantiate using a [`Configuring`](https://doc.ecole.ai/py/en/stable/reference/environments.html#configuring) environment. We request no observation (non-contextual bandit), and use the negative number of nodes as a reward (tree size minimization)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "env = ec.environment.Configuring(\n",
+ " # set up a few SCIP parameters\n",
+ " scip_params={\n",
+ " \"branching/scorefunc\": \"s\", # sum score function\n",
+ " \"branching/vanillafullstrong/priority\": 666666, # use vanillafullstrong (highest priority)\n",
+ " \"presolving/maxrounds\": 0, # deactivate presolving\n",
+ " },\n",
+ " # pure bandit, no observation\n",
+ " observation_function=None,\n",
+ " # minimize the total number of nodes\n",
+ " reward_function=-ec.reward.NNodes(),\n",
+ " # collect additional metrics for information purposes\n",
+ " information_function={\n",
+ " \"nnodes\": ec.reward.NNodes().cumsum(),\n",
+ " \"lpiters\": ec.reward.LpIterations().cumsum(),\n",
+ " \"time\": ec.reward.SolvingTime().cumsum(),\n",
+ " },\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We set up SCIP to use the sum score function for branching (`branching/scorefunc=s`), and the *vanillafullstrong* branching rule to mitigate the impact of branching heuristics (`branching/vanillafullstrong/priority=666666`). For the purpose of the tutorial we also deactivate presolving (`presolving/maxrounds=0`) in order to reduce computational time."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. Setting up the training distribution\n",
+ "\n",
+ "For the purpose of this tutorial we will consider randomly generated Combinatorial Auction problems, as the problem distribution for which we want to configure the solver. We hence set up a `CombinatorialAuctionGenerator` that will generate such instances on the fly."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# infinite instance generator, new instances will be generated on-the-fly\n",
+ "instances = ec.instance.CombinatorialAuctionGenerator(\n",
+ " n_items=train_n_items, n_bids=train_n_bids, add_item_prob=train_add_item_prob\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For training we consider small-sized instances ($100\\times 100$), which are solved within seconds by SCIP but are difficult enough to produce tens of branch-and-bound nodes."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Solving the control problem\n",
+ "\n",
+ "We can now readily solve the optimization problem using an off-the-shelf optimization library, such as `scikit-optimize`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "iteration 10 / 100\n",
+ "iteration 20 / 100\n",
+ "iteration 30 / 100\n",
+ "iteration 40 / 100\n",
+ "iteration 50 / 100\n",
+ "iteration 60 / 100\n",
+ "iteration 70 / 100\n",
+ "iteration 80 / 100\n",
+ "iteration 90 / 100\n",
+ "iteration 100 / 100\n"
+ ]
+ }
+ ],
+ "source": [
+ "env.seed(optim_seed) # environment (SCIP)\n",
+ "instances.seed(optim_seed) # instance generator\n",
+ "rng = np.random.RandomState(optim_seed) # optimizer\n",
+ "\n",
+ "# set up the optimizer\n",
+ "opt = skopt.Optimizer(\n",
+ " dimensions=[(0.0, 1.0)],\n",
+ " base_estimator=\"GP\",\n",
+ " n_initial_points=optim_n_burnins,\n",
+ " random_state=rng,\n",
+ " acq_func=\"PI\",\n",
+ " acq_optimizer=\"sampling\",\n",
+ " acq_optimizer_kwargs={\"n_points\": 10},\n",
+ ")\n",
+ "\n",
+ "assert optim_n_iters > optim_n_burnins\n",
+ "\n",
+ "# run the optimization\n",
+ "for i in range(optim_n_iters):\n",
+ "\n",
+ " if (i + 1) % 10 == 0:\n",
+ " print(f\"iteration {i+1} / {optim_n_iters}\")\n",
+ "\n",
+ " # pick up a new random instance\n",
+ " instance = next(instances)\n",
+ "\n",
+ " # start a new episode\n",
+ " env.reset(instance)\n",
+ "\n",
+ " # get the next action from the optimizer\n",
+ " x = opt.ask()\n",
+ " action = {\"branching/scorefac\": x[0]}\n",
+ "\n",
+ " # apply the action and collect the reward\n",
+ " _, _, reward, _, _ = env.step(action)\n",
+ "\n",
+ " # update the optimizer\n",
+ " opt.tell(x, -reward) # minimize the negated reward (eq. maximize the reward)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "We can now visualize the result of the optimization process."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaMAAAEaCAYAAAC8UDhJAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAABlmklEQVR4nO2dd5hU1fn4P+/O9gJLk14WpMPSEQWVoqIoYgHFGiwxxjTjL1GTfK1pmKgxahJDLKjRYBfBrsEeFBCkg3SQXhbYvrPz/v44d5bZZXbn7u7Mzszu+TzPPDP33nPOfc/Mnfve95z3vK+oKhaLxWKxRJOEaAtgsVgsFotVRhaLxWKJOlYZWSwWiyXqWGVksVgslqhjlZHFYrFYoo5VRhaLxWKJOlYZNTFEpJuIqIgkuig7Q0Q+q+N5tojIGXWpW017+SLSPVztBbRbnz6micg8ETksIi+FW7aGRkRWicjYKJ6/i/M7e6IlgyV6WGUUwzg39FIRaV1l/zJHoXSLkmj1QkTGisiO2tRR1UxV3RQpmerIVKAt0EpVp4nIbBH5XThPICLjRGSBo/C2BDnezTleKCJrqz4AiMjlIrJVRApE5HURaVnduVS1v6p+5NS7W0T+Hc6+BJG90gOLqm5zfufySJ7XEptYZRT7bAYu82+IyEAgLXriWALoCqxXVW84GqvGWi0AngR+WU21/wBLgVbAb4CXRaSN015/4J/AVRilWQj8PRyyhsKN5W2xVEJV7StGX8AW4P+ARQH77sfcdBTo5uxrDjwD7AO2OnUSnGMep85+YBPwI6duYkDdJ4BdwHfA7wCPc2wG8FkN8p0PrALygI+AvlVk/xWwGjgEPAWkAhlAEeAD8p1XB2Ak8D+nrV3Ao0ByQHsKnOh8ng38DXgTOAp8CfQIKNsHeB84CKwDLgk41gp4AzgCfAX8NkQfXwJ2A4eBT4D+zv57gFKgzOnDD5zPpc72PKdcB+AV57fZDPw0oO27gZeBfzvyXF+DHGcAW6rs6wWUAFkB+z4FbnQ+/wF4PuBYD0e+rGrOscU5z9lV+vaNy2vlc+Avzvf+O+d8/wUOYK6/54Bsp/yzzjVQ5JzjVqAbla/NDs5vdRDYAHy/ynf3Iua6P4q5DocHHL/NkfGocw1MiPb/2b5qfkVdAPuq4cc5dnNYB/TFKJbtmCfyQGX0DDAXyHL+0OuB65xjNwJrgc5AS2BBlT/865in5wzgBMwN+gfOsRlUc6N2boQFwJlAknMz2YCjQBzZVwac93Pgd86xscCOKu0NA0YBiU4f1gA3BxyvqowOYhRYonOTm+Mcy3C+o2ucY0OdG6FficxxbmIZwADnhlWTMrrW+V5TgIeAZQHH7gb+HbA9299HZzsBWALcCSQD3TEPBBMD6pcBFzhl02qQI5gyuhBYU2Xfo8Ajzue5wG1VjucDw2q63oL1zeW14gV+4nzvacCJzvWRArTBKPOHgp3P2e5G5WvzY4wllwoMxij0CQHyFQOTMP+LPwILnWO9nWugQ0C7PYL12b5i52WH6eKDZ4GrMX/stZgbKADOZO+lwK9U9aiqbgEewAzNAFyCuQFsV9WDmD+tv25b4BzMTb9AVfdinmynu5DpUuBNVX1fVcsw1lcacEpAmUcDzvt7AoYbq6KqS1R1oap6nT78Ezi9hvO/qqpfqRkiew5zswI4D3PTfspp62uMZTLV+a4uBu50+rsSeLqmTqrqk873WoK5AQ4SkeY11QlgBNBGVe9V1VI1c17/ovL3+z9VfV1Vfapa5LJdP5kYiy2Qwxjl6ea4a1xeKztV9RHney9S1Q3O9VGiqvuAB6n5Nw08X2dgDEaZFqvqMuBxjl3XYB4i3lIzx/QsMMjZX45RgP1EJElVt6jqxtr2OZKIyJMisldEVoahrXHOPLL/VSwiF4RBzAbFjuvGB89inipzMFZQIK0xT91bA/ZtBTo6nztgnhIDj/npirFqdomIf19ClfLV0SGw
LVX1icj2gPMS5LwdqmtMRHphblbDgXTMtbmkhvPvDvhciLnxgunTSSKSF3A8EfMdtnE+V/d9VJXJg1Gi05y6PudQa46/yQejK9ChiiwezFCaHzffdXXkA82q7GuGGZpyc7w2uLlWKvVFRE4AHgZOxSjABMyQrRs6AAdVNVDWrZjrw0/VayBVRBJVdYOI3Ix5eOgvIu8Ct6jqTpfnbghmY6zYqv/nWqOqC3AexhwHlQ3Ae/Vtt6GxllEcoKpbMfMNk4BXqxzejxnq6RqwrwvHrKddmKGywGN+tmPmHFqrarbzaqaq/V2ItTPwnGLuUJ0DzkuQ8/pvBsFCxf8DY/X1VNVmwK8BCVIuFNuBjwP6k63GQ+uHmGEebxC5quNyYApmiKw5ZriHGuSq2q/twOYqsmSp6qQa6tSGVUB3EQm0dAY5+/3H/dYCjmt8CmYYNxTB+hLqWqla54/OvlznN72Syt9dTX3fCbSs0rfA67pm4VWfV9UxHBvSvs9NvYZCVT/BDDVXICI9ROQdEVkiIp+KSJ86ND0VeFtVC8MiaANilVH8cB0wXlULAnc6QxQvAr8XkSwR6QrcgpkUxzn2UxHpJCItgNsD6u7CPEE9ICLNRCTB+UO4GUp5EThXRCaISBLw/zA3qy8CyvzIOW9LjHJ5wdm/B2hVZbgrCzOJn+/8CX/oQoZgzAd6ichVIpLkvEaISF/nu3oVuFtE0kWkH/C9GtrKcvp0AGOt/SHEufdg5oX8fAUcEZHbnDVJHhEZICIj3HbG+U1SMVaJiEiqiCQDqOp6YBlwl7P/QiAXMywJZvhysoicKiIZwL2Y4U03ltEeoJuIJDjnqsu1koWxzvJEpCPHewRW/b4qUNXtmGvpj07fcjH/gedCCS4ivUVkvIikYOaVijBDd7HOLOAnqjoM+AV183ycjvGwjDusMooTVHWjqi6u5vBPMM4Em4DPgOcx7sBg5ijeBb4BvuZ4y+pqzDCf3+vtZaC9C3nWYZ50H8FYZ5OByapaGlDsecwNbJPz+p1Tdy3mD7NJRPJEpAPmz3c5ZgjpXxxTXLXCudGehflT7sQM5dyHsQgAfowZ0tuNGSp5qobmnsEMDX2H+X4Whjj9E5h5ijwRed1RfpMxQyibMd/T4xgryy2nYW6mb2EsgyIqD8FMxwxdHQJmAlOd+RlUdRXGgeU5YC9GOdzk8rz+RbwHRORr53Ntr5V7MA4khzGej1WvvT8C/+d8X78IUv8yjDW6E3gNuEtV33chewrmu9iP+Z1PwDwMxSwikomZb31JRJZh5kzbO8cuEpGVQV7vVmmjPTAQ83+PO0TVJtezWCyWhkbMovX5qjpARJoB61Q15INgDe39DOM1ekO4ZGxIrGVksVgsUUZVjwCbRWQamPFYERkUolpVLiNOh+jAKiOLxWJpcETkP5hF3r1FZIeIXAdcAVwnIt9gnE+m1KK9bhjHnI8jIG6DYIfpLBaLpYkiIk9i1ubtVdUBNZQbgZkzvVRVX46ELNYyslgslqbLbEz4p2px1tvdR4QdI+Ji0WtCQoKmpdnYoBaLxVIbCgsLVVWrNTpU9RMJHf3/J5jlAq6XJNSFuFBGaWlpFBQUhC5osVgslgpEpLYhpqrW74iJgTgeq4wsFovFUkcSRSRwfeIsVZ1Vi/oPYeIDlgeEgYoIVhlZLBZL48WrqsNDF6uW4cAcRxG1BiaJiFdVXw+HcIFYZWSxWCyWoKhqjv+ziMzGLNJ9PRLniqgyEpGfA9djAhWuwOSYSceEeumGyWdyiaq6jeRraWDKysrYsWMHxcXF0RbFEmekpqbSqVMnkpKSoi2KpRqc9U5jgdYisgO4CxMHEVV9rEFlidQ6I2fi6zOgn6oWiciLmPha/TCh4WeKyO1AC1W9raa2MjIy1DowRIfNmzeTlZVFq1atiPSYsaXxoKocOHCAo0ePkpOTE7qCJSKISKGqZkRbDjdEep1RIpAmIokYi2gnZlWxP6HZ05gsl5YYpbi42Coi
S60REVq1amUtaotrIjZMp6rficj9wDacSMOq+p6ItHXC0aOqu5wEXMchIjcANwAkJydHSkyLC+JCEZWUwLffQnExpKZCz56QkhK6niVixMV1Y4kZImYZOblzpmCyk3YAMkTkSrf1VXWWqg5X1eGJiXXTmfPnw8yZdapqiTf8igjM+7ffRlcei8VSKyI5THcGJsvlPlUtw+QyOQXY4+Td8Off2BspAd55Bx54IFKtWxqK3bt3M336dHr06EG/fv2YNGkS69dXSVYaMBy0ZedOBkwxMSY/+ugjzjvvvDqd96GHHqKwsHYJM92eb+zYsSxeXF16qrqf32KJVyKpjLYBo5yMmgJMANYAb3Asu+b3gLmREiAhAXy+SLVuaQhUlQsvvJCxY8eyceNGVq9ezR/+8Af27NlTuWBqauXthPpf2tFWBtE+v8XSkERMGanql5hMkF9j3LoTMGl1ZwJnisi3wJnOdkSwyigKbNoE/ftDYqJ537SpXs0tWLCApKQkbrzxxop9gwcP5tRTT0VV+eUvf8mAAQMYOH06L3z0kSmQkgJB5hkLCgq49tprGTFiBEOGDGHuXPMcVF5ezi9+8QsGDhxIbm4ujzzyCA8//DA7d+5k3LhxjBs3DoD33nuPk08+maFDhzJt2jTy8/MBeOedd+jTpw9jxozh1VerJjM1FBUVMX36dHJzc7n00kspKjoWpeWHP/whw4cPp3///tx1110AQc8frJzF0mhQ1Zh/paena134+c9VmzWrU1WLw+rVq2tXoV8/1YQEVTDv/frV6/x//etf9eabbw567OWXX9YzzjhDvV6v7t69Wzt37qw7d+7UzZs3a//+/VVVdcGCBXruueeqquqvfvUrffbZZ1VV9dChQ9qzZ0/Nz8/Xv//973rRRRdpWVmZqqoeOHBAVVW7du2q+/btU1XVffv26amnnqr5+fmqqjpz5ky95557tKioSDt16qTr169Xn8+n06ZNqzhfIA888IBec801qqr6zTffqMfj0UWLFlU6n9fr1dNPP12/+eab485fU7lYptbXjyWsAAUaA/dwN69GnULCWkZRYN26Y1+6z2e2I8Rnn33GZZddhsfjoW3btpx++uksWrSo2vLvvfceM2fOZPDgwYwdO5bi4mK2bdvGBx98wI033ojfUaZly5bH1V24cCGrV69m9OjRDB48mKeffpqtW7eydu1acnJy6NmzJyLClVcG99H55JNPKo7l5uaSm5tbcezFF19k6NChDBkyhFWrVrF69eqgbbgtZ7HEI406HJBVRlGgd29Yu9Z88QkJZrse9O/fn5dfDp7LS2u5YFtVeeWVV+hdRSZVDemGrKqceeaZ/Oc/lbM6L1u2zLULc7Bymzdv5v7772fRokW0aNGCGTNmBF2b47acxRKvWMvIEl7mzYM+fcDjMe/z5tWrufHjx1NSUsK//vWvin2LFi3i448/5rTTTuOFF16gvLycffv28cknnzBy5Mhq25o4cSKPPPJIhRJbunQpAGeddRaPPfYYXq8XgIMHDwKQlZXF0aNHARg1ahSff/45GzZsAKCwsJD169fTp08fNm/ezMaNGwGOU1Z+TjvtNJ577jkAVq5cyfLlywE4cuQIGRkZNG/enD179vD2229X1Ak8f03lLHFEqDnVMM+5xhNWGVnCS/fusGoVeL3mvXv3ejUnIrz22mu8//779OjRg/79+3P33XfToUMHLrzwQnJzcxk0aBDjx4/nT3/6E+3atau2rTvuuIOysjJyc3MZMGAAd9xxBwDXX389Xbp0qWjr+eefB+CGG27gnHPOYdy4cbRp04bZs2dz2WWXkZuby6hRo1i7di2pqanMmjWLc889lzFjxtC1a9eg5/7hD39Ifn4+ubm5/OlPf6pQmoMGDWLIkCH079+fa6+9ltGjR1fUCTx/TeUsccTkyWbkoLzcvE+eXLvjjZiIxaYLJ3WNTfd//wf33QdlZREQqomwZs0a+vbtG20xLHGKvX6qkJho
FI0fj8c8uLk9XktsbLoYwVpGFoslpujd+9gauGBzqqGON2KsMrJYLJaGItScapjnXOOJRu9NB8ccuywWiyWq+OdU63q8EdOob9GByshisVgssYtVRhaLxWKJOlYZWSwWiyXqNGpl5PGYd6uM4pexY8fy7rvvVtr30EMPcdNNN9VYJ1R6Brf41xX95S9/CUt7YNJMfPHFFxXbjz32GM8880zY2rdY4pEm48BgiU8uu+wy5syZw8SJEyv2zZkzhz//+c8RP/fu3bv54osv2Lp1a1jb/eijj8jMzOSUU04BqBSR3GJpqjRqy8gqo/hn6tSpzJ8/n5KSEgC2bNnCzp07GTNmjKuUCpmZmRWfX375ZWbMmAHAvn37uPjiixkxYgQjRozg888/P67uWWedxd69exk8eDCffvppJYtr//79dOvWDYDZs2dz0UUXcfbZZ9OzZ09uvfXWijbeeecdhg4dyqBBg5gwYQJbtmzhscce4y9/+UtFu3fffTf3338/YGLdjRo1itzcXC688EIOHToEGGvvtttuY+TIkfTq1YtPP/20fl+sxRJjWMvIUivGzh573L5L+l/CTSNuorCskEnPTTru+IzBM5gxeAb7C/cz9cWplY59NOOjGs/XqlUrRo4cyTvvvMOUKVOYM2cOl156KSLC73//e1q2bEl5eTkTJkxg+fLllaJh18TPfvYzfv7znzNmzBi2bdvGxIkTWbNmTaUyb7zxBueddx7Lli0L2d6yZctYunQpKSkp9O7dm5/85Cekpqby/e9/n08++YScnBwOHjxIy5YtufHGG8nMzOQXv/gFAB9++GFFO1dffTWPPPIIp59+OnfeeSf33HMPDz30EABer5evvvqKt956i3vuuYcPPvjAVV8tluoQkSeB84C9qjogyPErgNuczXzgh6r6TSRkscrIEvP4h+r8yujJJ58ETEqFWbNm4fV62bVrF6tXr3atjD744INKKRiOHDnC0aNHycrKqpOMEyZMoHnz5gD069ePrVu3cujQIU477TRycnKA4KkpAjl8+DB5eXmcfvrpAHzve99j2rRpFccvuugiAIYNG8aWLVvqJKfFUoXZwKNAdZOWm4HTVfWQiJyDSZB6UiQEiZgyEpHewAsBu7oDd2I6/QLQDdgCXKKqhyIhg1VG4acmSyY9Kb3G463TW4e0hIJxwQUXcMstt/D1119TVFTE0KFDXadUCEzbEHjc5/Pxv//9j7S0NNdyJCYm4nMupqrnSklJqfjs8Xjwer2uUlPUBv85/O1bLPVFVT8RkW41HP8iYHMh0ClSskQy7fg6VR2sqoOBYUAh8BpwO/ChqvYEPnS2I4JVRo2DzMxMxo4dy7XXXstll10GuE+p0LZtW9asWYPP5+O1116r2H/WWWfx6KOPVmy7GYrr1q0bS5YsAag2x1IgJ598Mh9//DGbN28GgqemCKR58+a0aNGiYj7o2WefrbCSLJY6kigiiwNeN9SjreuAiOUuaahhugnARlXdKiJTgLHO/qeBjzg2JhlWrDJqPFx22WVcdNFFzJkzB6iceqF79+7VplSYOXMm5513Hp07d2bAgAHk5+cD8PDDD/OjH/2I3NxcvF4vp512Go899liNMvziF7/gkksu4dlnn2X8+PEhZW7Tpg2zZs3ioosuwufzccIJJ/D+++8zefJkpk6dyty5c3nkkUcq1Xn66ae58cYbKSwspHv37jz11FNuvh6LpTq8qjq8vo2IyDiMMhpTf5GqOUdDpJBwJsm+VtVHRSRPVbMDjh1S1RZB6twA3ACQnJw8zO9NVRv+9S+44QbYsQM6dqy7/E0ZmwLAUh/s9RNd3KSQcIbp5gdzYHCO52JGtc5R1fXhl9JQo2UkIp2A6cCpQAegCFgJvAm8raohbQ4RSQbOB35VG8FUdRZmsoyMjIw6aUxrGVksFkvdEZEuwKvAVZFURFCDMhKRp4COwHzgPmAvkAr0As4GfiMit6vqJyHOcQ7GKtrjbO8Rkfaq
uktE2jvtRgSrjCwWi6V6ROQ/mGmT1iKyA7gLSAJQ1ccwTmetgL87zjhhGfYLRk2W0QOqujLI/pXAq47F08XFOS4D/hOw/QbwPWCm8z7Xpay1xioji8ViqR5VvSzE8euB6xtClmq96apRRIHHS1V1Q01lRCQdOBNj5vmZCZwpIt86x2a6F7d2WGVksVgs8UFNw3QrgGrnalQ15OpCVS3EmHiB+w5gvOsijlVGFovFEh/UNEx3nvP+I+f9Wef9CsyaoZjHRu22WCyW+KCmYbqtqroVGK2qt6rqCud1OzCxunqxhLWMGgcej4fBgwczYMAApk2bRmGh+2eh2bNn8+Mf/zgscqxdu5bBgwczZMgQNm7cWOnYpEmTyMvLC8t54oktW7bw/PPPR1sMSyPATQSGDBGpWOgkIqcANfqtxwpWGTUO0tLSWLZsGStXriQ5Ofm4xanl5eUNIsfrr7/OlClTWLp0KT169Kh07K233iI7Ozus52uoftUHq4ws4cKNMroW+JuIbBGRzcDfnX0xj1VGjY9TTz2VDRs28NFHHzFu3Dguv/xyBg4cSHFxMddccw0DBw5kyJAhLFiwoKLO9u3bOfvss+nduzf33HNPyHMES+Pw1ltv8dBDD/H4448zbty44+p069aN/fv3s2XLFvr06cP111/PgAEDuOKKK/jggw8YPXo0PXv25KuvvgLg7rvv5qqrrmL8+PH07NmTf/3rXwCu+3XSSSexatWqivOPHTuWJUuWUFBQwLXXXsuIESMYMmQIc+caZ9XZs2dzwQUXMHnyZHJycnj00Ud58MEHGTJkCKNGjaoIVbRx40bOPvtshg0bxqmnnsratWsBmDFjBj/96U855ZRT6N69e0U4pNtvv51PP/2UwYMHhzUBoaXpEWrRqwcTsXWQiDTDRGw43DCi1R+rjMLLzTeDixButWLwYHAyJITE6/Xy9ttvc/bZZwPw1VdfsXLlSnJycnjggQcAWLFiBWvXruWss85i/fr1lcqlp6czYsQIzj33XIYPr36pRHVpHKqmfqiODRs28NJLLzFr1ixGjBjB888/z2effcYbb7zBH/7wB15//XUAli9fzsKFCykoKGDIkCGce+65rvs1ffp0XnzxRe655x527drFzp07GTZsGL/+9a8ZP348Tz75JHl5eYwcOZIzzjgDgJUrV7J06VKKi4s58cQTue+++1i6dCk///nPeeaZZ7j55pu54YYbeOyxx+jZsydffvklN910E//9738B2LVrF5999hlr167l/PPPZ+rUqcycOZP777+f+fPnu/sRLZZqqNEyUtVyYIrz+Ug8KSKwyqixUFRUxODBgxk+fDhdunThuuuuA2DkyJEV6Rk+++wzrrrqKgD69OlD165dK5TRmWeeSatWrUhLS+Oiiy7is88+q/ZcwdI4fPJJqHXdlcnJyWHgwIEkJCTQv39/JkyYgIgwcODASqkfpkyZQlpaGq1bt2bcuHEVVpObfl1yySW89NJLgEml4U818d577zFz5kwGDx7M2LFjKS4uZtu2bQCMGzeOrKws2rRpQ/PmzZk8eTJAhVz5+fl88cUXTJs2jcGDB/ODH/yAXbt2Vch7wQUXkJCQQL9+/dizZw8WSzhxEyj1cxF5FJP2ocC/U1W/jphUYcKvjOJg6D0ucGvBhBv/nFFVMjKOTV3WFGOxahqHcKZ1CEZgOomEhISK7YSEhEqpH6qTy02/OnbsSKtWrVi+fDkvvPAC//znPyvKv/LKK/Tu3btS+S+//DKkXD6fj+zs7GojmAfWb4iYlpamhZs5o1OA/sC9wAPO6/5IChUurGXUdDjttNN47rnnAFi/fj3btm2ruCG///77HDx4kKKiIl5//fVqI3xDw6ZxmDt3LsXFxRw4cICPPvqIESNGHFempn5Nnz6dP/3pTxw+fJiBAwcCMHHiRB555JEKZbF06VLX8jRr1oycnJwKi0tV+eabmpN6VpcOw2KpLSGVkaqOC/IKHT8/BrDK
qOlw0003UV5ezsCBA7n00kuZPXt2xZP8mDFjuOqqqxg8eDAXX3xxxXzRpEmT2Llz53FtPf300/zyl78kNzeXZcuWceedd0ZE5pEjR3LuuecyatQo7rjjDjp06FCrfk2dOpU5c+ZwySWXVJS/4447KCsrIzc3lwEDBnDHHXfUSqbnnnuOJ554gkGDBtG/f/8KB4jqyM3NJTExkUGDBlkHBku9cJVCQkTOxVhHqf59qnpvBOWqREZGhhYUFIQuWIX33oOJE+Hzz+GUUyIgWBPApgCIDHfffbcrZ4h4x14/0cVNColYIaRlJCKPAZcCPwEEmAZ0jbBcYcFaRhaLxRIfuHFgOEVVc0VkuareIyIPUDnwacxilZElVrn77rujLYLFElO4UUZFznuhiHQADgA5kRMpfFhlFB5UNeIeaA1OSQl8+y0UF0NqKvTsCQHeYpb6Yz3uLLXBjTfdfBHJBv4MfA1soXJ+opjFKqP6k5qayoEDBxrfjcWviMC8f/ttdOVpZKgqBw4cIDU1NXRhiwUXlpGq/tb5+IqIzAdS42Xxq1VG9adTp07s2LGDffv2RVuU8LJjx/H7/GHeLWEhNTWVTp06RVsMS5wQUhmJyKfAJ8CnwOfxoojAppAIB0lJSRXRABoVU6fC2rXm4khIgD59ICDWm8ViaVjcDNN9D1gHXAx8ISKLRcTVggIRyRaRl0VkrYisEZGTRaSliLwvIt867y3q04GasJaRpVrmzTMKyOMx7/PmRVsii6VJ42aYbpOIFAGlzmsc4HbhwF+Bd1R1qogkA+nAr4EPVXWmiNwO3A7cVifpQ2CVkaVaune3lpDFEkO4WWe0EXgdaAs8AQxQ1bNd1GsGnObUQVVLVTUPE3j1aafY08AFdZDbFTY2ncVisVSPiDwpIntFZGU1x0VEHhaRDSKyXESGRkoWN8N0DwPbgMuAnwLfE5EeNVcBoDuwD3hKRJaKyOMikgG0VdVdAM77CcEqi8gNzpDg4sDgkrXBWkYWi8VSI7OBmoyLc4CezusG4B+REsRNbLq/quo04AxgCXA3sN5F24nAUOAfqjoEE/H7dreCqeosVR2uqsMTE90shzoeq4wsFoulelT1E+BgDUWmAM+oYSGQLSLtIyGLm2G6B0TkS+BLYBBwJ0ZLhmIHsENVv3S2X8Yopz3+zjjve+siuBusMrJYLJZ60RHYHrC9w9kXdtwM0y0EzlfV/qp6vao+raqbQlVS1d3AdhHxJ1aZAKwG3sB46OG81xwWuB5YZWSxWGKWTZugf39ITDTvm0LeVutCon+6w3ndUMv6wUKvRGQFvJvxr1eAy0UkR1V/KyJdgHaq+pWLuj8BnnM86TYB12AU4Isich1mLmpaHWUPiVVGFoslZpk8+dhat7VrzXb4PTy9qjq8HvV3AJ0DtjsBx+ddCQNulNHfAB8wHvgtcBSjoI7PBFYFVV0GBPsiJrgXse5YZWSxWGKWdeuO3Zx8PrMde7wB/FhE5gAnAYf9Dmjhxo0yOklVh4rIUgBVPeRYOjGPVUYWiyVm6d27chSQKqniGwIR+Q8wFmgtIjuAu4AkAFV9DHgLmARsAAoxo1sRwY0yKhMRD844oYi0wVhKMY9VRhaLJWaZN88Mza1bZxRRFKKAqOplIY4r8KOGkMWNMnoYeA04QUR+D0wF/i+iUoUJq4wsFkvMYqOAVKJGbzoRSQA2A7cCfwR2AReo6ksNIFu9scrIJQ3j1WOx1I2meH02wT5LqDw1IvI/VT25geQJSkZGhhYUFNS63o4d0LkzPP44XHddBARrLPTvbyNYW2KXpnh9hqnPIlKoqhkRkDDsuFln9J6IXCxxmOrTWkYuiQ+vHktTpSlen02wz26U0S3AS0CJiBwRkaMiciTCcoUFGyjVJb17H/uyouTVY7FUS1O8PuO0zyKSLiJ3iMi/nO2eInKem7puYtNlqWqCqiarajNnu1l9hW4IrGXkEpvbxxLLNMXrM377/BRQAvindnYAv3NT
sW4RSOOEuFFGmzYd7+LZvXvDnd969VhimaZ4fcZvn3uo6qUichmAqha5neJxM0wXt8SNMvKHBSkvPxYWxGKxWOKPUhFJ49i61B4YSykk1jKKBZrgZKXFYqmZEq+5h6ckpkRZklpxF/AO0FlEngNGAzPcVLTKKBaIgbAgFoslOpSVl1HsLabIW0RRWVHFu099tMtsR8dmEcnYEBFU9X0R+RoYhYn4/TNV3e+mbp2UkYjMV1VXHhLRJG6UUQyEBbFYLJHFr3QCFU+xtxivr26ZrGMRETnN+XjUee8nIv4kfjVSV8vo+3Ws16DEjTKK38lKi8USgKpSWl5aoXQClU+5r0msMfllwOdUYCQmQ/j4UBXrpIwiFUI83MSNMrJYLHGF1+elxFtSSeGUlJvtUFFtGjOqWsn7SkQ6A39yU9dN2vEVIrK8yutTEfmLiLSqo8wNglVGUSJccbWaYHwuS+zgUx9FZUUcKjrE7vzdbMnbwsbF71Pcuwee5BQSBuay85vP2J2/m7ziPIrKipq0IqqGHcAANwXdWEZvA+XA8872dMzE1GFgNhCzfshWGUWJcGWwbJhMmJYmjE99lHhLKCkvqbB0/BZOWXnZceX7XXEjKRs2Iz4ldcNmTpxxC6sXvBgFyWMTEXmEY2nJE4DBwDdu6rpRRqNVdXTA9goR+VxVR4vIlSEE24KZyCrHSX8rIi2BF4BuwBbgElU95EbY2mKVUZQIl6u6dXm3hAFVDapsSrwllJaX1qqt1I1bEJ+514pPSd24JQISxzWLAz57gf+o6uduKrpRRpkicpKqfgkgIiOBzICThWJcFde+24EPVXWmiNzubN/mRtja4l/3a2PTNTDhclW3Lu/hJ9rRPiJIoLeaX+EUe4spLS8N2/BZcY9upDqWkSYIxT261bvN5K07OHHGLaRu3EJxj25smP0gpV071V/YKKCqT9e1rhtldD3wpIj4FdBR4HoRycDkOKotUzBpbgGeBj4igsooIcFaRg1OuFzVrct7+InzoU9VPc5Tza98GsJbbcPsB49THPXlxBm3VCi4eB36E5EVHBueq3QIkzA2N2Qbbp8YRKS5Uz6vFgJuBg45Qv5TVWeJSJ6qZgeUOaSqLYLUvQG4ASA5OXlYSYmriBLHkZQEt94Kv/99napbLI2LxMTKQwUeD3hjb52LqlLkNetw/Otx/EqnsTkJDO0yEik/9sSsngS+3vYVQL0XvTZUPiMR6VrTcVXdGqqNkJaRiLQF/gB0UNVzRKQfcLKqPuFCxtGqulNETgDeF5G1LuoAoKqzgFlgkuu5rVeVhAQ7TGexVBCDQ59+hROofBqj0qmOSAz9NTRulE0o3ARKnQ28C3RwttcDN7tpXFV3Ou97gdcwC6D2iEh7AOd9b60kriUej1VGFksFUUxNUO4rJ780n30F+9iat5W1+9eydNdSVu1dxaZDm9h1dBeHig41ubU6G2Y/SPGJOagngeITc8Iy9BctRGSUiCwSkXwRKRWRcrf579zMGbVW1RdF5FcAquoVkZC3d2dOKUFVjzqfzwLuBd4AvgfMdN7nuhG0rlhlFOM04gn1mKSBon14fV4KyworvfyBP5sCtXFKKO3aKWpzRCJyNvBXwAM8rqozqxxvDvwb6ILRF/er6lM1NPkoZvnPS8Bw4GrgRDeyuFFGBc7iVn9I8FGYNUahaAu85qSySASeV9V3RGQR8KKIXAdsA6a5EbSuWGUU48T5hLrFWDyFZYUUlBWY99KCWrtMNzbiwSlBRDzA34AzMYtTF4nIG6q6OqDYj4DVqjpZRNoA60TkOVWt9gdW1Q0i4lHVcuApEfnCjTxulNEtGGumh4h8DrQBpoaqpKqbgEFB9h8AJrgRLhxUna+1xBh2LVFc4XcsKCgtoKCsgILSAoq9xdEWK+aIk/VII4ENzr0aEZmD8XYOVEYKZDkJ8jKBg9S8pKdQRJKBZSLyJ2AX4MqBIqQyUtWvReR0oDfGTW+dqh6/NDlGiVFnIYuf
GJxQtxzD6/NSUFpAfml+hfLxqV0rEYoYckpIFJHAhaizHOcwgI7A9oBjO4CTqtR/FGOM7ASygEtVa7wArsL4IvwY+DnQGbjYlaBuCmE0aDen/FAnJPgzLutGFTtMF+PYtUQxRYm3hPzS/IqXtXrqRiTWI9URr6oOr+ZYsHTgVT1HJgLLMFG3e2C8oj9V1eqcEoYCbznH76mNoG5cu591hFiGCevjF9gqI0v9sekzokqxt5ijJUcrlE9Tn+sJF9F0SqgFOzCWi59OGAsokGuAmWrcGzc4a0f7AF9V0+b5wEMi8gkwB3hXVV2NTbmxjIYD/TROfS0TE+0wncXix698jpYaBRQsGKglvJT7yjlScoTDJYcrlH6zlGb0a9MPgMe/frzSsYKyAk7qeBLfG/S9SIu2COgpIjnAdxgvuMurlNmGmeP/1Flz2huoNny+ql4jIknAOU5bfxeR91X1+lDCuFFGK4F2mImouMNaRpamTGl5aYXyOVJyxCqfMOD3HsxKyQJgweYF7Di6g7ziPPKK8zhUfIguzbpw86ibAZjywhR25++u1MbYrmO5/6z7AXhh1QuUlpeSmZxJRnIGmUmZDbLOylmm82PMOlIP8KSqrhKRG53jjwG/BWY74X4EuC1UGnFVLRORtzEjaGkYp4iwKKPWwGoR+QqoWCigque7qBt1rDKyNCXKfeUcLT3K0RKjfOycT+04WnKU3fm7OVJ6hGHthwHw7PJn+XrX1+wv3M/+wv0cLDpIjxY9eP5ik1Xn6eVPs3LvSpISkshOzSY7NZsT0k+oaHPGoBmUaznNU5qTmZxJZnImrdNbVxx/98p3SRA38QfCj6q+BbxVZd9jAZ93YtaIusJZtzQdGIeJO/o4cImbum6U0d1uBYlFrGu3pbFTWFbI4eLDHCk5QkFZQZOKXlAbVJUDRQfYnb+bfQX7GJczDoDZy2bzzsZ32HV0FwVlBQA0T2nOh1d/CMC2w9vYk7+H1umt6dmyJ63TW9O5+bGplgfOfIDUxFTSk9IROd4nYGq/mlfCREsRRYgZmLmiH6hqrVY5V6uMRETU8HGoMrU5YUNjXbstjQ2vz2vmIBwF5PXZC9xPua+cPQV72H5kO0PbDSXJk8Rra1/j+RXP893R7yo5aHw641PSktJI9iTTPrM9Q9sNpX1We9pntqddZruKcr859Tc1nrNVekwnvG5QVHV6XevWZBktEJFXgLmqus2/01nQNAYTymcBJnZdzGKH6SyNAb/1c7jkMAWlBdEWJ6r4LZys5CxSElNYvHMxz614ju1HtvPdke8o85l5sZenvUy37G6kJ6XTtXlXRnceXaFs2me2J8mTBMDlAy/n8oFV5+0tDU1Nyuhs4FrgP463RR6Qipnoeg/4i6oui7SA9cUqoxjGxqWrFp/6KqyfwyWHm7Tjwb6Cfby36T02H9rM5jzzOlJyhL9N+hsndTyJYm8xu/J3kZOdw6ldTqVL8y50bta5wrqZ2GMiE3tMjHIvLKFwlc/IcdVrDRTVJp9RuMjIyNCCgro9DY4aBc2bw7vvhlkoS/3p379y9IU+fZr0mqOy8jIOlxwmrziPoyVHm1Skg4NFB1l/YD0bDm5gw8ENbMrbxBUDr2Bij4ms2beGq16/iuzUbLpndyenRQ452Tmc3vV02me1j7boESVe8hkFnO88zKLXWl+8riIwOOF/rGu3JbzYuHQUe4srXIKbwvCb1+dlS94W1h9YzwkZJzC8w3AOFB5g4nPHLJfW6a3p3qI7KZ4UAHq26sn7V75Pi7TjcnBaYo/pwF+dKZ6nVHWN24puwwHFLVYZxTBNNC5dQWlBhQJqzK7X5b5yPAkeAH7/6e9N3qK8TRUOF2f3OJvhHYbTMq0lt55yKznZOfRs1ZPs1OxK7SQmJFpFFCeo6pUi0gy4DBOxW4GngP+o6tGa6jZ6ZWQjMMQwTSguXX5pPoeKDpFXnNcoQ+4UlRWx7sA61uxfw5p9a1izfw2t01vzj3P/AcDu
/N20Sm/FqE6j6NWqFz1b9qRrtslULSJc0t/VUhRLHKCqRxzLKA2TiPVC4Jci8rCqPlJdPTex6TIwc0U+EemFiUv0drxE7vZ4oLjxPnzGN404Lp2qGgVUbBRQY3JAKPYWs/7AerYd3sZ5vc4D4LYPb+OL7SZtTZv0NvRp3Yeh7YdW1HnknGrvQZZGhIicj4ln1wN4FhipqntFJB1YA9RdGQGfAKeKSAvgQ2AxcClwRX0FbwjsMJ1LmqpnW5j7fbTkKIeKD3Go6FCjWv+zbPcy3t34Lsv3LGfDwQ2UazmCMK7bODKSM7hq4FVM7TuVvq370iajTbTFtUSPqRhP608Cd6pqoYhcW1NFN8pInIauAx5R1T+JyFK3kjnZBBcD36nqeSLSEngBk5JiC3CJqh5y215tscrIJU0142oY+u0fgjtUfCjuLaD80nxW71vNir0rWLl3Jbeecivts9qz/sB63vz2TQa0GcDVg66mf5v+9GvTj/SkdABGdBwRZcktMcKuqopIRO5T1dtU9cOaKrpSRiJyMsYSuq4W9fz8DGOeNXO2bwc+VNWZInK7s31bLdqrFTEXDihWLZCm6tlWx34XlhVysOggh4oORXUOKHnrjuPy5pR27eSqrqri9XlJ8iSxet9q7v3kXjYe3Ig6KW1ysnM4UHSA9lntmdJ7Chf3vbjCISHW+mKJGc7k+Pv5OUH2HYcbpXIz8CvgNSeia3dM5IWQiEgn4Fzg95j05WAiuI51Pj+NCaYXMWUUc+GAYtUCaaKebbXpd4m3hINFBzlYdDBmvOBOnHFLRUbR1A2bOXHGLdXm0fH6vKw7sI6lu5aybPcylu1ZxvVDrmf6gOm0TGvJCeknML7beAaeMJABJwyoiEoNkJKYElN9scQWIvJD4Cagh4gsDziUBXzupg03acc/Bj52HBlw8qX/1KWMDwG3OgL5aauqu5y2donICcEqisgNwA0AycnJLk93PDE3TBerFkgT8myrRIh+e33eCgUUi+uAUjduQXzGkhGfkrpxS8Ux/xqmdpntKPGWcNa/z6oIBNoxqyNjOo+hR4segFlc+fA5Dze4/IHU1BdLzPM88DbwR8xol5+jqnrQTQNuvOlOBp4AMoEuIjIIE5H1phD1zgP2quoSERnrRphAnDzts8BEYKhtfT8xN0wXqxZII/Zsq5Eg/fapj7ziPA4WHeRIyZGYjoJd3KNbhTVxIB3+e1Ib5n35V5btXsaa/WsY1n4Yf5v0N1ISU5gxeAYdszoypN2QmHQyCOyLJgjFPbpFWySLe1RVt4jIj6oeEJGWbhSSm2G6hzB50N9wzviNiJzmot5o4HwRmYSJaddMRP4N7BGR9o5V1B7Y66KtOhNzw3RN1QKJA46WHOVA0QHyivMo98XSE0xw8kvzmfPgdUy/5QlSN27h4quT+bjdHhJX/od+bfpxxcArGNHhmGPBNYOviaK0odkw+8Hj5owsccPzwHnAEkxSvcBcGgqEnBh3Gw5oe5U8HSH/qar6K8xcE45l9Atnde6fMRG/Zzrvc93IUFdibpiuqVogMUqJt4QDRQc4UHgg5hejFpYVsmz3MhbvXMzinYtZe2AtgtD93Q/JTM7kyl1LuQKlX5t+pCamRlvcWlPatVPU54gSJIHEhEQSExLxJHjMu3jwJHjwiIcEScCTYN4FMe8iFTmJhOPzGSmKqqIoPvVVepX7yinX8op3r89b6RUvqOp5zntOXdtwo4y2i8gpgDrpI36K8Y6rKzOBFx1X8W3AtHq0FZKYU0aWqONTH4eKDrG/cD/5pfnRFqdair3FrNizgt6te9MspRmvr32dBxc+SGJCIgNPGMh1Q65jWPthFYpnSPshUZY4dhERkj3JJHuSSUpIMu+eJJISkire/QooloiXYLkiMrSm46r6dag23CijG4G/Ah2BHZj0EceNC4YQ5COM1xyqegCYUJv69SHm5owsUSO/NJ8DhQc4WHQwJv/k5b5y1uxfw8IdC1m0cxEr9q6g
tLyUP4z/A2f1OIsJORPo3qI7g9oOIi0pLdrixhxJniRSPCmkJKZUvCd7kknxpFTkLoo34igL7AM1HFNgfKgG3HjT7SdOoi0EI+bmjCwNitfn5UDhAfYX7o8Zd+xAdh3dRWl5KV2zu7Irfxcz5s4AoHer3lzS7xKGdxjOkHbG4mmb2Za2mW2jKG308SR4SE1MJcWTQmpiqvmcaD7H0Y270aGq4+rbRk1pxx8BqnUjUlW37t1RxQ7TNU2OlBxhX8E+DpccjilvuILSApbsWsLCHQtZ+N1Cth3exsQeE/n9+N/TqVkn/nzGnxncbnCTj1LtSfCQlphGamIqaUlpFYon2VP3ZR6W4xGRszEjXx7gcVWdGaTMWIwjWxKwX1VPD1JmvKr+V0QuCnYeVX01lCw1WUaLnffRQD9MCB8wczxLQjUcK1hl1HQoKy9jf+F+9hfujxlnhHJfOTuP7qRz884AXD/ver49+C0pnhSGdRjG1L5TOaXzKRXlx+XU+wEzrhARo3AS00hLSqt4t0on8jih2v6GiZqwA1gkIm+o6uqAMtnA34GzVXVbdetCgdOB/wKTgxxToO7KSFWfdoSZAYzzR+kWkccw80ZxgU0h0fg5XHyY/YX7Y8YKOlx8mP/t+B+fb/+cL7Z/gdfn5cOrPyQxIZGbht9EamIqg9oNanI33MSERNKS0khPSict0bynJqZSxVPX0nCMBDY4gQwQkTmYCDmrA8pcDryqqtsAVDXoUhxVvct5r/P6ATcODB0wERT8i5YynX1xgbWMGidl5WUcKDrAvoJ9UbeC/G67CZLAi6te5P7/3Y9PfWSnZnNK51MY3Xl0hZI8teupUZW1oUj2JBul4yif9KT0Jqd8Y4REEVkcsD3LCSgAxilte8CxHcBJVer3ApJE5COMHvirqj5T3clEpBVwFzAGYxF9BtzrOK7VLGioAhhX7KUi4o9Hdzpwt4t6MYFVRo2LoyVH2Ve4j7zivKhaQQWlBXz13Vd8vv1zPt/+OfeOvZcRHUcw8ISBXDv4WkZ3Hk2/Nv1izlU4EvgVT0ZyRoXiSUxo9Hk74wWvqg6v5lgwk7TqnyoRGIbxgE4D/iciC1V1fTVtzsGkHbrY2b4CM8VzRihB3XjTPSUib2M0pgK3q+ruUPViBevaHf+U+8orrKBoe8TtL9zPnQvu5OvdX+P1eclIymBUp1EVrtZ92/Slb5u+UZUxkljF06jYAXQO2O4E7AxSZr+qFgAFIvIJMAioThm1VNXfBmz/TkQucCOM26toJOAfX1AgbmLYeDwmDJwq2KHp+KKorIh9hfs4UHggKuuCVJU1+9fw8daPaZbSjCsGXkF2ajYl5SVcNuAyxnQZw6C2gxrtzTgxIbFC6WQkZZCRnNFo+9pEWQT0FJEc4DtgOmaOKJC5wKMikggkY4ySv9TQ5gIRmQ74Q2lMBd50I4ybQKkzgRHAc86un4rIKU64n5jH44ySlJcbK8kS26gqh0sOs7dgL0dLjkZFhsU7F/PBpg/4ZNsn7C3YS4IkcHaPswFzg37i/CeiIlckSZCE4xSPneNp3KiqV0R+DLyLce1+0kkTdKNz/DFVXSMi7wDLAR/G/Xtl1bZE5CjHYtLdAvzbOZQA5GPmkWpEQo27O7kpBquaR1PHHXCpqua66XA4yMjI0IKCuoXv/8Mf4De/geJiSIl8ShZLHfH6vOwv3B8Vh4SjJUdZsmsJp3c9HRHhjgV3sGDLAk7udDKndz2dMV3GkJ2a3aAyRRIRIS0xrZLysREdGiciUqiqGdGWww1ubYVsjnnTNY+MKJHBbw15vRFURrGavTUOKCorYm/B3gYP0ZNXnMdHWz7iv5v/y5fffUm5lvPS1JfIaZHDz076Gb859TdxGWw0GMmeZDKSMyosnvSkdButwBIxRKQF0BOTrQGAqqnIg+FGGf2RY950ApyGE407HggcposYsZq9NYbJK85r8KE4VUVE+PK7L/np2z+lXMvpmNWRywdezrhu4+ia
3RWA1umtG0ymcONJ8FQaastIyojbuGyW+ENErgd+hnGGWAaMAv5HmGLT/cfxMR+BUUa3xZM3XYMoo9pmb22ilpTfK25vwV5KvCUNcs79hfv57+b/8uHmDzmt62lcMfAKBrQZwNWDrmZCzgR6t+od14suUxNTyUzOrFA8drjNEmV+htEVC1V1nIj0Ae5xU9HtMF0CsN8p30tEerkxu2IB/zBdRJVRbbO3NjFLqrS8lL0Fe9lfuL/Bkta9tPol3t3wLt/s+QZFycnOISPJDJ1nJGfwoxG1CjwfE3gSPGQkZVRSPk1hHZMlrihW1WIRQURSVHWtiLhKZ+3Gm+4+4FJgFcabAozXRFwoI79lFNGQQLXN3lpbSypOKSgtYE/BngZZoJpXnMc3e77h9K4mhuOCzQvIL8vnhmE3VKReiDcCrZ7M5MxGM4dladTscOLZvQ68LyKHOH7tUlDcWEYXAL1VtWHGVcJMgwzT1TZ7a20tqVgjxDDjoaJD7C3YG/HEdQWlBXy89WPe3fguC3csRFHeueIdWqa15IGzHoirISu/a3VmcmaF9WOtHku8oaoXOh/vdvwMmgPvuKnrRhltwoQOr5UyEpFUjPWU4pznZVW9S0RaYsJDdAO2AJeo6qHatF0bGkQZ1ZbaWlKxRpBhRt/KFewv3N9g80Efb/2YX3/4a0rKS2iX2Y4rBl7BxBMn0iLVpF6IdUWU7EmuZPWkJabF9dyVxeLHyfrqj033uaq6WqvhRhkVAstE5EMCFJKLfEYlwHhVzReRJOAzJ6zQRcCHqjpTRG4HbgducyNsXQh07Y4ZamtJxRpVhhl13TpW7FmB1xeZL7ncV86inYt4d+O7nNrlVMbnjKd3q96c3/t8JvaYSG7b3Jh3VU5LSiMzObPiZReUWhojInInJs2QP2XEUyLykqr+LlRdN8roDedVK9RMEvjHaZKcl2JClI919j+NSUfetJRRvNO7N7p2LeLzoQlCcY+uEVFEGw5u4M1v3+TtDW+zv3A/GUkZ9GzZE4B2me24bXTELpt6kSAJpCelk5mcSVZKlnU0sDQlLgOGqGoxVETw+RqovzLy5zWqC060hiXAicDfVPVLEWmrqructndVl6xJRG4AbgBITq77U2SSs8SirKzOTVgCKCgtYP+zj3LCZdeTunELxT26sWH2g2Frv9hbTGpiKqrKr//7a7bmbWV0l9FMOnESY7qMiclJfL+XW1ZKVsWcjx1yszRRtmAWu/ojGqcAG91UjGi0NlUtBwY73hWviciAWtSdBcwCEw6orjL49ZhVRvXjcPFhdufvNk4J7Zqxf8GLoSu5pMRbwqfbPuXNb99k+Z7lvHn5m6QmpnLP6ffQLrNdzKXg9iR4yErOqrB87HyPpakjIo9gRr5KgFUi8r6zfSYmp1FIGiR0qKrmOQtnzwb2iEh7xypqDwTNHBgu/JZRaWxkoY4rVJWDRQfZU7CHorKisLe//fB2/r3i37y38T2Olh6lTXobpvSeQml5KamJqTGTisGvfLJSsshKzop55wiLJQr4E/gtAV4L2P+R2waqVUYi8qyqXiUiP1PVv9ZWMhFpA5Q5iigNk1zpPsz80/cwSfu+hwlRHjHsMF3t8amP/YX72ZO/J+xBS3fn78br89KpWSfyS/OZv34+43PGc27PcxnRYURMzK14EjzG6nEUUHpSerRFslhimsDpHBFJxmSIBVinqq7uvjVZRsNEpCtwrYg8Q5WsgKp6MHi1CtoDTzvzRgnAi6o6X0T+B7woItcB2zCeFxHDDtO5x+vzsq9gH3sL9obVIaG0vJRPtn7C3HVzWbhjIZN7TebO0++kT+s+vHfle2QkRzeosH+NT7OUZmQlG+Vjh90sltojImMxjmlbMDqjs4h8r76BUh/DLFbqjjG9Av+d6uyvFlVdDgwJsv8AJoVtg2Ato9CUlZexp2AP+wr2hT1y9uNfP86cVXPIK86jbUZbrhtyHZN7TQZMKoNoKaL0pHSapTSjWUoz
MpIzYt413GKJEx4AzlLVdQAi0gv4DyZ1eY1U+w9U1YdVtS8m4VJ3Vc0JeMVHbJVNm0i6ajoApdf90EQOqKEs/fsbX/D+/Wsu6+K8tW4rnOd3SYm3hK15W1mxdwV78veERREVlBbw1rdvVbRVUFbAsPbDePjsh3lj+hvcOPxGOjbrWO/z1JZkTzKt01vTvUV3BrUbRN82fenYrCNZKVlWEVks4SPJr4gAVHU9ZllPSEIm1wMQkUEcSzv+iWP1NBh1Tq7Xvz9L16QyVJfwmlzEBX3XVb/YtH//yiF6+vSp+8LUurQVzvOHoLCskN35uzlUFJ7AF6rKN3u+Ye66uXyw6QOKvEU8PvlxBrcbXJG2oaFJkASyUrIqrJ9YdAm3WCJNQyfXE5GnMDFMn3V2XQEkquo1Ieu6yPT6U8x6H/+K2guBWar6SJ0lriV1VkaJiawq780AVvEi05jmea361a+JiZVjBnk8dV8pW5e2wnn+asgvzWd3/m4OFx8OW5s7juzg5ndvZkveFtKT0jmr+1lM6T2FAScMaHAllJqYSvPU5hVzP3bex9LUiYIySgF+hAkHJJiQcH93E9vUjWv39cBJqlrgnOw+TLKkBlNGdaZ3b5LWlINCqaTWHJC0d29Yswb8ytnjMUNldckzVJdAqBEMnnqk5Ai7ju4KS+BSn/pYvHMxR0qOcEb3M2iX2Y7OzTpzVe5VnNn9zAb1PPNbP81TmtM8tbkNsWOxRBERSQCWqOoAoNYr4d0oIwECw4yWU8WzLmaZN4+kiTfCBihr3wXm1ZDjad486Nv32IIkr7fueYbqEgg1AsFTDxUdYnf+bgrLCuvdVl5xHvPXz+fVNa+y7cg2erXsxYScCSQmJPKXiX+pd/tuSfYk0zy1Oc1Tmtv5HoslhlBVn4h8IyJdVHVbbeu7Gaa7BbMeyL+Q6QJgtqo+VNuT1ZU6D9MB330HnTrBrFnw/e+HKBxsqKx37+NTJcRwplb/QtXd+bsp9haHruCCOSvn8PBXD1NaXkpu21ym9p3KhJwJpCSmhKX9UGQkZ5Cdmk3zlOZ2wanFUguiMEz3X0ym16+Aipu2qp4fqq6b2HQPOtET/GOA16jq0jpL28DUyrW76lCZxxM8I2sMZmpVVfYX7md3/u56L1TNL83nnQ3vMKrTKDo160SPFj2Y0nsKF/W5iJ6teoZJ4upJkASapTSjeWpzslOzSUxokEAhFkuTQ0TOBv4KeIDHVXVmNeVGAAuBS1X15RqadJViPOg5Ip2BMxzUxzI6dAhatoS//AVuvjlE4aoWj1/h+PE7FTSAs4FbfOpjX8E+9hTsoay8foup1h1YxyurX+Gdje9QWFbIzSfdzJW5V4ZJ0ppJTEg01o/jgGCH3yyW+lOTZeQEJFiPiR+3A1gEXKaqq4OUex8T/PTJYMrIyV93IyYo9grgCVWt1U2x0T9y1soyqppnqKq7td+poKoFlZNjyjbgsF04oyX41MeNb97I17u+JsWTwlk9zmJqv6n0a90vTNIGJyUxhezUbLJTs8lMzozouSwWy3GMBDao6iYAEZmDSfGzukq5nwCvYIbfquNpoAz4FDgH6Af8rDbCNHplVK9wQNU5FVTdX1raYMN24YqWsPnQZr7Y8QVXDLyCBElgcNvBjO82nkk9J9EspVkYJa5MWlJahQKyMd8sloiTKCKLA7ZnORkRADoC2wOO7QBOCqwsIh0xy3nGU7My6qeqA506T2DmjGonaE0HHfPsXVU9o7YNxwr1CgdUXUbWqvsTEytlPmWdswA5jI4OJd4Sdufv5kDRAVSV5K07OHHGLZVyCpV27VRjG2XlZSzYsoBX1rzCkl1LSExIZELOBNpltuOmETfVSS43+B0QWqS2aDCnB4vFAoBXVYdXcyyYV3TVeZuHgNtUtTzEur2KO6yqeuuyxq9GZeQIUCgizVU1fCslGxARM6UT0RQS1a0RCoOjQ2FZIXvy93Co+BCB83snzriF1A2bEZ+SumEzJ864
hdU15BhauXclt7x3CweLDtIhswM/HvFjzu99Pi3TWtapy6HITM6kRVoLslOz7fofiyU22QF0DtjuBOysUmY4MMdRLq2BSSLiVdXXq5QbJCJHnM8CpDnbgkn8HXK4xc0wXTGwwkmWFOiq91MXdWOC5OQIB0qtbjhv3brgFpMLjpYcZXf+bo6UHAl6PHXjFsRnlJP4lNSNWyodL/eV8/n2z/EkeBjdeTTdsrsxtN1Qzu99PqM6jYqIg4BfAbVIbUGSx1U4KovFEj0WAT1FJAf4DpgOXB5YQFVz/J9FZDYwP4giQlXrnfvFjTJ603nFLUlJEVZG1Q3n1SGqQl5xHrvzd1NQWrP3YHGPbhWWkSYIxT26AbCvYB9z183ltbWvsadgD6M6jmJ059FkJmcy84ygXpv1IislixapxgKyCshiiR+c4bQfA+9iXLufVNVVInKjc/yxhpTHbaDUNKBLYDTWhqQ+rt0ArVvD9Onw6KNhFMoNLueMfOrjQOEB9hTsocQbMoQTQNA5o4f3zeepZU9RruWM6jiKi/tezKldTw37Oh2rgCyW+KChF73Wh5B3KRGZDNwPJAM5IjIYuNfNitpYISnJxZxRfZwNqqtbncXk4PV52Vuwl30F+/D6vCRv3UE/l04JpV078cXbs5i3fh6Te00mOzWbnr6eXD7wci7qcxGdm3cOWq+u2CE4i8USSdyEA1qCcev7SFWHOPtW+N34GoL6WkZdu8L48fDUUzUUqk8Kh1rWLSorYm/B3grPOD/9xl1SeejtxJzjnBL86RpeWfMKH2z6gDJfGfeOvZdJPSe5k7UWWAVkscQ3jcoywrgGHq7iqhdybE9EOgPPAO0w+S1mqepfRaQl8ALQDZOa9hJVDU9inWpwNWdUD2cDt3XzivPYW7CXoyVHgx4P5ZRQ7C1mxtwZbDi4gYykDC7scyEX9b2IE1ue6F7WEGQkZ9AitQUt0lpYLziLxdJguFFGK0XkcsAjIj2BnwJfuKjnBf6fqn4tIlnAEscjbwbwoarOFJHbgduB2+omfgic4bOkjS9Ttm8LbOpd/dBbfVI41FC33FfO/sL97CvcF3I+KJhTwpp9a1i5byXT+k0jNTGVER1GML3/dCb2mBi2oKHpSekVFpBdB2SxWKKBm2G6dOA3wFkYn/F3gd+qaq1CQovIXOBR5zVWVXeJSHvM8F+Nd/76ZHpl7VoG+b6mO5t5rd9vqh8+C/OcUUGntuwr3MehokOuIyX4nRLKt23mmXGt+NuZzVl9ZCMZSRm8dflbZCSHz9pOTUylZVpLWqS1sFlQLZZGSjwN07kOlCoizTCLl4KPMdVctxsm498AYJuqZgccO6SqLYLUuQGTYZbk5ORhJSXuvMwq4QQ0Hc4i2rGb+Z4Lag5o6kYh1VCm3FfOwaKD7C/cX+ccQp9t+4zf/Pc3FJQV0KNFDy7uezGTek4KS+y2lMQUo4BSW9hUDBZLE6BRKSMndPiTQJaz6zBwraoucXUCkUzgY+D3qvqqiOS5UUaB1NcyOtn3Gc04yrv9fl6zU4IbR4QgZY58/T8OFB4grziv1vHiir3FfLj5Q9pmtGV4h+HsLdjLo189ysX9Lib3hNx6p85O9iTTIq0FLdNa2lhwFksTo7Epo+XAj1T1U2d7DCaneW7IxkWSgPmY+HYPOvvW0VDDdI4Vc/rqf5CQnsqCFa1rHnpzkxqiShn1ePh625e1Fm1L3hZeXfMq87+dz5GSI0zuNZm7Tr+r1u0EIzEhsUIB2WjYFkvTJZ6UkRsHhqN+RQSgqp+JSMihOjGP9E8Aa/yKyOENTObYmc773NqJXAucdT6pE+HIvmKYPKzmIbgQTgzF3mI8PXuQuH4D4vM5TgZday3WvR/fyxvr38AjHsbnjOfivhczrP2w+vQUT4KnwgsuKzmr3haVxWKxNCTVWkYiMtT5eBWQDvwH49J9KXBIVX9TY8PGgvoUk2jJP3b1a+BL4EWgC7ANmKaqB2tqq77rjKZMgW3v
rWFp6YCah+CCzAcVdm5HXnEeecV5FJUV1Sla9ndHvmPe+nnMGDyD1MRU5q2fx/7C/UzuNZnW6a3r3K8ESSA7NZuWaS1pltLMKiCLxVKJeLKMalJGC2qop6o6PjIiHU99ldH06bDshbWspe+xnf4huEAF5PGgXi++3j3Z/fy/ONC+ObJpc62VD5iUDx9t/Yi56+by1XdfkSAJ/O2cvzGiY00pQUIjIjRPaU7LtJY0T21uM6JaLJZqaRTKKJaorzKaMQM+enoLW8g5tjM5GUpKUMchQXw+FCfeeUD0AzdREaqyJ38Pl796OYdLDtM+sz3n9z6f83udT9vMtnWSX0TISs6iZVpLslOz8STUO0CuxWJpAsSTMnITmy4buBoTMaGifDylkEhLgyIquzKr18v6/evotc4oIjiWaSow+kGoqAgABaUFvLfpPY6WHOXqQVdzQsYJTO41mVGdRjGy48g6Wy8ZyRkVrtg2HI/FYmnMuHFgeAtYSOW5n7jhSMkRvAkJFEsaqo7lA5R06Uh+aX7lqAc4x0VQj4ehXUaiHg/4fIhSKVWDqrJ873Lmrp3L+5vep8hbRO4JuVyVexUiws2jbq6TvP7FqC3TWkYuGkIYM9BaLBZLOHCjjFJV9ZaISxIhth/eTnlCS4q0VdDjG2Y/6GRN3YImmnkkTUxEysoQBXyKJiVBeXnFnBHA40sf559L/kl6UjoTe0zkgj4X0L9N/zo5ESR5kioUUIOsBQpDBlqLxWIJJ26U0bMi8n3MeqGKMAihPOBiieRUpYxkfCTgwYcAKdu/q1xIoKRbZzbMfpABp15kFBEgqpTg5ZGP7+PNb9/kyuR9DKETZ+ScwQkZJ3Bm9zNJT0o3XnbjL3Xt6OBJ8JCdmk2rtFZkpWRVWy4i1CcorMVisUQAN8qoFPgzJj6d39tBgbgZ10lJNTfeIkkjUwsqDbcZq8gM06VuMJ5zxT26kbJhE4vaw9OD4YVc4eAHt9IqrRUHig4AkNMih5wWxxwigrVT1dHB74jQKr0V2anZ0fOEq09QWIvFYokAbpTRLcCJqro/0sJEiuQUo4zyuvcmY8uySsNtVR0UfFs3s2nBa3S95udMPn8zR1OEcZ1Gc87gaYzsOLLarKk1OTqkJaXRKq0VLdNaxoYjwrx5x88ZWSwWSxRxo4xWAXWL+hkDJG3ZTs6jHwN/pMiXyspPXwWoWDukHg/5Hh+v9YVnBsGmFsr6sVPxeMt56bMOJM38Mykn9ibji8X0Ou1UpLQMTU5i/XOPUNaxXaV28Cmix9I/tMloQ+v01rEXEy5YBlrr1GCxWKKIm9h0rwH9gQVUnjNqMNfu+qwzKu59Ii99ezJX67Osl5506mmspNQNm1nSTnngZHijNxQmQ/eDcPU38MvPId1beb3RkJyTkdKyCm88TU6ipFvnY554ApqUhJT7KO91Ignz5pPQI3xJ7yJOfTLdWiyWmKRRrTMCXndecUnKxi2k6yAACjSVz8tX0fMgdPUpO7Pg/R5w1XK4fKVw6hYl0BcucLjNr4jAWY9UWlZ5aE6Bch/i9br6UmMO69RgsTQ5RORs4K+AB3hcVWdWOX4Fx5Kf5gM/VNVvIiFLyPumqj4diRM3FMU9urL5YDHshwmXp3Gwl/LrFdn87rXDTPpW2Xk/JPlAkxNBvKBaORKDf11RchJUsYzKcrqS/O0ms2g2IQGpzhEgHobArFODxdKkEBEP8DfgTGAHsEhE3lDV1QHFNgOnq+ohETkHmAWcFAl5QrpzichmEdlU9RUJYcJNua+cgd8v45eTigDom5fN/bm3csHN/6T4xBw8PqOIBJCyMjQpEU1IQJOTKobo/I4O6597xOwHSE6i7K35pLz1LtKnj4lz16dP9Y4A/nU95eXH1vXEGvPmmT6E6ovFYmksjAQ2qOomVS0F5gBTAguo6heqesjZXAiEDsxZR9yMKA0P+JwKTANaRkac8OJJ8DCp/wUUpg3iiWdg+hV/4+STjgCwesGLDO0y
Eil3QgEpUFrGyi9eD7o+yHf6qRw4+B2t0luRIAlUxEZwM68SD0NgwZwawkWgZZjjuMNv3hy7VqLF0nhIFJHFAduzVHWW87kjsD3g2A5qtnquA94Os3wVuBmmO1Bl10Mi8hlwZ2RECi8/GPYDlnmEJ4CSksrREYp7dCN1/aZK80RV1wc1T21O24y29VuY2tSHwAIjPmzYcGy/jf5gsUQar6oOr+ZYsHAxQT3aRGQcRhmNCZdgVXETKHVowGYCxlJq4JAB9SPVWfRaUlx5VHLD7AcZcMoFFduCWS+UIAm0Sm9F24y24YkP19TX9QRahoHEqpVosTQNdgCdA7Y7ATurFhKRXOBx4JwgxknYcBMC4IGA1x+BYcAlkRIoEqSmOREYCit3t7RrJ4p7dUcTzAOCJiTg7XkiuW1z6dK8S/gClfqHwLzeY1ZA//4mhXn//mYYK97YtMl9H3r3NhZhVWLJSqxNfyyWxsEioKeI5IhIMjAdk4m7AhHpArwKXKWq6yMpTEhlpKrjAl5nqur3VTXk46yIPCkie0VkZcC+liLyvoh867y3qG8H3JDZrByA/CPH5wHaMPtBSk7sbhat9ulD0ptvRz5fUDw4NISiNn2YN8/c5KsSS44SjeE3sVhqgap6gR8D7wJrgBdVdZWI3CgiNzrF7gRaAX8XkWVV5p/CLlCNLyAFuByTMvxO/8tFvdOAocDKgH1/Am53Pt8O3BeqHVUlPT1da83Gjar9+qnP49H8nj0UVL9/y3e6+LvFuvyL17WwV3f1eTzq7dvblHXRlno85t1fvrb7/Xg8qnDs5fHUvn/RprZ9CFY+1PfUkDSG38RiqQJQoC7usbHwcjNMNxfj7ucFCgJeoZTcJ0DVyN5TAP+6paeBC1ycv244T7pSXk76xk1kJRyl4KixeHpd8wtSN2xBysvxrPs29FNwdU/Ntd3vJ3DYKpaGqmpDbfsQrHwsWSON4TexWOKZUNqKAMumti9MdthAyyivyvFDNdS9AVgMLE5OTq79I0GVJ92ObNdpV+Zpfkl+eJ7q67LfTyxZBHWltn0IVj6WrJHG8JtYLFWgkVlGX4jIwPCrwZpR1VmqOlxVhycGm28IRcCTriYk0Cy5GA56yBgy0jyJ+wl8Cq5uEru6p+ba7vdT1aEhHtfZ1LYPwcrHkjXSGH4TiyWOcaOMxgBLRGSdiCwXkRUisryO59sjIu0BnPe9dWwnNE5EAfV4kD59aNavE0c+XmaGgwIJnESvbtiouugEtd1vqYz9niwWi4ObqN1dg+1X1a0hGxfpBsxX1QHO9p+BA6o6U0RuB1qq6q2h2qlP1G7/6v+zVj9EPhl8wehjxzwmzXgFiYmVraaqxy0WiyWOaFRRu90onWCIyH+AsUBrEdkB3AXMBF4UkeuAbZjQQpHFsXaacZidtD+2P9iwUFOPlGCxWCxRImLZDlT1smoOTYjUOYPirP5vxhGO0Mzs83iCR0Jo6pESLBaLJUrEZeqdWuFYO818RzhMc+jXr3IstHhI72CxWCyNHDcODPGNM0neUvI4QnPKXq1i7cTSWheLxWJpojR+ZeS47La990cA7O03FlJSzFBd//7hTe9g45s1bezvb7HUmcavjBza/vNeAHb72kBpqVE8a9capRSutS7Wymra2N/fYqkzTUYZtdv5NQB7aHtsp89nXLfDtdYlHpLoWSKH/f0tljrT+JWRM3TS1mfSdOym3bFjIkYBVV15Hzjc0rOneXk8kJxs6oiYfZs2VS7r8Zhj4N7KivehnXiXP5zEUkQJS2xj/zfHEXLRayxQr0Wv/fvD2rUU+lLIoJA/8Ct+xcxjxzduPN57zqkTNCFcIP36mffAtUn+hbNuPfMCz5WQcEw5xgvxLn84sZ6ZFrc00P8mnha9Nn5lFBBVoTl5XM0zPMJPjx3v1+/4dUU9erhr2+PkPapP1IZ4j/oQ7/LHG1bhNQ6q/m+g8vrHMP2m8aSMGv8wXcDQSQ82soETKx+vOuFcm0nn
3r3rPzQT70M78S5/vGGdJBoHwbIfN/HftPEro4BgnL3Td7CWPmZ/crK5GKpOOAebdD7xRFM2Kanyvnnz6h/sM96Dhca7/PGGdZJoHAT+bwJpwr9p41dGAakB+tx6Plslh6JChZISczFUfaqv+qTfrx98+615aiktNZl3Nm40yqxXL/MUM29e3VMPhCt1QbQmRG3qhYbFWqKNg8D/Tb9+9jelKSijAPr0Mbpk9WpnR7CnejdP+rE4VBKLMlnCj7VEGx/2NwWaggNDAN99B506wf33w//7f/VoKBYn7WNRJovFElWsA0OM0rEj9O0L77xTz4ZicagkFmWyWCwWlzQpZQQwbRp88EHAUF1diEWzOhZlslgsMY2InO1k8d7gJDytelxE5GHn+HIRGRoxWZrSMB3Avn3GEa5rV3jySRg27FjQhErY9RwWiyXOqWmYTkQ8wHrgTGAHsAi4TFVXB5SZBPwEmAScBPxVVU+KhKxRyWckImcDfwU8wOOqOjNElbDRpg289JKxkEaMgLQ0aNnSvBvZnNcWkNKXzb7VivTzgcu1sBaLxRIu/vlPGDMmIk2PBDao6iYAEZkDTAECx42mAM+osVoWiki2iLRX1V3hFqbBlZGjjf9GgDYWkTcCtXGkOess4509fz6sXAkHDxpPb9WA17eLAUURFIESgX4nhmzbYrFYwklG/dwPEkVkccD2LFWd5XzuCGwPOLYDY/0EEqxMRyD+lRHutHHEad0aZsyoocCKe46PHfXS1IYSz2KxWMKBV1WHV3Ms2ARF1XkbN2XCQjQcGKrTtJUQkRtEZLGILPZGw0XZOgRYLJbGzQ6gc8B2J2BnHcqEhWhYRq40rWNKzgLjwBBpoY7Dv0LaYrFYGieLgJ4ikgN8B0wHLq9S5g3gx84I1knA4UjMF0F0lFGDaVqLxWKxBEdVvSLyY+BdjDPZk6q6SkRudI4/BryF8aTbABQC10RKngZ37RaRRIw74QSMNl4EXK6q1Zoh4XTttlgslqZCPEVgaHDLqDpt3NByWCwWiyV2aHKLXi0Wi6WpEE+WUZMLB2SxWCyW2MMqI4vFYrFEnbgYphMRH1BUx+qJQFPLpWD73DSwfW4a1KfPaaoaF0ZHXCij+iAii2tYgdwosX1uGtg+Nw2aSp/jQmNaLBaLpXFjlZHFYrFYok5TUEazQhdpdNg+Nw1sn5sGTaLPjX7OyGKxWCyxT1OwjCwWi8US41hlZLFYLJao02iUkYicLSLrRGSDiNwe5LiIyMPO8eUiMjQacoYTF32+wunrchH5QkQGRUPOcBKqzwHlRohIuYjEdUZEN/0VkbEiskxEVonIxw0tY7hxcV03F5F5IvKN0+eIRZJuKETkSRHZKyIrqzne6O5fx6Gqcf/CBFzdCHQHkoFvgH5VykwC3sbkUxoFfBltuRugz6cALZzP5zSFPgeU+y8m/P3UaMsd4d84G5MluYuzfUK05W6APv8auM/53AY4CCRHW/Z69vs0YCiwsprjjer+FezVWCyjilTmqloK+FOZBzIFeEYNC4FsEWnf0IKGkZB9VtUvVPWQs7kQkzsqnnHzOwP8BHgF2NuQwkUAN/29HHhVVbcBqGpT6LMCWSIiQCZGGcV1VAZV/QTTj+pobPev42gsyshNKnNX6c7jiNr25zrMk1U8E7LPItIRuBB4rAHlihRufuNeQAsR+UhElojI1Q0mXWRw0+dHgb6YpJwrgJ+pqq9hxIsaje3+dRzRyPQaCdykMneV7jyOcN0fERmHUUZjIipR5HHT54eA21S13Dw4xzVu+psIDMMkq0wD/iciC1V1faSFixBu+jwRWAaMB3oA74vIp6p6JMKyRZPGdv86jsaijNykMm9s6c5d9UdEcoHHgXNU9UADyRYp3PR5ODDHUUStgUki4lXV1xtEwvDi9rrer6oFQIGIfAIMwmRTjkfc9PkaYKaayZQNIrIZ6AN81TAiRoXGdv86jsYyTLcI6CkiOSKSDEwH3qhS5g3g
ascrZRRwWFV3NbSgYSRkn0WkC/AqcFUcPykHErLPqpqjqt1UtRvwMnBTnCoicHddzwVOFZFEEUkHTgLWNLCc4cRNn7dhLEFEpC3QG9jUoFI2PI3t/nUcjcIy0mpSmYvIjc7xxzCeVZOADUAh5ukqbnHZ5zuBVsDfHUvBq3Ec/ddlnxsNbvqrqmtE5B1gOeADHlfVoO7B8YDL3/i3wGwRWYEZvrpNVfdHTegwICL/AcYCrUVkB3AXkASN8/4VDBsOyGKxWCxRp7EM01ksFosljrHKyGKxWCxRxyoji8VisUQdq4wsFovFEnWsMrJYLBZL1LHKyGKxWCxRxyoji8VisUQdq4xiBCcnzbMNeL4zgp1PRE4RkXvq2fY/RWR0fdqo43kfEJHVIvJIQ5870ohItojcFKVz54epnTQR+VhEPHWoW6n/IvJFPeRIFpFPRKRRLPpvLFhlFDsMBpbWVKAuf+IaGBTsfE7aibvq2fZJmJQVYaWm/otId2C0qvZT1Z+E+9xucEK1ROo/lQ3UShlFWJ66cC0m3UV5HepmE9B/VT2lrkI4qSk+BC6taxuW8BNLF2pTZxDQUUS+FJFNIjIWQEReEpEHRWQB8CsRmSoiC50sl5+JSBun3Gsi8jsR+VREdovIGc7+DiLyiogsFZG1IjIy4HztgpR/SUTGhGizr/NkuVxEfikiG/ydEJG+wHonavb3xKQ1WC4in9Ykj4j0cdpcJSIfiEjravqfIyJzRWSxiHwlIr1FpDfwMdDVaTejhu+puu/DL383Z//Tjtwvi4n5hoi87vRnlYjcEFB+jYj8Hfga6FxDubUi8riIrBSR58RYp5+LyLeBcojIlU7flomxMj3ATKCHs+/P1ZULJk9Au/dJZevibhH5f9X1Lcj3sjJg+xcicncImatyBSaWnr9O0HOKyNXOd/+NHLPeK/VfAqw1EbnF+U5XisjNVX6XfzntvyciaQGyvO7IY4kVop3dz77MC2Ol3O18Pgv41Pm8Frg3oFyrgM93AT9yPn8L/ML5fBHwFCb24DfAec7+dCDL+fwNcGtgeefzGqB5iDa/BoY4+/8BvB4g0y2YJ+AsTAbSZGd/dnXyACnAqoA2bwN+X7X/mFhdHwI9nO1JAXL/Dri+pu+ppu8joGw3TGj+0c72kwHfQUvnPQ1YiYn71w0TE25UQBvVlfMCAzEPgUuctgWTOO11p05fYB6Q5Gz/Hbjaqb8y4Bw1laskT0CdIcDHAduBGWKPkzmgXH6Q8/+CY9drUFmqnDsZ2F1lX7DvqT+wDmhdpUzV8+c778MwOY0yMIn2Vjn99H/fg51yLwJXBtT3APui/b+3r2MvaxnFAGLGrlsBf3B2LcMETEwFWgL3BhSf4TyBfoMZtih2ntybA39xyiQCecAFwBpVnQ+gqoWqelREkpx27w8s75wvSVUP19DmRcA3quof4luNucH7mQi8A5RjbjIPiMhwVa1WHmf/Z1XaPCFI/y/A3KxeEZFlwJ+AYufYwCpyHPc91XD+qmxX1c+dz//mWB6onzrtLcRYHD2d/VvVZN8kRLnNqrpCTSK4VcCHau6MKzA3TzDRqIcBi5w+TsCk4K5KTeWqyoPT36WY77WDiAwCDqmTIbYGmd3gRubWmOsnkGDnHA+8rE7gU1WtKfspmN/mNVUtUNV8TJT6U51jm1V1mfN5Cce+Y9QMFZaKSJa7LloijZ3Aiw36YVItlzrbQzE31v6YXPdeMMMXmLTM41U1X0zumlVOuSV6bCw+F/OkOZjgczf9MArFV6V8f4wioIY2czHK0s8AjPLBUWDZqrrT2R4ATAZmicjjQIca5FkRsD3QkaNS/zFDi79R1SeCtNHf+S5q+p7Oq+b8VakaPVjFDJueAZysqoUi8hGQ6hwv8BcMUa4koE1fwLaPY/9FAZ5W1V8FCiAi3arIVFO5AqrnZWAq0A6T0juUzH68VB7WDzweVJYqFAXWqeGcQu2SxtWUQTHw+/Y/HAWSwrGHGUuU
sZZRbDAIyBGRFBHJxAwrPYS5KS8PKDcQ+MK5wV4MnIK5iQ+gsoLIdertxtykARBn3sQ53zdBygeer7o2D2BSXSMig4ErA9oaByxwjvV0nlbnAPMxN5rq5PkOo5D8jghXAc8E6f8uYKI4k/IiMlAMWUCZqhaG+J6qO39VuojIyc7ny4DPMFbiIefG2QcYVU1dt+Wq40Ngqoic4MjYUkS6AkcxQ5qhyoViDiZH0FSMYnIr8x6MVdVKRFIwit21LKp6CPA41m5N5/wQuEREWvnbcvZX7b+fT4ALRCRdRDIwKec/DfUlOO3vU9WyUGUtDYNVRrHBIOA54AtMtsqHnWGWqjfjpzFDG59iFMImNRk+B3K8tbISmA20dSZwlwH+G+ygKu36yweer7o2nwWGi8gizNzQFlX1JzY7B8dKAn4jIutE5GsgBzOPUJ08zwIdxOSnmQNcqyYrbdX+P4m5Ztc49W9zhrn8soX6nqo7f1XWAN8TkeWYYcJ/OP1KdPb9luotLLflgqKqq4H/A95z2ngfaO98H587k/R/rq6ci/ZXYW7q3+mx5GwhZXZu2vcCX2IeLtaGkjnI6d/j2JBn0HM68v0e+NgZwnvQ2V+p/wHn/hrzu37lyPZ4wHBvTYzD5AiyxAg2n5GlVohIpjM2j4j8EuPs8H/O9tfASfH8tOkMc81X1QHRlqWxISJDgFtU9aoYkOVV4Fequi7aslgM1jKy1JafB1gW3TBPtQCo6tB4VkSWyOJYLAskvOvlao2YdOavW0UUW1jLyGKxWCxRx1pGFovFYok6VhlZLBaLJepYZWSxWCyWqGOVkcVisViijlVGFovFYok6VhlZLBaLJepYZWSxWCyWqPP/AR5iwUQEbwl7AAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "