Skip to content

add python 3.13 #136

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Jul 20, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions CHANGELOGS.rst
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,11 @@
Change Logs
===========

0.5.2
=====

* :pr:`136`: adds Python 3.13 to CI, updates the package to support scikit-learn==1.7.1

0.5.1
=====

Expand Down
4 changes: 3 additions & 1 deletion _cmake/externals/FindLocalPyBind11.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
# pybind11
#

set(pybind11_TAG "v2.10.4")
set(pybind11_TAG "v2.13.5")

include(FetchContent)
FetchContent_Declare(
Expand All @@ -19,6 +19,8 @@ FetchContent_Declare(
FetchContent_GetProperties(pybind11)
if(NOT pybind11_POPULATED)
FetchContent_Populate(pybind11)
message(STATUS "pybind11_SOURCE_DIR=${pybind11_SOURCE_DIR}")
message(STATUS "pybind11_BINARY_DIR=${pybind11_BINARY_DIR}")
add_subdirectory(${pybind11_SOURCE_DIR} ${pybind11_BINARY_DIR})
else()
message(FATAL_ERROR "Pybind11 was not found.")
Expand Down
2 changes: 1 addition & 1 deletion _doc/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -98,5 +98,5 @@ Source are available at `sdpython/mlinsights <https://github.com/sdpython/mlinsi
Older versions
++++++++++++++

* `0.5.2 <../v0.5.2/index.html>`_
* `0.5.1 <../v0.5.1/index.html>`_
* `0.5.0 <../v0.5.0/index.html>`_
2 changes: 1 addition & 1 deletion _unittests/ut_sklapi/test_sklearn_convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def test_pipeline_with_callable(self):
pipe.fit(X_train, y_train)
pred = pipe.predict(X_test)
score = accuracy_score(y_test, pred)
self.assertGreater(score, 0.8)
self.assertGreater(score, 0.75)
score2 = pipe.score(X_test, y_test)
self.assertEqualFloat(score, score2, precision=1e-5)
rp = repr(conv)
Expand Down
84 changes: 77 additions & 7 deletions azure-pipelines.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,76 @@
jobs:

- job: 'TestLinuxWheelNoCuda'
- job: 'TestLinuxWheelNoCuda313'
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
Python311-Linux:
python.version: '3.13'
maxParallel: 3

steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '$(python.version)'
architecture: 'x64'
- script: sudo apt-get update
displayName: 'AptGet Update'
- script: sudo apt-get install -y graphviz
displayName: 'Install Graphviz'
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
- script: pip install -r requirements.txt
displayName: 'Install Requirements'
- script: pip install -r requirements-dev.txt
displayName: 'Install Requirements dev'
- script: |
ruff check .
displayName: 'Ruff'
- script: |
black --diff .
displayName: 'Black'
- script: |
cmake-lint _cmake/Find* --disabled-codes C0103 C0113 --line-width=88
cmake-lint _cmake/CMake* --disabled-codes C0103 C0113 --line-width=88
displayName: 'cmake-lint'
- script: |
rstcheck -r ./_doc ./mlinsights
displayName: 'rstcheck'
- script: |
cython-lint .
displayName: 'cython-lint'
- script: |
export USE_CUDA=0
python -m pip install -e . --config-settings="--use_cuda=0" -v
displayName: 'pip install -e . --config-settings="--use_cuda=0" -v'
- script: |
python -m pytest _unittests --durations=10
displayName: 'Runs Unit Tests'
- script: |
# --config-settings does not work yet.
# python -m pip wheel . --config-settings="--use_cuda=0" -v
export USE_CUDA=0
python -m pip wheel . --config-settings="--use_cuda=0" -v
displayName: 'build wheel'
- script: |
mkdir dist
cp mlinsights*.whl dist
displayName: 'copy wheel'
- script: |
pip install auditwheel-symbols
auditwheel-symbols --manylinux 2014 dist/*.whl || exit 0
displayName: 'Audit wheel'
- script: |
pip install abi3audit
abi3audit dist/*.whl || exit 0
displayName: 'abi3audit wheel'
- task: PublishPipelineArtifact@0
inputs:
artifactName: 'wheel-linux-pip-$(python.version)'
targetPath: 'dist'

- job: 'TestLinuxWheelNoCuda312'
pool:
vmImage: 'ubuntu-latest'
strategy:
Expand Down Expand Up @@ -70,13 +140,13 @@ jobs:
artifactName: 'wheel-linux-pip-$(python.version)'
targetPath: 'dist'

- job: 'TestLinux'
- job: 'TestLinux311'
pool:
vmImage: 'ubuntu-latest'
strategy:
matrix:
Python311-Linux:
python.version: '3.10'
python.version: '3.11'
maxParallel: 3

steps:
Expand Down Expand Up @@ -158,13 +228,13 @@ jobs:
artifactName: 'wheel-linux-$(python.version)'
targetPath: 'dist'

- job: 'TestWindows'
- job: 'TestWindows312'
pool:
vmImage: 'windows-latest'
strategy:
matrix:
Python311-Windows:
python.version: '3.11'
python.version: '3.12'
maxParallel: 3

steps:
Expand Down Expand Up @@ -204,13 +274,13 @@ jobs:
artifactName: 'wheel-windows-$(python.version)'
targetPath: 'dist'

- job: 'TestMac'
- job: 'TestMac312'
pool:
vmImage: 'macOS-latest'
strategy:
matrix:
Python311-Mac:
python.version: '3.11'
python.version: '3.12'
maxParallel: 3

steps:
Expand Down
2 changes: 1 addition & 1 deletion mlinsights/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
__version__ = "0.5.1"
__version__ = "0.5.2"
__author__ = "Xavier Dupré"
__github__ = "https://github.com/sdpython/mlinsights"
__url__ = "https://sdpython.github.io/doc/dev/mlinsights/"
Expand Down
6 changes: 5 additions & 1 deletion mlinsights/mlmodel/interval_regressor.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
import numpy
import numpy.random
from sklearn.base import RegressorMixin, clone, BaseEstimator
from sklearn.utils._joblib import Parallel, delayed

try:
from sklearn.utils.parallel import Parallel, delayed
except ImportError:
from sklearn.utils._joblib import Parallel, delayed

try: # noqa: SIM105
from tqdm import tqdm
Expand Down
6 changes: 5 additions & 1 deletion mlinsights/mlmodel/piecewise_estimator.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,11 @@
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.utils._joblib import Parallel, delayed

try:
from sklearn.utils.parallel import Parallel, delayed
except ImportError:
from sklearn.utils._joblib import Parallel, delayed

try: # noqa: SIM105
from tqdm import tqdm
Expand Down
44 changes: 32 additions & 12 deletions mlinsights/mlmodel/quantile_mlpregressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,18 +14,24 @@
from sklearn.metrics import mean_absolute_error


def absolute_loss(y_true, y_pred, sample_weight=None):
    """
    Computes the absolute loss for regression.

    :param y_true: array-like or label indicator matrix
        Ground truth (correct) values.
    :param y_pred: array-like or label indicator matrix
        Predicted values, as returned by a regression estimator.
    :param sample_weight: array-like of shape (n_samples,) or None,
        individual weight given to each sample
    :return: loss, float
        The degree to which the samples are correctly predicted.
    """
    if sample_weight is None:
        return np.sum(np.abs(y_true - y_pred)) / y_true.shape[0]
    # np.average already normalizes by the sum of the weights; dividing the
    # result by y_true.shape[0] again would shrink the loss by a factor of
    # n_samples and make the weighted loss inconsistent with the unweighted
    # branch above (and with sklearn's mean_absolute_error semantics).
    return np.average(np.abs(y_true - y_pred), weights=sample_weight, axis=0)


def float_sign(a):
Expand Down Expand Up @@ -132,7 +138,7 @@ def _modify_loss_derivatives(self, last_deltas):
return DERIVATIVE_LOSS_FUNCTIONS["absolute_loss"](last_deltas)
return last_deltas

def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
def _backprop(self, *args):
"""
Computes the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Expand All @@ -141,6 +147,8 @@ def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
The input data.
:param y: array-like, shape (n_samples,)
The target values.
:param sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
:param activations: list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
:param deltas: list, length = n_layers - 1
Expand All @@ -155,10 +163,18 @@ def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
:param intercept_grads: list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
:return: loss, float
:return: coef_grads, list, length = n_layers - 1
:return: intercept_grads, list, length = n_layers - 1
:return: loss (float),
coef_grads (list, length = n_layers - 1)
intercept_grads: (list, length = n_layers - 1)


"""
if len(args) == 6:
X, y, activations, deltas, coef_grads, intercept_grads = args
sample_weight = None
else:
X, y, sample_weight, activations, deltas, coef_grads, intercept_grads = args

n_samples = X.shape[0]

# Forward propagate
Expand All @@ -169,10 +185,12 @@ def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
loss_func_name = "binary_log_loss"
loss_function = self._get_loss_function(loss_func_name)
loss = loss_function(y, activations[-1])
loss = loss_function(y, activations[-1], sample_weight)
# Add L2 regularization term to loss
values = np.sum(np.array([np.dot(s.ravel(), s.ravel()) for s in self.coefs_]))
loss += (0.5 * self.alpha) * values / n_samples

sw_sum = n_samples if sample_weight is None else sample_weight.sum()
loss += (0.5 * self.alpha) * values / sw_sum

# Backward propagate
last = self.n_layers_ - 2
Expand All @@ -182,20 +200,22 @@ def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
deltas[last] = activations[-1] - y
if sample_weight is not None:
deltas[last] *= sample_weight.reshape(-1, 1)

# We insert the following modification to modify the gradient
# due to the modification of the loss function.
deltas[last] = self._modify_loss_derivatives(deltas[last])

# Compute gradient for the last layer
temp = self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads
last, sw_sum, activations, deltas, coef_grads, intercept_grads
)
if temp is None:
# recent version of scikit-learn
# Compute gradient for the last layer
self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads
last, sw_sum, activations, deltas, coef_grads, intercept_grads
)

inplace_derivative = DERIVATIVES[self.activation]
Expand All @@ -205,7 +225,7 @@ def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
inplace_derivative(activations[i], deltas[i - 1])

self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads, intercept_grads
i - 1, sw_sum, activations, deltas, coef_grads, intercept_grads
)
else:
coef_grads, intercept_grads = temp
Expand All @@ -220,7 +240,7 @@ def _backprop(self, X, y, activations, deltas, coef_grads, intercept_grads):
coef_grads,
intercept_grads,
) = self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads, intercept_grads
i - 1, sw_sum, activations, deltas, coef_grads, intercept_grads
)

return loss, coef_grads, intercept_grads
Expand Down
10 changes: 5 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ license = {file = "LICENSE.txt"}
name = "mlinsights"
readme = "README.rst"
requires-python = ">=3.10"
version = "0.5.1"
version = "0.5.2"

[project.urls]
homepage = "https://sdpython.github.io/doc/mlinsights/dev/"
Expand Down Expand Up @@ -109,7 +109,7 @@ manylinux-x86_64-image = "manylinux2014"
[tool.cibuildwheel.linux]
archs = ["x86_64"]
build = "cp*"
skip = "cp36-* cp37-* cp38-* cp39-* cp313-* cp314-* cp315-* pypy* *musllinux*"
skip = "cp36-* cp37-* cp38-* cp39-* cp310-* cp314-* cp315-* pypy* *musllinux*"
manylinux-x86_64-image = "manylinux2014"
before-build = "pip install auditwheel-symbols abi3audit"
build-verbosity = 1
Expand All @@ -127,13 +127,13 @@ environment = """
DYLD_LIBRARY_PATH='$(brew --prefix libomp)/lib:$DYLD_LIBRARY_PATH'
"""
build = "cp*"
skip = "cp36-* cp37-* cp38-* cp39-* cp313-* cp314-* cp315-* pypy* pp*"
skip = "cp36-* cp37-* cp38-* cp39-* cp310-* cp314-* cp315-* pypy* pp*"
before-build = "brew install libomp llvm&&echo 'export PATH=\"/opt/homebrew/opt/llvm/bin:$PATH\"' >> /Users/runner/.bash_profile"

[tool.cibuildwheel.windows]
archs = ["AMD64"]
build = "cp*"
skip = "cp36-* cp37-* cp38-* cp39-* cp313-* cp314-* cp315-* pypy*"
skip = "cp36-* cp37-* cp38-* cp39-* cp310-* cp314-* cp315-* pypy*"

[tool.cython-lint]
max-line-length = 88
Expand Down Expand Up @@ -189,7 +189,7 @@ select = [
"C401", "C408", "C413",
"RUF012", "RUF100", "RUF010",
"SIM108", "SIM910", "SIM110", "SIM102", "SIM114", "SIM103", "UP015",
"UP027", "UP031", "UP034", "UP032", "UP006", "UP035", "UP007", "UP038"
"UP027", "UP031", "UP034", "UP032", "UP006", "UP035", "UP007", "UP038", "UP045"
]
"**/plot*.py" = ["B018"]
"_unittests/**.py" = ["B904", "RUF015", "C400"]
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -670,7 +670,7 @@ def get_package_data():

setup(
name="mlinsights",
version=get_version_str(here, "0.5.1"),
version=get_version_str(here, "0.5.2"),
description=get_description(),
long_description=get_long_description(here),
author="Xavier Dupré",
Expand Down
Loading