import pickle
import os
import collections.abc
import nibabel as nib
import numpy as np
from nilearn.image import reorder_img, new_img_like
from .nilearn_custom_utils.nilearn_utils import crop_img_to
from .sitk_utils import resample_to_spacing, calculate_origin_offset
from scipy.ndimage import map_coordinates
from sklearn.base import BaseEstimator, TransformerMixin
try:
    # scikit-learn >= 0.22 moved this private helper to sklearn.preprocessing._data
    from sklearn.preprocessing._data import _handle_zeros_in_scale
except ImportError:
    from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.utils.validation import check_is_fitted, FLOAT_DTYPES, check_array
def get_image(data, affine=None, nib_class=nib.Nifti1Image):
if affine is None:
affine = np.eye(4)
return nib_class(dataobj=data, affine=affine)
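
# Example usage (illustrative; the array and output path below are made up):
#   volume = np.zeros((64, 64, 64), dtype=np.float32)
#   image = get_image(volume)            # Nifti1Image with an identity affine
#   nib.save(image, "volume.nii.gz")     # write it out with nibabel
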
def pickle_dump(item, out_file):
with open(out_file, "wb") as opened_file:
pickle.dump(item, opened_file)
def pickle_load(in_file):
with open(in_file, "rb") as opened_file:
return pickle.load(opened_file)
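
# Example usage (illustrative; the file name is hypothetical):
#   pickle_dump({"training": [0, 1, 2], "validation": [3]}, "split.pkl")
#   split = pickle_load("split.pkl")     # round-trips the same dictionary
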
def get_affine(in_file):
return read_image(in_file).affine
def read_image_files(image_files, image_shape=None, crop=None, label_indices=None):
    """
    Read a list of image files, resampling label volumes with nearest-neighbor interpolation.

    :param image_files: sequence of image file names to read.
    :param image_shape: shape to resize each image to (see ``resize``). If None, images keep their original shape.
    :param crop: slices to crop each image to before resizing.
    :param label_indices: index or iterable of indices of the label file(s) within ``image_files``. Label files are
    read with nearest-neighbor interpolation, because linear interpolation would corrupt the label values. If None,
    the last file is assumed to be the labels file.
    :return: list of nibabel images.
    """
    if label_indices is None:
        # By default, assume the last file is the labels file.
        label_indices = [len(image_files) - 1]
    elif not isinstance(label_indices, collections.abc.Iterable) or isinstance(label_indices, str):
        label_indices = [label_indices]
    image_list = list()
    for index, image_file in enumerate(image_files):
        interpolation = "nearest" if index in label_indices else "linear"
        image_list.append(read_image(image_file, image_shape=image_shape, crop=crop, interpolation=interpolation))
    return image_list
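
# Example usage (illustrative; the file names are hypothetical): read two modalities plus a
# label map at a common shape. label_indices=[2] makes the label volume use nearest-neighbor
# interpolation so its integer labels are not blurred by resampling.
#   images = read_image_files(["t1.nii.gz", "flair.nii.gz", "truth.nii.gz"],
#                             image_shape=(128, 128, 128), label_indices=[2])
#   data = np.stack([np.asanyarray(img.dataobj) for img in images])   # shape (3, 128, 128, 128)
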
def read_image(in_file, image_shape=None, interpolation='linear', crop=None):
    """Load ``in_file`` with nibabel, optionally cropping it to ``crop`` and resizing it to ``image_shape``."""
    print("Reading: {0}".format(in_file))
    image = nib.load(os.path.abspath(in_file))
image = fix_shape(image)
if crop:
image = crop_img_to(image, crop, copy=True)
if image_shape:
return resize(image, new_shape=image_shape, interpolation=interpolation)
else:
return image
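
# Example usage (illustrative; the file name and crop are hypothetical): crop to a region of
# interest first, then resample the cropped image to the network input shape.
#   crop = [slice(10, 120), slice(10, 120), slice(0, 100)]
#   image = read_image("t1.nii.gz", image_shape=(64, 64, 64), crop=crop)
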
def read_img(in_file):
print("Reading: {0}".format(in_file))
image = nib.load(os.path.abspath(in_file))
return image
def fix_shape(image):
    """Drop a trailing singleton dimension, e.g. reshape (x, y, z, 1) data to (x, y, z)."""
    if image.shape[-1] == 1:
        # np.asanyarray(image.dataobj) replaces the deprecated image.get_data()
        return image.__class__(dataobj=np.squeeze(np.asanyarray(image.dataobj)), affine=image.affine)
    return image
def resize(image, new_shape, interpolation="linear"):
    """Resample ``image`` to ``new_shape``, updating spacing and affine so it stays in the same physical space."""
    image = reorder_img(image, resample=interpolation)
    zoom_level = np.divide(new_shape, image.shape)
    new_spacing = np.divide(image.header.get_zooms(), zoom_level)
    # np.asanyarray(image.dataobj) replaces the deprecated image.get_data()
    new_data = resample_to_spacing(np.asanyarray(image.dataobj), image.header.get_zooms(), new_spacing,
                                   interpolation=interpolation)
    new_affine = np.copy(image.affine)
    np.fill_diagonal(new_affine, new_spacing.tolist() + [1])
    new_affine[:3, 3] += calculate_origin_offset(new_spacing, image.header.get_zooms())
    return new_img_like(image, new_data, affine=new_affine)
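
# Example usage (illustrative): resample an intensity image onto a cubic grid. For label maps,
# pass interpolation="nearest" so that label values are preserved.
#   resized = resize(read_img("t1.nii.gz"), new_shape=(144, 144, 144))
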
def interpolate_affine_coords(data, affine, coords, mode='constant', order=0, cval=0):
    """Sample ``data`` on the voxel grid given by ``coords`` after mapping it through ``affine``."""
    # Build the index grid with the coordinate axis last so it can be passed through the affine,
    # then move the coordinate axis back to the front, as map_coordinates expects.
    in_vox_coords = np.array(np.meshgrid(*coords, indexing='ij'))
    coords_last = in_vox_coords.transpose(1, 2, 3, 0)
    mapped_coords = nib.affines.apply_affine(affine, coords_last)
    coords_first = mapped_coords.transpose(3, 0, 1, 2)
    return map_coordinates(data, coords_first, mode=mode, order=order, cval=cval)
def interpolate_affine_range(data, affine, ranges, mode='constant', order=0, cval=0):
    """Convenience wrapper around ``interpolate_affine_coords`` taking (start, stop) index ranges."""
    return interpolate_affine_coords(data, affine, coords=[range(s, e) for s, e in ranges],
                                     mode=mode, order=order, cval=cval)
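
# Example usage (illustrative sketch): resample `moving` onto the voxel grid of `reference`
# (both assumed to be nibabel images loaded elsewhere). The affine below maps reference voxel
# indices into moving voxel indices; the default order=0 keeps nearest-neighbor sampling.
#   voxel_map = np.linalg.inv(moving.affine).dot(reference.affine)
#   resampled = interpolate_affine_range(np.asanyarray(moving.dataobj), voxel_map,
#                                        ranges=[(0, dim) for dim in reference.shape])
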
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
    Attributes
    ----------
    min_ : float
        Adjustment for the minimum: ``feature_range[0] - data_min_ * scale_``.

    scale_ : float
        Relative scaling of the data: ``(feature_range[1] - feature_range[0]) / data_range_``.

    data_min_ : float
        Minimum seen in the data.

    data_max_ : float
        Maximum seen in the data.

    data_range_ : float
        Range ``(data_max_ - data_min_)`` seen in the data.

    Examples
    --------
    >>> import numpy as np
    >>> data = np.array([[-1., 2.], [-0.5, 6.], [0., 10.], [1., 18.]])
    >>> scaler = MinMaxScaler().fit(data)
    >>> print(scaler.data_min_, scaler.data_max_)  # a single min/max over the whole array
    -1.0 18.0
    >>> print(np.round(scaler.transform(data), 3))
    [[0.    0.158]
     [0.026 0.368]
     [0.053 0.579]
     [0.105 1.   ]]
    >>> print(np.round(scaler.transform(np.array([[40., 2.]])), 3))  # clipped to feature_range
    [[1.    0.158]]

    See also
    --------
    sklearn.preprocessing.minmax_scale : Equivalent scikit-learn function (per feature, without
        the whole-array behaviour of this class).

    Notes
    -----
    For a comparison of the different scalers, transformers, and normalizers, see the
    scikit-learn example ``examples/preprocessing/plot_all_scaling.py``.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
        # warn_on_dtype was removed from check_array in newer scikit-learn releases, so it is not passed here.
        X = check_array(X, copy=self.copy, estimator=self, dtype=FLOAT_DTYPES,
                        ensure_2d=False, allow_nd=True)
data_min = np.min(X)
data_max = np.max(X)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES, ensure_2d=False, allow_nd=True)
X *= self.scale_
X += self.min_
X = np.minimum(X, self.feature_range[1])
X = np.maximum(X, self.feature_range[0])
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES, ensure_2d=False, allow_nd=True)
X -= self.min_
X /= self.scale_
return X
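
# Example usage (illustrative; the volumes are made up): fit the scaler on training data and
# apply the same whole-array scaling to new data. Values outside the training range are
# clipped to feature_range by transform().
#   scaler = MinMaxScaler(feature_range=(0, 1))
#   for volume in training_volumes:           # incremental fitting over many volumes
#       scaler.partial_fit(volume)
#   scaled = scaler.transform(new_volume)     # new_volume scaled with the training statistics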