# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the combiner class."""
import numpy as np
from numpy import ma
try:
    import bottleneck as bn
except ImportError:
    HAS_BOTTLENECK = False
else:
    HAS_BOTTLENECK = True
from .core import sigma_func
from astropy.nddata import CCDData, StdDevUncertainty
from astropy.stats import sigma_clip
from astropy.utils import deprecated_renamed_argument
from astropy import log
__all__ = ['Combiner', 'combine']
def _default_median():  # pragma: no cover
    if HAS_BOTTLENECK:
        return bn.nanmedian
    else:
        return np.nanmedian


def _default_average():  # pragma: no cover
    if HAS_BOTTLENECK:
        return bn.nanmean
    else:
        return np.nanmean


def _default_sum():  # pragma: no cover
    if HAS_BOTTLENECK:
        return bn.nansum
    else:
        return np.nansum


def _default_std():  # pragma: no cover
    if HAS_BOTTLENECK:
        return bn.nanstd
    else:
        return np.nanstd
class Combiner:
    """
    A class for combining CCDData objects.

    The Combiner class is used to combine `~astropy.nddata.CCDData` objects,
    including the method for combining the data, rejecting outlying data,
    and the weighting used for combining frames.

    Parameters
    ----------
    ccd_iter : list or generator
        A list or generator of CCDData objects that will be combined
        together.

    dtype : str or `numpy.dtype` or None, optional
        Allows user to set dtype. See `numpy.array` ``dtype`` parameter
        description. If ``None`` it uses ``np.float64``.
        Default is ``None``.

    Raises
    ------
    TypeError
        If objects in ``ccd_iter`` are not `~astropy.nddata.CCDData` objects,
        have different units, or have different shapes.

    Examples
    --------
    The following is an example of combining together different
    `~astropy.nddata.CCDData` objects::

        >>> import numpy as np
        >>> import astropy.units as u
        >>> from astropy.nddata import CCDData
        >>> from ccdproc import Combiner
        >>> ccddata1 = CCDData(np.ones((4, 4)), unit=u.adu)
        >>> ccddata2 = CCDData(np.zeros((4, 4)), unit=u.adu)
        >>> ccddata3 = CCDData(np.ones((4, 4)), unit=u.adu)
        >>> c = Combiner([ccddata1, ccddata2, ccddata3])
        >>> ccdall = c.average_combine()
        >>> ccdall  # doctest: +FLOAT_CMP
        CCDData([[ 0.66666667, 0.66666667, 0.66666667, 0.66666667],
                 [ 0.66666667, 0.66666667, 0.66666667, 0.66666667],
                 [ 0.66666667, 0.66666667, 0.66666667, 0.66666667],
                 [ 0.66666667, 0.66666667, 0.66666667, 0.66666667]]...)
    """

    def __init__(self, ccd_iter, dtype=None):
        if ccd_iter is None:
            raise TypeError(
                "ccd_iter should be a list or a generator of CCDData objects.")

        if dtype is None:
            dtype = np.float64

        default_shape = None
        default_unit = None

        ccd_list = list(ccd_iter)

        for ccd in ccd_list:
            # raise an error if the objects aren't CCDData objects
            if not isinstance(ccd, CCDData):
                raise TypeError(
                    "ccd_list should only contain CCDData objects.")

            # raise an error if the shape is different
            if default_shape is None:
                default_shape = ccd.shape
            else:
                if not (default_shape == ccd.shape):
                    raise TypeError("CCDData objects are not the same size.")

            # raise an error if the units are different
            if default_unit is None:
                default_unit = ccd.unit
            else:
                if not (default_unit == ccd.unit):
                    raise TypeError("CCDData objects don't have the same unit.")

        self.ccd_list = ccd_list
        self.unit = default_unit
        self.weights = None
        self._dtype = dtype

        # set up the data array
        new_shape = (len(ccd_list),) + default_shape
        self.data_arr = ma.masked_all(new_shape, dtype=dtype)

        # populate self.data_arr
        for i, ccd in enumerate(ccd_list):
            self.data_arr[i] = ccd.data
            if ccd.mask is not None:
                self.data_arr.mask[i] = ccd.mask
            else:
                self.data_arr.mask[i] = ma.zeros(default_shape)

        # Must be after self.data_arr is defined because it checks the
        # length of the data array.
        self.scaling = None

    @property
    def dtype(self):
        return self._dtype

    @property
    def weights(self):
        """
        Weights used when combining the `~astropy.nddata.CCDData` objects.

        Parameters
        ----------
        weight_values : `numpy.ndarray` or None
            An array with the weight values. The dimensions should match
            the dimensions of the data arrays being combined.
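
        Examples
        --------
        A minimal sketch, assuming ``c`` is a `~ccdproc.Combiner` of three
        4 x 4 images. Weights may be given per image (a 1D array whose
        length is the number of images) or per pixel (an array matching the
        full stacked data shape)::

            >>> c.weights = np.array([1.0, 2.0, 1.0])  # doctest: +SKIP
            >>> c.weights = np.ones((3, 4, 4))  # doctest: +SKIP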
        """
        return self._weights

    @weights.setter
    def weights(self, value):
        if value is not None:
            if isinstance(value, np.ndarray):
                if value.shape != self.data_arr.data.shape:
                    # allow a 1D vector of per-image weights
                    if value.ndim != 1:
                        raise ValueError(
                            "1D weights expected when shapes of the data "
                            "and weights differ.")
                    if value.shape[0] != self.data_arr.data.shape[0]:
                        raise ValueError(
                            "Length of weights not compatible with "
                            "specified axis.")
                self._weights = value
            else:
                raise TypeError("weights must be a numpy.ndarray.")
        else:
            self._weights = None

    @property
    def scaling(self):
        """
        Scaling factor used in combining images.

        Parameters
        ----------
        scale : function or `numpy.ndarray`-like or None, optional
            Images are multiplied by scaling prior to combining them.
            Scaling may be either a function, which will be applied to each
            image to determine the scaling factor, or a list or array whose
            length is the number of images in the `~ccdproc.Combiner`.
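
        Examples
        --------
        A minimal sketch, assuming ``c`` is a `~ccdproc.Combiner`: scale
        each image by the inverse of its mean so all images share a common
        level before combining (the function name is illustrative)::

            >>> inverse_mean = lambda arr: 1.0 / np.ma.average(arr)  # doctest: +SKIP
            >>> c.scaling = inverse_mean  # doctest: +SKIP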
        """
        return self._scaling

    @scaling.setter
    def scaling(self, value):
        if value is None:
            self._scaling = value
        else:
            n_images = self.data_arr.data.shape[0]
            if callable(value):
                self._scaling = [value(self.data_arr[i])
                                 for i in range(n_images)]
                self._scaling = np.array(self._scaling)
            else:
                try:
                    n_values = len(value)
                except TypeError:
                    raise TypeError("scaling must be a function or an array "
                                    "the same length as the number of images.")
                if n_values != n_images:
                    raise TypeError("scaling must be a function or an array "
                                    "the same length as the number of images.")
                self._scaling = np.array(value)
            # reshape so that broadcasting occurs properly: an (n_images,)
            # vector becomes (n_images, 1, 1, ...) so each factor multiplies
            # one whole image
            for i in range(len(self.data_arr.data.shape) - 1):
                self._scaling = self.scaling[:, np.newaxis]

    # set up IRAF-like minmax clipping
    def clip_extrema(self, nlow=0, nhigh=0):
        """Mask pixels using an IRAF-like minmax clipping algorithm. The
        algorithm will mask the lowest nlow values and the highest nhigh
        values before combining the values to make up a single pixel in the
        resulting image. For example, the image will be a combination of
        Nimages-nlow-nhigh pixel values instead of the combination of
        Nimages.

        Parameters
        ----------
        nlow : int or None, optional
            If not None, the number of low values to reject from the
            combination.
            Default is 0.
        nhigh : int or None, optional
            If not None, the number of high values to reject from the
            combination.
            Default is 0.

        Notes
        -----
        Note that this differs slightly from the nominal IRAF imcombine
        behavior when other masks are in use. For example, if ``nhigh >= 1``
        and any pixel is already masked for some other reason, then this
        algorithm will count the masking of that pixel toward the count of
        nhigh masked pixels.

        Here is a copy of the relevant IRAF help text [0]_:

            nlow = 1, nhigh = 1 (minmax)
                The number of low and high pixels to be rejected by the
                "minmax" algorithm. These numbers are converted to fractions
                of the total number of input images so that if no rejections
                have taken place the specified number of pixels are rejected
                while if pixels have been rejected by masking, thresholding,
                or nonoverlap, then the fraction of the remaining pixels,
                truncated to an integer, is used.

        References
        ----------
        .. [0] image.imcombine help text.
           http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?imcombine
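
        Examples
        --------
        A minimal sketch, assuming ``c`` is a `~ccdproc.Combiner` built from
        at least three images: reject the single lowest and single highest
        value at each pixel position before combining::

            >>> c.clip_extrema(nlow=1, nhigh=1)  # doctest: +SKIP
            >>> ccdall = c.average_combine()  # doctest: +SKIP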
        """
        if nlow is None:
            nlow = 0
        if nhigh is None:
            nhigh = 0

        argsorted = np.argsort(self.data_arr.data, axis=0)
        mg = np.mgrid[[slice(ndim)
                       for i, ndim in enumerate(self.data_arr.shape)
                       if i > 0]]
        for i in range(-1 * nhigh, nlow):
            # create a tuple with the indices
            where = tuple([argsorted[i, :, :].ravel()] +
                          [j.ravel() for j in mg])
            self.data_arr.mask[where] = True
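
    # For instance, with three images and ``nlow=1, nhigh=1``: at each pixel
    # position ``argsorted[0]`` holds the index of the image with the lowest
    # value and ``argsorted[-1]`` the index with the highest, so both get
    # masked and only the middle value survives into the combination.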