def _array_views_to_copies(self):
    dct = self.model.__dict__
    self.parameters = {}
    for k, v in dct.items():
        if isinstance(v, BaseSignal):
            v.data = v.data.copy()
            if k not in ['signal', 'image', 'spectrum'] and not \
                    k.startswith('_'):
                self.parameters[k] = None
        if isinstance(v, np.ndarray):
            dct[k] = v.copy()
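
# The copies above matter because attribute access and slicing can hand out
# numpy *views* that share memory with the parent array, so an in-place write
# in a worker would silently mutate the original data. A minimal standalone
# numpy sketch of the difference (illustration only, not part of the code
# above):
import numpy as np

original = np.arange(6).reshape(2, 3)
view = original[0]                 # a view: shares memory with `original`
view[0] = 99                       # ...so this also changes original[0, 0]
assert original[0, 0] == 99
independent = original[1].copy()   # like `v.data.copy()` above
independent[0] = -1                # leaves `original` untouched
assert original[1, 0] == 3
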
def create_model(self, signal_dict, model_letter):
    _logger.debug('Creating model in worker {}'.format(self.identity))
    sig = BaseSignal(**signal_dict)
    sig._assign_subclass()
    self.model = sig.models[model_letter].restore()
    for component in self.model:
        component.active_is_multidimensional = False
        component.active = True
        for par in component.parameters:
            par.map = par.map.copy()
    if self.model.signal.metadata.has_item(
            'Signal.Noise_properties.variance'):
        var = self.model.signal.metadata.Signal.Noise_properties.variance
        if isinstance(var, BaseSignal):
            var.data = var.data.copy()
    self._array_views_to_copies()
        if isinstance(thing, np.ndarray):
            return thing
        else:
            raise ValueError
    else:
        if isinstance(thing, np.ndarray):
            thing = da.from_array(thing, chunks=chunks)
        if isinstance(thing, da.Array):
            if thing.chunks != chunks:
                thing = thing.rechunk(chunks)
            return thing
        else:
            raise ValueError
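
# The branch above normalises input arrays to dask arrays with the requested
# chunking. A minimal standalone sketch of the same idea using only numpy and
# dask.array (the `chunks` value here is illustrative):
import numpy as np
import dask.array as da

chunks = (2, 5)
arr = np.ones((4, 10))
darr = da.from_array(arr, chunks=chunks)     # wrap the numpy data lazily
assert darr.chunks == ((2, 2), (5, 5))
rechunked = darr.rechunk((4, 10))            # rechunk only when chunks differ
assert rechunked.compute().shape == (4, 10)
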
class LazySignal(BaseSignal):
    """A Lazy Signal instance that delays computation until explicitly saved
    (assuming storing the full result of computation in memory is not
    feasible).
    """
    _lazy = True

    def compute(self, progressbar=True):
        """Attempt to store the full signal in memory."""
        if progressbar:
            cm = ProgressBar
        else:
            cm = dummy_context_manager
        with cm():
            self.data = self.data.compute()
        self._lazy = False
        self._assign_subclass()
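
# Hedged usage sketch for the class above. It assumes the public
# `hyperspy.api` entry point and `BaseSignal.as_lazy()`, neither of which is
# shown in this file:
import numpy as np
import hyperspy.api as hs

s = hs.signals.Signal1D(np.random.random((10, 100))).as_lazy()
print(s._lazy)   # True: the data is a dask array, nothing is computed yet
s.compute()      # pulls the full result into memory, as defined above
print(s._lazy)   # False after compute()
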
    m = super().rebin(new_shape=new_shape, scale=scale, crop=crop, out=out)
    m = out or m
    time_factor = np.prod([factors[axis.index_in_array]
                           for axis in m.axes_manager.navigation_axes])
    m.get_dimensions_from_data()
    if m.metadata.has_item("Acquisition_instrument.TEM.Detector.EELS"):
        mdeels = m.metadata.Acquisition_instrument.TEM.Detector.EELS
        if "Acquisition_instrument.TEM.Detector.EELS.dwell_time" in m.metadata:
            mdeels.dwell_time *= time_factor
        if "Acquisition_instrument.TEM.Detector.EELS.exposure" in m.metadata:
            mdeels.exposure *= time_factor
    if out is None:
        return m
    else:
        out.events.data_changed.trigger(obj=out)
    return m

rebin.__doc__ = hyperspy.signal.BaseSignal.rebin.__doc__
class EELSSpectrum(EELSSpectrum_mixin, Signal1D):
    pass


class LazyEELSSpectrum(EELSSpectrum, LazySignal1D):
    pass
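
# Hedged usage sketch for the rebin override above: rebinning the navigation
# axes of an EELSSpectrum scales the per-pixel acquisition metadata
# (dwell_time, exposure) by the product of the navigation rebin factors.
# Assumes the public `hyperspy.api` entry point, which is not shown here:
import numpy as np
import hyperspy.api as hs

s = hs.signals.EELSSpectrum(np.ones((4, 4, 100)))
s.metadata.set_item(
    "Acquisition_instrument.TEM.Detector.EELS.dwell_time", 0.1)
s2 = s.rebin(scale=(2, 2, 1))   # 2x2 navigation binning -> time_factor == 4
# With the scaling above, dwell_time should now be 0.1 * 4 == 0.4
print(s2.metadata.Acquisition_instrument.TEM.Detector.EELS.dwell_time)
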
    self.args = args
    self.kwargs = kwargs
    if 'out' in self.kwargs:
        self.f(*self.args, **self.kwargs)
        self.out = self.kwargs.pop('out')
    else:
        self.out = self.f(*self.args, **self.kwargs)
    try:
        fargs = list(inspect.signature(self.f).parameters.keys())
    except TypeError:
        # This is probably a Cython function that is not supported by
        # inspect.
        fargs = []
    has_out = "out" in fargs
    # If it is a BaseSignal method
    if hasattr(f, "__self__") and isinstance(f.__self__, BaseSignal):
        if event == "auto":
            event = self.f.__self__.events.data_changed
        if recompute_out_event == "auto":
            recompute_out_event = \
                self.f.__self__.axes_manager.events.any_axis_changed
    else:
        event = None if event == "auto" else event
        recompute_out_event = (None if recompute_out_event == "auto"
                               else recompute_out_event)
    if recompute_out_event:
        _connect_events(recompute_out_event, self.recompute_out)
    if event:
        if has_out:
            _connect_events(event, self.update)
        else:
            # We "simulate" out by triggering `recompute_out` instead.
            _connect_events(event, self.recompute_out)
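
# Hedged usage sketch for the event wiring above. It assumes this is the
# machinery behind the public `hyperspy.api.interactive` helper, which is not
# shown in this file:
import numpy as np
import hyperspy.api as hs

s = hs.signals.Signal1D(np.random.random((10, 100)))
ssum = hs.interactive(s.sum, event=s.events.data_changed,
                      recompute_out_event=None, axis=0)
s.data += 1                           # change the source data in place...
s.events.data_changed.trigger(obj=s)  # ...and announce it
# `ssum` has now been recomputed from the updated data.
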
    grad_ml = self._gradient_ml
    grad_ls = self._gradient_ls
    if method in ['ml', 'custom']:
        weights = None
        if fitter in ("leastsq", "odr", "mpfit"):
            raise NotImplementedError(
                '"leastsq", "mpfit" and "odr" optimizers only support '
                'the least squares ("ls") method')
    elif method == "ls":
        metadata = self.signal.metadata
        if "Signal.Noise_properties.variance" not in metadata:
            variance = 1
        else:
            variance = metadata.Signal.Noise_properties.variance
            if isinstance(variance, BaseSignal):
                if (variance.axes_manager.navigation_shape ==
                        self.signal.axes_manager.navigation_shape):
                    variance = variance.data.__getitem__(
                        self.axes_manager._getitem_tuple)[
                        np.where(self.channel_switches)]
                else:
                    raise AttributeError(
                        "The `navigation_shape` of the variance "
                        "signal is not equal to the `navigation_shape` "
                        "of the signal")
            elif not isinstance(variance, numbers.Number):
                raise AttributeError(
                    "Variance must be a number or a `Signal` instance "
                    "but currently it is a %s" % type(variance))
        weights = 1. / np.sqrt(variance)
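
# `weights = 1. / np.sqrt(variance)` is the standard weighted least-squares
# weighting: each residual is divided by its standard deviation so that noisy
# channels contribute less. A minimal standalone numpy illustration (not part
# of the code above):
import numpy as np

variance = np.array([0.01, 0.04, 0.09])   # per-channel noise variance
weights = 1.0 / np.sqrt(variance)          # -> [10., 5., 3.33...]
residuals = np.array([0.1, 0.1, 0.3])
weighted = weights * residuals             # residual / sigma for each channel
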
"signal_list and navigator_list must"
" have the same size")
    if sync:
        axes_manager_list = []
        for signal in signal_list:
            axes_manager_list.append(signal.axes_manager)
        if not navigator_list:
            navigator_list = []
        if navigator is None:
            navigator_list.extend([None] * len(signal_list))
        elif navigator == "slider":
            navigator_list.append("slider")
            navigator_list.extend([None] * (len(signal_list) - 1))
        elif isinstance(navigator, hyperspy.signal.BaseSignal):
            navigator_list.append(navigator)
            navigator_list.extend([None] * (len(signal_list) - 1))
        elif navigator == "spectrum":
            navigator_list.extend(["spectrum"] * len(signal_list))
        elif navigator == "auto":
            navigator_list.extend(["auto"] * len(signal_list))
        else:
            raise ValueError(
                'navigator must be one of "spectrum", "auto", '
                '"slider", None or a Signal instance')
        # Check to see if the spectra have the same navigation shapes
        temp_shape_first = axes_manager_list[0].navigation_shape
        for i, axes_manager in enumerate(axes_manager_list):
            temp_shape = axes_manager.navigation_shape
            if not (temp_shape_first == temp_shape):
                raise ValueError(
                    "The signals do not have the same navigation shape")
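
# Hedged usage sketch for the navigator handling above. It assumes this
# fragment belongs to a multi-signal plotting helper exposed as
# `hs.plot.plot_signals`; the function name itself is not visible above:
import numpy as np
import hyperspy.api as hs

s1 = hs.signals.Signal1D(np.random.random((8, 8, 100)))
s2 = hs.signals.Signal1D(np.random.random((8, 8, 100)))
# A single `navigator` argument is expanded to one entry per signal, as above.
hs.plot.plot_signals([s1, s2], navigator="slider")
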
def _calculate_chisq(self):
    if self.signal.metadata.has_item('Signal.Noise_properties.variance'):
        variance = self.signal.metadata.Signal.Noise_properties.variance
        if isinstance(variance, BaseSignal):
            variance = variance.data.__getitem__(
                self.axes_manager._getitem_tuple)[np.where(
                    self.channel_switches)]
    else:
        variance = 1.0
    d = self(onlyactive=True).ravel() - self.signal()[np.where(
        self.channel_switches)]
    d *= d / (1. * variance)  # d = difference^2 / variance.
    self.chisq.data[self.signal.axes_manager.indices[::-1]] = d.sum()
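
# The chi-squared stored above is sum((model - data)^2 / variance) at the
# current navigation index. A minimal standalone numpy illustration (not part
# of the code above):
import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])
model = np.array([1.1, 1.9, 3.2, 4.0])
variance = 0.04   # scalar variance; the code above also allows a BaseSignal
d = model - data
chisq = np.sum(d * d / variance)   # (0.01 + 0.01 + 0.04 + 0.0) / 0.04 = 1.5
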
    Please note that this method only works when the navigation
    dimension is greater than 0.

    Parameters
    ----------
    field : {'values', 'std', 'is_set'}

    Raises
    ------
    NavigationDimensionError : if the navigation dimension is 0

    """
    from hyperspy.signal import BaseSignal

    s = BaseSignal(data=self.map[field],
                   axes=self._axes_manager._get_navigation_axes_dicts())
    if self.component is not None and \
            self.component.active_is_multidimensional:
        s.data[np.logical_not(self.component._active_array)] = np.nan
    s.metadata.General.title = ("%s parameter" % self.name
                                if self.component is None
                                else "%s parameter of %s component" %
                                (self.name, self.component.name))
    for axis in s.axes_manager._axes:
        axis.navigate = False
    if self._number_of_elements > 1:
        s.axes_manager._append_axis(
            size=self._number_of_elements,
            name=self.name,
            navigate=True)
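
# Hedged usage sketch for the fragment above. It reads like the body of
# hyperspy's `Parameter.as_signal`; the method name and the model/fit calls
# below are assumptions, since they are not shown in this file:
import numpy as np
import hyperspy.api as hs

s = hs.signals.Signal1D(np.random.random((5, 5, 100)))
m = s.create_model()
m.append(hs.model.components1D.Gaussian())
m.multifit()                                  # fit at every navigation index
amp_map = m[0].A.as_signal(field='values')    # one value per navigation pixel
amp_map.plot()
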