Generate points for a fictitious hyperplane used as a starting point for energy minimization.
"""
# NOTE(review): fragment of a fake-points/hyperplane Dataset builder; names such
# as `components`, `energy_limit`, `statevar_dict`, `broadcast`,
# `maximum_internal_dof`, `output` and `broadcast_to` are defined outside this
# chunk — confirm against the enclosing function.
coordinate_dict = {'component': components}
# Scale the limit so the fictitious hyperplane's energy lies above any real
# energy surface: negative limits are raised toward zero, positive ones grown.
largest_energy = float(energy_limit)
if largest_energy < 0:
    largest_energy *= 0.01
else:
    largest_energy *= 10
if broadcast:
    # Broadcast case: one fake point per pure component at every location of
    # the full cartesian state-variable grid.
    output_columns = [str(x) for x in statevar_dict.keys()] + ['points']
    statevar_shape = tuple(len(np.atleast_1d(x)) for x in statevar_dict.values())
    coordinate_dict.update({str(key): value for key, value in statevar_dict.items()})
    # The internal dof for the fake points are all NaNs
    expanded_points = np.full(statevar_shape + (len(components), maximum_internal_dof), np.nan)
    data_arrays = {'X': (output_columns + ['component'],
                         broadcast_to(np.eye(len(components)), statevar_shape + (len(components), len(components)))),
                   'Y': (output_columns + ['internal_dof'], expanded_points),
                   'Phase': (output_columns, np.full(statevar_shape + (len(components),), '_FAKE_', dtype='S6')),
                   output: (output_columns, np.full(statevar_shape + (len(components),), largest_energy))
                   }
else:
    # Non-broadcast case: state variables align 1:1 with points, so the fake
    # points form a flat list of (num components x num statevar values).
    output_columns = ['points']
    statevar_shape = (len(components) * max([len(np.atleast_1d(x)) for x in statevar_dict.values()]),)
    # The internal dof for the fake points are all NaNs
    expanded_points = np.full(statevar_shape + (maximum_internal_dof,), np.nan)
    data_arrays = {'X': (output_columns + ['component'],
                         # BUG FIX: integer division — float repetition counts
                         # raise TypeError in np.tile on Python 3; the division
                         # is exact because statevar_shape[0] is a multiple of
                         # len(components) by construction above.
                         broadcast_to(np.tile(np.eye(len(components)), (statevar_shape[0] // len(components), 1)),
                                      statevar_shape + (len(components),))),
                   'Y': (output_columns + ['internal_dof'], expanded_points),
                   'Phase': (output_columns, np.full(statevar_shape, '_FAKE_', dtype='S6')),
                   output: (output_columns, np.full(statevar_shape, largest_energy))
                   }
# NOTE(review): fragment of a phase-property evaluation routine; `phase_record`,
# `dof`, `points`, `pure_elements`, `fake_points`, `largest_energy`,
# `phase_output`, `phase_compositions` and `broadcast_to` come from outside
# this chunk. Evaluates the objective and per-element mass for every point.
phase_record.obj_2d(phase_output, dof)
for el_idx in range(len(pure_elements)):
    phase_record.mass_obj_2d(phase_compositions[:, el_idx], dof, el_idx)
max_tieline_vertices = len(pure_elements)
if isinstance(phase_output, (float, int)):
    phase_output = broadcast_to(phase_output, points.shape[:-1])
if isinstance(phase_compositions, (float, int)):
    # BUG FIX: broadcast the compositions scalar, not phase_output — the
    # original broadcast_to(phase_output, ...) used the wrong array and is
    # generally shape-incompatible with the compositions target shape.
    phase_compositions = broadcast_to(phase_compositions, points.shape[:-1] + (len(pure_elements),))
# BUG FIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype.
phase_output = np.asarray(phase_output, dtype=np.float64)
phase_output.shape = points.shape[:-1]
phase_compositions = np.asarray(phase_compositions, dtype=np.float64)
phase_compositions.shape = points.shape[:-1] + (len(pure_elements),)
if fake_points:
    # Prepend one fake tie-line vertex per pure element carrying the sentinel energy.
    phase_output = np.concatenate((broadcast_to(largest_energy, points.shape[:-2] + (max_tieline_vertices,)), phase_output), axis=-1)
    phase_names = np.concatenate((broadcast_to('_FAKE_', points.shape[:-2] + (max_tieline_vertices,)),
                                  np.full(points.shape[:-1], phase_record.phase_name, dtype='U' + str(len(phase_record.phase_name)))), axis=-1)
else:
    phase_names = np.full(points.shape[:-1], phase_record.phase_name, dtype='U' + str(len(phase_record.phase_name)))
if fake_points:
    # Fake points are the pure elements themselves: identity composition matrix.
    phase_compositions = np.concatenate((np.broadcast_to(np.eye(len(pure_elements)), points.shape[:-2] + (max_tieline_vertices, len(pure_elements))), phase_compositions), axis=-2)
# NOTE(review): fragment; indentation was lost in this chunk, so the original
# flat layout is preserved and only comments are added.
coordinate_dict = {'component': pure_elements}
# Resize 'points' so it has the same number of columns as the maximum
# number of internal degrees of freedom of any phase in the calculation.
# We do this so that everything is aligned for concat.
# Waste of memory? Yes, but the alternatives are unclear.
# In each case, first check if we need to do this...
# It can be expensive for many points (~14s for 500M points)
if fake_points:
# NOTE(review): the two statements below are the body of the `if` above.
desired_shape = points.shape[:-2] + (max_tieline_vertices + points.shape[-2], maximum_internal_dof)
expanded_points = np.full(desired_shape, np.nan)
# Align each state variable 1:1 with `points` (broadcast=False path):
# length-1 values are repeated; any other length mismatch is an error.
statevars = list(np.atleast_1d(x) for x in statevar_dict.values())
statevars_ = []
for statevar in statevars:
if (len(statevar) != len(points)) and (len(statevar) == 1):
statevar = np.repeat(statevar, len(points))
if (len(statevar) != len(points)) and (len(statevar) != 1):
raise ValueError('Length of state variable list and number of given points must be equal when '
'broadcast=False.')
statevars_.append(statevar)
statevars = statevars_
# Flatten species constituents to sorted pure element names, excluding vacancies ('VA').
pure_elements = [list(x.constituents.keys()) for x in components]
pure_elements = sorted(set([el.upper() for constituents in pure_elements for el in constituents]))
pure_elements = [x for x in pure_elements if x != 'VA']
# func may only have support for vectorization along a single axis (no broadcasting)
# we need to force broadcasting and flatten the result before calling
# NOTE(review): duplicated fragment — stack flattened state variables and
# internal points into one dof matrix, then evaluate objective + masses in bulk.
bc_statevars = np.ascontiguousarray([broadcast_to(x, points.shape[:-1]).reshape(-1) for x in statevars])
pts = points.reshape(-1, points.shape[-1])
dof = np.ascontiguousarray(np.concatenate((bc_statevars.T, pts), axis=1))
phase_output = np.zeros(dof.shape[0], order='C')
# Fortran order so each per-element column is contiguous for mass_obj_2d.
phase_compositions = np.zeros((dof.shape[0], len(pure_elements)), order='F')
phase_record.obj_2d(phase_output, dof)
for el_idx in range(len(pure_elements)):
    phase_record.mass_obj_2d(phase_compositions[:, el_idx], dof, el_idx)
max_tieline_vertices = len(pure_elements)
if isinstance(phase_output, (float, int)):
    phase_output = broadcast_to(phase_output, points.shape[:-1])
if isinstance(phase_compositions, (float, int)):
    # BUG FIX: broadcast phase_compositions, not phase_output (copy-paste bug).
    phase_compositions = broadcast_to(phase_compositions, points.shape[:-1] + (len(pure_elements),))
# BUG FIX: np.float was removed in NumPy 1.24; use np.float64.
phase_output = np.asarray(phase_output, dtype=np.float64)
phase_output.shape = points.shape[:-1]
phase_compositions = np.asarray(phase_compositions, dtype=np.float64)
coordinate_dict.update({str(key): value for key, value in statevar_dict.items()})
# The internal dof for the fake points are all NaNs
expanded_points = np.full(statevar_shape + (len(components), maximum_internal_dof), np.nan)
data_arrays = {'X': (output_columns + ['component'],
broadcast_to(np.eye(len(components)), statevar_shape + (len(components), len(components)))),
'Y': (output_columns + ['internal_dof'], expanded_points),
'Phase': (output_columns, np.full(statevar_shape + (len(components),), '_FAKE_', dtype='S6')),
output: (output_columns, np.full(statevar_shape + (len(components),), largest_energy))
}
else:
output_columns = ['points']
statevar_shape = (len(components) * max([len(np.atleast_1d(x)) for x in statevar_dict.values()]),)
# The internal dof for the fake points are all NaNs
expanded_points = np.full(statevar_shape + (maximum_internal_dof,), np.nan)
data_arrays = {'X': (output_columns + ['component'],
broadcast_to(np.tile(np.eye(len(components)), (statevar_shape[0] / len(components), 1)),
statevar_shape + (len(components),))),
'Y': (output_columns + ['internal_dof'], expanded_points),
'Phase': (output_columns, np.full(statevar_shape, '_FAKE_', dtype='S6')),
output: (output_columns, np.full(statevar_shape, largest_energy))
}
# Add state variables as data variables if broadcast=False
data_arrays.update({str(key): (output_columns, np.repeat(value, len(components)))
for key, value in statevar_dict.items()})
return Dataset(data_arrays, coords=coordinate_dict)
# func may only have support for vectorization along a single axis (no broadcasting)
# we need to force broadcasting and flatten the result before calling
# NOTE(review): duplicated fragment — assemble the dof matrix, evaluate the
# phase objective and per-element masses, then prepend fake tie-line vertices.
bc_statevars = np.ascontiguousarray([broadcast_to(x, points.shape[:-1]).reshape(-1) for x in statevars])
pts = points.reshape(-1, points.shape[-1])
dof = np.ascontiguousarray(np.concatenate((bc_statevars.T, pts), axis=1))
phase_output = np.zeros(dof.shape[0], order='C')
phase_compositions = np.zeros((dof.shape[0], len(pure_elements)), order='F')
phase_record.obj_2d(phase_output, dof)
for el_idx in range(len(pure_elements)):
    phase_record.mass_obj_2d(phase_compositions[:, el_idx], dof, el_idx)
max_tieline_vertices = len(pure_elements)
if isinstance(phase_output, (float, int)):
    phase_output = broadcast_to(phase_output, points.shape[:-1])
if isinstance(phase_compositions, (float, int)):
    # BUG FIX: broadcast phase_compositions, not phase_output (copy-paste bug).
    phase_compositions = broadcast_to(phase_compositions, points.shape[:-1] + (len(pure_elements),))
# BUG FIX: np.float was removed in NumPy 1.24; use np.float64.
phase_output = np.asarray(phase_output, dtype=np.float64)
phase_output.shape = points.shape[:-1]
phase_compositions = np.asarray(phase_compositions, dtype=np.float64)
phase_compositions.shape = points.shape[:-1] + (len(pure_elements),)
if fake_points:
    # Prepend one fake vertex per pure element at the sentinel energy.
    phase_output = np.concatenate((broadcast_to(largest_energy, points.shape[:-2] + (max_tieline_vertices,)), phase_output), axis=-1)
    phase_names = np.concatenate((broadcast_to('_FAKE_', points.shape[:-2] + (max_tieline_vertices,)),
                                  np.full(points.shape[:-1], phase_record.phase_name, dtype='U' + str(len(phase_record.phase_name)))), axis=-1)
else:
    phase_names = np.full(points.shape[:-1], phase_record.phase_name, dtype='U'+str(len(phase_record.phase_name)))
if fake_points:
    # Fake-point compositions are the pure elements (identity matrix).
    phase_compositions = np.concatenate((np.broadcast_to(np.eye(len(pure_elements)), points.shape[:-2] + (max_tieline_vertices, len(pure_elements))), phase_compositions), axis=-2)
# NOTE(review): duplicated fragment — Dataset coordinates are indexed by pure
# element name; the comment below is truncated mid-thought in this chunk.
coordinate_dict = {'component': pure_elements}
# Resize 'points' so it has the same number of columns as the maximum
# number of internal degrees of freedom of any phase in the calculation.
Returns
-------
Dataset of the output attribute as a function of state variables

Examples
--------
None yet.
"""
# Broadcast=True: put compositions and state variables on orthogonal grid axes
# so evaluation can be vectorized instead of looping in Python.
if broadcast:
    # Mesh the state variables plus a dummy axis sized like the points
    # dimension, then drop the dummy entry from the result.
    statevars = np.meshgrid(*itertools.chain(statevar_dict.values(),
                                             [np.empty(points.shape[-2])]),
                            sparse=True, indexing='ij')[:-1]
    points = broadcast_to(points, tuple(len(np.atleast_1d(x)) for x in statevar_dict.values()) + points.shape[-2:])
else:
    # Broadcast=False: every state variable must align elementwise with the
    # points; scalars are repeated, any other length mismatch is an error.
    aligned = []
    for sv in (np.atleast_1d(v) for v in statevar_dict.values()):
        if len(sv) == 1 and len(sv) != len(points):
            sv = np.repeat(sv, len(points))
        elif len(sv) != len(points):
            raise ValueError('Length of state variable list and number of given points must be equal when '
                             'broadcast=False.')
        aligned.append(sv)
    statevars = aligned
# Flatten species constituents to sorted pure element names, excluding vacancies ('VA').
pure_elements = [list(x.constituents.keys()) for x in components]
pure_elements = sorted(set([el.upper() for constituents in pure_elements for el in constituents]))
pure_elements = [x for x in pure_elements if x != 'VA']
# func may only have support for vectorization along a single axis (no broadcasting)
# we need to force broadcasting and flatten the result before calling
# NOTE(review): truncated duplicate — `dof` and `phase_output` are referenced
# here without a visible definition in this chunk; presumably built just above.
phase_compositions = np.zeros((dof.shape[0], len(pure_elements)), order='F')
phase_record.obj_2d(phase_output, dof)
for el_idx in range(len(pure_elements)):
    phase_record.mass_obj_2d(phase_compositions[:, el_idx], dof, el_idx)
max_tieline_vertices = len(pure_elements)
if isinstance(phase_output, (float, int)):
    phase_output = broadcast_to(phase_output, points.shape[:-1])
if isinstance(phase_compositions, (float, int)):
    # BUG FIX: broadcast phase_compositions, not phase_output (copy-paste bug).
    phase_compositions = broadcast_to(phase_compositions, points.shape[:-1] + (len(pure_elements),))
# BUG FIX: np.float was removed in NumPy 1.24; use np.float64.
phase_output = np.asarray(phase_output, dtype=np.float64)
phase_output.shape = points.shape[:-1]
phase_compositions = np.asarray(phase_compositions, dtype=np.float64)
phase_compositions.shape = points.shape[:-1] + (len(pure_elements),)
if fake_points:
    # Prepend one fake vertex per pure element at the sentinel energy.
    phase_output = np.concatenate((broadcast_to(largest_energy, points.shape[:-2] + (max_tieline_vertices,)), phase_output), axis=-1)
    phase_names = np.concatenate((broadcast_to('_FAKE_', points.shape[:-2] + (max_tieline_vertices,)),
                                  np.full(points.shape[:-1], phase_record.phase_name, dtype='U' + str(len(phase_record.phase_name)))), axis=-1)
else:
    phase_names = np.full(points.shape[:-1], phase_record.phase_name, dtype='U'+str(len(phase_record.phase_name)))
if fake_points:
    # Fake-point compositions are the pure elements (identity matrix).
    phase_compositions = np.concatenate((np.broadcast_to(np.eye(len(pure_elements)), points.shape[:-2] + (max_tieline_vertices, len(pure_elements))), phase_compositions), axis=-2)
# NOTE(review): duplicated fragment, truncated at the fake-points resize step;
# indentation was lost in this chunk, so only comments are added here.
coordinate_dict = {'component': pure_elements}
# Resize 'points' so it has the same number of columns as the maximum
# number of internal degrees of freedom of any phase in the calculation.
# We do this so that everything is aligned for concat.
# Waste of memory? Yes, but the alternatives are unclear.
# In each case, first check if we need to do this...
# It can be expensive for many points (~14s for 500M points)
if fake_points:
# NOTE(review): the statement below is the body of the `if` above.
desired_shape = points.shape[:-2] + (max_tieline_vertices + points.shape[-2], maximum_internal_dof)
# NOTE(review): final duplicated run; `pure_elements` on the first line is
# expected to be a list of constituent-key lists built just above this chunk.
pure_elements = sorted(set([el.upper() for constituents in pure_elements for el in constituents]))
pure_elements = [x for x in pure_elements if x != 'VA']
# func may only have support for vectorization along a single axis (no broadcasting)
# we need to force broadcasting and flatten the result before calling
bc_statevars = np.ascontiguousarray([broadcast_to(x, points.shape[:-1]).reshape(-1) for x in statevars])
pts = points.reshape(-1, points.shape[-1])
dof = np.ascontiguousarray(np.concatenate((bc_statevars.T, pts), axis=1))
phase_output = np.zeros(dof.shape[0], order='C')
# Fortran order so each per-element column is contiguous for mass_obj_2d.
phase_compositions = np.zeros((dof.shape[0], len(pure_elements)), order='F')
phase_record.obj_2d(phase_output, dof)
for el_idx in range(len(pure_elements)):
    phase_record.mass_obj_2d(phase_compositions[:, el_idx], dof, el_idx)
max_tieline_vertices = len(pure_elements)
if isinstance(phase_output, (float, int)):
    phase_output = broadcast_to(phase_output, points.shape[:-1])
if isinstance(phase_compositions, (float, int)):
    # BUG FIX: broadcast phase_compositions, not phase_output (copy-paste bug).
    phase_compositions = broadcast_to(phase_compositions, points.shape[:-1] + (len(pure_elements),))
# BUG FIX: np.float was removed in NumPy 1.24; use np.float64.
phase_output = np.asarray(phase_output, dtype=np.float64)
phase_output.shape = points.shape[:-1]
phase_compositions = np.asarray(phase_compositions, dtype=np.float64)
phase_compositions.shape = points.shape[:-1] + (len(pure_elements),)
if fake_points:
    # Prepend one fake vertex per pure element at the sentinel energy.
    phase_output = np.concatenate((broadcast_to(largest_energy, points.shape[:-2] + (max_tieline_vertices,)), phase_output), axis=-1)
    phase_names = np.concatenate((broadcast_to('_FAKE_', points.shape[:-2] + (max_tieline_vertices,)),
                                  np.full(points.shape[:-1], phase_record.phase_name, dtype='U' + str(len(phase_record.phase_name)))), axis=-1)
else:
    phase_names = np.full(points.shape[:-1], phase_record.phase_name, dtype='U'+str(len(phase_record.phase_name)))
if fake_points:
    # Fake-point compositions are the pure elements (identity matrix).
    phase_compositions = np.concatenate((np.broadcast_to(np.eye(len(pure_elements)), points.shape[:-2] + (max_tieline_vertices, len(pure_elements))), phase_compositions), axis=-2)
coordinate_dict = {'component': pure_elements}