How to use the daal.algorithms.neural_networks.layers module in daal

To help you get started, we've selected a few daal examples based on popular ways the module is used in public projects.
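Each snippet below is an excerpt and omits its shared boilerplate. A typical preamble, based on the daal example sources (the exact module paths are assumptions and may vary between DAAL releases), looks roughly like this:

import os
import sys
import numpy as np

# Hypothetical common imports for the snippets below (paths may vary by DAAL release)
from daal.algorithms.neural_networks import initializers, layers, training
from daal.algorithms.neural_networks.layers import loss, smoothrelu, split
from daal.data_management import HomogenTensor, TensorIface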


intel/daal: examples/python/source/neural_networks/neural_net_dense_batch.py (view on GitHub)

def configureNet():
    # Create layers of the neural network
    # Create fully-connected layer and initialize layer parameters
    fullyConnectedLayer1 = layers.fullyconnected.Batch(5)
    fullyConnectedLayer1.parameter.weightsInitializer = initializers.uniform.Batch(-0.001, 0.001)
    fullyConnectedLayer1.parameter.biasesInitializer = initializers.uniform.Batch(0, 0.5)

    # Create fully-connected layer and initialize layer parameters
    fullyConnectedLayer2 = layers.fullyconnected.Batch(2)
    fullyConnectedLayer2.parameter.weightsInitializer = initializers.uniform.Batch(0.5, 1)
    fullyConnectedLayer2.parameter.biasesInitializer = initializers.uniform.Batch(0.5, 1)

    # Create softmax cross-entropy loss layer
    softmaxCrossEntropyLayer = layers.loss.softmax_cross.Batch()

    # Create configuration of the neural network with layers
    topology = training.Topology()

    # Add layers to the topology of the neural network
    topology.push_back(fullyConnectedLayer1)
    topology.push_back(fullyConnectedLayer2)
    topology.push_back(softmaxCrossEntropyLayer)

    return topology
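
In the full example, the topology returned by configureNet is passed to the network training algorithm. A condensed sketch of that driver code, assuming trainingData and trainingGroundTruth tensors have already been read and that a default optimization solver is acceptable (both are assumptions, and the exact calls may differ between DAAL releases):

# Hypothetical training driver (condensed sketch; not part of the excerpt above)
topology = configureNet()
net = training.Batch()
net.initialize(trainingData.getDimensions(), topology)
net.input.setInput(training.data, trainingData)
net.input.setInput(training.groundTruth, trainingGroundTruth)
trainingResult = net.compute()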

intel/daal: examples/python/source/neural_networks/ave_pool2d_layer_dense_batch.py (view on GitHub)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor, readTensorFromCSV, printNumericTable

# Input data set name
datasetFileName = os.path.join("..", "data", "batch", "layer.csv")

if __name__ == "__main__":

    # Read datasetFileName from a file and create a tensor to store input data
    data = readTensorFromCSV(datasetFileName)
    nDim = data.getNumberOfDimensions()

    printTensor(data, "Forward two-dimensional average pooling layer input (first 10 rows):", 10)

    # Create an algorithm to compute forward two-dimensional average pooling layer results using default method
    forwardLayer = layers.average_pooling2d.forward.Batch(nDim)
    forwardLayer.input.setInput(layers.forward.data, data)

    # Compute forward two-dimensional average pooling layer results and return them
    # Result class from layers.average_pooling2d.forward
    forwardResult = forwardLayer.compute()

    printTensor(forwardResult.getResult(layers.forward.value),
                "Forward two-dimensional average pooling layer result (first 5 rows):",
                5)
    printNumericTable(forwardResult.getLayerData(layers.average_pooling2d.auxInputDimensions),
                      "Forward two-dimensional average pooling layer input dimensions:")

    # Create an algorithm to compute backward two-dimensional average pooling layer results using default method
    backwardLayer = layers.average_pooling2d.backward.Batch(nDim)
    backwardLayer.input.setInput(layers.backward.inputGradient, forwardResult.getResult(layers.forward.value))
    backwardLayer.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))
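
    # The excerpt ends before the backward pass runs; completing it by the
    # pattern used in the other pooling examples on this page (assumption):
    backwardResult = backwardLayer.compute()

    printTensor(backwardResult.getResult(layers.backward.gradient),
                "Backward two-dimensional average pooling layer result (first 10 rows):",
                10)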

intel/daal: examples/python/source/neural_networks/split_layer_dense_batch.py (view on GitHub)

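The excerpt opens with the backward pass; the forward half it depends on presumably resembles the following sketch, where nInputs, nOutputs, and tensorData are assumptions:

    # Hypothetical forward split layer setup (reconstruction, not from the excerpt)
    splitLayerForward = split.forward.Batch()
    splitLayerForward.parameter.nOutputs = nOutputs
    splitLayerForward.parameter.nInputs = nInputs
    splitLayerForward.input.setInput(layers.forward.data, tensorData)
    forwardResult = splitLayerForward.compute()
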
    # Create an algorithm to compute backward split layer results using default method
    splitLayerBackward = split.backward.Batch()

    # Set parameters for the backward split layer
    splitLayerBackward.parameter.nOutputs = nOutputs
    splitLayerBackward.parameter.nInputs = nInputs

    # Set input objects for the backward split layer
    splitLayerBackward.input.setInputLayerData(split.backward.inputGradientCollection,
                                               forwardResult.getResultLayerData(split.forward.valueCollection))

    # Compute backward split layer results
    backwardResult = splitLayerBackward.compute()

    # Print the results of the backward split layer
    printTensor(backwardResult.getResult(layers.backward.gradient), "Backward split layer result (first 5 rows):", 5)

intel/daal: examples/python/source/neural_networks/loss_logistic_entr_layer_dense_batch.py (view on GitHub)

# Input data set parameters
datasetName = os.path.join("..", "data", "batch", "logistic_cross_entropy_layer.csv")
datasetGroundTruthName = os.path.join("..", "data", "batch", "logistic_cross_entropy_layer_ground_truth.csv")

if __name__ == "__main__":

    # Retrieve the input data
    tensorData = readTensorFromCSV(datasetName)
    groundTruth = readTensorFromCSV(datasetGroundTruthName)

    # Create an algorithm to compute forward logistic cross-entropy layer results using default method
    logisticCrossLayerForward = loss.logistic_cross.forward.Batch(method=loss.logistic_cross.defaultDense)

    # Set input objects for the forward logistic_cross layer
    logisticCrossLayerForward.input.setInput(layers.forward.data, tensorData)
    logisticCrossLayerForward.input.setInput(loss.forward.groundTruth, groundTruth)

    # Compute forward logistic_cross layer results
    forwardResult = logisticCrossLayerForward.compute()

    # Print the results of the forward logistic_cross layer
    printTensor(forwardResult.getResult(layers.forward.value), "Forward logistic cross-entropy layer result (first 5 rows):", 5)
    printTensor(forwardResult.getLayerData(loss.logistic_cross.auxGroundTruth), "Logistic Cross-Entropy layer ground truth (first 5 rows):", 5)

    # Create an algorithm to compute backward logistic_cross layer results using default method
    logisticCrossLayerBackward = loss.logistic_cross.backward.Batch(method=loss.logistic_cross.defaultDense)

    # Set input objects for the backward logistic_cross layer
    logisticCrossLayerBackward.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))

    # Compute backward logistic_cross layer results
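    # The excerpt ends here; completing it by the pattern used elsewhere on
    # this page (assumption):
    backwardResult = logisticCrossLayerBackward.compute()

    # Print the results of the backward logistic_cross layer
    printTensor(backwardResult.getResult(layers.backward.gradient),
                "Backward logistic cross-entropy layer result (first 5 rows):", 5)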

intel/daal: examples/python/source/neural_networks/ave_pool3d_layer_dense_batch.py (view on GitHub)

# The first row is reconstructed from the 1..24 pattern; the excerpt
# truncated the opening of the array (assumption)
dataArray = np.array([[[ 1,  2,  3,  4],
                       [ 5,  6,  7,  8]],
                      [[ 9, 10, 11, 12],
                       [13, 14, 15, 16]],
                      [[17, 18, 19, 20],
                       [21, 22, 23, 24]]],
                     dtype=np.float64)

nDim = dataArray.ndim  # 3-dimensional input tensor

if __name__ == "__main__":

    dataTensor = HomogenTensor(dataArray)

    printTensor3d(dataTensor, "Forward average pooling layer input:")

    # Create an algorithm to compute forward three-dimensional average pooling layer results using default method
    forwardLayer = layers.average_pooling3d.forward.Batch(nDim)
    forwardLayer.input.setInput(layers.forward.data, dataTensor)

    # Compute forward pooling layer results
    # Result class from layers.average_pooling3d.forward
    forwardResult = forwardLayer.compute()

    printTensor3d(forwardResult.getResult(layers.forward.value), "Forward average pooling layer result:")
    printNumericTable(forwardResult.getLayerData(layers.average_pooling3d.auxInputDimensions),
                      "Forward pooling layer input dimensions:")

    # Create an algorithm to compute backward three-dimensional average pooling layer results using default method
    backwardLayer = layers.average_pooling3d.backward.Batch(nDim)
    backwardLayer.input.setInput(layers.backward.inputGradient, forwardResult.getResult(layers.forward.value))
    backwardLayer.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))

    # Compute backward pooling layer results
    # Result class from layers.average_pooling3d.backward
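    # The excerpt ends here; completing it by the pattern used in the other
    # pooling examples on this page (assumption):
    backwardResult = backwardLayer.compute()

    printTensor3d(backwardResult.getResult(layers.backward.gradient),
                  "Backward average pooling layer result:")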

intel/daal: examples/python/source/neural_networks/smoothrelu_layer_dense_batch.py (view on GitHub)

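The excerpt begins mid-file; the forward smooth ReLU layer it uses was presumably set up along these lines, where tensorData is an assumption:

    # Hypothetical forward smooth ReLU setup (reconstruction, not from the excerpt)
    smoothreluLayerForward = smoothrelu.forward.Batch()
    smoothreluLayerForward.input.setInput(layers.forward.data, tensorData)
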
    # Compute forward smooth relu layer results
    forwardResult = smoothreluLayerForward.compute()

    # Print the results of the forward smooth relu layer
    printTensor(forwardResult.getResult(layers.forward.value), "Forward smooth ReLU layer result (first 5 rows):", 5)

    # Get the size of the forward smooth relu layer output
    gDims = forwardResult.getResult(layers.forward.value).getDimensions()
    tensorDataBack = HomogenTensor(gDims, TensorIface.doAllocate, 0.01)

    # Create an algorithm to compute backward smooth relu layer results using default method
    smoothreluLayerBackward = smoothrelu.backward.Batch()

    # Set input objects for the backward smooth relu layer
    smoothreluLayerBackward.input.setInput(layers.backward.inputGradient, tensorDataBack)
    smoothreluLayerBackward.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))

    # Compute backward smooth relu layer results
    backwardResult = smoothreluLayerBackward.compute()

    # Print the results of the backward smooth relu layer
    printTensor(backwardResult.getResult(layers.backward.gradient), "Backward smooth ReLU layer result (first 5 rows):", 5)

intel/daal: examples/python/source/neural_networks/spat_ave_pool2d_layer_dense_batch.py (view on GitHub)

# The first (positive) block is reconstructed by symmetry with the negative
# block that survived the excerpt; treat its values as an assumption
dataArray = np.array([[[[  2,   4,   6,   8],
                        [ 10,  12,  14,  16]],
                       [[ 18,  20,  22,  24],
                        [ 26,  28,  30,  32]],
                       [[ 34,  36,  38,  40],
                        [ 42,  44,  46,  48]]],
                      [[[ -2,  -4,  -6,  -8],
                        [-10, -12, -14, -16]],
                       [[-18, -20, -22, -24],
                        [-26, -28, -30, -32]],
                       [[-34, -36, -38, -40],
                        [-42, -44, -46, -48]]]],
                     dtype=np.float64)

nDim = dataArray.ndim  # 4-dimensional input tensor

if __name__ == "__main__":
    data = HomogenTensor(dataArray)

    # Print the input data tensor
    printTensor(data, "Forward two-dimensional spatial pyramid average pooling layer input (first 10 rows):", 10)

    # Create an algorithm to compute forward two-dimensional spatial pyramid average pooling layer results using default method
    forwardLayer = layers.spatial_average_pooling2d.forward.Batch(2, nDim)
    forwardLayer.input.setInput(layers.forward.data, data)

    # Compute forward two-dimensional spatial pyramid average pooling layer results and return them
    # Result class from layers.spatial_average_pooling2d.forward
    forwardResult = forwardLayer.compute()

    printTensor(forwardResult.getResult(layers.forward.value),
                "Forward two-dimensional spatial pyramid average pooling layer result (first 5 rows):",
                5)
    printNumericTable(forwardResult.getLayerData(layers.spatial_average_pooling2d.auxInputDimensions),
                      "Forward two-dimensional spatial pyramid average pooling layer input dimensions:")

    # Create an algorithm to compute backward two-dimensional spatial pyramid average pooling layer results using default method
    backwardLayer = layers.spatial_average_pooling2d.backward.Batch(2, nDim)
    backwardLayer.input.setInput(layers.backward.inputGradient, forwardResult.getResult(layers.forward.value))
    backwardLayer.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))
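
    # The excerpt ends before the backward pass runs; completing it by the
    # pattern used in the other pooling examples on this page (assumption):
    backwardResult = backwardLayer.compute()

    printTensor(backwardResult.getResult(layers.backward.gradient),
                "Backward two-dimensional spatial pyramid average pooling layer result (first 5 rows):",
                5)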

intel/daal: examples/python/source/neural_networks/lcn_layer_dense_batch.py (view on GitHub)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensor

# Input data set name
datasetFileName = os.path.join("..", "data", "batch", "layer.csv")

if __name__ == "__main__":

    # Create collection of dimension sizes of the input data tensor
    inDims = [2, 1, 3, 4]
    tensorData = HomogenTensor(inDims, TensorIface.doAllocate, 1.0)

    # Create an algorithm to compute forward local contrast normalization layer results using default method
    lcnLayerForward = layers.lcn.forward.Batch()
    lcnLayerForward.input.setInput(layers.forward.data, tensorData)

    # Compute forward local contrast normalization layer results
    forwardResult = lcnLayerForward.compute()

    printTensor(forwardResult.getResult(layers.forward.value),          "Forward local contrast normalization layer result:")
    printTensor(forwardResult.getLayerData(layers.lcn.auxCenteredData), "Centered data tensor:")
    printTensor(forwardResult.getLayerData(layers.lcn.auxSigma),        "Sigma tensor:")
    printTensor(forwardResult.getLayerData(layers.lcn.auxC),            "C tensor:")
    printTensor(forwardResult.getLayerData(layers.lcn.auxInvMax),       "Inverted max(sigma, C):")

    # Create input gradient tensor for backward local contrast normalization layer
    tensorDataBack = HomogenTensor(inDims, TensorIface.doAllocate, 0.01)

    # Create an algorithm to compute backward local contrast normalization layer results using default method
    lcnLayerBackward = layers.lcn.backward.Batch()
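
    # The excerpt ends here; wiring up the gradient inputs and running the
    # backward pass by the pattern used elsewhere on this page (assumption):
    lcnLayerBackward.input.setInput(layers.backward.inputGradient, tensorDataBack)
    lcnLayerBackward.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))

    backwardResult = lcnLayerBackward.compute()
    printTensor(backwardResult.getResult(layers.backward.gradient), "Backward local contrast normalization layer result:")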

intel/daal: examples/python/source/neural_networks/concat_layer_dense_batch.py (view on GitHub)

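The excerpt begins after the forward concatenation layer was created; a plausible reconstruction of that setup, where concatDimension, nInputs, the location of LayerData, and the CSV source are all assumptions:

    # Hypothetical setup (reconstruction, not from the excerpt)
    concatDimension = 1
    nInputs = 3
    tensorDataCollection = layers.LayerData()
    for i in range(nInputs):
        tensorDataCollection[i] = readTensorFromCSV(datasetFileName)
    concatLayerForward = layers.concat.forward.Batch(concatDimension)
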
    # Set input objects for the forward concatenation layer
    concatLayerForward.input.setInputLayerData(layers.forward.inputLayerData, tensorDataCollection)

    # Compute forward concatenation layer results
    forwardResult = concatLayerForward.compute()

    printTensor(forwardResult.getResult(layers.forward.value), "Forward concatenation layer result value (first 5 rows):", 5)

    # Create an algorithm to compute backward concatenation layer results using default method
    concatLayerBackward = layers.concat.backward.Batch(concatDimension)

    # Set inputs for the backward concatenation layer
    concatLayerBackward.input.setInput(layers.backward.inputGradient, forwardResult.getResult(layers.forward.value))
    concatLayerBackward.input.setInputLayerData(layers.backward.inputFromForward, forwardResult.getResultLayerData(layers.forward.resultForBackward))

    printNumericTable(forwardResult.getLayerData(layers.concat.auxInputDimensions), "auxInputDimensions:")

    # Compute backward concatenation layer results
    backwardResult = concatLayerBackward.compute()

    for i in range(tensorDataCollection.size()):
        printTensor(backwardResult.getResultLayerData(layers.backward.resultLayerData, i),
                    "Backward concatenation layer backward result (first 5 rows):", 5)

intel/daal: examples/python/source/neural_networks/initializers_dense_batch.py (view on GitHub)

    printTensor(tensorData, "Data with uniform distribution:")


    # Fill layer weights using the Xavier initializer
    # Create an algorithm to compute forward fully-connected layer results using default method
    fullyconnectedLayerForward = layers.fullyconnected.forward.Batch(5)

    # Set input objects and parameter for the forward fully-connected layer
    fullyconnectedLayerForward.input.setInput(layers.forward.data, tensorData)
    fullyconnectedLayerForward.parameter.weightsInitializer = initializers.xavier.Batch()

    # Compute forward fully-connected layer results
    fullyconnectedLayerForward.compute()

    # Print the results of the xavier initializer
    printTensor(fullyconnectedLayerForward.input.getInput(layers.forward.weights), "Weights filled by xavier initializer:")