Class OperationsArray

Namespace
NeuralNetworks.Core.Operations
Assembly
NeuralNetworks.dll

Provides the baseline array-based CPU implementation of IOperations for deterministic reference execution.

public class OperationsArray : IOperations
Inheritance
object
OperationsArray
Implements
IOperations

Remarks

All kernels are written against standard multidimensional arrays and straightforward loops so the behavior mirrors higher-performance backends while remaining easy to debug, test, and teach. Use this backend when GPU acceleration is unavailable, when numerical traceability matters more than throughput, or as a correctness oracle for other implementations.
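
Example

A minimal sketch of using OperationsArray as a correctness oracle for another backend, as suggested above. The otherBackend variable is a hypothetical second IOperations implementation, and the tolerance is an illustrative choice, not part of this API.

IOperations reference = new OperationsArray();
IOperations candidate = otherBackend; // hypothetical: any other IOperations backend

float[,] input = { { -1.5f, 0.0f }, { 2.0f, 3.5f } };

float[,] expected = reference.ReLUOutput(input);
float[,] actual = candidate.ReLUOutput(input);

// Compare the candidate backend element by element against the reference result.
for (int i = 0; i < expected.GetLength(0); i++)
    for (int j = 0; j < expected.GetLength(1); j++)
        System.Diagnostics.Debug.Assert(System.Math.Abs(expected[i, j] - actual[i, j]) <= 1e-6f);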

Properties

BackendType

public virtual OperationBackendType BackendType { get; }

Property Value

OperationBackendType

Methods

BiasAddOutput(float[,], float[])

public virtual float[,] BiasAddOutput(float[,] input, float[] bias)

Parameters

input float[,]
bias float[]

Returns

float[,]

BiasAddParamGradient(float[,])

public virtual float[] BiasAddParamGradient(float[,] outputGradient)

Parameters

outputGradient float[,]

Returns

float[]

BipolarSigmoidInputGradient(float[,], float[,], float)

public virtual float[,] BipolarSigmoidInputGradient(float[,] outputGradient, float[,] output, float scale)

Parameters

outputGradient float[,]
output float[,]
scale float

Returns

float[,]

BipolarSigmoidOutput(float[,], float)

public virtual float[,] BipolarSigmoidOutput(float[,] input, float scale)

Parameters

input float[,]
scale float

Returns

float[,]

Convolve2DInputGradient(float[,,,], float[,,,], float[,,,])

public virtual float[,,,] Convolve2DInputGradient(float[,,,] input, float[,,,] weights, float[,,,] outputGradient)

Parameters

input float[,,,]
weights float[,,,]
outputGradient float[,,,]

Returns

float[,,,]

Convolve2DOutput(float[,,,], float[,,,])

public virtual float[,,,] Convolve2DOutput(float[,,,] input, float[,,,] weights)

Parameters

input float[,,,]
weights float[,,,]

Returns

float[,,,]

Convolve2DParamGradient(float[,,,], float[,,,], int, int)

public virtual float[,,,] Convolve2DParamGradient(float[,,,] input, float[,,,] outputGradient, int kernelHeight, int kernelWidth)

Parameters

input float[,,,]
outputGradient float[,,,]
kernelHeight int
kernelWidth int

Returns

float[,,,]

CrossEntropyLoss(float[,], float[,], float)

public virtual float CrossEntropyLoss(float[,] predicted, float[,] target, float eps = 1E-07)

Parameters

predicted float[,]
target float[,]
eps float

Returns

float

CrossEntropyLossGradient(float[,], float[,])

public virtual float[,] CrossEntropyLossGradient(float[,] predicted, float[,] target)

Parameters

predicted float[,]
target float[,]

Returns

float[,]

DropoutInputGradient(float[,,,], float[,,,])

public float[,,,] DropoutInputGradient(float[,,,] outputGradient, float[,,,] mask)

Parameters

outputGradient float[,,,]
mask float[,,,]

Returns

float[,,,]

DropoutInputGradient(float[,], float[,])

public float[,] DropoutInputGradient(float[,] outputGradient, float[,] mask)

Parameters

outputGradient float[,]
mask float[,]

Returns

float[,]

DropoutOutput(float[,,,], bool, float, SeededRandom?, out float[,,,]?)

public virtual float[,,,] DropoutOutput(float[,,,] input, bool inference, float keepProb, SeededRandom? random, out float[,,,]? mask)

Parameters

input float[,,,]
inference bool
keepProb float
random SeededRandom
mask float[,,,]

Returns

float[,,,]

DropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)

public virtual float[,] DropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)

Parameters

input float[,]
inference bool
keepProb float
random SeededRandom
mask float[,]

Returns

float[,]

Flatten(float[,,,])

public virtual float[,] Flatten(float[,,,] source)

Parameters

source float[,,,]

Returns

float[,]

InvertedDropoutInputGradient(float[,], float[,], float)

public float[,] InvertedDropoutInputGradient(float[,] outputGradient, float[,] mask, float keepProb)

Parameters

outputGradient float[,]
mask float[,]
keepProb float

Returns

float[,]

InvertedDropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)

public virtual float[,] InvertedDropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)

Parameters

input float[,]
inference bool
keepProb float
random SeededRandom
mask float[,]

Returns

float[,]

LeakyReLUInputGradient(float[,,,], float[,,,], float, float)

public virtual float[,,,] LeakyReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float alfa, float beta)

Parameters

outputGradient float[,,,]
input float[,,,]
alfa float
beta float

Returns

float[,,,]

LeakyReLUInputGradient(float[,], float[,], float, float)

public virtual float[,] LeakyReLUInputGradient(float[,] outputGradient, float[,] input, float alfa, float beta)

Parameters

outputGradient float[,]
input float[,]
alfa float
beta float

Returns

float[,]

LeakyReLUOutput(float[,,,], float, float)

public virtual float[,,,] LeakyReLUOutput(float[,,,] input, float alpha = 0.01, float beta = 1)

Parameters

input float[,,,]
alpha float
beta float

Returns

float[,,,]

LeakyReLUOutput(float[,], float, float)

public virtual float[,] LeakyReLUOutput(float[,] input, float alpha = 0.01, float beta = 1)

Parameters

input float[,]
alpha float
beta float

Returns

float[,]

ReLUInputGradient(float[,,,], float[,,,], float)

public virtual float[,,,] ReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float beta)

Parameters

outputGradient float[,,,]
input float[,,,]
beta float

Returns

float[,,,]

ReLUInputGradient(float[,], float[,], float)

public virtual float[,] ReLUInputGradient(float[,] outputGradient, float[,] input, float beta)

Parameters

outputGradient float[,]
input float[,]
beta float

Returns

float[,]

ReLUOutput(float[,,,], float)

public virtual float[,,,] ReLUOutput(float[,,,] input, float beta = 1)

Parameters

input float[,,,]
beta float

Returns

float[,,,]

ReLUOutput(float[,], float)

Applies the rectified linear unit (ReLU) activation function to each element of the specified 2D array.

public virtual float[,] ReLUOutput(float[,] input, float beta = 1)

Parameters

input float[,]

The two-dimensional array of single-precision floating-point values to which the ReLU function is applied.

beta float

An optional scaling factor applied to non-negative values. The default is 1.0.

Returns

float[,]

A new two-dimensional array where each element is the result of applying the ReLU function to the corresponding element in the source array.

Remarks

The ReLU function sets all negative values to zero and multiplies non-negative values by the specified beta. The original array is not modified.
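
Example

A minimal usage sketch; the output values follow directly from the rule above (negative inputs map to zero, non-negative inputs are scaled by beta).

var ops = new OperationsArray();
float[,] input = { { -2.0f, 0.5f }, { 1.0f, -0.25f } };

float[,] output = ops.ReLUOutput(input, beta: 2.0f);
// output: { { 0.0f, 1.0f }, { 2.0f, 0.0f } }; input is left unmodified.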

SigmoidInputGradient(float[,], float[,])

public virtual float[,] SigmoidInputGradient(float[,] outputGradient, float[,] output)

Parameters

outputGradient float[,]
output float[,]

Returns

float[,]

SigmoidOutput(float[,])

public virtual float[,] SigmoidOutput(float[,] input)

Parameters

input float[,]

Returns

float[,]

SoftplusInputGradient(float[,], float[,])

public virtual float[,] SoftplusInputGradient(float[,] outputGradient, float[,] output)

Parameters

outputGradient float[,]
output float[,]

Returns

float[,]

SoftplusOutput(float[,])

public virtual float[,] SoftplusOutput(float[,] input)

Parameters

input float[,]

Returns

float[,]

TanhInputGradient(float[,,,], float[,,,])

Calculates the gradient of the loss with respect to the input of the Tanh activation function.

public virtual float[,,,] TanhInputGradient(float[,,,] outputGradient, float[,,,] output)

Parameters

outputGradient float[,,,]

The gradient of the loss with respect to the output of the Tanh function (dL/dy).

output float[,,,]

The output of the Tanh function (tanh(x)).

Returns

float[,,,]

The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.

Remarks

Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).

The derivative of the Tanh function tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) is 1 - tanh(x)^2. The input gradient is therefore computed as dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2), where:
  • tanh(x) => output
  • dL/dy => outputGradient
  • dL/dx => inputGradient
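
Example

A minimal sketch of the elementwise rule above for the 4D overload; the tensor shape and values are illustrative.

var ops = new OperationsArray();

// Suppose x = 0.5, so output = tanh(0.5) ≈ 0.4621f.
float[,,,] output = new float[1, 1, 1, 1] { { { { 0.4621f } } } };
float[,,,] outputGradient = new float[1, 1, 1, 1] { { { { 1.0f } } } };

float[,,,] inputGradient = ops.TanhInputGradient(outputGradient, output);
// inputGradient[0, 0, 0, 0] ≈ 1 * (1 - 0.4621^2) ≈ 0.7864f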

TanhInputGradient(float[,], float[,])

Calculates the gradient of the loss with respect to the input of the Tanh activation function.

public virtual float[,] TanhInputGradient(float[,] outputGradient, float[,] output)

Parameters

outputGradient float[,]

The gradient of the loss with respect to the output of the Tanh function (dL/dy).

output float[,]

The output of the Tanh function (tanh(x)).

Returns

float[,]

The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.

Remarks

Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).

The derivative of the Tanh function tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) is 1 - tanh(x)^2. The input gradient is therefore computed as dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2), where:
  • tanh(x) => output
  • dL/dy => outputGradient
  • dL/dx => inputGradient
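
Example

The same rule for the 2D overload, shown with concrete numbers; the values are illustrative.

var ops = new OperationsArray();

float[,] output = { { 0.0f, 0.7616f } };      // tanh(0) = 0, tanh(1) ≈ 0.7616
float[,] outputGradient = { { 2.0f, 2.0f } }; // upstream gradient dL/dy

float[,] inputGradient = ops.TanhInputGradient(outputGradient, output);
// inputGradient ≈ { { 2.0f, 0.8399f } }      // 2 * (1 - 0^2) and 2 * (1 - 0.7616^2)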

TanhOutput(float[,,,])

public virtual float[,,,] TanhOutput(float[,,,] input)

Parameters

input float[,,,]

Returns

float[,,,]

TanhOutput(float[,])

public virtual float[,] TanhOutput(float[,] input)

Parameters

input float[,]

Returns

float[,]

Unflatten(float[,], float[,,,])

public virtual float[,,,] Unflatten(float[,] source, float[,,,] targetSize)

Parameters

source float[,]
targetSize float[,,,]

Returns

float[,,,]

WeightMultiplyInputGradient(float[,], float[,])

public virtual float[,] WeightMultiplyInputGradient(float[,] outputGradient, float[,] weights)

Parameters

outputGradient float[,]
weights float[,]

Returns

float[,]

WeightMultiplyOutput(float[,], float[,])

public virtual float[,] WeightMultiplyOutput(float[,] input, float[,] weights)

Parameters

input float[,]
weights float[,]

Returns

float[,]

WeightMultiplyParamGradient(float[,], float[,])

public virtual float[,] WeightMultiplyParamGradient(float[,] input, float[,] outputGradient)

Parameters

input float[,]
outputGradient float[,]

Returns

float[,]