Class OperationsArray
- Namespace
- NeuralNetworks.Core.Operations
- Assembly
- NeuralNetworks.dll
Provides the baseline array-based CPU implementation of IOperations for deterministic reference execution.
public class OperationsArray : IOperations
- Inheritance
- object → OperationsArray
- Implements
- IOperations
- Derived
- Inherited Members
Remarks
All kernels are written against standard multidimensional arrays and straightforward loops so the behavior mirrors higher-performance backends while remaining easy to debug, test, and teach. Use this backend when GPU acceleration is unavailable, when numerical traceability matters more than throughput, or as a correctness oracle for other implementations.
Properties
BackendType
public virtual OperationBackendType BackendType { get; }
Property Value
Methods
BiasAddOutput(float[,], float[])
public virtual float[,] BiasAddOutput(float[,] input, float[] bias)
Parameters
Returns
- float[,]
BiasAddParamGradient(float[,])
public virtual float[] BiasAddParamGradient(float[,] outputGradient)
Parameters
outputGradient float[,]
Returns
- float[]
BipolarSigmoidInputGradient(float[,], float[,], float)
public virtual float[,] BipolarSigmoidInputGradient(float[,] outputGradient, float[,] output, float scale)
Parameters
Returns
- float[,]
BipolarSigmoidOutput(float[,], float)
public virtual float[,] BipolarSigmoidOutput(float[,] input, float scale)
Parameters
Returns
- float[,]
Convolve2DInputGradient(float[,,,], float[,,,], float[,,,])
public virtual float[,,,] Convolve2DInputGradient(float[,,,] input, float[,,,] weights, float[,,,] outputGradient)
Parameters
Returns
- float[,,,]
Convolve2DOutput(float[,,,], float[,,,])
public virtual float[,,,] Convolve2DOutput(float[,,,] input, float[,,,] weights)
Parameters
Returns
- float[,,,]
Convolve2DParamGradient(float[,,,], float[,,,], int, int)
public virtual float[,,,] Convolve2DParamGradient(float[,,,] input, float[,,,] outputGradient, int kernelHeight, int kernelWidth)
Parameters
Returns
- float[,,,]
CrossEntropyLoss(float[,], float[,], float)
public virtual float CrossEntropyLoss(float[,] predicted, float[,] target, float eps = 1E-07F)
Parameters
Returns
CrossEntropyLossGradient(float[,], float[,])
public virtual float[,] CrossEntropyLossGradient(float[,] predicted, float[,] target)
Parameters
Returns
- float[,]
DropoutInputGradient(float[,,,], float[,,,])
public float[,,,] DropoutInputGradient(float[,,,] outputGradient, float[,,,] mask)
Parameters
Returns
- float[,,,]
DropoutInputGradient(float[,], float[,])
public float[,] DropoutInputGradient(float[,] outputGradient, float[,] mask)
Parameters
Returns
- float[,]
DropoutOutput(float[,,,], bool, float, SeededRandom?, out float[,,,]?)
public virtual float[,,,] DropoutOutput(float[,,,] input, bool inference, float keepProb, SeededRandom? random, out float[,,,]? mask)
Parameters
input float[,,,]
inference bool
keepProb float
random SeededRandom?
mask float[,,,]? (out)
Returns
- float[,,,]
DropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)
public virtual float[,] DropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)
Parameters
input float[,]
inference bool
keepProb float
random SeededRandom?
mask float[,]? (out)
Returns
- float[,]
Flatten(float[,,,])
public virtual float[,] Flatten(float[,,,] source)
Parameters
source float[,,,]
Returns
- float[,]
InvertedDropoutInputGradient(float[,], float[,], float)
public float[,] InvertedDropoutInputGradient(float[,] outputGradient, float[,] mask, float keepProb)
Parameters
Returns
- float[,]
InvertedDropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)
public virtual float[,] InvertedDropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)
Parameters
input float[,]
inference bool
keepProb float
random SeededRandom?
mask float[,]? (out)
Returns
- float[,]
LeakyReLUInputGradient(float[,,,], float[,,,], float, float)
public virtual float[,,,] LeakyReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float alfa, float beta)
Parameters
Returns
- float[,,,]
LeakyReLUInputGradient(float[,], float[,], float, float)
public virtual float[,] LeakyReLUInputGradient(float[,] outputGradient, float[,] input, float alfa, float beta)
Parameters
Returns
- float[,]
LeakyReLUOutput(float[,,,], float, float)
public virtual float[,,,] LeakyReLUOutput(float[,,,] input, float alpha = 0.01F, float beta = 1)
Parameters
Returns
- float[,,,]
LeakyReLUOutput(float[,], float, float)
public virtual float[,] LeakyReLUOutput(float[,] input, float alpha = 0.01F, float beta = 1)
Parameters
Returns
- float[,]
ReLUInputGradient(float[,,,], float[,,,], float)
public virtual float[,,,] ReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float beta)
Parameters
Returns
- float[,,,]
ReLUInputGradient(float[,], float[,], float)
public virtual float[,] ReLUInputGradient(float[,] outputGradient, float[,] input, float beta)
Parameters
Returns
- float[,]
ReLUOutput(float[,,,], float)
public virtual float[,,,] ReLUOutput(float[,,,] input, float beta = 1)
Parameters
Returns
- float[,,,]
ReLUOutput(float[,], float)
Applies the rectified linear unit (ReLU) activation function to each element of the specified 2D array.
public virtual float[,] ReLUOutput(float[,] input, float beta = 1)
Parameters
input float[,] — The two-dimensional array of single-precision floating-point values to which the ReLU function is applied.
beta float — An optional scaling factor applied to non-negative values. The default is 1.0.
Returns
- float[,]
A new two-dimensional array where each element is the result of applying the ReLU function to the corresponding element in the source array.
Remarks
The ReLU function sets all negative values to zero and multiplies non-negative values by the specified beta. The original array is not modified.
SigmoidInputGradient(float[,], float[,])
public virtual float[,] SigmoidInputGradient(float[,] outputGradient, float[,] output)
Parameters
Returns
- float[,]
SigmoidOutput(float[,])
public virtual float[,] SigmoidOutput(float[,] input)
Parameters
input float[,]
Returns
- float[,]
SoftplusInputGradient(float[,], float[,])
public virtual float[,] SoftplusInputGradient(float[,] outputGradient, float[,] output)
Parameters
Returns
- float[,]
SoftplusOutput(float[,])
public virtual float[,] SoftplusOutput(float[,] input)
Parameters
input float[,]
Returns
- float[,]
TanhInputGradient(float[,,,], float[,,,])
Calculates the gradient of the loss with respect to the input of the Tanh activation function.
public virtual float[,,,] TanhInputGradient(float[,,,] outputGradient, float[,,,] output)
Parameters
outputGradient float[,,,] — The gradient of the loss with respect to the output of the Tanh function (dL/dy).
output float[,,,] — The output of the Tanh function (tanh(x)).
Returns
- float[,,,]
The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as
outputGradient.
Remarks
Given the output gradient (dL/dy), the function calculates the source gradient (dL/dx).
The derivative of the Tanh function, tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)), is 1 - tanh(x)^2.
Therefore, the source gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
- tanh(x) => output
- dL/dy => outputGradient
- dL/dx => inputGradient
TanhInputGradient(float[,], float[,])
Calculates the gradient of the loss with respect to the input of the Tanh activation function.
public virtual float[,] TanhInputGradient(float[,] outputGradient, float[,] output)
Parameters
outputGradient float[,] — The gradient of the loss with respect to the output of the Tanh function (dL/dy).
output float[,] — The output of the Tanh function (tanh(x)).
Returns
- float[,]
The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as
outputGradient.
Remarks
Given the output gradient (dL/dy), the function calculates the source gradient (dL/dx).
The derivative of the Tanh function, tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)), is 1 - tanh(x)^2.
Therefore, the source gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
- tanh(x) => output
- dL/dy => outputGradient
- dL/dx => inputGradient
TanhOutput(float[,,,])
public virtual float[,,,] TanhOutput(float[,,,] input)
Parameters
input float[,,,]
Returns
- float[,,,]
TanhOutput(float[,])
public virtual float[,] TanhOutput(float[,] input)
Parameters
input float[,]
Returns
- float[,]
Unflatten(float[,], float[,,,])
public virtual float[,,,] Unflatten(float[,] source, float[,,,] targetSize)
Parameters
Returns
- float[,,,]
WeightMultiplyInputGradient(float[,], float[,])
public virtual float[,] WeightMultiplyInputGradient(float[,] outputGradient, float[,] weights)
Parameters
Returns
- float[,]
WeightMultiplyOutput(float[,], float[,])
public virtual float[,] WeightMultiplyOutput(float[,] input, float[,] weights)
Parameters
Returns
- float[,]
WeightMultiplyParamGradient(float[,], float[,])
public virtual float[,] WeightMultiplyParamGradient(float[,] input, float[,] outputGradient)
Parameters
Returns
- float[,]