Interface IOperations
- Namespace: NeuralNetworks.Core.Operations
- Assembly: NeuralNetworks.dll
public interface IOperations
Properties
BackendType
OperationBackendType BackendType { get; }
Property Value
- OperationBackendType
Methods
BiasAddOutput(float[,], float[])
float[,] BiasAddOutput(float[,] input, float[] bias)
Parameters
Returns
- float[,]
BiasAddParamGradient(float[,])
float[] BiasAddParamGradient(float[,] outputGradient)
Parameters
outputGradient float[,]
Returns
- float[]
BipolarSigmoidInputGradient(float[,], float[,], float)
float[,] BipolarSigmoidInputGradient(float[,] outputGradient, float[,] output, float scale)
Parameters
Returns
- float[,]
BipolarSigmoidOutput(float[,], float)
float[,] BipolarSigmoidOutput(float[,] input, float scale)
Parameters
Returns
- float[,]
Convolve2DInputGradient(float[,,,], float[,,,], float[,,,])
float[,,,] Convolve2DInputGradient(float[,,,] input, float[,,,] weights, float[,,,] outputGradient)
Parameters
Returns
- float[,,,]
Convolve2DOutput(float[,,,], float[,,,])
float[,,,] Convolve2DOutput(float[,,,] input, float[,,,] weights)
Parameters
Returns
- float[,,,]
Convolve2DParamGradient(float[,,,], float[,,,], int, int)
float[,,,] Convolve2DParamGradient(float[,,,] input, float[,,,] outputGradient, int kernelHeight, int kernelWidth)
Parameters
Returns
- float[,,,]
CrossEntropyLoss(float[,], float[,], float)
float CrossEntropyLoss(float[,] predicted, float[,] target, float eps = 1E-07)
Parameters
Returns
- float
CrossEntropyLossGradient(float[,], float[,])
float[,] CrossEntropyLossGradient(float[,] predicted, float[,] target)
Parameters
Returns
- float[,]
DropoutInputGradient(float[,,,], float[,,,])
float[,,,] DropoutInputGradient(float[,,,] outputGradient, float[,,,] mask)
Parameters
Returns
- float[,,,]
DropoutInputGradient(float[,], float[,])
float[,] DropoutInputGradient(float[,] outputGradient, float[,] mask)
Parameters
Returns
- float[,]
DropoutOutput(float[,,,], bool, float, SeededRandom?, out float[,,,]?)
float[,,,] DropoutOutput(float[,,,] input, bool inference, float keepProb, SeededRandom? random, out float[,,,]? mask)
Parameters
input float[,,,]
inference bool
keepProb float
random SeededRandom
mask float[,,,]
Returns
- float[,,,]
DropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)
float[,] DropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)
Parameters
input float[,]
inference bool
keepProb float
random SeededRandom
mask float[,]
Returns
- float[,]
Flatten(float[,,,])
float[,] Flatten(float[,,,] source)
Parameters
source float[,,,]
Returns
- float[,]
InvertedDropoutInputGradient(float[,], float[,], float)
float[,] InvertedDropoutInputGradient(float[,] outputGradient, float[,] mask, float keepProb)
Parameters
Returns
- float[,]
InvertedDropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)
float[,] InvertedDropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)
Parameters
input float[,]
inference bool
keepProb float
random SeededRandom
mask float[,]
Returns
- float[,]
LeakyReLUInputGradient(float[,,,], float[,,,], float, float)
float[,,,] LeakyReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float alfa, float beta)
Parameters
Returns
- float[,,,]
LeakyReLUInputGradient(float[,], float[,], float, float)
float[,] LeakyReLUInputGradient(float[,] outputGradient, float[,] input, float alfa, float beta)
Parameters
Returns
- float[,]
LeakyReLUOutput(float[,,,], float, float)
float[,,,] LeakyReLUOutput(float[,,,] input, float alpha = 0.01, float beta = 1)
Parameters
Returns
- float[,,,]
LeakyReLUOutput(float[,], float, float)
float[,] LeakyReLUOutput(float[,] input, float alpha = 0.01, float beta = 1)
Parameters
Returns
- float[,]
ReLUInputGradient(float[,,,], float[,,,], float)
float[,,,] ReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float beta)
Parameters
Returns
- float[,,,]
ReLUInputGradient(float[,], float[,], float)
float[,] ReLUInputGradient(float[,] outputGradient, float[,] input, float beta)
Parameters
Returns
- float[,]
ReLUOutput(float[,,,], float)
float[,,,] ReLUOutput(float[,,,] input, float beta = 1)
Parameters
Returns
- float[,,,]
ReLUOutput(float[,], float)
Applies the rectified linear unit (ReLU) activation function to each element of the specified 2D array.
float[,] ReLUOutput(float[,] input, float beta = 1)
Parameters
input float[,]
The two-dimensional array of single-precision floating-point values to which the ReLU function is applied.
beta float
An optional scaling factor applied to non-negative values. The default is 1.0.
Returns
- float[,]
A new two-dimensional array where each element is the result of applying the ReLU function to the corresponding element in the source array.
Remarks
The ReLU function sets all negative values to zero and multiplies non-negative values by the specified beta. The original array is not modified.
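For reference, the remarks above correspond to the following element-wise computation. This is only an illustrative sketch of the documented behavior, not the library's actual implementation; the helper name ReLUOutputReference is hypothetical and not part of IOperations.
// Minimal sketch of the documented ReLU semantics; a concrete IOperations
// backend may vectorize or parallelize this differently.
static float[,] ReLUOutputReference(float[,] input, float beta = 1f)
{
    int rows = input.GetLength(0);
    int cols = input.GetLength(1);
    var result = new float[rows, cols];              // the input array is left unmodified
    for (int i = 0; i < rows; i++)
        for (int j = 0; j < cols; j++)
        {
            float x = input[i, j];
            result[i, j] = x < 0f ? 0f : beta * x;   // negatives become zero, non-negatives are scaled by beta
        }
    return result;
}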
SigmoidInputGradient(float[,], float[,])
float[,] SigmoidInputGradient(float[,] outputGradient, float[,] output)
Parameters
Returns
- float[,]
SigmoidOutput(float[,])
float[,] SigmoidOutput(float[,] input)
Parameters
input float[,]
Returns
- float[,]
SoftplusInputGradient(float[,], float[,])
float[,] SoftplusInputGradient(float[,] outputGradient, float[,] output)
Parameters
Returns
- float[,]
SoftplusOutput(float[,])
float[,] SoftplusOutput(float[,] input)
Parameters
input float[,]
Returns
- float[,]
TanhInputGradient(float[,,,], float[,,,])
Calculates the gradient of the loss with respect to the input of the Tanh activation function.
float[,,,] TanhInputGradient(float[,,,] outputGradient, float[,,,] output)
Parameters
outputGradient float[,,,]
The gradient of the loss with respect to the output of the Tanh function (dL/dy).
output float[,,,]
The output of the Tanh function (tanh(x)).
Returns
- float[,,,]
The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.
Remarks
Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).
The derivative of the Tanh function, tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)), is 1 - tanh(x)^2.
Therefore, the input gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
- tanh(x) => output
- dL/dy => outputGradient
- dL/dx => inputGradient
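The formula above translates to a simple element-wise loop. The sketch below is only an illustration of that formula; the helper name TanhInputGradientReference is hypothetical and an actual IOperations backend may compute this differently.
// Illustrative sketch of dL/dx = dL/dy * (1 - output^2) for a 4D tensor.
static float[,,,] TanhInputGradientReference(float[,,,] outputGradient, float[,,,] output)
{
    int d0 = outputGradient.GetLength(0), d1 = outputGradient.GetLength(1);
    int d2 = outputGradient.GetLength(2), d3 = outputGradient.GetLength(3);
    var inputGradient = new float[d0, d1, d2, d3];
    for (int a = 0; a < d0; a++)
        for (int b = 0; b < d1; b++)
            for (int c = 0; c < d2; c++)
                for (int d = 0; d < d3; d++)
                {
                    float y = output[a, b, c, d];                       // y = tanh(x)
                    inputGradient[a, b, c, d] =
                        outputGradient[a, b, c, d] * (1f - y * y);      // dL/dx = dL/dy * (1 - y^2)
                }
    return inputGradient;
}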
TanhInputGradient(float[,], float[,])
Calculates the gradient of the loss with respect to the input of the Tanh activation function.
float[,] TanhInputGradient(float[,] outputGradient, float[,] output)
Parameters
outputGradient float[,]
The gradient of the loss with respect to the output of the Tanh function (dL/dy).
output float[,]
The output of the Tanh function (tanh(x)).
Returns
- float[,]
The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.
Remarks
Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).
The derivative of the Tanh function, tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)), is 1 - tanh(x)^2.
Therefore, the input gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
- tanh(x) => output
- dL/dy => outputGradient
- dL/dx => inputGradient
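In practice the forward output is typically cached and passed back into the gradient call. The following usage sketch assumes an IOperations instance named ops and float[,] arrays named activations and upstreamGradient from the surrounding network code; those names are assumptions, not part of the interface.
// Hypothetical usage: pair the forward output with the backward call.
float[,] output = ops.TanhOutput(activations);                              // forward pass: y = tanh(x)
float[,] inputGradient = ops.TanhInputGradient(upstreamGradient, output);   // backward pass: dL/dx = dL/dy * (1 - y^2)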
TanhOutput(float[,,,])
float[,,,] TanhOutput(float[,,,] source)
Parameters
source float[,,,]
Returns
- float[,,,]
TanhOutput(float[,])
float[,] TanhOutput(float[,] source)
Parameters
source float[,]
Returns
- float[,]
Unflatten(float[,], float[,,,])
float[,,,] Unflatten(float[,] source, float[,,,] targetSize)
Parameters
Returns
- float[,,,]
WeightMultiplyInputGradient(float[,], float[,])
float[,] WeightMultiplyInputGradient(float[,] outputGradient, float[,] weights)
Parameters
Returns
- float[,]
WeightMultiplyOutput(float[,], float[,])
float[,] WeightMultiplyOutput(float[,] input, float[,] weights)
Parameters
Returns
- float[,]
WeightMultiplyParamGradient(float[,], float[,])
float[,] WeightMultiplyParamGradient(float[,] input, float[,] outputGradient)
Parameters
Returns
- float[,]