Class OperationsSpan
- Namespace
- NeuralNetworks.Core.Operations
- Assembly
- NeuralNetworks.dll
public class OperationsSpan : OperationsArray, IOperations
- Inheritance
-
OperationsSpan
- Implements
- Derived
- Inherited Members
Properties
BackendType
public override OperationBackendType BackendType { get; }
Property Value
Methods
Convolve2DInputGradient(float[,,,], float[,,,], float[,,,])
public override float[,,,] Convolve2DInputGradient(float[,,,] input, float[,,,] weights, float[,,,] outputGradient)
Parameters
Returns
- float[,,,]
Convolve2DOutput(float[,,,], float[,,,])
public override float[,,,] Convolve2DOutput(float[,,,] input, float[,,,] weights)
Parameters
Returns
- float[,,,]
Convolve2DParamGradient(float[,,,], float[,,,], int, int)
public override float[,,,] Convolve2DParamGradient(float[,,,] input, float[,,,] outputGradient, int kernelHeight, int kernelWidth)
Parameters
Returns
- float[,,,]
CrossEntropyLoss(float[,], float[,], float)
public override float CrossEntropyLoss(float[,] predicted, float[,] target, float eps = 1E-07)
Parameters
Returns
CrossEntropyLossGradient(float[,], float[,])
public override float[,] CrossEntropyLossGradient(float[,] predicted, float[,] target)
Parameters
Returns
- float[,]
Flatten(float[,,,])
public override float[,] Flatten(float[,,,] source)
Parameters
source float[,,,]
Returns
- float[,]
LeakyReLUInputGradient(float[,,,], float[,,,], float, float)
public override float[,,,] LeakyReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float alfa, float beta)
Parameters
Returns
- float[,,,]
LeakyReLUOutput(float[,,,], float, float)
public override float[,,,] LeakyReLUOutput(float[,,,] input, float alpha = 0.01, float beta = 1)
Parameters
Returns
- float[,,,]
ReLUInputGradient(float[,,,], float[,,,], float)
public override float[,,,] ReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float beta)
Parameters
Returns
- float[,,,]
ReLUOutput(float[,,,], float)
public override float[,,,] ReLUOutput(float[,,,] input, float beta = 1)
Parameters
Returns
- float[,,,]
TanhInputGradient(float[,,,], float[,,,])
Calculates the gradient of the loss with respect to the input of the Tanh activation function.
public override float[,,,] TanhInputGradient(float[,,,] outputGradient, float[,,,] output)
Parameters
outputGradient (float[,,,]): The gradient of the loss with respect to the output of the Tanh function (dL/dy).
output (float[,,,]): The output of the Tanh function (tanh(x)).
Returns
- float[,,,]
The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as
outputGradient.
Remarks
Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).
The derivative of the Tanh function, tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)), is 1 - tanh(x)^2.
Therefore, the input gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
- tanh(x) => output
- dL/dy => outputGradient
- dL/dx => inputGradient
TanhOutput(float[,,,])
public override float[,,,] TanhOutput(float[,,,] source)
Parameters
source float[,,,]
Returns
- float[,,,]
Unflatten(float[,], float[,,,])
public override float[,,,] Unflatten(float[,] source, float[,,,] targetSize)
Parameters
Returns
- float[,,,]
WeightMultiplyInputGradient(float[,], float[,])
public override float[,] WeightMultiplyInputGradient(float[,] outputGradient, float[,] weights)
Parameters
Returns
- float[,]
WeightMultiplyOutput(float[,], float[,])
public override float[,] WeightMultiplyOutput(float[,] input, float[,] weights)
Parameters
Returns
- float[,]
WeightMultiplyParamGradient(float[,], float[,])
public override float[,] WeightMultiplyParamGradient(float[,] input, float[,] outputGradient)
Parameters
Returns
- float[,]