Class OperationsSpan

Namespace: NeuralNetworks.Core.Operations
Assembly: NeuralNetworks.dll

public class OperationsSpan : OperationsArray, IOperations
Inheritance
OperationsArray → OperationsSpan

Implements
IOperations
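
The members below can be exercised directly on the class. A minimal sketch, assuming OperationsSpan exposes a public parameterless constructor and that the 4-D tensors use a [batch, channels, height, width] layout (neither is documented in this reference):

using NeuralNetworks.Core.Operations;

// Assumption: a public parameterless constructor is available.
var ops = new OperationsSpan();

// Hypothetical activations and kernel weights; the [batch, channels, height, width]
// layout is assumed purely for illustration.
var input   = new float[1, 3, 32, 32];
var weights = new float[8, 3, 3, 3];

float[,,,] conv = ops.Convolve2DOutput(input, weights);
float[,,,] act  = ops.ReLUOutput(conv);
float[,]   flat = ops.Flatten(act);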

Properties

BackendType

public override OperationBackendType BackendType { get; }

Property Value

OperationBackendType

Methods

Convolve2DInputGradient(float[,,,], float[,,,], float[,,,])

public override float[,,,] Convolve2DInputGradient(float[,,,] input, float[,,,] weights, float[,,,] outputGradient)

Parameters

input float[,,,]
weights float[,,,]
outputGradient float[,,,]

Returns

float[,,,]

Convolve2DOutput(float[,,,], float[,,,])

public override float[,,,] Convolve2DOutput(float[,,,] input, float[,,,] weights)

Parameters

input float[,,,]
weights float[,,,]

Returns

float[,,,]
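
A minimal forward-pass sketch. The dimension order (input as [batch, inChannels, height, width], weights as [outChannels, inChannels, kernelHeight, kernelWidth]) is an assumption; this reference does not document the layout.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

// Assumed layouts: input [batch, inChannels, height, width],
// weights [outChannels, inChannels, kernelHeight, kernelWidth].
var input   = new float[4, 3, 28, 28];
var weights = new float[16, 3, 5, 5];

float[,,,] output = ops.Convolve2DOutput(input, weights);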

Convolve2DParamGradient(float[,,,], float[,,,], int, int)

public override float[,,,] Convolve2DParamGradient(float[,,,] input, float[,,,] outputGradient, int kernelHeight, int kernelWidth)

Parameters

input float[,,,]
outputGradient float[,,,]
kernelHeight int
kernelWidth int

Returns

float[,,,]
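
A hedged sketch of the backward pass for a convolution layer, pairing Convolve2DInputGradient with Convolve2DParamGradient. It assumes outputGradient matches the shape returned by Convolve2DOutput and that the last two dimensions of weights are kernelHeight and kernelWidth.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();          // assumed parameterless constructor
var input   = new float[4, 3, 28, 28];   // assumed [batch, inChannels, height, width]
var weights = new float[16, 3, 5, 5];    // assumed [outChannels, inChannels, kH, kW]

float[,,,] output = ops.Convolve2DOutput(input, weights);

// dL/dOutput, assumed to have the same shape as the forward output.
var outputGradient = new float[output.GetLength(0), output.GetLength(1),
                               output.GetLength(2), output.GetLength(3)];

// dL/dInput and dL/dWeights for the layer.
float[,,,] inputGradient  = ops.Convolve2DInputGradient(input, weights, outputGradient);
float[,,,] weightGradient = ops.Convolve2DParamGradient(
    input, outputGradient, weights.GetLength(2), weights.GetLength(3));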

CrossEntropyLoss(float[,], float[,], float)

public override float CrossEntropyLoss(float[,] predicted, float[,] target, float eps = 1E-07)

Parameters

predicted float[,]
target float[,]
eps float

Returns

float

CrossEntropyLossGradient(float[,], float[,])

public override float[,] CrossEntropyLossGradient(float[,] predicted, float[,] target)

Parameters

predicted float[,]
target float[,]

Returns

float[,]
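
A hedged sketch pairing CrossEntropyLoss with CrossEntropyLossGradient. It assumes a [batch, classes] layout with predicted holding probabilities and target holding one-hot labels; eps (default 1E-07) presumably guards the logarithm against zero probabilities.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

// Assumed layout [batch, classes]: predicted probabilities vs. one-hot targets.
var predicted = new float[,] { { 0.7f, 0.2f, 0.1f } };
var target    = new float[,] { { 1f,   0f,   0f   } };

float    loss         = ops.CrossEntropyLoss(predicted, target);        // default eps
float[,] lossGradient = ops.CrossEntropyLossGradient(predicted, target);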

Flatten(float[,,,])

public override float[,] Flatten(float[,,,] source)

Parameters

source float[,,,]

Returns

float[,]

LeakyReLUInputGradient(float[,,,], float[,,,], float, float)

public override float[,,,] LeakyReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float alfa, float beta)

Parameters

outputGradient float[,,,]
input float[,,,]
alfa float
beta float

Returns

float[,,,]

LeakyReLUOutput(float[,,,], float, float)

public override float[,,,] LeakyReLUOutput(float[,,,] input, float alpha = 0.01, float beta = 1)

Parameters

input float[,,,]
alpha float
beta float

Returns

float[,,,]
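
A hedged forward/backward sketch for the leaky ReLU pair. A standard leaky ReLU scales negative inputs by alpha (default 0.01); the role of beta is not documented here, so the sketch passes the defaults through. Note that the gradient overload spells the coefficient alfa.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

var input = new float[1, 1, 2, 2];
input[0, 0, 0, 0] = -1.5f;   // a standard leaky ReLU scales negatives by alpha
input[0, 0, 0, 1] =  2.0f;

// Forward with the defaults alpha = 0.01, beta = 1.
float[,,,] output = ops.LeakyReLUOutput(input);

// Backward: dL/dOutput, assumed to match the shape of output.
var outputGradient = new float[1, 1, 2, 2];
float[,,,] inputGradient = ops.LeakyReLUInputGradient(outputGradient, input, 0.01f, 1f);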

ReLUInputGradient(float[,,,], float[,,,], float)

public override float[,,,] ReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float beta)

Parameters

outputGradient float[,,,]
input float[,,,]
beta float

Returns

float[,,,]

ReLUOutput(float[,,,], float)

public override float[,,,] ReLUOutput(float[,,,] input, float beta = 1)

Parameters

input float[,,,]
beta float

Returns

float[,,,]
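
A hedged forward/backward sketch for the ReLU pair. The role of beta is not documented; the sketch keeps the default value of 1.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

var input = new float[1, 1, 2, 2];
input[0, 0, 0, 0] = -0.5f;   // a standard ReLU clamps negatives to 0
input[0, 0, 0, 1] =  2.0f;

float[,,,] output = ops.ReLUOutput(input);   // default beta = 1

// Backward: dL/dOutput, assumed to match the shape of output.
var outputGradient = new float[1, 1, 2, 2];
float[,,,] inputGradient = ops.ReLUInputGradient(outputGradient, input, 1f);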

TanhInputGradient(float[,,,], float[,,,])

Calculates the gradient of the loss with respect to the input of the Tanh activation function.

public override float[,,,] TanhInputGradient(float[,,,] outputGradient, float[,,,] output)

Parameters

outputGradient float[,,,]

The gradient of the loss with respect to the output of the Tanh function (dL/dy).

output float[,,,]

The output of the Tanh function (tanh(x)).

Returns

float[,,,]

The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.

Remarks

Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).

The derivative of the Tanh function tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) is 1 - tanh(x)^2. The input gradient is therefore computed as dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2), where:
  • tanh(x) => output
  • dL/dy => outputGradient
  • dL/dx => inputGradient
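
A minimal sketch of that relationship, assuming a public parameterless constructor: the forward output of TanhOutput (documented below) is passed back, together with the upstream gradient, to obtain dL/dx.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

var x = new float[1, 1, 1, 2];
x[0, 0, 0, 0] = 0.5f;

// Forward: output = tanh(x).
float[,,,] output = ops.TanhOutput(x);

// Backward: dL/dx = dL/dy * (1 - output^2), as derived above.
var outputGradient = new float[1, 1, 1, 2];   // dL/dy, same shape as output
outputGradient[0, 0, 0, 0] = 1f;
float[,,,] inputGradient = ops.TanhInputGradient(outputGradient, output);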

TanhOutput(float[,,,])

public override float[,,,] TanhOutput(float[,,,] source)

Parameters

source float[,,,]

Returns

float[,,,]

Unflatten(float[,], float[,,,])

public override float[,,,] Unflatten(float[,] source, float[,,,] targetSize)

Parameters

source float[,]
targetSize float[,,,]

Returns

float[,,,]
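
A hedged round-trip sketch combining Flatten and Unflatten. It assumes targetSize is consulted only for its dimensions (a shape template), so passing an array with the original shape restores the 4-D layout; that reading of targetSize is an assumption, not documented here.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

var activations = new float[2, 8, 4, 4];   // assumed [batch, channels, height, width]

// 4-D -> 2-D, presumably [batch, channels * height * width].
float[,] flat = ops.Flatten(activations);

// Assumption: targetSize supplies only the desired shape, so the original
// activations array can serve as the template for the reverse mapping.
float[,,,] restored = ops.Unflatten(flat, activations);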

WeightMultiplyInputGradient(float[,], float[,])

public override float[,] WeightMultiplyInputGradient(float[,] outputGradient, float[,] weights)

Parameters

outputGradient float[,]
weights float[,]

Returns

float[,]

WeightMultiplyOutput(float[,], float[,])

public override float[,] WeightMultiplyOutput(float[,] input, float[,] weights)

Parameters

input float[,]
weights float[,]

Returns

float[,]

WeightMultiplyParamGradient(float[,], float[,])

public override float[,] WeightMultiplyParamGradient(float[,] input, float[,] outputGradient)

Parameters

input float[,]
outputGradient float[,]

Returns

float[,]
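
A hedged sketch of a fully connected layer built from the three WeightMultiply methods: forward product, gradient with respect to the input, and gradient with respect to the weights. The orientation of the matrices (input as [batch, inFeatures], weights as [inFeatures, outFeatures]) is an assumption.

using NeuralNetworks.Core.Operations;

var ops = new OperationsSpan();   // assumed parameterless constructor

// Assumed shapes: input [batch, inFeatures], weights [inFeatures, outFeatures].
var input   = new float[4, 16];
var weights = new float[16, 10];

// Forward: output assumed to be [batch, outFeatures].
float[,] output = ops.WeightMultiplyOutput(input, weights);

// Backward: dL/dOutput with the same shape as output.
var outputGradient = new float[4, 10];
float[,] inputGradient  = ops.WeightMultiplyInputGradient(outputGradient, weights);
float[,] weightGradient = ops.WeightMultiplyParamGradient(input, outputGradient);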