Table of Contents

Interface IOperations

Namespace
NeuralNetworks.Core.Operations
Assembly
NeuralNetworks.dll
public interface IOperations

Properties

BackendType

OperationBackendType BackendType { get; }

Property Value

OperationBackendType

Methods

BiasAddOutput(float[,], float[])

float[,] BiasAddOutput(float[,] input, float[] bias)

Parameters

input float[,]
bias float[]

Returns

float[,]

BiasAddParamGradient(float[,])

float[] BiasAddParamGradient(float[,] outputGradient)

Parameters

outputGradient float[,]

Returns

float[]

BipolarSigmoidInputGradient(float[,], float[,], float)

float[,] BipolarSigmoidInputGradient(float[,] outputGradient, float[,] output, float scale)

Parameters

outputGradient float[,]
output float[,]
scale float

Returns

float[,]

BipolarSigmoidOutput(float[,], float)

float[,] BipolarSigmoidOutput(float[,] input, float scale)

Parameters

input float[,]
scale float

Returns

float[,]

Convolve2DInputGradient(float[,,,], float[,,,], float[,,,])

float[,,,] Convolve2DInputGradient(float[,,,] input, float[,,,] weights, float[,,,] outputGradient)

Parameters

input float[,,,]
weights float[,,,]
outputGradient float[,,,]

Returns

float[,,,]

Convolve2DOutput(float[,,,], float[,,,])

float[,,,] Convolve2DOutput(float[,,,] input, float[,,,] weights)

Parameters

input float[,,,]
weights float[,,,]

Returns

float[,,,]

Convolve2DParamGradient(float[,,,], float[,,,], int, int)

float[,,,] Convolve2DParamGradient(float[,,,] input, float[,,,] outputGradient, int kernelHeight, int kernelWidth)

Parameters

input float[,,,]
outputGradient float[,,,]
kernelHeight int
kernelWidth int

Returns

float[,,,]

CrossEntropyLoss(float[,], float[,], float)

float CrossEntropyLoss(float[,] predicted, float[,] target, float eps = 1E-07)

Parameters

predicted float[,]
target float[,]
eps float

Returns

float

CrossEntropyLossGradient(float[,], float[,])

float[,] CrossEntropyLossGradient(float[,] predicted, float[,] target)

Parameters

predicted float[,]
target float[,]

Returns

float[,]

DropoutInputGradient(float[,,,], float[,,,])

float[,,,] DropoutInputGradient(float[,,,] outputGradient, float[,,,] mask)

Parameters

outputGradient float[,,,]
mask float[,,,]

Returns

float[,,,]

DropoutInputGradient(float[,], float[,])

float[,] DropoutInputGradient(float[,] outputGradient, float[,] mask)

Parameters

outputGradient float[,]
mask float[,]

Returns

float[,]

DropoutOutput(float[,,,], bool, float, SeededRandom?, out float[,,,]?)

float[,,,] DropoutOutput(float[,,,] input, bool inference, float keepProb, SeededRandom? random, out float[,,,]? mask)

Parameters

input float[,,,]
inference bool
keepProb float
random SeededRandom
mask float[,,,]

Returns

float[,,,]

DropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)

float[,] DropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)

Parameters

input float[,]
inference bool
keepProb float
random SeededRandom
mask float[,]

Returns

float[,]

Flatten(float[,,,])

float[,] Flatten(float[,,,] source)

Parameters

source float[,,,]

Returns

float[,]

InvertedDropoutInputGradient(float[,], float[,], float)

float[,] InvertedDropoutInputGradient(float[,] outputGradient, float[,] mask, float keepProb)

Parameters

outputGradient float[,]
mask float[,]
keepProb float

Returns

float[,]

InvertedDropoutOutput(float[,], bool, float, SeededRandom?, out float[,]?)

float[,] InvertedDropoutOutput(float[,] input, bool inference, float keepProb, SeededRandom? random, out float[,]? mask)

Parameters

input float[,]
inference bool
keepProb float
random SeededRandom
mask float[,]

Returns

float[,]

LeakyReLUInputGradient(float[,,,], float[,,,], float, float)

float[,,,] LeakyReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float alfa, float beta)

Parameters

outputGradient float[,,,]
input float[,,,]
alfa float
beta float

Returns

float[,,,]

LeakyReLUInputGradient(float[,], float[,], float, float)

float[,] LeakyReLUInputGradient(float[,] outputGradient, float[,] input, float alfa, float beta)

Parameters

outputGradient float[,]
input float[,]
alfa float
beta float

Returns

float[,]

LeakyReLUOutput(float[,,,], float, float)

float[,,,] LeakyReLUOutput(float[,,,] input, float alpha = 0.01, float beta = 1)

Parameters

input float[,,,]
alpha float
beta float

Returns

float[,,,]

LeakyReLUOutput(float[,], float, float)

float[,] LeakyReLUOutput(float[,] input, float alpha = 0.01, float beta = 1)

Parameters

input float[,]
alpha float
beta float

Returns

float[,]

ReLUInputGradient(float[,,,], float[,,,], float)

float[,,,] ReLUInputGradient(float[,,,] outputGradient, float[,,,] input, float beta)

Parameters

outputGradient float[,,,]
input float[,,,]
beta float

Returns

float[,,,]

ReLUInputGradient(float[,], float[,], float)

float[,] ReLUInputGradient(float[,] outputGradient, float[,] input, float beta)

Parameters

outputGradient float[,]
input float[,]
beta float

Returns

float[,]

ReLUOutput(float[,,,], float)

float[,,,] ReLUOutput(float[,,,] input, float beta = 1)

Parameters

input float[,,,]
beta float

Returns

float[,,,]

ReLUOutput(float[,], float)

Applies the rectified linear unit (ReLU) activation function to each element of the specified 2D array.

float[,] ReLUOutput(float[,] input, float beta = 1)

Parameters

input float[,]

The two-dimensional array of single-precision floating-point values to which the ReLU function is applied.

beta float

An optional scaling factor applied to non-negative values. The default is 1.0.

Returns

float[,]

A new two-dimensional array where each element is the result of applying the ReLU function to the corresponding element in the input array.

Remarks

The ReLU function sets all negative values to zero and multiplies non-negative values by the specified beta. The original array is not modified.

SigmoidInputGradient(float[,], float[,])

float[,] SigmoidInputGradient(float[,] outputGradient, float[,] output)

Parameters

outputGradient float[,]
output float[,]

Returns

float[,]

SigmoidOutput(float[,])

float[,] SigmoidOutput(float[,] input)

Parameters

input float[,]

Returns

float[,]

SoftplusInputGradient(float[,], float[,])

float[,] SoftplusInputGradient(float[,] outputGradient, float[,] output)

Parameters

outputGradient float[,]
output float[,]

Returns

float[,]

SoftplusOutput(float[,])

float[,] SoftplusOutput(float[,] input)

Parameters

input float[,]

Returns

float[,]

TanhInputGradient(float[,,,], float[,,,])

Calculates the gradient of the loss with respect to the input of the Tanh activation function.

float[,,,] TanhInputGradient(float[,,,] outputGradient, float[,,,] output)

Parameters

outputGradient float[,,,]

The gradient of the loss with respect to the output of the Tanh function (dL/dy).

output float[,,,]

The output of the Tanh function (tanh(x)).

Returns

float[,,,]

The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.

Remarks

Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).

The derivative of the Tanh function tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) is 1 - tanh(x)^2. Therefore, the input gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
  • tanh(x) => output
  • dL/dy => outputGradient
  • dL/dx => inputGradient

TanhInputGradient(float[,], float[,])

Calculates the gradient of the loss with respect to the input of the Tanh activation function.

float[,] TanhInputGradient(float[,] outputGradient, float[,] output)

Parameters

outputGradient float[,]

The gradient of the loss with respect to the output of the Tanh function (dL/dy).

output float[,]

The output of the Tanh function (tanh(x)).

Returns

float[,]

The gradient of the loss with respect to the input of the Tanh function (dL/dx), having the same shape as outputGradient.

Remarks

Given the output gradient (dL/dy), the function calculates the input gradient (dL/dx).

The derivative of the Tanh function tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x)) is 1 - tanh(x)^2. Therefore, the input gradient is computed as: dL/dx = dL/dy * (1 - tanh(x)^2) = dL/dy * (1 - output^2).
  • tanh(x) => output
  • dL/dy => outputGradient
  • dL/dx => inputGradient

TanhOutput(float[,,,])

float[,,,] TanhOutput(float[,,,] source)

Parameters

source float[,,,]

Returns

float[,,,]

TanhOutput(float[,])

float[,] TanhOutput(float[,] source)

Parameters

source float[,]

Returns

float[,]

Unflatten(float[,], float[,,,])

float[,,,] Unflatten(float[,] source, float[,,,] targetSize)

Parameters

source float[,]
targetSize float[,,,]

Returns

float[,,,]

WeightMultiplyInputGradient(float[,], float[,])

float[,] WeightMultiplyInputGradient(float[,] outputGradient, float[,] weights)

Parameters

outputGradient float[,]
weights float[,]

Returns

float[,]

WeightMultiplyOutput(float[,], float[,])

float[,] WeightMultiplyOutput(float[,] input, float[,] weights)

Parameters

input float[,]
weights float[,]

Returns

float[,]

WeightMultiplyParamGradient(float[,], float[,])

float[,] WeightMultiplyParamGradient(float[,] input, float[,] outputGradient)

Parameters

input float[,]
outputGradient float[,]

Returns

float[,]