
layer_dot() computes the element-wise dot product of two tensors. It takes a list of inputs of size 2, and the axes corresponding to each input along which the dot product is to be performed.

Let's say x and y are the two input tensors with shapes (2, 3, 5) and (2, 10, 3). The batch dimension should be the same size for both inputs, and axes should correspond to the dimensions that have the same size in the corresponding inputs. For example, with axes = c(2, 3) (the size-3 axes of x and y), the dot product of x and y will result in a tensor with shape (2, 5, 10).
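
A minimal sketch of that shape calculation (assuming the keras3 package is attached; random_normal() is used here only to create inputs of the right shapes):

x <- random_normal(c(2, 3, 5))
y <- random_normal(c(2, 10, 3))
# contract the size-3 axes: axis 2 of x, axis 3 of y
z <- layer_dot(x, y, axes = c(2, 3))
shape(z)

## shape(2, 5, 10)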

Usage

layer_dot(inputs, ..., axes, normalize = FALSE)

Arguments

inputs

Layers to combine.

...

Standard layer keyword arguments.

axes

Integer or list of integers, axis or axes along which to take the dot product. If a list, should be two integers corresponding to the desired axis from the first input and the desired axis from the second input, respectively. Note that the size of the two selected axes must match.

normalize

Whether to L2-normalize samples along the dot product axis before taking the dot product. If set to TRUE, then the output of the dot product is the cosine proximity between the two samples (see the cosine-similarity sketch in the Examples section below).

Value

A tensor, the dot product of the samples from the inputs.

Examples

x <- op_reshape(0:9,   c(1, 5, 2))
y <- op_reshape(10:19, c(1, 2, 5))
layer_dot(x, y, axes=c(2, 3))

## tf.Tensor(
## [[[260 360]
##   [320 445]]], shape=(1, 2, 2), dtype=int32)
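
With normalize = TRUE, the inputs are L2-normalized along the dot product axis first, so each output entry is a cosine similarity. A small sketch (the input values are illustrative; the exact printed form of the result depends on the backend):

a <- op_reshape(c(1, 0, 0, 1), c(2, 2))
b <- op_reshape(c(1, 1, 0, 2), c(2, 2))
layer_dot(a, b, axes = 2, normalize = TRUE)

## a tensor of shape (2, 1): the cosine similarity of the matching rows,
## approximately 0.7071 and 1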

Usage in a Keras model:

x1 <- op_reshape(0:9, c(5, 2)) |> layer_dense(8)
x2 <- op_reshape(10:19, c(5, 2)) |> layer_dense(8)
shape(x1)

## shape(5, 8)

shape(x2)

## shape(5, 8)

y <- layer_dot(x1, x2, axes=2)
shape(y)

## shape(5, 1)
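
The same pattern applies to symbolic inputs in a functional model. A minimal sketch (the layer sizes are arbitrary, chosen only for illustration):

input1 <- layer_input(shape = c(16))
input2 <- layer_input(shape = c(16))
x1 <- input1 |> layer_dense(8)
x2 <- input2 |> layer_dense(8)
# dot product over the feature axis of the two 8-unit projections
dotted <- layer_dot(x1, x2, axes = 2)
output <- dotted |> layer_dense(1)
model <- keras_model(inputs = list(input1, input2), outputs = output)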

See also

Other merging layers:
layer_add()
layer_average()
layer_concatenate()
layer_maximum()
layer_minimum()
layer_multiply()
layer_subtract()

Other layers:
Layer()
layer_activation()
layer_activation_elu()
layer_activation_leaky_relu()
layer_activation_parametric_relu()
layer_activation_relu()
layer_activation_softmax()
layer_activity_regularization()
layer_add()
layer_additive_attention()
layer_alpha_dropout()
layer_attention()
layer_average()
layer_average_pooling_1d()
layer_average_pooling_2d()
layer_average_pooling_3d()
layer_batch_normalization()
layer_bidirectional()
layer_category_encoding()
layer_center_crop()
layer_concatenate()
layer_conv_1d()
layer_conv_1d_transpose()
layer_conv_2d()
layer_conv_2d_transpose()
layer_conv_3d()
layer_conv_3d_transpose()
layer_conv_lstm_1d()
layer_conv_lstm_2d()
layer_conv_lstm_3d()
layer_cropping_1d()
layer_cropping_2d()
layer_cropping_3d()
layer_dense()
layer_depthwise_conv_1d()
layer_depthwise_conv_2d()
layer_discretization()
layer_dropout()
layer_einsum_dense()
layer_embedding()
layer_feature_space()
layer_flatten()
layer_flax_module_wrapper()
layer_gaussian_dropout()
layer_gaussian_noise()
layer_global_average_pooling_1d()
layer_global_average_pooling_2d()
layer_global_average_pooling_3d()
layer_global_max_pooling_1d()
layer_global_max_pooling_2d()
layer_global_max_pooling_3d()
layer_group_normalization()
layer_group_query_attention()
layer_gru()
layer_hashed_crossing()
layer_hashing()
layer_identity()
layer_integer_lookup()
layer_jax_model_wrapper()
layer_lambda()
layer_layer_normalization()
layer_lstm()
layer_masking()
layer_max_pooling_1d()
layer_max_pooling_2d()
layer_max_pooling_3d()
layer_maximum()
layer_mel_spectrogram()
layer_minimum()
layer_multi_head_attention()
layer_multiply()
layer_normalization()
layer_permute()
layer_random_brightness()
layer_random_contrast()
layer_random_crop()
layer_random_flip()
layer_random_rotation()
layer_random_translation()
layer_random_zoom()
layer_repeat_vector()
layer_rescaling()
layer_reshape()
layer_resizing()
layer_rnn()
layer_separable_conv_1d()
layer_separable_conv_2d()
layer_simple_rnn()
layer_spatial_dropout_1d()
layer_spatial_dropout_2d()
layer_spatial_dropout_3d()
layer_spectral_normalization()
layer_string_lookup()
layer_subtract()
layer_text_vectorization()
layer_tfsm()
layer_time_distributed()
layer_torch_module_wrapper()
layer_unit_normalization()
layer_upsampling_1d()
layer_upsampling_2d()
layer_upsampling_3d()
layer_zero_padding_1d()
layer_zero_padding_2d()
layer_zero_padding_3d()
rnn_cell_gru()
rnn_cell_lstm()
rnn_cell_simple()
rnn_cells_stack()