module emote.nn.layers

Classes

class Conv2dEncoder(nn.Module):

Multi-layer 2D convolutional encoder.

Methods

def __init__(
    self,
    input_shape,
    channels,
    kernels,
    strides,
    padding,
    channels_last,
    activation,
    flatten
) -> None

Arguments:

  • input_shape(tuple[int, int, int]): (tuple[int, int, int]) The input image shape; this should be consistent with channels_last.
  • channels(list[int]): (list[int]) The number of channels for each conv layer.
  • kernels(list[int]): (list[int]) The kernel size for each conv layer.
  • strides(list[int]): (list[int]) The strides for each conv layer.
  • padding(list[int]): (list[int]) The padding for each conv layer.
  • channels_last(bool): (bool) Whether the input image has channels as the last dim, else first. (default: True)
  • activation(torch.nn.Module): (torch.nn.Module) The activation function.
  • flatten(bool): (bool) Flattens the output into a vector. (default: True)
def forward(self, obs) -> None
def get_encoder_output_size(self) -> None

class Conv1dEncoder(nn.Module):

Multi-layer 1D convolutional encoder.

Methods

def __init__(
    self,
    input_shape,
    channels,
    kernels,
    strides,
    padding,
    activation,
    flatten,
    name,
    channels_last
) -> None

Arguments:

  • input_shape(tuple[int, int]): (tuple[int, int]) The input shape.
  • channels(list[int]): (list[int]) The number of channels for each conv layer.
  • kernels(list[int]): (list[int]) The kernel size for each conv layer.
  • strides(list[int]): (list[int]) The strides for each conv layer.
  • padding(list[int]): (list[int]) The padding.
  • activation(torch.nn.Module): (torch.nn.Module) The activation function.
  • flatten(bool): (bool) Flattens the output into a vector. (default: True)
  • name(str): (str) Name of the encoder. (default: "conv1d")
  • channels_last(bool): (bool) Whether the input has channels as the last dim, else first. (default: True)
def forward(self, obs) -> None
def get_encoder_output_size(self) -> None