
Autoencoder

machine learning · neural networks · cetz · tikz

Autoencoder architecture with a three-neuron latent bottleneck. Made with the neuralnetwork LaTeX package (https://github.com/battlesnake/neural).
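
The drawing only specifies the layer layout: 8 inputs, a 5-unit hidden layer, a 3-unit latent code, a 5-unit hidden layer, and 8 outputs. As a rough point of reference, a minimal PyTorch sketch of that same 8-5-3-5-8 layout could look like the following; the layer sizes come from the figure, while the ReLU activations and the MSE reconstruction loss are illustrative assumptions, not something the diagram prescribes.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Encoder compresses x (8 dims) down to the 3-dim latent code z.
encoder = nn.Sequential(nn.Linear(8, 5), nn.ReLU(), nn.Linear(5, 3))
# Decoder reconstructs x-hat (8 dims) from z.
decoder = nn.Sequential(nn.Linear(3, 5), nn.ReLU(), nn.Linear(5, 8))

x = torch.randn(4, 8)         # a batch of 4 example inputs
x_hat = decoder(encoder(x))   # reconstruction
loss = F.mse_loss(x_hat, x)   # reconstruction error to minimize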





  LaTeX

autoencoder.tex (29 lines)

\documentclass[tikz]{standalone}

\usepackage{neuralnetwork}

\newcommand{\xin}[2]{$x_#2$}
\newcommand{\xout}[2]{$\hat x_#2$}

\begin{document}
\begin{neuralnetwork}[height=8]
  \tikzstyle{input neuron}=[neuron, fill=orange!70];
  \tikzstyle{output neuron}=[neuron, fill=blue!60!black, text=white];

  \inputlayer[count=8, bias=false, title=Input Layer, text=\xin]

  \hiddenlayer[count=5, bias=false]
  \linklayers

  \hiddenlayer[count=3, bias=false, title=Latent\\Representation]
  \linklayers

  \hiddenlayer[count=5, bias=false]
  \linklayers

  \outputlayer[count=8, title=Output Layer, text=\xout]
  \linklayers

\end{neuralnetwork}
\end{document}
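
To build the figure locally, one possible sequence (assuming a TeX distribution that ships the neuralnetwork package, plus poppler's pdftocairo for the SVG/PNG conversions) is:

pdflatex autoencoder.tex
pdftocairo -svg autoencoder.pdf autoencoder.svg
pdftocairo -png -singlefile -r 300 autoencoder.pdf autoencoder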

  Typst

autoencoder.typ (61 lines)

#import "@preview/cetz:0.3.4": canvas, draw
#import draw: line, content

#set page(width: auto, height: auto, margin: 8pt)

#let neuron(pos, fill: white, text: none) = {
  draw.content(pos, text, frame: "circle", fill: fill, stroke: 0.5pt, padding: 1pt)
}

#let connect-layers(start-pos, start-count, end-pos, end-count) = {
  let start-y = start-count / 2 * 0.8
  let end-y = end-count / 2 * 0.8

  for ii in range(start-count) {
    for jj in range(end-count) {
      let start = (start-pos, start-y - ii * 0.8)
      let end = (end-pos, end-y - jj * 0.8)
      draw.line(start, end, stroke: rgb("#aaa") + .5pt)
    }
  }
}

#canvas({
  // Define layer configurations
  let layers = (
    // (x-pos, neuron-count, fill-color, label-prefix, label-superscript, y-offset)
    (0, 8, rgb("#f6db71"), "x", none, 3.2), // Input layer
    (2, 5, rgb("#eee"), "h", "1", 2), // First hidden layer
    (4, 3, rgb("#eee"), "z", none, 1.2), // Latent layer
    (6, 5, rgb("#eee"), "h", "2", 2), // Second hidden layer
    (8, 8, rgb("#cecef9"), "hat(x)", none, 3.2), // Output layer
  )

  // Draw connections first (so they appear behind nodes)
  for idx in range(layers.len() - 1) {
    let (x1, n1, ..) = layers.at(idx)
    let (x2, n2, ..) = layers.at(idx + 1)
    connect-layers(x1, n1, x2, n2)
  }

  // Layer labels
  content((layers.at(0).at(0), 4), align(center)[Input Layer])
  content((layers.at(2).at(0), 2.2), align(center)[Latent\ Representation])
  content((layers.at(-1).at(0), 4), align(center)[Output Layer])

  // Draw all layers
  for (x, count, fill, prefix, sup, y-offset) in layers {
    for idx in range(count) {
      let y-pos = y-offset - idx * 0.8
      let label = if sup != none {
        $prefix^sup_idx$
      } else if prefix == "hat(x)" {
        $hat(x)_idx$
      } else {
        $prefix_idx$
      }
      neuron((x, y-pos), fill: fill, text: label)
    }
  }
})
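
The Typst version renders with the typst CLI, where the output format follows from the file extension (assuming a release recent enough to export SVG; the --ppi flag only affects PNG export):

typst compile autoencoder.typ autoencoder.pdf
typst compile autoencoder.typ autoencoder.svg
typst compile --ppi 300 autoencoder.typ autoencoder.png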