Library

Modelling

# Turing.@model — Macro.

@model(name, fbody)

Macro to specify a probabilistic model.

Example:

@model Gaussian(x) = begin
    s ~ InverseGamma(2,3)
    m ~ Normal(0,sqrt.(s))
    for i in 1:length(x)
        x[i] ~ Normal(m, sqrt.(s))
    end
    return (s, m)
end

Compiler design: sample(fname(x,y), sampler).

fname(x=nothing,y=nothing; compiler=compiler) = begin
    ex = quote
        # Pour in kwargs for those args where value != nothing.
        fname_model(vi::VarInfo, sampler::Sampler; x = x, y = y) = begin
            vi.logp = zero(Real)
          
            # Pour in model definition.
            x ~ Normal(0,1)
            y ~ Normal(x, 1)
            return x, y
        end
    end
    return Main.eval(ex)
end

source

# Turing.@~ — Macro.

macro: @~ var Distribution()

Tilde notation macro. This macro constructs Turing.observe or Turing.assume calls depending on the left-hand argument. Note that the macro is interconnected with the @model macro and assumes that a compiler struct is available.

Example:

@~ x Normal()

source

Samplers

# Turing.Sampler — Type.

Sampler{T}

Generic interface for implementing inference algorithms. An implementation of an algorithm should include the following:

  1. A type specifying the algorithm and its parameters, derived from InferenceAlgorithm
  2. A method of sample function that produces results of inference, which is where actual inference happens.

Turing translates models to chunks that call the modelling functions at specified points. The dispatch is based on the value of a sampler variable. To include a new inference algorithm, implement the requirements mentioned above in a separate file, then include that file at the end of this one.

source

# Turing.Gibbs — Type.

Gibbs(n_iters, alg_1, alg_2)

Compositional MCMC interface.

Example:

alg = Gibbs(1000, HMC(1, 0.2, 3, :v1), PG(20, 1, :v2))

source

# Turing.HMC — Type.

HMC(n_iters::Int, epsilon::Float64, tau::Int)

Hamiltonian Monte Carlo sampler.

Usage:

HMC(1000, 0.05, 10)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
    s ~ InverseGamma(2,3)
    m ~ Normal(0, sqrt(s))
    x[1] ~ Normal(m, sqrt(s))
    x[2] ~ Normal(m, sqrt(s))
    return s, m
end

sample(gdemo([1.5, 2]), HMC(1000, 0.05, 10))

source

# Turing.HMCDA — Type.

HMCDA(n_iters::Int, n_adapts::Int, delta::Float64, lambda::Float64)

Hamiltonian Monte Carlo sampler with Dual Averaging algorithm.

Usage:

HMCDA(1000, 200, 0.65, 0.3)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
  s ~ InverseGamma(2,3)
  m ~ Normal(0, sqrt(s))
  x[1] ~ Normal(m, sqrt(s))
  x[2] ~ Normal(m, sqrt(s))
  return s, m
end

sample(gdemo([1.5, 2]), HMCDA(1000, 200, 0.65, 0.3))

source

# Turing.IPMCMC — Type.

IPMCMC(n_particles::Int, n_iters::Int, n_nodes::Int, n_csmc_nodes::Int)

Interacting particle Markov chain Monte Carlo (IPMCMC) sampler.

Usage:

IPMCMC(100, 100, 4, 2)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
  s ~ InverseGamma(2,3)
  m ~ Normal(0,sqrt(s))
  x[1] ~ Normal(m, sqrt(s))
  x[2] ~ Normal(m, sqrt(s))
  return s, m
end

sample(gdemo([1.5, 2]), IPMCMC(100, 100, 4, 2))

source

# Turing.IS — Type.

IS(n_particles::Int)

Importance sampling algorithm object.

  • n_particles is the number of particles to use

Usage:

IS(1000)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
    s ~ InverseGamma(2,3)
    m ~ Normal(0,sqrt.(s))
    x[1] ~ Normal(m, sqrt.(s))
    x[2] ~ Normal(m, sqrt.(s))
    return s, m
end

sample(gdemo([1.5, 2]), IS(1000))

source

# Turing.MH — Type.

MH(n_iters::Int)

Metropolis-Hastings sampler.

Usage:

MH(100, (:m, (x) -> Normal(x, 0.1)))

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
  s ~ InverseGamma(2,3)
  m ~ Normal(0,sqrt(s))
  x[1] ~ Normal(m, sqrt(s))
  x[2] ~ Normal(m, sqrt(s))
  return s, m
end

sample(gdemo([1.5, 2]), MH(1000, (:m, (x) -> Normal(x, 0.1)), :s))

source

# Turing.NUTS — Type.

NUTS(n_iters::Int, n_adapts::Int, delta::Float64)

No-U-Turn Sampler (NUTS) sampler.

Usage:

NUTS(1000, 200, 0.65)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
  s ~ InverseGamma(2,3)
  m ~ Normal(0, sqrt(s))
  x[1] ~ Normal(m, sqrt(s))
  x[2] ~ Normal(m, sqrt(s))
  return s, m
end

sample(gdemo([1.5, 2]), NUTS(1000, 200, 0.65))

source

# Turing.PG — Type.

PG(n_particles::Int, n_iters::Int)

Particle Gibbs sampler.

Usage:

PG(100, 100)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
  s ~ InverseGamma(2,3)
  m ~ Normal(0, sqrt(s))
  x[1] ~ Normal(m, sqrt(s))
  x[2] ~ Normal(m, sqrt(s))
  return s, m
end

sample(gdemo([1.5, 2]), PG(100, 100))

source

# Turing.PMMH — Type.

PMMH(n_iters::Int, smc_alg::SMC, parameters_algs::Tuple{MH})

Particle independent Metropolis–Hastings and Particle marginal Metropolis–Hastings samplers.

Usage:

alg = PMMH(100, SMC(20, :v1), MH(1,:v2))
alg = PMMH(100, SMC(20, :v1), MH(1,(:v2, (x) -> Normal(x, 1))))

source

# Turing.SGHMC — Type.

SGHMC(n_iters::Int, learning_rate::Float64, momentum_decay::Float64)

Stochastic Gradient Hamiltonian Monte Carlo sampler.

Usage:

SGHMC(1000, 0.01, 0.1)

Example:

@model example begin
  ...
end

sample(example, SGHMC(1000, 0.01, 0.1))

source

# Turing.SGLD — Type.

SGLD(n_iters::Int, epsilon::Float64)

Stochastic Gradient Langevin Dynamics sampler.

Usage:

SGLD(1000, 0.5)

Example:

@model example begin
  ...
end

sample(example, SGLD(1000, 0.5))

source

# Turing.SMC — Type.

SMC(n_particles::Int)

Sequential Monte Carlo sampler.

Usage:

SMC(1000)

Example:

# Define a simple Normal model with unknown mean and variance.
@model gdemo(x) = begin
  s ~ InverseGamma(2,3)
  m ~ Normal(0, sqrt(s))
  x[1] ~ Normal(m, sqrt(s))
  x[2] ~ Normal(m, sqrt(s))
  return s, m
end

sample(gdemo([1.5, 2]), SMC(1000))

source

Data Structures

# Libtask.TArray — Type.

TArray{T}(dims, ...)

Implementation of data structures that automatically perform copy-on-write after task copying.

If `current_task` is an existing key in `s`, then return `s[current_task]`. Otherwise, return `s[current_task] = s[last_task]`.

Usage:

TArray(dim)

Example:

ta = TArray(4)              # init
for i in 1:4 ta[i] = i end  # assign
Array(ta)                   # convert to 4-element Array{Int64,1}: [1, 2, 3, 4]

Utilities

# Libtask.tzeros — Function.

 tzeros(dims, ...)

Construct a TArray of zeros. Trailing arguments are the same as those accepted by TArray.

tzeros(dim)

Example:

tz = tzeros(4)              # construct
Array(tz)                   # convert to 4-element Array{Int64,1}: [0, 0, 0, 0]

Index