
Commit

first code commit
ngiann committed Aug 15, 2024
1 parent 81ace6d commit bf15993
Showing 43 changed files with 2,086 additions and 1 deletion.
1 change: 1 addition & 0 deletions .vscode/settings.json
@@ -0,0 +1 @@
{}
20 changes: 20 additions & 0 deletions Project.toml
@@ -3,6 +3,26 @@ uuid = "dfcf6cff-d197-40c3-bbe5-65491765b9c2"
authors = ["Nikos Gianniotis <[email protected]> and contributors"]
version = "1.0.0-DEV"

[deps]
Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
ExponentialExpectations = "514699ec-cad0-4573-b799-e90939df45ca"
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
ForwardNeuralNetworks = "41bbd1a4-0eca-4bcd-b6b1-8656d032eaeb"
GPLVMplusData = "4c3045de-0da2-483a-8ead-ab5165d83806"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LogExpFunctions = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
OptimizationBBO = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b"
OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Transducers = "28d57a85-8fef-5791-bfe6-a80928e7c999"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
julia = "1.6.7"

20 changes: 20 additions & 0 deletions README.md
@@ -1,3 +1,23 @@
# GPLVMplus

[![Build Status](https://github.com/ngiann/GPLVMplus.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/ngiann/GPLVMplus.jl/actions/workflows/CI.yml?query=branch%3Amain)


```julia
using GPLVMplus
using GPLVMplusData # must be independently installed

X = GPLVMplusData.loadducks(;every=4); # load rubber duck images in 32x32 resolution

# warmup: run a single iteration to trigger compilation
let
    GPLVMplus.gplvmplus(X; Q = 2, iterations = 1)
end
# Learn mapping from Q=2 latent dimensions to high-dimensional images.
# Use a two-hidden layer neural network for amortised inference.
result = GPLVMplus.gplvmplus(X; Q = 2, H1 = 20, H2 = 20, iterations = 5000);
# Plot latent coordinates
using PyPlot # must be independently installed
plot(result[:Z][1,:],result[:Z][2,:],"o")
```
78 changes: 78 additions & 0 deletions src/GPLVM.jl
@@ -0,0 +1,78 @@
module GPLVM

using LinearAlgebra, Distributions, Random, Statistics, Distances, LogExpFunctions
# using Optim, BlackBoxOptim, Zygote
using ExponentialExpectations, FillArrays
using ForwardNeuralNetworks
using Printf, PyPlot
using JLD2
using Artifacts, LazyArtifacts
using Transducers
using Optimization, OptimizationOptimJL, OptimizationBBO, Zygote#, LineSearches

# common
include("common/covariance.jl" )
include("common/woodbury.jl")
include("common/rbf.jl")
include("common/myskip.jl")
include("common/inverterrors.jl")
include("common/toydata.jl")
include("common/expectation_latent_function_values.jl")
include("common/expectation_of_sum_D_log_prior_zero_mean.jl")
include("common/entropy.jl")
include("common/getfg!.jl")
include("common/callback.jl")

export toydata


# standard GPLVM
include("gplvm/gplvm.jl")
include("gplvm/predictgplvm.jl")
include("gplvm/unpackgplvm.jl")
include("gplvm/inferlatentgplvm.jl")

export gplvm, inferlatentgplvm

# variational GPLVM with auxiliary network modelling variational parameters
include("gplvmvar/gplvmvar.jl")
include("gplvmvar/marginallikelihood_gplvmvar.jl")
include("gplvmvar/marginallikelihood_VERIFY_gplvmvar.jl")
include("gplvmvar/predictgplvmvar.jl")
include("gplvmvar/numerically_VERIFY_gplvmvar.jl")
include("gplvmvar/unpack.jl")
include("gplvmvar/inferlatentgplvmvar.jl")
include("gplvmvar/inferlatentgplvmvar_photo.jl")
include("gplvmvar/unpack_inferlatent_gplvmvar.jl")

export predictgplvmvar, inferlatentgplvmvar, inferlatentgplvmvar_photo

# warped GPLVM
include("warpedgplvm/warpedgplvm.jl")
include("warpedgplvm/predictwarpedgplvm.jl")
include("warpedgplvm/inferlatentwarpedgplvm.jl")

export warpedgplvm, predictwarpedgplvm, inferlatentwarpedgplvm

# GPLVM₊
include("gplvmplus/gplvmplus.jl")
include("gplvmplus/predictivesampler.jl")
include("gplvmplus/marginallikelihood.jl")
include("gplvmplus/marginallikelihood_VERIFY.jl")
include("gplvmplus/infertestlatent.jl")
include("gplvmplus/inferlightcurve.jl")
include("gplvmplus/infertestlatent_photo.jl")
include("gplvmplus/numerically_VERIFY.jl")
include("gplvmplus/unpack_gplvmplus.jl")
include("gplvmplus/unpack_inferlatent_gplvmplus.jl")
include("gplvmplus/partial_objective.jl")

# saved model for GPLVM₊
include("loadbossmodel.jl")


export gplvmplus, gplvmvar
export inferlatent, infertestlatent_photo, inferlightcurve, predictivesampler, predictgplvm

export loadbossmodel, loadbossmodel_gplvm
end
35 changes: 34 additions & 1 deletion src/GPLVMplus.jl
@@ -1,5 +1,38 @@
module GPLVMplus

# Write your package code here.
using LinearAlgebra, Distributions, Random, Statistics, Distances, LogExpFunctions
using ExponentialExpectations, FillArrays
using ForwardNeuralNetworks
using Printf, PyPlot
using JLD2
# using Artifacts, LazyArtifacts
using Transducers
using Optimization, OptimizationOptimJL, OptimizationBBO, Zygote#, LineSearches

# common
include("common/covariance.jl" )
include("common/woodbury.jl")
# include("common/rbf.jl")
include("common/myskip.jl")
include("common/inverterrors.jl")
include("common/toydata.jl")
include("common/expectation_latent_function_values.jl")
include("common/expectation_of_sum_D_log_prior_zero_mean.jl")
include("common/entropy.jl")
include("common/getfg!.jl")
include("common/callback.jl")

# GPLVM₊
include("gplvmplus/gplvmplus.jl")
include("gplvmplus/predictivesampler.jl")
include("gplvmplus/marginallikelihood.jl")
include("gplvmplus/marginallikelihood_VERIFY.jl")
include("gplvmplus/infertestlatent.jl")
include("gplvmplus/inferlightcurve.jl")
include("gplvmplus/infertestlatent_photo.jl")
include("gplvmplus/numerically_VERIFY.jl")
include("gplvmplus/unpack_gplvmplus.jl")
include("gplvmplus/unpack_inferlatent_gplvmplus.jl")
include("gplvmplus/partial_objective.jl")

end
9 changes: 9 additions & 0 deletions src/common/callback.jl
@@ -0,0 +1,9 @@
function callback(state, l)

# callback function to observe training

@printf("Iter %d, fitness = %4.6f\n", state.iter, l)

return false

end
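The signature above matches the `(state, loss)` callback convention of Optimization.jl, where returning `false` lets the solver continue. A minimal usage sketch, assuming a recent Optimization.jl; the objective and problem setup below are illustrative, not from this commit:

```julia
using Optimization, OptimizationOptimJL, Zygote

# illustrative objective (hypothetical, not part of the package)
rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2

f = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
prob = OptimizationProblem(f, zeros(2))

# `callback` prints progress each iteration and returns false, so the solve runs to completion
sol = solve(prob, LBFGS(); callback = callback, maxiters = 100)
```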
5 changes: 5 additions & 0 deletions src/common/covariance.jl
@@ -0,0 +1,5 @@
function covariance(D², θ)

θ[1] .* exp.(-D² .* θ[2])

end
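A short usage sketch (mine, not from the commit): `covariance` maps a matrix of squared distances `D²` and kernel parameters `θ` to an exponentiated-quadratic kernel matrix, with `θ[1]` acting as an amplitude and `θ[2]` as an inverse squared length-scale.

```julia
using Distances, LinearAlgebra

Z  = randn(2, 50)                           # 50 latent points in 2 dimensions (illustrative)
D² = pairwise(SqEuclidean(), Z, dims = 2)   # 50×50 matrix of squared Euclidean distances
θ  = [1.0, 0.1]                             # amplitude and inverse squared length-scale
K  = covariance(D², θ)                      # 50×50 kernel matrix
```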
7 changes: 7 additions & 0 deletions src/common/entropy.jl
@@ -0,0 +1,7 @@
function entropy(Σ)

N = size(Σ, 1)

0.5*N*log(2*π*ℯ) + 0.5*logabsdet(Σ)[1]

end
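This is the differential entropy of an N-dimensional Gaussian with covariance Σ. A quick sanity check (my sketch, assuming Σ is positive definite):

```julia
using LinearAlgebra

A = randn(4, 4); Σ = Symmetric(A*A' + 4I)   # random positive-definite covariance (illustrative)

# the closed form 0.5*logdet(2πeΣ) should match the expression above
entropy(Σ) ≈ 0.5 * logdet(2π * ℯ * Σ)   # expected to hold
```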
11 changes: 11 additions & 0 deletions src/common/expectation_latent_function_values.jl
@@ -0,0 +1,11 @@
function expectation_latent_function_values(;α = α, b = b, μ = μ, Σ = Σ)

E = exp.(α*μ .+ α^2*diag(Σ)' / 2 .+ b)

B = exp.(2*α*μ .+ (2*α)^2*diag(Σ)' / 2 .+ 2b)

V = B .- E.^2 # this is V[X] = E[X²] - (E[X])² # There may be a computational gain to be had here

return E, V

end
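These are log-normal moments: for Gaussian f with mean μ and variance σ², E[exp(αf + b)] = exp(αμ + α²σ²/2 + b), and the second moment follows with α replaced by 2α. A Monte Carlo check on a single scalar entry (illustrative values, not from the commit):

```julia
using Statistics, Random

rng = MersenneTwister(1)
α, b, m, s² = 0.7, 0.2, 1.3, 0.5

# draw f ~ N(m, s²) and transform
samples = exp.(α .* (m .+ sqrt(s²) .* randn(rng, 10^6)) .+ b)

isapprox(mean(samples), exp(α*m + α^2*s²/2 + b); rtol = 1e-2)   # matches E above
isapprox(var(samples), exp(2α*m + 4α^2*s²/2 + 2b) - exp(α*m + α^2*s²/2 + b)^2; rtol = 1e-1)   # matches V = B - E²
```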
15 changes: 15 additions & 0 deletions src/common/expectation_of_sum_D_log_prior_zero_mean.jl
@@ -0,0 +1,15 @@
function expectation_of_sum_D_log_prior_zero_mean(;K = K, μ = μ, Σ = Σ)

N = size(Σ, 1); @assert(size(Σ, 2) == N) # make sure it is square
@assert(size(Σ) == size(K))
D = size(μ, 1); @assert(size(μ, 2) == N)

U = cholesky(K).L

# tr(U'\(U\Σ)) is equivalent to tr(K\Σ)
# - sum(log.(diag(U))) is equivalent to -0.5*logdet(K)
# sum(abs2.(U\μ')) is equivalent to tr(μ*(K\μ'))

- 0.5*sum(abs2.(U\μ')) - 0.5*D*N*log(2π) - D*sum(log.(diag(U))) - 0.5*D*tr(U'\(U\Σ))

end
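The expression is E_q[Σ_d log N(f_d; 0, K)] for D independent function rows, each with posterior mean μ[d, :] and shared covariance Σ. A numerical check against the naive formula (my sketch; sizes and values are illustrative):

```julia
using LinearAlgebra

N, D = 5, 3
A = randn(N, N); K = Symmetric(A*A' + N*I)   # illustrative positive-definite kernel matrix
B = randn(N, N); Σ = Symmetric(B*B' + N*I)   # illustrative posterior covariance
μ = randn(D, N)

naive = sum(-0.5*N*log(2π) - 0.5*logdet(K) - 0.5*dot(μ[d, :], K \ μ[d, :]) for d in 1:D) -
        0.5*D*tr(K \ Σ)

naive ≈ expectation_of_sum_D_log_prior_zero_mean(K = K, μ = μ, Σ = Σ)   # expected to hold
```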
15 changes: 15 additions & 0 deletions src/common/getfg!.jl
@@ -0,0 +1,15 @@
function getfg!(objective)

function fg!(F, G, x)

value, ∇f = Zygote.withgradient(objective,x)

isnothing(G) || copyto!(G, ∇f[1])

isnothing(F) || return value

nothing

end

end
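The returned `fg!` follows Optim.jl's `(F, G, x)` convention: one `Zygote.withgradient` call serves both the value and the gradient, writing into `G` when requested. A usage sketch with Optim.jl (illustrative objective; assumes Optim.jl is available alongside the listed dependencies):

```julia
using Optim, Zygote

objective(x) = sum(abs2, x .- 3)   # illustrative objective, not from the package

fg! = getfg!(objective)

res = Optim.optimize(Optim.only_fg!(fg!), zeros(5), LBFGS())
Optim.minimizer(res)   # ≈ fill(3.0, 5)
```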
3 changes: 3 additions & 0 deletions src/common/inverterrors.jl
@@ -0,0 +1,3 @@
inverterrors(::Missing) = missing

inverterrors(𝛔) = 1.0 ./ 𝛔.^2
3 changes: 3 additions & 0 deletions src/common/myskip.jl
@@ -0,0 +1,3 @@
myskip(x::Missing) = 0.0 # maybe we should make this zero(eltype(T))

myskip(x) = x
31 changes: 31 additions & 0 deletions src/common/rbf.jl
@@ -0,0 +1,31 @@
struct RBF{T}
centres::T
M::Int64
end

function RBF(M)

RBF(collect(LinRange(-1.0, 1.0, M)), M)

end


distance(rbf::RBF, ζ) = pairwise(SqEuclidean(), reshape(ζ, 1, length(ζ)), reshape(rbf.centres, 1, rbf.M))

function (rbf::RBF)(ζ::Vector{T}, r) where T

D² = distance(rbf, ζ)

[exp.(-D² / (2*r*r)) ones(length(ζ))]

end

function (rbf::RBF)(ζ::Vector{T}, w, r) where T

# @assert(length(w) == rbf.M+1)

(rbf)(ζ, r)*w

end

numweights(rbf::RBF) = rbf.M + 1 # plus 1 for bias term
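A usage sketch (values are illustrative): the functor builds a design matrix of Gaussian bumps over `M` fixed centres plus a bias column, and the three-argument form applies weights to it.

```julia
rbf = RBF(10)                        # 10 centres on a uniform grid in [-1, 1]

ζ = collect(LinRange(-1.0, 1.0, 50)) # scalar latent inputs
Φ = rbf(ζ, 0.3)                      # 50×11 design matrix; last column is the bias
w = randn(numweights(rbf))           # 11 weights, including the bias weight

y = rbf(ζ, w, 0.3)                   # fitted values, i.e. Φ*w
```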
17 changes: 17 additions & 0 deletions src/common/toydata.jl
@@ -0,0 +1,17 @@
function toydata(;σ=0.01, N=101, seed = 1)

rg = MersenneTwister(seed)

θ = collect(LinRange(0,2π, N))[1:N-1]

X = Matrix(1*[sin.(θ) cos.(θ)]') .+ [3.0; 3.0]

A = rand(rg, 5, 2)

Y = A*X

S = ones(size(Y))*σ

return Y + randn(rg, size(Y)) .* S, S

end
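For reference, a quick look at what `toydata` returns (shapes follow from the code above): noisy 5-dimensional linear projections of N−1 points on a circle, plus the constant noise standard deviations.

```julia
Y, S = toydata(σ = 0.01, N = 101, seed = 1)

size(Y)   # (5, 100): five observed dimensions, N-1 points on the circle
size(S)   # (5, 100): per-entry noise standard deviations, all equal to σ
```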
58 changes: 58 additions & 0 deletions src/common/woodbury.jl
@@ -0,0 +1,58 @@
"""
Calculates (K⁻¹ + Λ)⁻¹ where Λ is diagonal and `Λroot.^2 == Λ`.
Serves as a mnemonic for the directly called `woodbury_327` method.
"""
aux_invert_K⁻¹_plus_Λ(;K = K, Λroot = Λroot) = woodbury_327(;K = K, W½ = Λroot)


"""
Calculates (K + Λ⁻¹)⁻¹ where Λ is diagonal and `Λroot.^2 == Λ`.
Serves as a mnemonic for the directly called `woodbury_328` method.
"""
aux_invert_K_plus_Λ⁻¹(; K = K, Λroot = Λroot) = woodbury_328(;K = K, W½ = Λroot)



"""
woodbury_327(;K = K, W½ = W½)
Calculates (K⁻¹ + W)⁻¹ where W is diagonal and `W½.^2 == W`.
The result should be equivalent to `inv(inv(K) + Diagonal(W½).^2)`.
See equations (3.26) and (3.27) in the book "Gaussian Processes for Machine Learning" by R&W.
"""
function woodbury_327(;K = K, W½::Diagonal = W½)

# sources:
# see equation (145) in the Matrix Cookbook, where we set A⁻¹ = K, B = I, C = W½
# see (A.9) in GPML
# The application of the Woodbury identity is recommended in GPML, see (3.26) and (3.27).

KW½ = K*W½

B = Symmetric(I + W½*KW½) # equation (3.26) in GPML.

A = Symmetric(KW½ * (B \ KW½'))

Symmetric(K - A)

end


"""
woodbury_328(;K = K, W½ = W½)
Calculates (K + W⁻¹)⁻¹ where W is diagonal and `W½.^2 == W`.
The result should be equivalent to `inv(K + inv(Diagonal(W½).^2))`.
See equations (3.26) and (3.28) in the book "Gaussian Processes for Machine Learning" by R&W.
"""
function woodbury_328(; K = K, W½::Diagonal = W½)

KW½ = K*W½

B = Symmetric(I + W½*KW½) # equation (3.26) in GPML

A = W½*(B\W½) # equation (3.28) in GPML.

Symmetric(A)

end
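A numerical check of both helpers against the naive inverses their docstrings describe (my sketch; sizes and values are illustrative):

```julia
using LinearAlgebra

N = 6
A = randn(N, N); K = Symmetric(A*A' + N*I)   # illustrative positive-definite K
W½ = Diagonal(abs.(randn(N)) .+ 0.1)         # diagonal square root of W

woodbury_327(K = K, W½ = W½) ≈ inv(inv(K) + W½^2)   # (K⁻¹ + W)⁻¹, expected to hold
woodbury_328(K = K, W½ = W½) ≈ inv(K + inv(W½^2))   # (K + W⁻¹)⁻¹, expected to hold
```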