Skip to content

Commit

Permalink
getting ready to release
Browse files Browse the repository at this point in the history
  • Loading branch information
sshin23 committed Jul 30, 2023
1 parent 8aede90 commit 3852dcb
Show file tree
Hide file tree
Showing 11 changed files with 365 additions and 95 deletions.
16 changes: 16 additions & 0 deletions .github/workflows/CompatHelper.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Daily CompatHelper run: opens PRs that bump [compat] bounds for dependencies.
# NOTE(review): indentation restored — the scraped source had it stripped,
# which is invalid YAML.
name: CompatHelper
on:
  schedule:
    - cron: '00 00 * * *'  # once a day at midnight UTC
  workflow_dispatch:
jobs:
  CompatHelper:
    runs-on: ubuntu-latest
    steps:
      - name: Pkg.add("CompatHelper")
        run: julia -e 'using Pkg; Pkg.add("CompatHelper")'
      - name: CompatHelper.main()
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          COMPATHELPER_PRIV: ${{ secrets.COMPATHELPER_PRIV }} # optional
        run: julia -e 'using CompatHelper; CompatHelper.main()'
15 changes: 15 additions & 0 deletions .github/workflows/TagBot.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# TagBot: tags releases after registry merges. Runs on issue comments
# (JuliaTagBot pings) or manual dispatch.
# NOTE(review): indentation restored — the scraped source had it stripped,
# which is invalid YAML.
name: TagBot
on:
  issue_comment:
    types:
      - created
  workflow_dispatch:
jobs:
  TagBot:
    # only react to manual runs or comments made by the JuliaTagBot account
    if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot'
    runs-on: ubuntu-latest
    steps:
      - uses: JuliaRegistries/TagBot@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          ssh: ${{ secrets.DOCUMENTER_KEY }}
25 changes: 25 additions & 0 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# CI: build the package and run its tests on a Julia/OS matrix, then upload
# coverage to Codecov.
# NOTE(review): indentation restored — the scraped source had it stripped,
# which is invalid YAML.
name: build

on: [push, pull_request]

jobs:
  test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        julia-version: ['1.9']
        julia-arch: [x64]
        os: [ubuntu-latest, macos-latest, windows-latest]

    steps:
      - uses: actions/checkout@v2
      - uses: julia-actions/setup-julia@latest
        with:
          version: ${{ matrix.julia-version }}
      - uses: julia-actions/julia-buildpkg@latest
      - uses: julia-actions/julia-runtest@latest
      - uses: julia-actions/julia-processcoverage@v1
      - uses: codecov/codecov-action@v1
        with:
          file: lcov.info
          token: ${{ secrets.CODECOV_TOKEN }}
24 changes: 13 additions & 11 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,26 +6,28 @@ version = "0.1.0"
# NOTE(review): the scraped diff flattened old and new lines together,
# leaving duplicate keys/tables (invalid TOML). Reconstructed the
# post-commit version: KernelAbstractions moved from [deps] to [weakdeps],
# entries sorted, expanded test targets.
[deps]
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"

[weakdeps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
oneAPI = "8f75cd03-7ff8-4ecb-9b8f-daf728133b1b"

[extensions]
SIMDiffCUDA = "CUDA"
SIMDiffKernelAbstractions = "KernelAbstractions"
SIMDiffOneAPI = "oneAPI"
SIMDiffSpecialFunctions = "SpecialFunctions"

[compat]
KernelAbstractions = "0.9"
julia = "1.9"

[extras]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsIpopt = "f4238b75-b362-5c4c-b852-0801c9a21d71"
SIMDiffExamples = "ff8351d9-12a3-4c2d-a61a-51dfbae68567"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Test", "NLPModels", "NLPModelsIpopt", "ADNLPModels", "KernelAbstractions", "CUDA"]
16 changes: 16 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1 +1,17 @@
# SIMDiff.jl
*An implementation of SIMD abstraction for nonlinear programs and automatic differentiation.*

| **License** | **Documentation** | **Build Status** | **Coverage** | **Citation** |
|:-----------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) | [![doc](https://img.shields.io/badge/docs-dev-blue.svg)](https://sshin23.github.io/SIMDiff.jl/dev) | [![build](https://github.com/sshin23/SIMDiff.jl/actions/workflows/test.yml/badge.svg)](https://github.com/sshin23/SIMDiff.jl/actions/workflows/test.yml) | [![codecov](https://codecov.io/gh/sshin23/SIMDiff.jl/branch/main/graph/badge.svg?token=8ViJWBWnZt)](https://codecov.io/gh/sshin23/SIMDiff.jl) | |

## Introduction
SIMDiff.jl employs what we call **SIMD abstraction for nonlinear programs** (NLPs), which allows for the **preservation of the parallelizable structure** within the model equations, facilitating **efficient, parallel derivative evaluations** on the **GPU**.

SIMDiff.jl is different from other algebraic modeling tools, such as JuMP or AMPL, in the following ways:
- **Modeling Interface**: SIMDiff.jl enforces users to specify the model equations always in the form of `Iterable`s. This allows SIMDiff.jl to preserve the SIMD-compatible structure in the model equations.
- **Performance**: SIMDiff.jl compiles (via Julia's compiler) derivative evaluation codes that are specific to each computation pattern, based on reverse-mode automatic differentiation. This makes the speed of derivative evaluation (even on the CPU) significantly faster than other existing tools.
- **Portability**: SIMDiff.jl can evaluate derivatives on GPU accelerators. The code is currently only tested for NVIDIA GPUs, but GPU code is implemented mostly based on the portable programming paradigm, KernelAbstractions.jl. In the future, we are interested in supporting Intel, AMD, and Apple GPUs.

## Bug reports and support
Please report issues and feature requests via the [GitHub issue tracker](https://github.com/sshin23/SIMDiff.jl/issues).
70 changes: 70 additions & 0 deletions test/NLPTest/NLPTest.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Integration tests: build the same NLPs via SIMDiff and ADNLPModels, solve
# both with Ipopt on every available backend, and check the results agree.
module NLPTest

using SIMDiff, Test, ADNLPModels, NLPModels, NLPModelsIpopt, KernelAbstractions, CUDA

# (model name, problem size) pairs to exercise.
const NLP_TEST_ARGUMENTS = [
    ("luksan_vlcek", 3),
    ("luksan_vlcek", 20),
]

# `nothing` = plain-array path; `CPU()` = KernelAbstractions CPU backend.
const BACKENDS = Any[
    nothing,
    CPU(),
]

# Only add the CUDA backend when a device is actually present.
if CUDA.has_cuda()
    push!(BACKENDS, CUDABackend())
end

include("utils.jl")
include("luksan.jl")

"""
    test_nlp(simdiff_model, adnlp_model, backend, args)

Construct the same problem with both model builders, solve each with Ipopt,
and check that the statuses match and the primal/dual solutions agree to
`1e-6`.
"""
function test_nlp(simdiff_model, adnlp_model, backend, args)
    m1 = WrapperNLPModel(simdiff_model(backend, args...))
    m2 = WrapperNLPModel(adnlp_model(backend, args...))

    result1 = ipopt(m1; print_level = 0)
    result2 = ipopt(m2; print_level = 0)

    @test result1.status == result2.status

    for field in [:solution, :multipliers, :multipliers_L, :multipliers_U]
        # BUG FIX: original read `@test a b atol=1e-6`, which is not a valid
        # `@test` expression — the `≈` comparison was missing.
        @test getfield(result1, field) ≈ getfield(result2, field) atol = 1e-6
    end
end

"""
    runtests()

Run every (model, size) combination on every backend in `BACKENDS`.
"""
function runtests()
    @testset "NLP tests" begin
        for (name, args) in NLP_TEST_ARGUMENTS
            for backend in BACKENDS
                simdiff_model = getfield(@__MODULE__, Symbol(name * "_simdiff_model"))
                adnlp_model = getfield(@__MODULE__, Symbol(name * "_adnlp_model"))

                @testset "$name $args $backend" begin
                    test_nlp(simdiff_model, adnlp_model, backend, args)
                end
            end
        end
    end
end

end # NLPTest

38 changes: 38 additions & 0 deletions test/NLPTest/luksan.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
"""
    luksan_vlcek_obj(x, i)

Return the `i`-th term of the Luksan–Vlcek chained-Rosenbrock-style
objective, `100 (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2`.
"""
function luksan_vlcek_obj(x, i)
    residual = x[i-1]^2 - x[i]
    return 100 * residual^2 + (x[i-1] - 1)^2
end

"""
    luksan_vlcek_con(x, i)

Return the `i`-th Luksan–Vlcek equality-constraint residual (zero at a
feasible point).
"""
function luksan_vlcek_con(x, i)
    # BUG FIX: explicit `*` restored in `sin(...) * sin(...)` and
    # `x[i] * exp(...)` — juxtaposing two non-literal expressions is not
    # valid Julia multiplication (only numeric-literal coefficients such
    # as `3x` juxtapose), so the scraped form was a syntax error.
    return 3 * x[i+1]^3 + 2 * x[i+2] - 5 +
           sin(x[i+1] - x[i+2]) * sin(x[i+1] + x[i+2]) +
           4 * x[i+1] - x[i] * exp(x[i] - x[i+1]) - 3
end

"""
    luksan_vlcek_x0(i)

Standard Luksan–Vlcek starting point: `-1.2` at odd indices, `1.0` at even.
"""
luksan_vlcek_x0(i) = isodd(i) ? -1.2 : 1.0

"""
    luksan_vlcek_adnlp_model(backend, N)

Reference ADNLPModels formulation of the `N`-variable Luksan–Vlcek problem.
`backend` is unused in the body; it is accepted so the signature matches
`luksan_vlcek_simdiff_model`.
"""
function luksan_vlcek_adnlp_model(backend, N)
    objective = x -> sum(luksan_vlcek_obj(x, i) for i = 2:N)
    constraints = x -> [luksan_vlcek_con(x, i) for i = 1:N-2]

    x0 = [luksan_vlcek_x0(i) for i = 1:N]
    lvar = fill(-Inf, N)  # variables are unbounded
    uvar = fill(Inf, N)
    lcon = zeros(N - 2)   # equality constraints: lcon == ucon == 0
    ucon = zeros(N - 2)

    return ADNLPModel(objective, x0, lvar, uvar, constraints, lcon, ucon)
end

"""
    luksan_vlcek_simdiff_model(backend, N)

Build the `N`-variable Luksan–Vlcek problem as a SIMDiff model. Expressions
are supplied as generators (SIMDiff's iterable-based modeling interface);
`backend` is forwarded to `SIMDiff.Core`, presumably selecting where
derivative evaluation runs (`nothing`, `CPU()`, `CUDABackend()`, ...) —
TODO confirm against SIMDiff docs.
"""
function luksan_vlcek_simdiff_model(backend, N)

# expression core tied to the requested compute backend
c = SIMDiff.Core(backend)
# N decision variables with the standard alternating starting point
x = SIMDiff.variable(
c, N;
start = (luksan_vlcek_x0(i) for i=1:N)
)
# equality constraints, one generator term per i
SIMDiff.constraint(
c,
luksan_vlcek_con(x,i)
for i in 1:N-2)
# objective as a sum over generator terms i = 2..N
SIMDiff.objective(c, luksan_vlcek_obj(x,i) for i in 2:N)
return SIMDiff.Model(c)
end
167 changes: 167 additions & 0 deletions test/NLPTest/utils.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,167 @@
# Adapter type: wraps an NLP model whose vectors are of type VT (e.g. GPU
# arrays) and exposes it as a Float64 / Vector{Float64} model, which the
# host-side solver (Ipopt, via the test harness) requires.
struct WrapperNLPModel{
T, VT, VI,
I <: NLPModels.AbstractNLPModel{T,VT}
} <: NLPModels.AbstractNLPModel{Float64,Vector{Float64}}

# the wrapped model
inner::I

# staging buffers in the wrapped model's array types (VT values, VI indices);
# inputs are copied in before each evaluation and results copied back out
x_buffer:: VT
y_buffer:: VT

cons_buffer::VT
grad_buffer::VT

jac_buffer::VT
jac_I_buffer::VI
jac_J_buffer::VI

hess_buffer::VT
hess_I_buffer::VI
hess_J_buffer::VI

# host-side problem metadata (Float64 vectors)
# NOTE(review): abstractly-typed field; parametrizing it would aid inference
meta::NLPModels.AbstractNLPModelMeta{Float64,Vector{Float64}}
end

"""
    WrapperNLPModel(m)

Wrap the NLP model `m` as a host-side `Float64`/`Vector{Float64}` model:
the meta vectors are copied into plain host vectors, and scratch buffers in
`m`'s own array family are allocated for evaluation calls.
"""
function WrapperNLPModel(m)
    nvar = get_nvar(m)
    ncon = get_ncon(m)
    nnzj = get_nnzj(m)
    nnzh = get_nnzh(m)

    # Host copies of variable data (`copyto!` handles device-to-host).
    x0 = Vector{Float64}(undef, nvar)
    lvar = Vector{Float64}(undef, nvar)
    uvar = Vector{Float64}(undef, nvar)
    copyto!(x0, m.meta.x0)
    copyto!(lvar, m.meta.lvar)
    copyto!(uvar, m.meta.uvar)

    # Host copies of constraint data.
    y0 = Vector{Float64}(undef, ncon)
    lcon = Vector{Float64}(undef, ncon)
    ucon = Vector{Float64}(undef, ncon)
    copyto!(y0, m.meta.y0)
    copyto!(lcon, m.meta.lcon)
    copyto!(ucon, m.meta.ucon)

    # Scratch buffers allocated like the model's own x0 (same array family).
    proto = get_x0(m)
    x_buffer = similar(proto, nvar)
    y_buffer = similar(proto, ncon)
    cons_buffer = similar(proto, ncon)
    grad_buffer = similar(proto, nvar)
    jac_buffer = similar(proto, nnzj)
    jac_I_buffer = similar(proto, Int, nnzj)
    jac_J_buffer = similar(proto, Int, nnzj)
    hess_buffer = similar(proto, nnzh)
    hess_I_buffer = similar(proto, Int, nnzh)
    hess_J_buffer = similar(proto, Int, nnzh)

    meta = NLPModelMeta(
        nvar,
        x0 = x0,
        lvar = lvar,
        uvar = uvar,
        ncon = ncon,
        y0 = y0,
        lcon = lcon,
        ucon = ucon,
        nnzj = nnzj,
        nnzh = nnzh,
        minimize = m.meta.minimize
    )

    return WrapperNLPModel(
        m,
        x_buffer,
        y_buffer,
        cons_buffer,
        grad_buffer,
        jac_buffer,
        jac_I_buffer,
        jac_J_buffer,
        hess_buffer,
        hess_I_buffer,
        hess_J_buffer,
        meta
    )
end

# Jacobian sparsity pattern: compute into the wrapper's index buffers on the
# inner model, then copy into the caller-provided host vectors.
function NLPModels.jac_structure!(
    wrapper::W,
    rows::A,
    cols::A
) where {W <: WrapperNLPModel, A <: AbstractVector}
    jac_structure!(wrapper.inner, wrapper.jac_I_buffer, wrapper.jac_J_buffer)
    copyto!(rows, wrapper.jac_I_buffer)
    # last expression, so `cols` is returned like the original
    copyto!(cols, wrapper.jac_J_buffer)
end

# Hessian sparsity pattern: compute into the wrapper's index buffers on the
# inner model, then copy into the caller-provided host vectors.
function NLPModels.hess_structure!(
    wrapper::W,
    rows::A,
    cols::A
) where {W <: WrapperNLPModel, A <: AbstractVector}
    hess_structure!(wrapper.inner, wrapper.hess_I_buffer, wrapper.hess_J_buffer)
    copyto!(rows, wrapper.hess_I_buffer)
    # last expression, so `cols` is returned like the original
    copyto!(cols, wrapper.hess_J_buffer)
end

# Objective value: stage `x` into the wrapped model's buffer, evaluate there.
function NLPModels.obj(
    wrapper::W,
    x::A
) where {W <: WrapperNLPModel, A <: AbstractVector}
    copyto!(wrapper.x_buffer, x)
    return NLPModels.obj(wrapper.inner, wrapper.x_buffer)
end
# Constraint values: stage `x`, evaluate into the wrapper's buffer, then
# copy the result back into the caller-provided vector `g`.
function NLPModels.cons!(
    wrapper::W,
    x::A,
    g::A
) where {W <: WrapperNLPModel, A <: AbstractVector}
    copyto!(wrapper.x_buffer, x)
    NLPModels.cons!(wrapper.inner, wrapper.x_buffer, wrapper.cons_buffer)
    copyto!(g, wrapper.cons_buffer)
    return
end
# Objective gradient: stage `x`, evaluate into the wrapper's buffer, then
# copy the result back into the caller-provided vector `f`.
function NLPModels.grad!(
    wrapper::W,
    x::A,
    f::A
) where {W <: WrapperNLPModel, A <: AbstractVector}
    copyto!(wrapper.x_buffer, x)
    NLPModels.grad!(wrapper.inner, wrapper.x_buffer, wrapper.grad_buffer)
    copyto!(f, wrapper.grad_buffer)
    return
end
# Jacobian nonzeros at `x`: stage the point, evaluate into the wrapper's
# buffer, then copy back into `jac`.
function NLPModels.jac_coord!(
    wrapper::W,
    x::A,
    jac::A
) where {W <: WrapperNLPModel, A <: AbstractVector}
    copyto!(wrapper.x_buffer, x)
    NLPModels.jac_coord!(wrapper.inner, wrapper.x_buffer, wrapper.jac_buffer)
    copyto!(jac, wrapper.jac_buffer)
    return
end
# Hessian-of-the-Lagrangian nonzeros at (x, y): stage both vectors on the
# wrapped model's buffers, evaluate there, copy back into `hess`.
function NLPModels.hess_coord!(
    m::M,
    x::V,
    y::V,
    hess::V;
    obj_weight = one(eltype(x))
) where {M <: WrapperNLPModel, V <: AbstractVector}

    copyto!(m.x_buffer, x)
    # BUG FIX: the multipliers belong in y_buffer. The original copied `y`
    # into m.x_buffer, clobbering the primal point and leaving y_buffer
    # uninitialized (and erroring whenever ncon != nvar).
    copyto!(m.y_buffer, y)
    NLPModels.hess_coord!(
        m.inner, m.x_buffer, m.y_buffer, m.hess_buffer;
        obj_weight = obj_weight
    )
    copyto!(hess, m.hess_buffer)

    return
end
Loading

0 comments on commit 3852dcb

Please sign in to comment.